1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
64 			       u16 *allocated_size, bool is_alloc);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static struct hnae3_ae_algo ae_algo;
72 
73 static const struct pci_device_id ae_algo_pci_tbl[] = {
74 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
75 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
76 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
77 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
81 	/* required last entry */
82 	{0, }
83 };
84 
85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
86 
87 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
88 					 HCLGE_CMDQ_TX_ADDR_H_REG,
89 					 HCLGE_CMDQ_TX_DEPTH_REG,
90 					 HCLGE_CMDQ_TX_TAIL_REG,
91 					 HCLGE_CMDQ_TX_HEAD_REG,
92 					 HCLGE_CMDQ_RX_ADDR_L_REG,
93 					 HCLGE_CMDQ_RX_ADDR_H_REG,
94 					 HCLGE_CMDQ_RX_DEPTH_REG,
95 					 HCLGE_CMDQ_RX_TAIL_REG,
96 					 HCLGE_CMDQ_RX_HEAD_REG,
97 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
98 					 HCLGE_CMDQ_INTR_STS_REG,
99 					 HCLGE_CMDQ_INTR_EN_REG,
100 					 HCLGE_CMDQ_INTR_GEN_REG};
101 
102 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
103 					   HCLGE_VECTOR0_OTER_EN_REG,
104 					   HCLGE_MISC_RESET_STS_REG,
105 					   HCLGE_MISC_VECTOR_INT_STS,
106 					   HCLGE_GLOBAL_RESET_REG,
107 					   HCLGE_FUN_RST_ING,
108 					   HCLGE_GRO_EN_REG};
109 
110 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
111 					 HCLGE_RING_RX_ADDR_H_REG,
112 					 HCLGE_RING_RX_BD_NUM_REG,
113 					 HCLGE_RING_RX_BD_LENGTH_REG,
114 					 HCLGE_RING_RX_MERGE_EN_REG,
115 					 HCLGE_RING_RX_TAIL_REG,
116 					 HCLGE_RING_RX_HEAD_REG,
117 					 HCLGE_RING_RX_FBD_NUM_REG,
118 					 HCLGE_RING_RX_OFFSET_REG,
119 					 HCLGE_RING_RX_FBD_OFFSET_REG,
120 					 HCLGE_RING_RX_STASH_REG,
121 					 HCLGE_RING_RX_BD_ERR_REG,
122 					 HCLGE_RING_TX_ADDR_L_REG,
123 					 HCLGE_RING_TX_ADDR_H_REG,
124 					 HCLGE_RING_TX_BD_NUM_REG,
125 					 HCLGE_RING_TX_PRIORITY_REG,
126 					 HCLGE_RING_TX_TC_REG,
127 					 HCLGE_RING_TX_MERGE_EN_REG,
128 					 HCLGE_RING_TX_TAIL_REG,
129 					 HCLGE_RING_TX_HEAD_REG,
130 					 HCLGE_RING_TX_FBD_NUM_REG,
131 					 HCLGE_RING_TX_OFFSET_REG,
132 					 HCLGE_RING_TX_EBD_NUM_REG,
133 					 HCLGE_RING_TX_EBD_OFFSET_REG,
134 					 HCLGE_RING_TX_BD_ERR_REG,
135 					 HCLGE_RING_EN_REG};
136 
137 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
138 					     HCLGE_TQP_INTR_GL0_REG,
139 					     HCLGE_TQP_INTR_GL1_REG,
140 					     HCLGE_TQP_INTR_GL2_REG,
141 					     HCLGE_TQP_INTR_RL_REG};
142 
143 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
144 	"App    Loopback test",
145 	"Serdes serial Loopback test",
146 	"Serdes parallel Loopback test",
147 	"Phy    Loopback test"
148 };
149 
150 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
151 	{"mac_tx_mac_pause_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
153 	{"mac_rx_mac_pause_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
155 	{"mac_tx_control_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
157 	{"mac_rx_control_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
159 	{"mac_tx_pfc_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
161 	{"mac_tx_pfc_pri0_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
163 	{"mac_tx_pfc_pri1_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
165 	{"mac_tx_pfc_pri2_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
167 	{"mac_tx_pfc_pri3_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
169 	{"mac_tx_pfc_pri4_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
171 	{"mac_tx_pfc_pri5_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
173 	{"mac_tx_pfc_pri6_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
175 	{"mac_tx_pfc_pri7_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
177 	{"mac_rx_pfc_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
179 	{"mac_rx_pfc_pri0_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
181 	{"mac_rx_pfc_pri1_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
183 	{"mac_rx_pfc_pri2_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
185 	{"mac_rx_pfc_pri3_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
187 	{"mac_rx_pfc_pri4_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
189 	{"mac_rx_pfc_pri5_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
191 	{"mac_rx_pfc_pri6_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
193 	{"mac_rx_pfc_pri7_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
195 	{"mac_tx_total_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
197 	{"mac_tx_total_oct_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
199 	{"mac_tx_good_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
201 	{"mac_tx_bad_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
203 	{"mac_tx_good_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
205 	{"mac_tx_bad_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
207 	{"mac_tx_uni_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
209 	{"mac_tx_multi_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
211 	{"mac_tx_broad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
213 	{"mac_tx_undersize_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
215 	{"mac_tx_oversize_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
217 	{"mac_tx_64_oct_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
219 	{"mac_tx_65_127_oct_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
221 	{"mac_tx_128_255_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
223 	{"mac_tx_256_511_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
225 	{"mac_tx_512_1023_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
227 	{"mac_tx_1024_1518_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
229 	{"mac_tx_1519_2047_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
231 	{"mac_tx_2048_4095_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
233 	{"mac_tx_4096_8191_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
235 	{"mac_tx_8192_9216_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
237 	{"mac_tx_9217_12287_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
239 	{"mac_tx_12288_16383_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
241 	{"mac_tx_1519_max_good_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
243 	{"mac_tx_1519_max_bad_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
245 	{"mac_rx_total_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
247 	{"mac_rx_total_oct_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
249 	{"mac_rx_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
251 	{"mac_rx_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
253 	{"mac_rx_good_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
255 	{"mac_rx_bad_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
257 	{"mac_rx_uni_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
259 	{"mac_rx_multi_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
261 	{"mac_rx_broad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
263 	{"mac_rx_undersize_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
265 	{"mac_rx_oversize_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
267 	{"mac_rx_64_oct_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
269 	{"mac_rx_65_127_oct_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
271 	{"mac_rx_128_255_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
273 	{"mac_rx_256_511_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
275 	{"mac_rx_512_1023_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
277 	{"mac_rx_1024_1518_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
279 	{"mac_rx_1519_2047_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
281 	{"mac_rx_2048_4095_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
283 	{"mac_rx_4096_8191_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
285 	{"mac_rx_8192_9216_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
287 	{"mac_rx_9217_12287_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
289 	{"mac_rx_12288_16383_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
291 	{"mac_rx_1519_max_good_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
293 	{"mac_rx_1519_max_bad_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
295 
296 	{"mac_tx_fragment_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
298 	{"mac_tx_undermin_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
300 	{"mac_tx_jabber_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
302 	{"mac_tx_err_all_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
304 	{"mac_tx_from_app_good_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
306 	{"mac_tx_from_app_bad_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
308 	{"mac_rx_fragment_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
310 	{"mac_rx_undermin_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
312 	{"mac_rx_jabber_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
314 	{"mac_rx_fcs_err_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
316 	{"mac_rx_send_app_good_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
318 	{"mac_rx_send_app_bad_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
320 };
321 
322 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
323 	{
324 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
325 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
326 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
327 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
328 		.i_port_bitmap = 0x1,
329 	},
330 };
331 
332 static const u8 hclge_hash_key[] = {
333 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
334 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
335 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
336 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
337 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
338 };
339 
340 static const u32 hclge_dfx_bd_offset_list[] = {
341 	HCLGE_DFX_BIOS_BD_OFFSET,
342 	HCLGE_DFX_SSU_0_BD_OFFSET,
343 	HCLGE_DFX_SSU_1_BD_OFFSET,
344 	HCLGE_DFX_IGU_BD_OFFSET,
345 	HCLGE_DFX_RPU_0_BD_OFFSET,
346 	HCLGE_DFX_RPU_1_BD_OFFSET,
347 	HCLGE_DFX_NCSI_BD_OFFSET,
348 	HCLGE_DFX_RTC_BD_OFFSET,
349 	HCLGE_DFX_PPP_BD_OFFSET,
350 	HCLGE_DFX_RCB_BD_OFFSET,
351 	HCLGE_DFX_TQP_BD_OFFSET,
352 	HCLGE_DFX_SSU_2_BD_OFFSET
353 };
354 
355 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
356 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
357 	HCLGE_OPC_DFX_SSU_REG_0,
358 	HCLGE_OPC_DFX_SSU_REG_1,
359 	HCLGE_OPC_DFX_IGU_EGU_REG,
360 	HCLGE_OPC_DFX_RPU_REG_0,
361 	HCLGE_OPC_DFX_RPU_REG_1,
362 	HCLGE_OPC_DFX_NCSI_REG,
363 	HCLGE_OPC_DFX_RTC_REG,
364 	HCLGE_OPC_DFX_PPP_REG,
365 	HCLGE_OPC_DFX_RCB_REG,
366 	HCLGE_OPC_DFX_TQP_REG,
367 	HCLGE_OPC_DFX_SSU_REG_2
368 };
369 
370 static const struct key_info meta_data_key_info[] = {
371 	{ PACKET_TYPE_ID, 6},
372 	{ IP_FRAGEMENT, 1},
373 	{ ROCE_TYPE, 1},
374 	{ NEXT_KEY, 5},
375 	{ VLAN_NUMBER, 2},
376 	{ SRC_VPORT, 12},
377 	{ DST_VPORT, 12},
378 	{ TUNNEL_PACKET, 1},
379 };
380 
381 static const struct key_info tuple_key_info[] = {
382 	{ OUTER_DST_MAC, 48},
383 	{ OUTER_SRC_MAC, 48},
384 	{ OUTER_VLAN_TAG_FST, 16},
385 	{ OUTER_VLAN_TAG_SEC, 16},
386 	{ OUTER_ETH_TYPE, 16},
387 	{ OUTER_L2_RSV, 16},
388 	{ OUTER_IP_TOS, 8},
389 	{ OUTER_IP_PROTO, 8},
390 	{ OUTER_SRC_IP, 32},
391 	{ OUTER_DST_IP, 32},
392 	{ OUTER_L3_RSV, 16},
393 	{ OUTER_SRC_PORT, 16},
394 	{ OUTER_DST_PORT, 16},
395 	{ OUTER_L4_RSV, 32},
396 	{ OUTER_TUN_VNI, 24},
397 	{ OUTER_TUN_FLOW_ID, 8},
398 	{ INNER_DST_MAC, 48},
399 	{ INNER_SRC_MAC, 48},
400 	{ INNER_VLAN_TAG_FST, 16},
401 	{ INNER_VLAN_TAG_SEC, 16},
402 	{ INNER_ETH_TYPE, 16},
403 	{ INNER_L2_RSV, 16},
404 	{ INNER_IP_TOS, 8},
405 	{ INNER_IP_PROTO, 8},
406 	{ INNER_SRC_IP, 32},
407 	{ INNER_DST_IP, 32},
408 	{ INNER_L3_RSV, 16},
409 	{ INNER_SRC_PORT, 16},
410 	{ INNER_DST_PORT, 16},
411 	{ INNER_L4_RSV, 32},
412 };
413 
414 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
415 {
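/* number of descriptors used by the HCLGE_OPC_STATS_MAC command to read
 * the MAC statistics
 */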
416 #define HCLGE_MAC_CMD_NUM 21
417 
418 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
419 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
420 	__le64 *desc_data;
421 	int i, k, n;
422 	int ret;
423 
424 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
425 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
426 	if (ret) {
427 		dev_err(&hdev->pdev->dev,
428 			"Get MAC pkt stats fail, status = %d.\n", ret);
429 
430 		return ret;
431 	}
432 
433 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0x0032, only the first desc has the head */
435 		if (unlikely(i == 0)) {
436 			desc_data = (__le64 *)(&desc[i].data[0]);
437 			n = HCLGE_RD_FIRST_STATS_NUM;
438 		} else {
439 			desc_data = (__le64 *)(&desc[i]);
440 			n = HCLGE_RD_OTHER_STATS_NUM;
441 		}
442 
443 		for (k = 0; k < n; k++) {
444 			*data += le64_to_cpu(*desc_data);
445 			data++;
446 			desc_data++;
447 		}
448 	}
449 
450 	return 0;
451 }
452 
453 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
454 {
455 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
456 	struct hclge_desc *desc;
457 	__le64 *desc_data;
458 	u16 i, k, n;
459 	int ret;
460 
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
464 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
465 	if (!desc)
466 		return -ENOMEM;
467 
468 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
469 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
470 	if (ret) {
471 		kfree(desc);
472 		return ret;
473 	}
474 
475 	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0x0034, only the first desc has the head */
477 		if (i == 0) {
478 			desc_data = (__le64 *)(&desc[i].data[0]);
479 			n = HCLGE_RD_FIRST_STATS_NUM;
480 		} else {
481 			desc_data = (__le64 *)(&desc[i]);
482 			n = HCLGE_RD_OTHER_STATS_NUM;
483 		}
484 
485 		for (k = 0; k < n; k++) {
486 			*data += le64_to_cpu(*desc_data);
487 			data++;
488 			desc_data++;
489 		}
490 	}
491 
492 	kfree(desc);
493 
494 	return 0;
495 }
496 
497 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
498 {
499 	struct hclge_desc desc;
500 	__le32 *desc_data;
501 	u32 reg_num;
502 	int ret;
503 
504 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
505 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
506 	if (ret)
507 		return ret;
508 
509 	desc_data = (__le32 *)(&desc.data[0]);
510 	reg_num = le32_to_cpu(*desc_data);
511 
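	/* one descriptor is always needed, and each extra descriptor holds
	 * four more registers, so round the remaining (reg_num - 3)
	 * registers up to a multiple of four
	 */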
512 	*desc_num = 1 + ((reg_num - 3) >> 2) +
513 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
514 
515 	return 0;
516 }
517 
518 static int hclge_mac_update_stats(struct hclge_dev *hdev)
519 {
520 	u32 desc_num;
521 	int ret;
522 
523 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
524 
525 	/* The firmware supports the new statistics acquisition method */
526 	if (!ret)
527 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
528 	else if (ret == -EOPNOTSUPP)
529 		ret = hclge_mac_update_stats_defective(hdev);
530 	else
531 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
532 
533 	return ret;
534 }
535 
536 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
537 {
538 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
539 	struct hclge_vport *vport = hclge_get_vport(handle);
540 	struct hclge_dev *hdev = vport->back;
541 	struct hnae3_queue *queue;
542 	struct hclge_desc desc[1];
543 	struct hclge_tqp *tqp;
544 	int ret, i;
545 
546 	for (i = 0; i < kinfo->num_tqps; i++) {
547 		queue = handle->kinfo.tqp[i];
548 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
550 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
551 					   true);
552 
553 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
554 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
555 		if (ret) {
556 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
558 				ret, i);
559 			return ret;
560 		}
561 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
562 			le32_to_cpu(desc[0].data[1]);
563 	}
564 
565 	for (i = 0; i < kinfo->num_tqps; i++) {
566 		queue = handle->kinfo.tqp[i];
567 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
569 		hclge_cmd_setup_basic_desc(&desc[0],
570 					   HCLGE_OPC_QUERY_TX_STATUS,
571 					   true);
572 
573 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
574 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
575 		if (ret) {
576 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
578 				ret, i);
579 			return ret;
580 		}
581 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
582 			le32_to_cpu(desc[0].data[1]);
583 	}
584 
585 	return 0;
586 }
587 
588 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
589 {
590 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
591 	struct hclge_tqp *tqp;
592 	u64 *buff = data;
593 	int i;
594 
595 	for (i = 0; i < kinfo->num_tqps; i++) {
596 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
597 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
598 	}
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
603 	}
604 
605 	return buff;
606 }
607 
608 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
609 {
610 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
611 
	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
614 }
615 
616 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
617 {
618 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
619 	u8 *buff = data;
620 	int i = 0;
621 
622 	for (i = 0; i < kinfo->num_tqps; i++) {
623 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
624 			struct hclge_tqp, q);
625 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
626 			 tqp->index);
627 		buff = buff + ETH_GSTRING_LEN;
628 	}
629 
630 	for (i = 0; i < kinfo->num_tqps; i++) {
631 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
632 			struct hclge_tqp, q);
633 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
634 			 tqp->index);
635 		buff = buff + ETH_GSTRING_LEN;
636 	}
637 
638 	return buff;
639 }
640 
641 static u64 *hclge_comm_get_stats(const void *comm_stats,
642 				 const struct hclge_comm_stats_str strs[],
643 				 int size, u64 *data)
644 {
645 	u64 *buf = data;
646 	u32 i;
647 
648 	for (i = 0; i < size; i++)
649 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
650 
651 	return buf + size;
652 }
653 
654 static u8 *hclge_comm_get_strings(u32 stringset,
655 				  const struct hclge_comm_stats_str strs[],
656 				  int size, u8 *data)
657 {
658 	char *buff = (char *)data;
659 	u32 i;
660 
661 	if (stringset != ETH_SS_STATS)
662 		return buff;
663 
664 	for (i = 0; i < size; i++) {
665 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return (u8 *)buff;
670 }
671 
672 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
673 {
674 	struct hnae3_handle *handle;
675 	int status;
676 
677 	handle = &hdev->vport[0].nic;
678 	if (handle->client) {
679 		status = hclge_tqps_update_stats(handle);
680 		if (status) {
681 			dev_err(&hdev->pdev->dev,
682 				"Update TQPS stats fail, status = %d.\n",
683 				status);
684 		}
685 	}
686 
687 	status = hclge_mac_update_stats(hdev);
688 	if (status)
689 		dev_err(&hdev->pdev->dev,
690 			"Update MAC stats fail, status = %d.\n", status);
691 }
692 
693 static void hclge_update_stats(struct hnae3_handle *handle,
694 			       struct net_device_stats *net_stats)
695 {
696 	struct hclge_vport *vport = hclge_get_vport(handle);
697 	struct hclge_dev *hdev = vport->back;
698 	int status;
699 
700 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
701 		return;
702 
703 	status = hclge_mac_update_stats(hdev);
704 	if (status)
705 		dev_err(&hdev->pdev->dev,
706 			"Update MAC stats fail, status = %d.\n",
707 			status);
708 
709 	status = hclge_tqps_update_stats(handle);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update TQPS stats fail, status = %d.\n",
713 			status);
714 
715 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
716 }
717 
718 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
719 {
720 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
721 		HNAE3_SUPPORT_PHY_LOOPBACK |\
722 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
723 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
724 
725 	struct hclge_vport *vport = hclge_get_vport(handle);
726 	struct hclge_dev *hdev = vport->back;
727 	int count = 0;
728 
	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
734 	if (stringset == ETH_SS_TEST) {
735 		/* clear loopback bit flags at first */
736 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
737 		if (hdev->pdev->revision >= 0x21 ||
738 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
739 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
740 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
741 			count += 1;
742 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
743 		}
744 
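		/* the two serdes loopback tests (serial and parallel) are
		 * always reported
		 */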
745 		count += 2;
746 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
747 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
748 
749 		if (hdev->hw.mac.phydev) {
750 			count += 1;
751 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
752 		}
753 
754 	} else if (stringset == ETH_SS_STATS) {
755 		count = ARRAY_SIZE(g_mac_stats_string) +
756 			hclge_tqps_get_sset_count(handle, stringset);
757 	}
758 
759 	return count;
760 }
761 
762 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
763 			      u8 *data)
764 {
	u8 *p = data;
766 	int size;
767 
768 	if (stringset == ETH_SS_STATS) {
769 		size = ARRAY_SIZE(g_mac_stats_string);
770 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
771 					   size, p);
772 		p = hclge_tqps_get_strings(handle, p);
773 	} else if (stringset == ETH_SS_TEST) {
774 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
775 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
776 			       ETH_GSTRING_LEN);
777 			p += ETH_GSTRING_LEN;
778 		}
779 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
785 			memcpy(p,
786 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
791 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 	}
796 }
797 
798 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
799 {
800 	struct hclge_vport *vport = hclge_get_vport(handle);
801 	struct hclge_dev *hdev = vport->back;
802 	u64 *p;
803 
804 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
805 				 ARRAY_SIZE(g_mac_stats_string), data);
806 	p = hclge_tqps_get_stats(handle, p);
807 }
808 
809 static void hclge_get_mac_stat(struct hnae3_handle *handle,
810 			       struct hns3_mac_stats *mac_stats)
811 {
812 	struct hclge_vport *vport = hclge_get_vport(handle);
813 	struct hclge_dev *hdev = vport->back;
814 
815 	hclge_update_stats(handle, NULL);
816 
817 	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
818 	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
819 }
820 
821 static int hclge_parse_func_status(struct hclge_dev *hdev,
822 				   struct hclge_func_status_cmd *status)
823 {
824 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
825 		return -EINVAL;
826 
	/* mark whether this pf is the main pf */
828 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
829 		hdev->flag |= HCLGE_FLAG_MAIN;
830 	else
831 		hdev->flag &= ~HCLGE_FLAG_MAIN;
832 
833 	return 0;
834 }
835 
836 static int hclge_query_function_status(struct hclge_dev *hdev)
837 {
838 #define HCLGE_QUERY_MAX_CNT	5
839 
840 	struct hclge_func_status_cmd *req;
841 	struct hclge_desc desc;
842 	int timeout = 0;
843 	int ret;
844 
845 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
846 	req = (struct hclge_func_status_cmd *)desc.data;
847 
848 	do {
849 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 		if (ret) {
851 			dev_err(&hdev->pdev->dev,
852 				"query function status failed %d.\n", ret);
853 			return ret;
854 		}
855 
		/* check if pf reset is done */
857 		if (req->pf_state)
858 			break;
859 		usleep_range(1000, 2000);
860 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
861 
862 	ret = hclge_parse_func_status(hdev, req);
863 
864 	return ret;
865 }
866 
867 static int hclge_query_pf_resource(struct hclge_dev *hdev)
868 {
869 	struct hclge_pf_res_cmd *req;
870 	struct hclge_desc desc;
871 	int ret;
872 
873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
874 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
875 	if (ret) {
876 		dev_err(&hdev->pdev->dev,
877 			"query pf resource failed %d.\n", ret);
878 		return ret;
879 	}
880 
881 	req = (struct hclge_pf_res_cmd *)desc.data;
882 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
883 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
884 
885 	if (req->tx_buf_size)
886 		hdev->tx_buf_size =
887 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
888 	else
889 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
890 
891 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
892 
893 	if (req->dv_buf_size)
894 		hdev->dv_buf_size =
895 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
898 
899 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (hnae3_dev_roce_supported(hdev)) {
902 		hdev->roce_base_msix_offset =
903 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
904 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
905 		hdev->num_roce_msi =
906 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
907 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
908 
		/* the nic's msix number is always equal to the roce's. */
910 		hdev->num_nic_msi = hdev->num_roce_msi;
911 
		/* PF should have NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
		 */
915 		hdev->num_msi = hdev->num_roce_msi +
916 				hdev->roce_base_msix_offset;
917 	} else {
918 		hdev->num_msi =
919 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
920 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
921 
922 		hdev->num_nic_msi = hdev->num_msi;
923 	}
924 
925 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
926 		dev_err(&hdev->pdev->dev,
			"Just %u msi resources, not enough for pf (min: 2).\n",
928 			hdev->num_nic_msi);
929 		return -EINVAL;
930 	}
931 
932 	return 0;
933 }
934 
935 static int hclge_parse_speed(int speed_cmd, int *speed)
936 {
937 	switch (speed_cmd) {
938 	case 6:
939 		*speed = HCLGE_MAC_SPEED_10M;
940 		break;
941 	case 7:
942 		*speed = HCLGE_MAC_SPEED_100M;
943 		break;
944 	case 0:
945 		*speed = HCLGE_MAC_SPEED_1G;
946 		break;
947 	case 1:
948 		*speed = HCLGE_MAC_SPEED_10G;
949 		break;
950 	case 2:
951 		*speed = HCLGE_MAC_SPEED_25G;
952 		break;
953 	case 3:
954 		*speed = HCLGE_MAC_SPEED_40G;
955 		break;
956 	case 4:
957 		*speed = HCLGE_MAC_SPEED_50G;
958 		break;
959 	case 5:
960 		*speed = HCLGE_MAC_SPEED_100G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	default:
1002 		return -EINVAL;
1003 	}
1004 
1005 	if (speed_bit & speed_ability)
1006 		return 0;
1007 
1008 	return -EINVAL;
1009 }
1010 
1011 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1012 {
1013 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1014 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1015 				 mac->supported);
1016 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 }
1029 
1030 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1031 {
1032 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1033 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1034 				 mac->supported);
1035 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1036 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1037 				 mac->supported);
1038 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1046 				 mac->supported);
1047 }
1048 
1049 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1050 {
1051 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 }
1067 
1068 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1069 {
1070 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 }
1089 
1090 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1091 {
1092 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1093 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1094 
1095 	switch (mac->speed) {
1096 	case HCLGE_MAC_SPEED_10G:
1097 	case HCLGE_MAC_SPEED_40G:
1098 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1099 				 mac->supported);
1100 		mac->fec_ability =
1101 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1102 		break;
1103 	case HCLGE_MAC_SPEED_25G:
1104 	case HCLGE_MAC_SPEED_50G:
1105 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1106 				 mac->supported);
1107 		mac->fec_ability =
1108 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1109 			BIT(HNAE3_FEC_AUTO);
1110 		break;
1111 	case HCLGE_MAC_SPEED_100G:
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1113 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	default:
1116 		mac->fec_ability = 0;
1117 		break;
1118 	}
1119 }
1120 
1121 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1122 					u8 speed_ability)
1123 {
1124 	struct hclge_mac *mac = &hdev->hw.mac;
1125 
1126 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1127 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1128 				 mac->supported);
1129 
1130 	hclge_convert_setting_sr(mac, speed_ability);
1131 	hclge_convert_setting_lr(mac, speed_ability);
1132 	hclge_convert_setting_cr(mac, speed_ability);
1133 	if (hdev->pdev->revision >= 0x21)
1134 		hclge_convert_setting_fec(mac);
1135 
1136 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1137 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1138 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1139 }
1140 
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1142 					    u8 speed_ability)
1143 {
1144 	struct hclge_mac *mac = &hdev->hw.mac;
1145 
1146 	hclge_convert_setting_kr(mac, speed_ability);
1147 	if (hdev->pdev->revision >= 0x21)
1148 		hclge_convert_setting_fec(mac);
1149 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1150 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1151 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1152 }
1153 
1154 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1155 					 u8 speed_ability)
1156 {
1157 	unsigned long *supported = hdev->hw.mac.supported;
1158 
	/* default to support all speeds for a GE port */
1160 	if (!speed_ability)
1161 		speed_ability = HCLGE_SUPPORT_GE;
1162 
1163 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1164 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1165 				 supported);
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1169 				 supported);
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1171 				 supported);
1172 	}
1173 
1174 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1175 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1177 	}
1178 
1179 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1180 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1181 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1183 }
1184 
1185 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1186 {
1187 	u8 media_type = hdev->hw.mac.media_type;
1188 
1189 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1190 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1191 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1192 		hclge_parse_copper_link_mode(hdev, speed_ability);
1193 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1194 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1195 }
1196 
1197 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1198 {
1199 	struct hclge_cfg_param_cmd *req;
1200 	u64 mac_addr_tmp_high;
1201 	u64 mac_addr_tmp;
1202 	unsigned int i;
1203 
1204 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1205 
1206 	/* get the configuration */
1207 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1208 					      HCLGE_CFG_VMDQ_M,
1209 					      HCLGE_CFG_VMDQ_S);
1210 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1211 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1212 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1213 					    HCLGE_CFG_TQP_DESC_N_M,
1214 					    HCLGE_CFG_TQP_DESC_N_S);
1215 
1216 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1217 					HCLGE_CFG_PHY_ADDR_M,
1218 					HCLGE_CFG_PHY_ADDR_S);
1219 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1220 					  HCLGE_CFG_MEDIA_TP_M,
1221 					  HCLGE_CFG_MEDIA_TP_S);
1222 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1223 					  HCLGE_CFG_RX_BUF_LEN_M,
1224 					  HCLGE_CFG_RX_BUF_LEN_S);
1225 	/* get mac_address */
1226 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1227 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1228 					    HCLGE_CFG_MAC_ADDR_H_M,
1229 					    HCLGE_CFG_MAC_ADDR_H_S);
1230 
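	/* place the upper 16 MAC address bits (param[3]) above the lower
	 * 32 bits (param[2]); shifting by 31 and then by 1 is equivalent
	 * to a single 32-bit shift
	 */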
1231 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1232 
1233 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1234 					     HCLGE_CFG_DEFAULT_SPEED_M,
1235 					     HCLGE_CFG_DEFAULT_SPEED_S);
1236 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1237 					    HCLGE_CFG_RSS_SIZE_M,
1238 					    HCLGE_CFG_RSS_SIZE_S);
1239 
1240 	for (i = 0; i < ETH_ALEN; i++)
1241 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1242 
1243 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1244 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1245 
1246 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 					     HCLGE_CFG_SPEED_ABILITY_M,
1248 					     HCLGE_CFG_SPEED_ABILITY_S);
1249 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1251 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1252 	if (!cfg->umv_space)
1253 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1254 }
1255 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1260 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1261 {
1262 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1263 	struct hclge_cfg_param_cmd *req;
1264 	unsigned int i;
1265 	int ret;
1266 
1267 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1268 		u32 offset = 0;
1269 
1270 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1271 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1272 					   true);
1273 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1274 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length sent to hardware is in units of 4 bytes */
1276 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1277 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1278 		req->offset = cpu_to_le32(offset);
1279 	}
1280 
1281 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1282 	if (ret) {
1283 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1284 		return ret;
1285 	}
1286 
1287 	hclge_parse_cfg(hcfg, desc);
1288 
1289 	return 0;
1290 }
1291 
1292 static int hclge_get_cap(struct hclge_dev *hdev)
1293 {
1294 	int ret;
1295 
1296 	ret = hclge_query_function_status(hdev);
1297 	if (ret) {
1298 		dev_err(&hdev->pdev->dev,
1299 			"query function status error %d.\n", ret);
1300 		return ret;
1301 	}
1302 
1303 	/* get pf resource */
1304 	ret = hclge_query_pf_resource(hdev);
1305 	if (ret)
1306 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1307 
1308 	return ret;
1309 }
1310 
1311 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1312 {
1313 #define HCLGE_MIN_TX_DESC	64
1314 #define HCLGE_MIN_RX_DESC	64
1315 
1316 	if (!is_kdump_kernel())
1317 		return;
1318 
1319 	dev_info(&hdev->pdev->dev,
1320 		 "Running kdump kernel. Using minimal resources\n");
1321 
	/* the minimum number of queue pairs equals the number of vports */
1323 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1324 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1325 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1326 }
1327 
1328 static int hclge_configure(struct hclge_dev *hdev)
1329 {
1330 	struct hclge_cfg cfg;
1331 	unsigned int i;
1332 	int ret;
1333 
1334 	ret = hclge_get_cfg(hdev, &cfg);
1335 	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get config %d.\n", ret);
1337 		return ret;
1338 	}
1339 
1340 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1341 	hdev->base_tqp_pid = 0;
1342 	hdev->rss_size_max = cfg.rss_size_max;
1343 	hdev->rx_buf_len = cfg.rx_buf_len;
1344 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1345 	hdev->hw.mac.media_type = cfg.media_type;
1346 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1347 	hdev->num_tx_desc = cfg.tqp_desc_num;
1348 	hdev->num_rx_desc = cfg.tqp_desc_num;
1349 	hdev->tm_info.num_pg = 1;
1350 	hdev->tc_max = cfg.tc_num;
1351 	hdev->tm_info.hw_pfc_map = 0;
1352 	hdev->wanted_umv_size = cfg.umv_space;
1353 
1354 	if (hnae3_dev_fd_supported(hdev)) {
1355 		hdev->fd_en = true;
1356 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1357 	}
1358 
1359 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1360 	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %d.\n", ret);
1362 		return ret;
1363 	}
1364 
1365 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1366 
1367 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1368 	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "invalid TC num = %d, set to 1.\n",
			 hdev->tc_max);
1371 		hdev->tc_max = 1;
1372 	}
1373 
1374 	/* Dev does not support DCB */
1375 	if (!hnae3_dev_dcb_supported(hdev)) {
1376 		hdev->tc_max = 1;
1377 		hdev->pfc_max = 0;
1378 	} else {
1379 		hdev->pfc_max = hdev->tc_max;
1380 	}
1381 
1382 	hdev->tm_info.num_tc = 1;
1383 
	/* currently, non-contiguous TCs are not supported */
1385 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1386 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1387 
1388 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1389 
1390 	hclge_init_kdump_kernel_config(hdev);
1391 
1392 	/* Set the init affinity based on pci func number */
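	/* cpumask_local_spread() returns the i-th CPU, preferring CPUs of
	 * the device's NUMA node, so different PFs spread across that node
	 */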
1393 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1394 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1395 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1396 			&hdev->affinity_mask);
1397 
1398 	return ret;
1399 }
1400 
1401 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1402 			    unsigned int tso_mss_max)
1403 {
1404 	struct hclge_cfg_tso_status_cmd *req;
1405 	struct hclge_desc desc;
1406 	u16 tso_mss;
1407 
1408 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1409 
1410 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1411 
1412 	tso_mss = 0;
1413 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1414 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1415 	req->tso_mss_min = cpu_to_le16(tso_mss);
1416 
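	/* the min field's mask/shift is reused for the max value here; both
	 * fields are assumed to share the same layout in the low bits of
	 * their own 16-bit word
	 */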
1417 	tso_mss = 0;
1418 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1419 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1420 	req->tso_mss_max = cpu_to_le16(tso_mss);
1421 
1422 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1423 }
1424 
1425 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1426 {
1427 	struct hclge_cfg_gro_status_cmd *req;
1428 	struct hclge_desc desc;
1429 	int ret;
1430 
1431 	if (!hnae3_dev_gro_supported(hdev))
1432 		return 0;
1433 
1434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1435 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1436 
1437 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1438 
1439 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1440 	if (ret)
1441 		dev_err(&hdev->pdev->dev,
1442 			"GRO hardware config cmd failed, ret = %d\n", ret);
1443 
1444 	return ret;
1445 }
1446 
1447 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1448 {
1449 	struct hclge_tqp *tqp;
1450 	int i;
1451 
1452 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1453 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1454 	if (!hdev->htqp)
1455 		return -ENOMEM;
1456 
1457 	tqp = hdev->htqp;
1458 
1459 	for (i = 0; i < hdev->num_tqps; i++) {
1460 		tqp->dev = &hdev->pdev->dev;
1461 		tqp->index = i;
1462 
1463 		tqp->q.ae_algo = &ae_algo;
1464 		tqp->q.buf_size = hdev->rx_buf_len;
1465 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1466 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1467 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1468 			i * HCLGE_TQP_REG_SIZE;
1469 
1470 		tqp++;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1477 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1478 {
1479 	struct hclge_tqp_map_cmd *req;
1480 	struct hclge_desc desc;
1481 	int ret;
1482 
1483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1484 
1485 	req = (struct hclge_tqp_map_cmd *)desc.data;
1486 	req->tqp_id = cpu_to_le16(tqp_pid);
1487 	req->tqp_vf = func_id;
1488 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1489 	if (!is_pf)
1490 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1491 	req->tqp_vid = cpu_to_le16(tqp_vid);
1492 
1493 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1494 	if (ret)
1495 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1496 
1497 	return ret;
1498 }
1499 
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1501 {
1502 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1503 	struct hclge_dev *hdev = vport->back;
1504 	int i, alloced;
1505 
1506 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1507 	     alloced < num_tqps; i++) {
1508 		if (!hdev->htqp[i].alloced) {
1509 			hdev->htqp[i].q.handle = &vport->nic;
1510 			hdev->htqp[i].q.tqp_index = alloced;
1511 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1512 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1513 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1514 			hdev->htqp[i].alloced = true;
1515 			alloced++;
1516 		}
1517 	}
1518 	vport->alloc_tqps = alloced;
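	/* rss_size is limited by both the hardware maximum and the number
	 * of TQPs allocated per TC
	 */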
1519 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1520 				vport->alloc_tqps / hdev->tm_info.num_tc);
1521 
	/* ensure a one-to-one mapping between irqs and queues by default */
1523 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1524 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1525 
1526 	return 0;
1527 }
1528 
1529 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1530 			    u16 num_tx_desc, u16 num_rx_desc)
1532 {
1533 	struct hnae3_handle *nic = &vport->nic;
1534 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1535 	struct hclge_dev *hdev = vport->back;
1536 	int ret;
1537 
1538 	kinfo->num_tx_desc = num_tx_desc;
1539 	kinfo->num_rx_desc = num_rx_desc;
1540 
1541 	kinfo->rx_buf_len = hdev->rx_buf_len;
1542 
1543 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1544 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1545 	if (!kinfo->tqp)
1546 		return -ENOMEM;
1547 
1548 	ret = hclge_assign_tqp(vport, num_tqps);
1549 	if (ret)
		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1551 
1552 	return ret;
1553 }
1554 
1555 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1556 				  struct hclge_vport *vport)
1557 {
1558 	struct hnae3_handle *nic = &vport->nic;
1559 	struct hnae3_knic_private_info *kinfo;
1560 	u16 i;
1561 
1562 	kinfo = &nic->kinfo;
1563 	for (i = 0; i < vport->alloc_tqps; i++) {
1564 		struct hclge_tqp *q =
1565 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1566 		bool is_pf;
1567 		int ret;
1568 
1569 		is_pf = !(vport->vport_id);
1570 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1571 					     i, is_pf);
1572 		if (ret)
1573 			return ret;
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 static int hclge_map_tqp(struct hclge_dev *hdev)
1580 {
1581 	struct hclge_vport *vport = hdev->vport;
1582 	u16 i, num_vport;
1583 
1584 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1585 	for (i = 0; i < num_vport; i++)	{
1586 		int ret;
1587 
1588 		ret = hclge_map_tqp_to_vport(hdev, vport);
1589 		if (ret)
1590 			return ret;
1591 
1592 		vport++;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1599 {
1600 	struct hnae3_handle *nic = &vport->nic;
1601 	struct hclge_dev *hdev = vport->back;
1602 	int ret;
1603 
1604 	nic->pdev = hdev->pdev;
1605 	nic->ae_algo = &ae_algo;
1606 	nic->numa_node_mask = hdev->numa_node_mask;
1607 
1608 	ret = hclge_knic_setup(vport, num_tqps,
1609 			       hdev->num_tx_desc, hdev->num_rx_desc);
1610 	if (ret)
1611 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1612 
1613 	return ret;
1614 }
1615 
1616 static int hclge_alloc_vport(struct hclge_dev *hdev)
1617 {
1618 	struct pci_dev *pdev = hdev->pdev;
1619 	struct hclge_vport *vport;
1620 	u32 tqp_main_vport;
1621 	u32 tqp_per_vport;
1622 	int num_vport, i;
1623 	int ret;
1624 
	/* we need to alloc a vport for the main NIC of the PF */
1626 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1627 
1628 	if (hdev->num_tqps < num_vport) {
1629 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1630 			hdev->num_tqps, num_vport);
1631 		return -EINVAL;
1632 	}
1633 
1634 	/* Alloc the same number of TQPs for every vport */
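	/* the main vport (vport 0, the PF itself) also takes any remainder */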
1635 	tqp_per_vport = hdev->num_tqps / num_vport;
1636 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1637 
1638 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1639 			     GFP_KERNEL);
1640 	if (!vport)
1641 		return -ENOMEM;
1642 
1643 	hdev->vport = vport;
1644 	hdev->num_alloc_vport = num_vport;
1645 
1646 	if (IS_ENABLED(CONFIG_PCI_IOV))
1647 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1648 
1649 	for (i = 0; i < num_vport; i++) {
1650 		vport->back = hdev;
1651 		vport->vport_id = i;
1652 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1653 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1654 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1655 		INIT_LIST_HEAD(&vport->vlan_list);
1656 		INIT_LIST_HEAD(&vport->uc_mac_list);
1657 		INIT_LIST_HEAD(&vport->mc_mac_list);
1658 
1659 		if (i == 0)
1660 			ret = hclge_vport_setup(vport, tqp_main_vport);
1661 		else
1662 			ret = hclge_vport_setup(vport, tqp_per_vport);
1663 		if (ret) {
1664 			dev_err(&pdev->dev,
1665 				"vport setup failed for vport %d, %d\n",
1666 				i, ret);
1667 			return ret;
1668 		}
1669 
1670 		vport++;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1677 				    struct hclge_pkt_buf_alloc *buf_alloc)
1678 {
/* TX buffer size is in units of 128 bytes */
1680 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1681 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1682 	struct hclge_tx_buff_alloc_cmd *req;
1683 	struct hclge_desc desc;
1684 	int ret;
1685 	u8 i;
1686 
1687 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1688 
1689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1690 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1691 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1692 
1693 		req->tx_pkt_buff[i] =
1694 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1695 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1696 	}
1697 
1698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1699 	if (ret)
1700 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1701 			ret);
1702 
1703 	return ret;
1704 }
1705 
1706 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1707 				 struct hclge_pkt_buf_alloc *buf_alloc)
1708 {
1709 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1710 
1711 	if (ret)
1712 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1713 
1714 	return ret;
1715 }
1716 
1717 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1718 {
1719 	unsigned int i;
1720 	u32 cnt = 0;
1721 
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1723 		if (hdev->hw_tc_map & BIT(i))
1724 			cnt++;
1725 	return cnt;
1726 }
1727 
1728 /* Get the number of PFC-enabled TCs that have a private buffer */
1729 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1730 				  struct hclge_pkt_buf_alloc *buf_alloc)
1731 {
1732 	struct hclge_priv_buf *priv;
1733 	unsigned int i;
1734 	int cnt = 0;
1735 
1736 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1737 		priv = &buf_alloc->priv_buf[i];
1738 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1739 		    priv->enable)
1740 			cnt++;
1741 	}
1742 
1743 	return cnt;
1744 }
1745 
1746 /* Get the number of PFC-disabled TCs that have a private buffer */
1747 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1748 				     struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 	struct hclge_priv_buf *priv;
1751 	unsigned int i;
1752 	int cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 		priv = &buf_alloc->priv_buf[i];
1756 		if (hdev->hw_tc_map & BIT(i) &&
1757 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1758 		    priv->enable)
1759 			cnt++;
1760 	}
1761 
1762 	return cnt;
1763 }
1764 
1765 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767 	struct hclge_priv_buf *priv;
1768 	u32 rx_priv = 0;
1769 	int i;
1770 
1771 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772 		priv = &buf_alloc->priv_buf[i];
1773 		if (priv->enable)
1774 			rx_priv += priv->buf_size;
1775 	}
1776 	return rx_priv;
1777 }
1778 
1779 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1780 {
1781 	u32 i, total_tx_size = 0;
1782 
1783 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1784 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1785 
1786 	return total_tx_size;
1787 }
1788 
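/* Check whether the remaining rx buffer (rx_all) can hold both the
 * allocated per-TC private buffers and the minimum shared buffer; if it
 * can, also work out the shared buffer size and its self and per-TC
 * high/low thresholds.
 */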
1789 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1790 				struct hclge_pkt_buf_alloc *buf_alloc,
1791 				u32 rx_all)
1792 {
1793 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1794 	u32 tc_num = hclge_get_tc_num(hdev);
1795 	u32 shared_buf, aligned_mps;
1796 	u32 rx_priv;
1797 	int i;
1798 
1799 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1800 
1801 	if (hnae3_dev_dcb_supported(hdev))
1802 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1803 					hdev->dv_buf_size;
1804 	else
1805 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1806 					+ hdev->dv_buf_size;
1807 
1808 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1809 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1810 			     HCLGE_BUF_SIZE_UNIT);
1811 
1812 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1813 	if (rx_all < rx_priv + shared_std)
1814 		return false;
1815 
1816 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1817 	buf_alloc->s_buf.buf_size = shared_buf;
1818 	if (hnae3_dev_dcb_supported(hdev)) {
1819 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1820 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1821 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1822 				  HCLGE_BUF_SIZE_UNIT);
1823 	} else {
1824 		buf_alloc->s_buf.self.high = aligned_mps +
1825 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1826 		buf_alloc->s_buf.self.low = aligned_mps;
1827 	}
1828 
1829 	if (hnae3_dev_dcb_supported(hdev)) {
1830 		hi_thrd = shared_buf - hdev->dv_buf_size;
1831 
1832 		if (tc_num <= NEED_RESERVE_TC_NUM)
1833 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1834 					/ BUF_MAX_PERCENT;
1835 
1836 		if (tc_num)
1837 			hi_thrd = hi_thrd / tc_num;
1838 
1839 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1840 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1841 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1842 	} else {
1843 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1844 		lo_thrd = aligned_mps;
1845 	}
1846 
1847 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1848 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1849 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1856 				struct hclge_pkt_buf_alloc *buf_alloc)
1857 {
1858 	u32 i, total_size;
1859 
1860 	total_size = hdev->pkt_buf_size;
1861 
1862 	/* alloc tx buffer for all enabled TCs */
1863 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1864 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1865 
1866 		if (hdev->hw_tc_map & BIT(i)) {
1867 			if (total_size < hdev->tx_buf_size)
1868 				return -ENOMEM;
1869 
1870 			priv->tx_buf_size = hdev->tx_buf_size;
1871 		} else {
1872 			priv->tx_buf_size = 0;
1873 		}
1874 
1875 		total_size -= priv->tx_buf_size;
1876 	}
1877 
1878 	return 0;
1879 }
1880 
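/* Assign a private rx buffer and waterlines for every enabled TC (larger
 * waterlines when @max is true), then use hclge_is_rx_buf_ok() to check
 * whether the remaining buffer still satisfies the shared buffer
 * requirement.
 */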
1881 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1882 				  struct hclge_pkt_buf_alloc *buf_alloc)
1883 {
1884 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1885 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1886 	unsigned int i;
1887 
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		priv->enable = 0;
1892 		priv->wl.low = 0;
1893 		priv->wl.high = 0;
1894 		priv->buf_size = 0;
1895 
1896 		if (!(hdev->hw_tc_map & BIT(i)))
1897 			continue;
1898 
1899 		priv->enable = 1;
1900 
1901 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1902 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1903 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1904 						HCLGE_BUF_SIZE_UNIT);
1905 		} else {
1906 			priv->wl.low = 0;
1907 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1908 					aligned_mps;
1909 		}
1910 
1911 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1912 	}
1913 
1914 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1915 }
1916 
1917 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1918 					  struct hclge_pkt_buf_alloc *buf_alloc)
1919 {
1920 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1921 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1922 	int i;
1923 
1924 	/* clear the private buffers starting from the last TC */
1925 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1926 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1927 		unsigned int mask = BIT((unsigned int)i);
1928 
1929 		if (hdev->hw_tc_map & mask &&
1930 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1931 			/* Clear the private buffer of the non-PFC TC */
1932 			priv->wl.low = 0;
1933 			priv->wl.high = 0;
1934 			priv->buf_size = 0;
1935 			priv->enable = 0;
1936 			no_pfc_priv_num--;
1937 		}
1938 
1939 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1940 		    no_pfc_priv_num == 0)
1941 			break;
1942 	}
1943 
1944 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1945 }
1946 
1947 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1948 					struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1951 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1952 	int i;
1953 
1954 	/* clear the private buffers starting from the last TC */
1955 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1956 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1957 		unsigned int mask = BIT((unsigned int)i);
1958 
1959 		if (hdev->hw_tc_map & mask &&
1960 		    hdev->tm_info.hw_pfc_map & mask) {
1961 			/* Reduce the number of PFC TCs with private buffer */
1962 			priv->wl.low = 0;
1963 			priv->enable = 0;
1964 			priv->wl.high = 0;
1965 			priv->buf_size = 0;
1966 			pfc_priv_num--;
1967 		}
1968 
1969 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1970 		    pfc_priv_num == 0)
1971 			break;
1972 	}
1973 
1974 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1975 }
1976 
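/* Try to use all of the remaining rx buffer as per-TC private buffers and
 * leave no shared buffer. Returns false when the resulting per-TC size
 * would fall below the required minimum.
 */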
1977 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1978 				      struct hclge_pkt_buf_alloc *buf_alloc)
1979 {
1980 #define COMPENSATE_BUFFER	0x3C00
1981 #define COMPENSATE_HALF_MPS_NUM	5
1982 #define PRIV_WL_GAP		0x1800
1983 
1984 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1985 	u32 tc_num = hclge_get_tc_num(hdev);
1986 	u32 half_mps = hdev->mps >> 1;
1987 	u32 min_rx_priv;
1988 	unsigned int i;
1989 
1990 	if (tc_num)
1991 		rx_priv = rx_priv / tc_num;
1992 
1993 	if (tc_num <= NEED_RESERVE_TC_NUM)
1994 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1995 
1996 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1997 			COMPENSATE_HALF_MPS_NUM * half_mps;
1998 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1999 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2000 
2001 	if (rx_priv < min_rx_priv)
2002 		return false;
2003 
2004 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2005 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2006 
2007 		priv->enable = 0;
2008 		priv->wl.low = 0;
2009 		priv->wl.high = 0;
2010 		priv->buf_size = 0;
2011 
2012 		if (!(hdev->hw_tc_map & BIT(i)))
2013 			continue;
2014 
2015 		priv->enable = 1;
2016 		priv->buf_size = rx_priv;
2017 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2018 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2019 	}
2020 
2021 	buf_alloc->s_buf.buf_size = 0;
2022 
2023 	return true;
2024 }
2025 
2026 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2027  * @hdev: pointer to struct hclge_dev
2028  * @buf_alloc: pointer to buffer calculation data
2029  * @return: 0: calculation successful, negative: fail
2030  */
2031 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2032 				struct hclge_pkt_buf_alloc *buf_alloc)
2033 {
2034 	/* When DCB is not supported, rx private buffer is not allocated. */
2035 	if (!hnae3_dev_dcb_supported(hdev)) {
2036 		u32 rx_all = hdev->pkt_buf_size;
2037 
2038 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2039 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2040 			return -ENOMEM;
2041 
2042 		return 0;
2043 	}
2044 
2045 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2046 		return 0;
2047 
2048 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2049 		return 0;
2050 
2051 	/* try to decrease the buffer size */
2052 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2053 		return 0;
2054 
2055 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2056 		return 0;
2057 
2058 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2059 		return 0;
2060 
2061 	return -ENOMEM;
2062 }
2063 
2064 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2065 				   struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067 	struct hclge_rx_priv_buff_cmd *req;
2068 	struct hclge_desc desc;
2069 	int ret;
2070 	int i;
2071 
2072 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2073 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2074 
2075 	/* Alloc private buffer for every TC */
2076 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2077 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2078 
2079 		req->buf_num[i] =
2080 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2081 		req->buf_num[i] |=
2082 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2083 	}
2084 
2085 	req->shared_buf =
2086 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2087 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2088 
2089 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2090 	if (ret)
2091 		dev_err(&hdev->pdev->dev,
2092 			"rx private buffer alloc cmd failed %d\n", ret);
2093 
2094 	return ret;
2095 }
2096 
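/* Program the high/low waterlines of each TC's private rx buffer. Two
 * descriptors are linked with the NEXT flag and sent together, each
 * carrying the waterlines of HCLGE_TC_NUM_ONE_DESC TCs.
 */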
2097 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2098 				   struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 	struct hclge_rx_priv_wl_buf *req;
2101 	struct hclge_priv_buf *priv;
2102 	struct hclge_desc desc[2];
2103 	int i, j;
2104 	int ret;
2105 
2106 	for (i = 0; i < 2; i++) {
2107 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2108 					   false);
2109 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2110 
2111 		/* The first descriptor sets the NEXT bit to 1 */
2112 		if (i == 0)
2113 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2114 		else
2115 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2116 
2117 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2118 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2119 
2120 			priv = &buf_alloc->priv_buf[idx];
2121 			req->tc_wl[j].high =
2122 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2123 			req->tc_wl[j].high |=
2124 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2125 			req->tc_wl[j].low =
2126 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2127 			req->tc_wl[j].low |=
2128 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2129 		}
2130 	}
2131 
2132 	/* Send 2 descriptors at one time */
2133 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2134 	if (ret)
2135 		dev_err(&hdev->pdev->dev,
2136 			"rx private waterline config cmd failed %d\n",
2137 			ret);
2138 	return ret;
2139 }
2140 
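/* Program the per-TC high/low thresholds of the shared rx buffer, again
 * using two linked descriptors of HCLGE_TC_NUM_ONE_DESC TCs each.
 */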
2141 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2142 				    struct hclge_pkt_buf_alloc *buf_alloc)
2143 {
2144 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2145 	struct hclge_rx_com_thrd *req;
2146 	struct hclge_desc desc[2];
2147 	struct hclge_tc_thrd *tc;
2148 	int i, j;
2149 	int ret;
2150 
2151 	for (i = 0; i < 2; i++) {
2152 		hclge_cmd_setup_basic_desc(&desc[i],
2153 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2154 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2155 
2156 		/* The first descriptor sets the NEXT bit to 1 */
2157 		if (i == 0)
2158 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2159 		else
2160 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2161 
2162 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2163 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2164 
2165 			req->com_thrd[j].high =
2166 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2167 			req->com_thrd[j].high |=
2168 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2169 			req->com_thrd[j].low =
2170 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2171 			req->com_thrd[j].low |=
2172 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2173 		}
2174 	}
2175 
2176 	/* Send 2 descriptors at one time */
2177 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2178 	if (ret)
2179 		dev_err(&hdev->pdev->dev,
2180 			"common threshold config cmd failed %d\n", ret);
2181 	return ret;
2182 }
2183 
2184 static int hclge_common_wl_config(struct hclge_dev *hdev,
2185 				  struct hclge_pkt_buf_alloc *buf_alloc)
2186 {
2187 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2188 	struct hclge_rx_com_wl *req;
2189 	struct hclge_desc desc;
2190 	int ret;
2191 
2192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2193 
2194 	req = (struct hclge_rx_com_wl *)desc.data;
2195 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2196 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2197 
2198 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2199 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2200 
2201 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 	if (ret)
2203 		dev_err(&hdev->pdev->dev,
2204 			"common waterline config cmd failed %d\n", ret);
2205 
2206 	return ret;
2207 }
2208 
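/* Calculate and configure the packet buffers: the tx buffer per TC, the rx
 * private buffers, and (when DCB is supported) the rx private waterlines
 * and common thresholds, followed by the common waterline.
 */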
2209 int hclge_buffer_alloc(struct hclge_dev *hdev)
2210 {
2211 	struct hclge_pkt_buf_alloc *pkt_buf;
2212 	int ret;
2213 
2214 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2215 	if (!pkt_buf)
2216 		return -ENOMEM;
2217 
2218 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2219 	if (ret) {
2220 		dev_err(&hdev->pdev->dev,
2221 			"could not calc tx buffer size for all TCs %d\n", ret);
2222 		goto out;
2223 	}
2224 
2225 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2226 	if (ret) {
2227 		dev_err(&hdev->pdev->dev,
2228 			"could not alloc tx buffers %d\n", ret);
2229 		goto out;
2230 	}
2231 
2232 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2233 	if (ret) {
2234 		dev_err(&hdev->pdev->dev,
2235 			"could not calc rx priv buffer size for all TCs %d\n",
2236 			ret);
2237 		goto out;
2238 	}
2239 
2240 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2241 	if (ret) {
2242 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2243 			ret);
2244 		goto out;
2245 	}
2246 
2247 	if (hnae3_dev_dcb_supported(hdev)) {
2248 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2249 		if (ret) {
2250 			dev_err(&hdev->pdev->dev,
2251 				"could not configure rx private waterline %d\n",
2252 				ret);
2253 			goto out;
2254 		}
2255 
2256 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2257 		if (ret) {
2258 			dev_err(&hdev->pdev->dev,
2259 				"could not configure common threshold %d\n",
2260 				ret);
2261 			goto out;
2262 		}
2263 	}
2264 
2265 	ret = hclge_common_wl_config(hdev, pkt_buf);
2266 	if (ret)
2267 		dev_err(&hdev->pdev->dev,
2268 			"could not configure common waterline %d\n", ret);
2269 
2270 out:
2271 	kfree(pkt_buf);
2272 	return ret;
2273 }
2274 
2275 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2276 {
2277 	struct hnae3_handle *roce = &vport->roce;
2278 	struct hnae3_handle *nic = &vport->nic;
2279 
2280 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2281 
2282 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2283 	    vport->back->num_msi_left == 0)
2284 		return -EINVAL;
2285 
2286 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2287 
2288 	roce->rinfo.netdev = nic->kinfo.netdev;
2289 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2290 
2291 	roce->pdev = nic->pdev;
2292 	roce->ae_algo = nic->ae_algo;
2293 	roce->numa_node_mask = nic->numa_node_mask;
2294 
2295 	return 0;
2296 }
2297 
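/* Allocate MSI/MSI-X vectors (at least HNAE3_MIN_VECTOR_NUM, at most
 * hdev->num_msi) and the bookkeeping arrays that map each vector to a
 * vport and to its IRQ number.
 */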
2298 static int hclge_init_msi(struct hclge_dev *hdev)
2299 {
2300 	struct pci_dev *pdev = hdev->pdev;
2301 	int vectors;
2302 	int i;
2303 
2304 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2305 					hdev->num_msi,
2306 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2307 	if (vectors < 0) {
2308 		dev_err(&pdev->dev,
2309 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2310 			vectors);
2311 		return vectors;
2312 	}
2313 	if (vectors < hdev->num_msi)
2314 		dev_warn(&hdev->pdev->dev,
2315 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2316 			 hdev->num_msi, vectors);
2317 
2318 	hdev->num_msi = vectors;
2319 	hdev->num_msi_left = vectors;
2320 
2321 	hdev->base_msi_vector = pdev->irq;
2322 	hdev->roce_base_vector = hdev->base_msi_vector +
2323 				hdev->roce_base_msix_offset;
2324 
2325 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2326 					   sizeof(u16), GFP_KERNEL);
2327 	if (!hdev->vector_status) {
2328 		pci_free_irq_vectors(pdev);
2329 		return -ENOMEM;
2330 	}
2331 
2332 	for (i = 0; i < hdev->num_msi; i++)
2333 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2334 
2335 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2336 					sizeof(int), GFP_KERNEL);
2337 	if (!hdev->vector_irq) {
2338 		pci_free_irq_vectors(pdev);
2339 		return -ENOMEM;
2340 	}
2341 
2342 	return 0;
2343 }
2344 
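/* Half duplex is only supported at 10M/100M; force full duplex for any
 * other speed.
 */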
2345 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2346 {
2347 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2348 		duplex = HCLGE_MAC_FULL;
2349 
2350 	return duplex;
2351 }
2352 
2353 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2354 				      u8 duplex)
2355 {
2356 	struct hclge_config_mac_speed_dup_cmd *req;
2357 	struct hclge_desc desc;
2358 	int ret;
2359 
2360 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2361 
2362 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2363 
2364 	if (duplex)
2365 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2366 
2367 	switch (speed) {
2368 	case HCLGE_MAC_SPEED_10M:
2369 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2370 				HCLGE_CFG_SPEED_S, 6);
2371 		break;
2372 	case HCLGE_MAC_SPEED_100M:
2373 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2374 				HCLGE_CFG_SPEED_S, 7);
2375 		break;
2376 	case HCLGE_MAC_SPEED_1G:
2377 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2378 				HCLGE_CFG_SPEED_S, 0);
2379 		break;
2380 	case HCLGE_MAC_SPEED_10G:
2381 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2382 				HCLGE_CFG_SPEED_S, 1);
2383 		break;
2384 	case HCLGE_MAC_SPEED_25G:
2385 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2386 				HCLGE_CFG_SPEED_S, 2);
2387 		break;
2388 	case HCLGE_MAC_SPEED_40G:
2389 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2390 				HCLGE_CFG_SPEED_S, 3);
2391 		break;
2392 	case HCLGE_MAC_SPEED_50G:
2393 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394 				HCLGE_CFG_SPEED_S, 4);
2395 		break;
2396 	case HCLGE_MAC_SPEED_100G:
2397 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398 				HCLGE_CFG_SPEED_S, 5);
2399 		break;
2400 	default:
2401 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2402 		return -EINVAL;
2403 	}
2404 
2405 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2406 		      1);
2407 
2408 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2409 	if (ret) {
2410 		dev_err(&hdev->pdev->dev,
2411 			"mac speed/duplex config cmd failed %d.\n", ret);
2412 		return ret;
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2419 {
2420 	int ret;
2421 
2422 	duplex = hclge_check_speed_dup(duplex, speed);
2423 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2424 		return 0;
2425 
2426 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2427 	if (ret)
2428 		return ret;
2429 
2430 	hdev->hw.mac.speed = speed;
2431 	hdev->hw.mac.duplex = duplex;
2432 
2433 	return 0;
2434 }
2435 
2436 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2437 				     u8 duplex)
2438 {
2439 	struct hclge_vport *vport = hclge_get_vport(handle);
2440 	struct hclge_dev *hdev = vport->back;
2441 
2442 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2443 }
2444 
2445 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2446 {
2447 	struct hclge_config_auto_neg_cmd *req;
2448 	struct hclge_desc desc;
2449 	u32 flag = 0;
2450 	int ret;
2451 
2452 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2453 
2454 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2455 	if (enable)
2456 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2457 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2458 
2459 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2460 	if (ret)
2461 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2462 			ret);
2463 
2464 	return ret;
2465 }
2466 
2467 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2468 {
2469 	struct hclge_vport *vport = hclge_get_vport(handle);
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	if (!hdev->hw.mac.support_autoneg) {
2473 		if (enable) {
2474 			dev_err(&hdev->pdev->dev,
2475 				"autoneg is not supported by current port\n");
2476 			return -EOPNOTSUPP;
2477 		} else {
2478 			return 0;
2479 		}
2480 	}
2481 
2482 	return hclge_set_autoneg_en(hdev, enable);
2483 }
2484 
2485 static int hclge_get_autoneg(struct hnae3_handle *handle)
2486 {
2487 	struct hclge_vport *vport = hclge_get_vport(handle);
2488 	struct hclge_dev *hdev = vport->back;
2489 	struct phy_device *phydev = hdev->hw.mac.phydev;
2490 
2491 	if (phydev)
2492 		return phydev->autoneg;
2493 
2494 	return hdev->hw.mac.autoneg;
2495 }
2496 
2497 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2498 {
2499 	struct hclge_vport *vport = hclge_get_vport(handle);
2500 	struct hclge_dev *hdev = vport->back;
2501 	int ret;
2502 
2503 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2504 
2505 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2506 	if (ret)
2507 		return ret;
2508 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2509 }
2510 
2511 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2512 {
2513 	struct hclge_vport *vport = hclge_get_vport(handle);
2514 	struct hclge_dev *hdev = vport->back;
2515 
2516 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2517 		return hclge_set_autoneg_en(hdev, !halt);
2518 
2519 	return 0;
2520 }
2521 
2522 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2523 {
2524 	struct hclge_config_fec_cmd *req;
2525 	struct hclge_desc desc;
2526 	int ret;
2527 
2528 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2529 
2530 	req = (struct hclge_config_fec_cmd *)desc.data;
2531 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2532 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2533 	if (fec_mode & BIT(HNAE3_FEC_RS))
2534 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2535 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2536 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2537 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2538 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2539 
2540 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2541 	if (ret)
2542 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2543 
2544 	return ret;
2545 }
2546 
2547 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2548 {
2549 	struct hclge_vport *vport = hclge_get_vport(handle);
2550 	struct hclge_dev *hdev = vport->back;
2551 	struct hclge_mac *mac = &hdev->hw.mac;
2552 	int ret;
2553 
2554 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2555 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2556 		return -EINVAL;
2557 	}
2558 
2559 	ret = hclge_set_fec_hw(hdev, fec_mode);
2560 	if (ret)
2561 		return ret;
2562 
2563 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2564 	return 0;
2565 }
2566 
2567 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2568 			  u8 *fec_mode)
2569 {
2570 	struct hclge_vport *vport = hclge_get_vport(handle);
2571 	struct hclge_dev *hdev = vport->back;
2572 	struct hclge_mac *mac = &hdev->hw.mac;
2573 
2574 	if (fec_ability)
2575 		*fec_ability = mac->fec_ability;
2576 	if (fec_mode)
2577 		*fec_mode = mac->fec_mode;
2578 }
2579 
2580 static int hclge_mac_init(struct hclge_dev *hdev)
2581 {
2582 	struct hclge_mac *mac = &hdev->hw.mac;
2583 	int ret;
2584 
2585 	hdev->support_sfp_query = true;
2586 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2587 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2588 					 hdev->hw.mac.duplex);
2589 	if (ret) {
2590 		dev_err(&hdev->pdev->dev,
2591 			"Config mac speed dup fail ret=%d\n", ret);
2592 		return ret;
2593 	}
2594 
2595 	if (hdev->hw.mac.support_autoneg) {
2596 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2597 		if (ret) {
2598 			dev_err(&hdev->pdev->dev,
2599 				"Config mac autoneg fail ret=%d\n", ret);
2600 			return ret;
2601 		}
2602 	}
2603 
2604 	mac->link = 0;
2605 
2606 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2607 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2608 		if (ret) {
2609 			dev_err(&hdev->pdev->dev,
2610 				"Fec mode init fail, ret = %d\n", ret);
2611 			return ret;
2612 		}
2613 	}
2614 
2615 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2616 	if (ret) {
2617 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2618 		return ret;
2619 	}
2620 
2621 	ret = hclge_set_default_loopback(hdev);
2622 	if (ret)
2623 		return ret;
2624 
2625 	ret = hclge_buffer_alloc(hdev);
2626 	if (ret)
2627 		dev_err(&hdev->pdev->dev,
2628 			"allocate buffer fail, ret=%d\n", ret);
2629 
2630 	return ret;
2631 }
2632 
2633 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2634 {
2635 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2636 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2637 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2638 			      &hdev->mbx_service_task);
2639 }
2640 
2641 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2642 {
2643 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2644 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2645 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2646 			      &hdev->rst_service_task);
2647 }
2648 
2649 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2650 {
2651 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2652 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2653 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2654 		hdev->hw_stats.stats_timer++;
2655 		hdev->fd_arfs_expire_timer++;
2656 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2657 				    system_wq, &hdev->service_task,
2658 				    delay_time);
2659 	}
2660 }
2661 
2662 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2663 {
2664 	struct hclge_link_status_cmd *req;
2665 	struct hclge_desc desc;
2666 	int link_status;
2667 	int ret;
2668 
2669 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2670 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2671 	if (ret) {
2672 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2673 			ret);
2674 		return ret;
2675 	}
2676 
2677 	req = (struct hclge_link_status_cmd *)desc.data;
2678 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2679 
2680 	return !!link_status;
2681 }
2682 
2683 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2684 {
2685 	unsigned int mac_state;
2686 	int link_stat;
2687 
2688 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2689 		return 0;
2690 
2691 	mac_state = hclge_get_mac_link_status(hdev);
2692 
2693 	if (hdev->hw.mac.phydev) {
2694 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2695 			link_stat = mac_state &
2696 				hdev->hw.mac.phydev->link;
2697 		else
2698 			link_stat = 0;
2699 
2700 	} else {
2701 		link_stat = mac_state;
2702 	}
2703 
2704 	return !!link_stat;
2705 }
2706 
2707 static void hclge_update_link_status(struct hclge_dev *hdev)
2708 {
2709 	struct hnae3_client *rclient = hdev->roce_client;
2710 	struct hnae3_client *client = hdev->nic_client;
2711 	struct hnae3_handle *rhandle;
2712 	struct hnae3_handle *handle;
2713 	int state;
2714 	int i;
2715 
2716 	if (!client)
2717 		return;
2718 	state = hclge_get_mac_phy_link(hdev);
2719 	if (state != hdev->hw.mac.link) {
2720 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2721 			handle = &hdev->vport[i].nic;
2722 			client->ops->link_status_change(handle, state);
2723 			hclge_config_mac_tnl_int(hdev, state);
2724 			rhandle = &hdev->vport[i].roce;
2725 			if (rclient && rclient->ops->link_status_change)
2726 				rclient->ops->link_status_change(rhandle,
2727 								 state);
2728 		}
2729 		hdev->hw.mac.link = state;
2730 	}
2731 }
2732 
2733 static void hclge_update_port_capability(struct hclge_mac *mac)
2734 {
2735 	/* update fec ability by speed */
2736 	hclge_convert_setting_fec(mac);
2737 
2738 	/* firmware cannot identify the backplane type; the media type
2739 	 * read from the configuration can help to deal with it
2740 	 */
2741 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2742 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2743 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2744 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2745 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2746 
2747 	if (mac->support_autoneg) {
2748 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2749 		linkmode_copy(mac->advertising, mac->supported);
2750 	} else {
2751 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2752 				   mac->supported);
2753 		linkmode_zero(mac->advertising);
2754 	}
2755 }
2756 
2757 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2758 {
2759 	struct hclge_sfp_info_cmd *resp;
2760 	struct hclge_desc desc;
2761 	int ret;
2762 
2763 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2764 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2766 	if (ret == -EOPNOTSUPP) {
2767 		dev_warn(&hdev->pdev->dev,
2768 			 "IMP does not support getting SFP speed %d\n", ret);
2769 		return ret;
2770 	} else if (ret) {
2771 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2772 		return ret;
2773 	}
2774 
2775 	*speed = le32_to_cpu(resp->speed);
2776 
2777 	return 0;
2778 }
2779 
2780 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2781 {
2782 	struct hclge_sfp_info_cmd *resp;
2783 	struct hclge_desc desc;
2784 	int ret;
2785 
2786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2787 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2788 
2789 	resp->query_type = QUERY_ACTIVE_SPEED;
2790 
2791 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2792 	if (ret == -EOPNOTSUPP) {
2793 		dev_warn(&hdev->pdev->dev,
2794 			 "IMP does not support getting SFP info %d\n", ret);
2795 		return ret;
2796 	} else if (ret) {
2797 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2798 		return ret;
2799 	}
2800 
2801 	mac->speed = le32_to_cpu(resp->speed);
2802 	/* if resp->speed_ability is 0, it means it's an old version of
2803 	 * firmware, so do not update these params
2804 	 */
2805 	if (resp->speed_ability) {
2806 		mac->module_type = le32_to_cpu(resp->module_type);
2807 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2808 		mac->autoneg = resp->autoneg;
2809 		mac->support_autoneg = resp->autoneg_ability;
2810 		mac->speed_type = QUERY_ACTIVE_SPEED;
2811 		if (!resp->active_fec)
2812 			mac->fec_mode = 0;
2813 		else
2814 			mac->fec_mode = BIT(resp->active_fec);
2815 	} else {
2816 		mac->speed_type = QUERY_SFP_SPEED;
2817 	}
2818 
2819 	return 0;
2820 }
2821 
2822 static int hclge_update_port_info(struct hclge_dev *hdev)
2823 {
2824 	struct hclge_mac *mac = &hdev->hw.mac;
2825 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2826 	int ret;
2827 
2828 	/* get the port info from SFP cmd if not copper port */
2829 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2830 		return 0;
2831 
2832 	/* if IMP does not support getting SFP/qSFP info, return directly */
2833 	if (!hdev->support_sfp_query)
2834 		return 0;
2835 
2836 	if (hdev->pdev->revision >= 0x21)
2837 		ret = hclge_get_sfp_info(hdev, mac);
2838 	else
2839 		ret = hclge_get_sfp_speed(hdev, &speed);
2840 
2841 	if (ret == -EOPNOTSUPP) {
2842 		hdev->support_sfp_query = false;
2843 		return ret;
2844 	} else if (ret) {
2845 		return ret;
2846 	}
2847 
2848 	if (hdev->pdev->revision >= 0x21) {
2849 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2850 			hclge_update_port_capability(mac);
2851 			return 0;
2852 		}
2853 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2854 					       HCLGE_MAC_FULL);
2855 	} else {
2856 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2857 			return 0; /* do nothing if no SFP */
2858 
2859 		/* must config full duplex for SFP */
2860 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2861 	}
2862 }
2863 
2864 static int hclge_get_status(struct hnae3_handle *handle)
2865 {
2866 	struct hclge_vport *vport = hclge_get_vport(handle);
2867 	struct hclge_dev *hdev = vport->back;
2868 
2869 	hclge_update_link_status(hdev);
2870 
2871 	return hdev->hw.mac.link;
2872 }
2873 
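/* Decode the vector0 interrupt source: reset events take precedence over
 * MSI-X hardware error events, which in turn take precedence over mailbox
 * (CMDQ RX) events. The bits to clear are returned through @clearval.
 */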
2874 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2875 {
2876 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2877 
2878 	/* fetch the events from their corresponding regs */
2879 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2880 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2881 	msix_src_reg = hclge_read_dev(&hdev->hw,
2882 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2883 
2884 	/* Assumption: if reset and mailbox events happen to be reported
2885 	 * together, we only process the reset event in this pass and defer
2886 	 * the processing of the mailbox events. Since the RX CMDQ event has
2887 	 * not been cleared this time, the hardware will raise another
2888 	 * interrupt just for the mailbox.
2889 	 *
2890 	 * check for vector0 reset event sources
2891 	 */
2892 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2893 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2894 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2895 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2896 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2897 		hdev->rst_stats.imp_rst_cnt++;
2898 		return HCLGE_VECTOR0_EVENT_RST;
2899 	}
2900 
2901 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2902 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2903 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2904 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2905 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2906 		hdev->rst_stats.global_rst_cnt++;
2907 		return HCLGE_VECTOR0_EVENT_RST;
2908 	}
2909 
2910 	/* check for vector0 msix event source */
2911 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2912 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2913 			 msix_src_reg);
2914 		*clearval = msix_src_reg;
2915 		return HCLGE_VECTOR0_EVENT_ERR;
2916 	}
2917 
2918 	/* check for vector0 mailbox(=CMDQ RX) event source */
2919 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2920 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2921 		*clearval = cmdq_src_reg;
2922 		return HCLGE_VECTOR0_EVENT_MBX;
2923 	}
2924 
2925 	/* print other vector0 event source */
2926 	dev_info(&hdev->pdev->dev,
2927 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2928 		 cmdq_src_reg, msix_src_reg);
2929 	*clearval = msix_src_reg;
2930 
2931 	return HCLGE_VECTOR0_EVENT_OTHER;
2932 }
2933 
2934 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2935 				    u32 regclr)
2936 {
2937 	switch (event_type) {
2938 	case HCLGE_VECTOR0_EVENT_RST:
2939 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2940 		break;
2941 	case HCLGE_VECTOR0_EVENT_MBX:
2942 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2943 		break;
2944 	default:
2945 		break;
2946 	}
2947 }
2948 
2949 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2950 {
2951 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2952 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2953 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2954 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2955 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2956 }
2957 
2958 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2959 {
2960 	writel(enable ? 1 : 0, vector->addr);
2961 }
2962 
2963 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2964 {
2965 	struct hclge_dev *hdev = data;
2966 	u32 clearval = 0;
2967 	u32 event_cause;
2968 
2969 	hclge_enable_vector(&hdev->misc_vector, false);
2970 	event_cause = hclge_check_event_cause(hdev, &clearval);
2971 
2972 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2973 	switch (event_cause) {
2974 	case HCLGE_VECTOR0_EVENT_ERR:
2975 		/* we do not know what type of reset is required now. This could
2976 		 * only be decided after we fetch the type of errors which
2977 		 * caused this event. Therefore, we will do the following for now:
2978 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2979 		 *    have deferred the choice of reset type.
2980 		 * 2. Schedule the reset service task.
2981 		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET
2982 		 *    type, it will fetch the correct type of reset by first
2983 		 *    decoding the types of errors.
2984 		 */
2985 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2986 		/* fall through */
2987 	case HCLGE_VECTOR0_EVENT_RST:
2988 		hclge_reset_task_schedule(hdev);
2989 		break;
2990 	case HCLGE_VECTOR0_EVENT_MBX:
2991 		/* If we are here then,
2992 		 * 1. Either we are not handling any mbx task and we are not
2993 		 *    scheduled as well
2994 		 *                        OR
2995 		 * 2. We could be handling an mbx task but nothing more is
2996 		 *    scheduled.
2997 		 * In both cases, we should schedule the mbx task as there are
2998 		 * more mbx messages reported by this interrupt.
2999 		 */
3000 		hclge_mbx_task_schedule(hdev);
3001 		break;
3002 	default:
3003 		dev_warn(&hdev->pdev->dev,
3004 			 "received unknown or unhandled event of vector0\n");
3005 		break;
3006 	}
3007 
3008 	hclge_clear_event_cause(hdev, event_cause, clearval);
3009 
3010 	/* Enable the interrupt if it is not caused by reset. When clearval
3011 	 * equals 0, it means the interrupt status may have been cleared by
3012 	 * hardware before the driver reads the status register. In this
3013 	 * case, the vector0 interrupt should also be enabled.
3014 	 */
3015 	if (!clearval ||
3016 	    event_cause == HCLGE_VECTOR0_EVENT_MBX)
3017 		hclge_enable_vector(&hdev->misc_vector, true);
3019 
3020 	return IRQ_HANDLED;
3021 }
3022 
3023 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3024 {
3025 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3026 		dev_warn(&hdev->pdev->dev,
3027 			 "vector(vector_id %d) has been freed.\n", vector_id);
3028 		return;
3029 	}
3030 
3031 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3032 	hdev->num_msi_left += 1;
3033 	hdev->num_msi_used -= 1;
3034 }
3035 
3036 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3037 {
3038 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3039 
3040 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3041 
3042 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3043 	hdev->vector_status[0] = 0;
3044 
3045 	hdev->num_msi_left -= 1;
3046 	hdev->num_msi_used += 1;
3047 }
3048 
3049 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3050 				      const cpumask_t *mask)
3051 {
3052 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3053 					      affinity_notify);
3054 
3055 	cpumask_copy(&hdev->affinity_mask, mask);
3056 }
3057 
3058 static void hclge_irq_affinity_release(struct kref *ref)
3059 {
3060 }
3061 
3062 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3063 {
3064 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3065 			      &hdev->affinity_mask);
3066 
3067 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3068 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3069 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3070 				  &hdev->affinity_notify);
3071 }
3072 
3073 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3074 {
3075 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3076 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3077 }
3078 
3079 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3080 {
3081 	int ret;
3082 
3083 	hclge_get_misc_vector(hdev);
3084 
3085 	/* this would be explicitly freed in the end */
3086 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3087 			  0, "hclge_misc", hdev);
3088 	if (ret) {
3089 		hclge_free_vector(hdev, 0);
3090 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3091 			hdev->misc_vector.vector_irq);
3092 	}
3093 
3094 	return ret;
3095 }
3096 
3097 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3098 {
3099 	free_irq(hdev->misc_vector.vector_irq, hdev);
3100 	hclge_free_vector(hdev, 0);
3101 }
3102 
3103 int hclge_notify_client(struct hclge_dev *hdev,
3104 			enum hnae3_reset_notify_type type)
3105 {
3106 	struct hnae3_client *client = hdev->nic_client;
3107 	u16 i;
3108 
3109 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3110 		return 0;
3111 
3112 	if (!client->ops->reset_notify)
3113 		return -EOPNOTSUPP;
3114 
3115 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3116 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3117 		int ret;
3118 
3119 		ret = client->ops->reset_notify(handle, type);
3120 		if (ret) {
3121 			dev_err(&hdev->pdev->dev,
3122 				"notify nic client failed %d(%d)\n", type, ret);
3123 			return ret;
3124 		}
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3131 				    enum hnae3_reset_notify_type type)
3132 {
3133 	struct hnae3_client *client = hdev->roce_client;
3134 	int ret = 0;
3135 	u16 i;
3136 
3137 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3138 		return 0;
3139 
3140 	if (!client->ops->reset_notify)
3141 		return -EOPNOTSUPP;
3142 
3143 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3144 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3145 
3146 		ret = client->ops->reset_notify(handle, type);
3147 		if (ret) {
3148 			dev_err(&hdev->pdev->dev,
3149 				"notify roce client failed %d(%d)",
3150 				type, ret);
3151 			return ret;
3152 		}
3153 	}
3154 
3155 	return ret;
3156 }
3157 
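/* Wait for hardware to report that the requested reset has completed,
 * polling every HCLGE_RESET_WATI_MS up to HCLGE_RESET_WAIT_CNT times (for
 * FLR, the HNAE3_FLR_DONE flag is polled instead).
 */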
3158 static int hclge_reset_wait(struct hclge_dev *hdev)
3159 {
3160 #define HCLGE_RESET_WATI_MS	100
3161 #define HCLGE_RESET_WAIT_CNT	200
3162 	u32 val, reg, reg_bit;
3163 	u32 cnt = 0;
3164 
3165 	switch (hdev->reset_type) {
3166 	case HNAE3_IMP_RESET:
3167 		reg = HCLGE_GLOBAL_RESET_REG;
3168 		reg_bit = HCLGE_IMP_RESET_BIT;
3169 		break;
3170 	case HNAE3_GLOBAL_RESET:
3171 		reg = HCLGE_GLOBAL_RESET_REG;
3172 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3173 		break;
3174 	case HNAE3_FUNC_RESET:
3175 		reg = HCLGE_FUN_RST_ING;
3176 		reg_bit = HCLGE_FUN_RST_ING_B;
3177 		break;
3178 	case HNAE3_FLR_RESET:
3179 		break;
3180 	default:
3181 		dev_err(&hdev->pdev->dev,
3182 			"Wait for unsupported reset type: %d\n",
3183 			hdev->reset_type);
3184 		return -EINVAL;
3185 	}
3186 
3187 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3188 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3189 		       cnt++ < HCLGE_RESET_WAIT_CNT)
3190 			msleep(HCLGE_RESET_WATI_MS);
3191 
3192 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3193 			dev_err(&hdev->pdev->dev,
3194 				"flr wait timeout: %d\n", cnt);
3195 			return -EBUSY;
3196 		}
3197 
3198 		return 0;
3199 	}
3200 
3201 	val = hclge_read_dev(&hdev->hw, reg);
3202 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3203 		msleep(HCLGE_RESET_WATI_MS);
3204 		val = hclge_read_dev(&hdev->hw, reg);
3205 		cnt++;
3206 	}
3207 
3208 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3209 		dev_warn(&hdev->pdev->dev,
3210 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3211 		return -EBUSY;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3218 {
3219 	struct hclge_vf_rst_cmd *req;
3220 	struct hclge_desc desc;
3221 
3222 	req = (struct hclge_vf_rst_cmd *)desc.data;
3223 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3224 	req->dest_vfid = func_id;
3225 
3226 	if (reset)
3227 		req->vf_rst = 0x1;
3228 
3229 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3230 }
3231 
3232 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3233 {
3234 	int i;
3235 
3236 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3237 		struct hclge_vport *vport = &hdev->vport[i];
3238 		int ret;
3239 
3240 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3241 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3242 		if (ret) {
3243 			dev_err(&hdev->pdev->dev,
3244 				"set vf(%d) rst failed %d!\n",
3245 				vport->vport_id, ret);
3246 			return ret;
3247 		}
3248 
3249 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3250 			continue;
3251 
3252 		/* Inform VF to process the reset.
3253 		 * hclge_inform_reset_assert_to_vf may fail if VF
3254 		 * driver is not loaded.
3255 		 */
3256 		ret = hclge_inform_reset_assert_to_vf(vport);
3257 		if (ret)
3258 			dev_warn(&hdev->pdev->dev,
3259 				 "inform reset to vf(%d) failed %d!\n",
3260 				 vport->vport_id, ret);
3261 	}
3262 
3263 	return 0;
3264 }
3265 
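/* Before asserting a PF/FLR reset, poll firmware until it reports that all
 * running VFs have stopped IO, or give up after HCLGE_PF_RESET_SYNC_CNT
 * retries.
 */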
3266 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3267 {
3268 	struct hclge_pf_rst_sync_cmd *req;
3269 	struct hclge_desc desc;
3270 	int cnt = 0;
3271 	int ret;
3272 
3273 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3275 
3276 	do {
3277 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3278 		/* for compatibility with old firmware, wait
3279 		 * 100 ms for the VF to stop IO
3280 		 */
3281 		if (ret == -EOPNOTSUPP) {
3282 			msleep(HCLGE_RESET_SYNC_TIME);
3283 			return 0;
3284 		} else if (ret) {
3285 			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3286 				ret);
3287 			return ret;
3288 		} else if (req->all_vf_ready) {
3289 			return 0;
3290 		}
3291 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3292 		hclge_cmd_reuse_desc(&desc, true);
3293 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3294 
3295 	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3296 	return -ETIME;
3297 }
3298 
3299 void hclge_report_hw_error(struct hclge_dev *hdev,
3300 			   enum hnae3_hw_error_type type)
3301 {
3302 	struct hnae3_client *client = hdev->nic_client;
3303 	u16 i;
3304 
3305 	if (!client || !client->ops->process_hw_error ||
3306 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3307 		return;
3308 
3309 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3310 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3311 }
3312 
3313 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3314 {
3315 	u32 reg_val;
3316 
3317 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3318 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3319 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3320 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3321 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3322 	}
3323 
3324 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3325 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3326 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3327 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3328 	}
3329 }
3330 
3331 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3332 {
3333 	struct hclge_desc desc;
3334 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3335 	int ret;
3336 
3337 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3338 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3339 	req->fun_reset_vfid = func_id;
3340 
3341 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3342 	if (ret)
3343 		dev_err(&hdev->pdev->dev,
3344 			"send function reset cmd fail, status = %d\n", ret);
3345 
3346 	return ret;
3347 }
3348 
3349 static void hclge_do_reset(struct hclge_dev *hdev)
3350 {
3351 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3352 	struct pci_dev *pdev = hdev->pdev;
3353 	u32 val;
3354 
3355 	if (hclge_get_hw_reset_stat(handle)) {
3356 		dev_info(&pdev->dev, "Hardware reset not finished\n");
3357 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3358 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3359 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3360 		return;
3361 	}
3362 
3363 	switch (hdev->reset_type) {
3364 	case HNAE3_GLOBAL_RESET:
3365 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3366 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3367 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3368 		dev_info(&pdev->dev, "Global Reset requested\n");
3369 		break;
3370 	case HNAE3_FUNC_RESET:
3371 		dev_info(&pdev->dev, "PF Reset requested\n");
3372 		/* schedule again to check later */
3373 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3374 		hclge_reset_task_schedule(hdev);
3375 		break;
3376 	case HNAE3_FLR_RESET:
3377 		dev_info(&pdev->dev, "FLR requested\n");
3378 		/* schedule again to check later */
3379 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3380 		hclge_reset_task_schedule(hdev);
3381 		break;
3382 	default:
3383 		dev_warn(&pdev->dev,
3384 			 "Unsupported reset type: %d\n", hdev->reset_type);
3385 		break;
3386 	}
3387 }
3388 
3389 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3390 						   unsigned long *addr)
3391 {
3392 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3393 	struct hclge_dev *hdev = ae_dev->priv;
3394 
3395 	/* first, resolve any unknown reset type to the known type(s) */
3396 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3397 		/* we will intentionally ignore any errors from this function
3398 		 *  as we will end up in *some* reset request in any case
3399 		 */
3400 		hclge_handle_hw_msix_error(hdev, addr);
3401 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3402 		/* We deferred the clearing of the error event which caused
3403 		 * the interrupt since it was not possible to do that in
3404 		 * interrupt context (and this is the reason we introduced the
3405 		 * new UNKNOWN reset type). Now that the errors have been
3406 		 * handled and cleared in hardware, we can safely enable
3407 		 * interrupts. This is an exception to the norm.
3408 		 */
3409 		hclge_enable_vector(&hdev->misc_vector, true);
3410 	}
3411 
3412 	/* return the highest priority reset level amongst all */
3413 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3414 		rst_level = HNAE3_IMP_RESET;
3415 		clear_bit(HNAE3_IMP_RESET, addr);
3416 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3417 		clear_bit(HNAE3_FUNC_RESET, addr);
3418 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3419 		rst_level = HNAE3_GLOBAL_RESET;
3420 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3421 		clear_bit(HNAE3_FUNC_RESET, addr);
3422 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3423 		rst_level = HNAE3_FUNC_RESET;
3424 		clear_bit(HNAE3_FUNC_RESET, addr);
3425 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3426 		rst_level = HNAE3_FLR_RESET;
3427 		clear_bit(HNAE3_FLR_RESET, addr);
3428 	}
3429 
3430 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3431 	    rst_level < hdev->reset_type)
3432 		return HNAE3_NONE_RESET;
3433 
3434 	return rst_level;
3435 }
3436 
3437 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3438 {
3439 	u32 clearval = 0;
3440 
3441 	switch (hdev->reset_type) {
3442 	case HNAE3_IMP_RESET:
3443 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3444 		break;
3445 	case HNAE3_GLOBAL_RESET:
3446 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3447 		break;
3448 	default:
3449 		break;
3450 	}
3451 
3452 	if (!clearval)
3453 		return;
3454 
3455 	/* For revision 0x20, the reset interrupt source
3456 	 * can only be cleared after the hardware reset is done
3457 	 */
3458 	if (hdev->pdev->revision == 0x20)
3459 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3460 				clearval);
3461 
3462 	hclge_enable_vector(&hdev->misc_vector, true);
3463 }
3464 
3465 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3466 {
3467 	int ret = 0;
3468 
3469 	switch (hdev->reset_type) {
3470 	case HNAE3_FUNC_RESET:
3471 		/* fall through */
3472 	case HNAE3_FLR_RESET:
3473 		ret = hclge_set_all_vf_rst(hdev, true);
3474 		break;
3475 	default:
3476 		break;
3477 	}
3478 
3479 	return ret;
3480 }
3481 
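/* Set or clear the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register
 * to inform hardware whether the driver has finished its reset preparatory
 * work.
 */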
3482 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3483 {
3484 	u32 reg_val;
3485 
3486 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3487 	if (enable)
3488 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3489 	else
3490 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3491 
3492 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3493 }
3494 
3495 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3496 {
3497 	u32 reg_val;
3498 	int ret = 0;
3499 
3500 	switch (hdev->reset_type) {
3501 	case HNAE3_FUNC_RESET:
3502 		/* to confirm whether all running VFs are ready
3503 		 * before requesting PF reset
3504 		 */
3505 		ret = hclge_func_reset_sync_vf(hdev);
3506 		if (ret)
3507 			return ret;
3508 
3509 		ret = hclge_func_reset_cmd(hdev, 0);
3510 		if (ret) {
3511 			dev_err(&hdev->pdev->dev,
3512 				"asserting function reset fail %d!\n", ret);
3513 			return ret;
3514 		}
3515 
3516 		/* After performing PF reset, it is not necessary to do the
3517 		 * mailbox handling or send any command to firmware, because
3518 		 * any mailbox handling or command to firmware is only valid
3519 		 * after hclge_cmd_init is called.
3520 		 */
3521 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3522 		hdev->rst_stats.pf_rst_cnt++;
3523 		break;
3524 	case HNAE3_FLR_RESET:
3525 		/* to confirm whether all running VFs are ready
3526 		 * before requesting PF reset
3527 		 */
3528 		ret = hclge_func_reset_sync_vf(hdev);
3529 		if (ret)
3530 			return ret;
3531 
3532 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3533 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3534 		hdev->rst_stats.flr_rst_cnt++;
3535 		break;
3536 	case HNAE3_IMP_RESET:
3537 		hclge_handle_imp_error(hdev);
3538 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3540 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3541 		break;
3542 	default:
3543 		break;
3544 	}
3545 
3546 	/* inform hardware that preparatory work is done */
3547 	msleep(HCLGE_RESET_SYNC_TIME);
3548 	hclge_reset_handshake(hdev, true);
3549 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3550 
3551 	return ret;
3552 }
3553 
3554 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3555 {
3556 #define MAX_RESET_FAIL_CNT 5
3557 
3558 	if (hdev->reset_pending) {
3559 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3560 			 hdev->reset_pending);
3561 		return true;
3562 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3563 		   HCLGE_RESET_INT_M) {
3564 		dev_info(&hdev->pdev->dev,
3565 			 "reset failed because new reset interrupt\n");
3566 		hclge_clear_reset_cause(hdev);
3567 		return false;
3568 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3569 		hdev->rst_stats.reset_fail_cnt++;
3570 		set_bit(hdev->reset_type, &hdev->reset_pending);
3571 		dev_info(&hdev->pdev->dev,
3572 			 "re-schedule reset task(%d)\n",
3573 			 hdev->rst_stats.reset_fail_cnt);
3574 		return true;
3575 	}
3576 
3577 	hclge_clear_reset_cause(hdev);
3578 
3579 	/* recover the handshake status when the reset fails */
3580 	hclge_reset_handshake(hdev, true);
3581 
3582 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3583 	return false;
3584 }
3585 
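/* Notify the firmware that the PF has finished its reset handling by sending
 * the PF_RST_DONE command (used for global and IMP resets, see
 * hclge_reset_prepare_up()).
 */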
3586 static int hclge_set_rst_done(struct hclge_dev *hdev)
3587 {
3588 	struct hclge_pf_rst_done_cmd *req;
3589 	struct hclge_desc desc;
3590 
3591 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3592 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3593 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3594 
3595 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3596 }
3597 
3598 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3599 {
3600 	int ret = 0;
3601 
3602 	switch (hdev->reset_type) {
3603 	case HNAE3_FUNC_RESET:
3604 		/* fall through */
3605 	case HNAE3_FLR_RESET:
3606 		ret = hclge_set_all_vf_rst(hdev, false);
3607 		break;
3608 	case HNAE3_GLOBAL_RESET:
3609 		/* fall through */
3610 	case HNAE3_IMP_RESET:
3611 		ret = hclge_set_rst_done(hdev);
3612 		break;
3613 	default:
3614 		break;
3615 	}
3616 
3617 	/* clear the handshake status after re-initialization is done */
3618 	hclge_reset_handshake(hdev, false);
3619 
3620 	return ret;
3621 }
3622 
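/* Rebuild the driver and client state after the hardware reset completes:
 * uninit the client, re-initialize the ae device, then init and restore the
 * client.
 */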
3623 static int hclge_reset_stack(struct hclge_dev *hdev)
3624 {
3625 	int ret;
3626 
3627 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3628 	if (ret)
3629 		return ret;
3630 
3631 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3632 	if (ret)
3633 		return ret;
3634 
3635 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3636 	if (ret)
3637 		return ret;
3638 
3639 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3640 }
3641 
3642 static void hclge_reset(struct hclge_dev *hdev)
3643 {
3644 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3645 	enum hnae3_reset_type reset_level;
3646 	int ret;
3647 
3648 	/* Initialize ae_dev reset status as well, in case the enet layer wants
3649 	 * to know if the device is undergoing reset
3650 	 */
3651 	ae_dev->reset_type = hdev->reset_type;
3652 	hdev->rst_stats.reset_cnt++;
3653 	/* perform reset of the stack & ae device for a client */
3654 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3655 	if (ret)
3656 		goto err_reset;
3657 
3658 	ret = hclge_reset_prepare_down(hdev);
3659 	if (ret)
3660 		goto err_reset;
3661 
3662 	rtnl_lock();
3663 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3664 	if (ret)
3665 		goto err_reset_lock;
3666 
3667 	rtnl_unlock();
3668 
3669 	ret = hclge_reset_prepare_wait(hdev);
3670 	if (ret)
3671 		goto err_reset;
3672 
3673 	if (hclge_reset_wait(hdev))
3674 		goto err_reset;
3675 
3676 	hdev->rst_stats.hw_reset_done_cnt++;
3677 
3678 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3679 	if (ret)
3680 		goto err_reset;
3681 
3682 	rtnl_lock();
3683 
3684 	ret = hclge_reset_stack(hdev);
3685 	if (ret)
3686 		goto err_reset_lock;
3687 
3688 	hclge_clear_reset_cause(hdev);
3689 
3690 	ret = hclge_reset_prepare_up(hdev);
3691 	if (ret)
3692 		goto err_reset_lock;
3693 
3694 	rtnl_unlock();
3695 
3696 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3697 	/* ignore the RoCE notify error if the reset has already failed
3698 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3699 	 */
3700 	if (ret &&
3701 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3702 		goto err_reset;
3703 
3704 	rtnl_lock();
3705 
3706 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3707 	if (ret)
3708 		goto err_reset_lock;
3709 
3710 	rtnl_unlock();
3711 
3712 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3713 	if (ret)
3714 		goto err_reset;
3715 
3716 	hdev->last_reset_time = jiffies;
3717 	hdev->rst_stats.reset_fail_cnt = 0;
3718 	hdev->rst_stats.reset_done_cnt++;
3719 	ae_dev->reset_type = HNAE3_NONE_RESET;
3720 
3721 	/* if default_reset_request has a higher level reset request,
3722 	 * it should be handled as soon as possible, since some errors
3723 	 * need this kind of reset to be fixed.
3724 	 */
3725 	reset_level = hclge_get_reset_level(ae_dev,
3726 					    &hdev->default_reset_request);
3727 	if (reset_level != HNAE3_NONE_RESET)
3728 		set_bit(reset_level, &hdev->reset_request);
3729 
3730 	return;
3731 
3732 err_reset_lock:
3733 	rtnl_unlock();
3734 err_reset:
3735 	if (hclge_reset_err_handle(hdev))
3736 		hclge_reset_task_schedule(hdev);
3737 }
3738 
3739 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3740 {
3741 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3742 	struct hclge_dev *hdev = ae_dev->priv;
3743 
3744 	/* We might end up getting called for two reasons:
3745 	 * 1. A recoverable error was conveyed through APEI and the only way to
3746 	 *    bring back normalcy is to reset.
3747 	 * 2. A new reset request from the stack due to a timeout.
3748 	 *
3749 	 * For the first case, the error event might not have an ae handle
3750 	 * available. Check whether this is a new reset request and we are not
3751 	 * here just because the last reset attempt did not succeed and the
3752 	 * watchdog hit us again. We will know this if the last reset request
3753 	 * did not occur very recently (watchdog timer = 5*HZ, so check after a
3754 	 * sufficiently large time, say 4*5*HZ). For a new request we reset the
3755 	 * "reset level" to PF reset. If it is a repeat of the most recent reset
3756 	 * request, we want to throttle it, so we will not allow it again before
3757 	 * 3*HZ has passed.
3758 	 */
3759 	if (!handle)
3760 		handle = &hdev->vport[0].nic;
3761 
3762 	if (time_before(jiffies, (hdev->last_reset_time +
3763 				  HCLGE_RESET_INTERVAL))) {
3764 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3765 		return;
3766 	} else if (hdev->default_reset_request)
3767 		hdev->reset_level =
3768 			hclge_get_reset_level(ae_dev,
3769 					      &hdev->default_reset_request);
3770 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3771 		hdev->reset_level = HNAE3_FUNC_RESET;
3772 
3773 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3774 		 hdev->reset_level);
3775 
3776 	/* request reset & schedule reset task */
3777 	set_bit(hdev->reset_level, &hdev->reset_request);
3778 	hclge_reset_task_schedule(hdev);
3779 
3780 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3781 		hdev->reset_level++;
3782 }
3783 
3784 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3785 					enum hnae3_reset_type rst_type)
3786 {
3787 	struct hclge_dev *hdev = ae_dev->priv;
3788 
3789 	set_bit(rst_type, &hdev->default_reset_request);
3790 }
3791 
3792 static void hclge_reset_timer(struct timer_list *t)
3793 {
3794 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3795 
3796 	/* if default_reset_request has no value, it means that this reset
3797 	 * request has already been handled, so just return here
3798 	 */
3799 	if (!hdev->default_reset_request)
3800 		return;
3801 
3802 	dev_info(&hdev->pdev->dev,
3803 		 "triggering reset in reset timer\n");
3804 	hclge_reset_event(hdev->pdev, NULL);
3805 }
3806 
3807 static void hclge_reset_subtask(struct hclge_dev *hdev)
3808 {
3809 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3810 
3811 	/* check if there is any ongoing reset in the hardware. This status can
3812 	 * be checked from reset_pending. If there is, we need to wait for the
3813 	 * hardware to complete the reset.
3814 	 *    a. If we are able to figure out in a reasonable time that the
3815 	 *       hardware has fully reset, we can proceed with the driver and
3816 	 *       client reset.
3817 	 *    b. else, we come back later to check this status, so re-schedule
3818 	 *       now.
3819 	 */
3820 	hdev->last_reset_time = jiffies;
3821 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3822 	if (hdev->reset_type != HNAE3_NONE_RESET)
3823 		hclge_reset(hdev);
3824 
3825 	/* check if we got any *new* reset requests to be honored */
3826 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3827 	if (hdev->reset_type != HNAE3_NONE_RESET)
3828 		hclge_do_reset(hdev);
3829 
3830 	hdev->reset_type = HNAE3_NONE_RESET;
3831 }
3832 
3833 static void hclge_reset_service_task(struct work_struct *work)
3834 {
3835 	struct hclge_dev *hdev =
3836 		container_of(work, struct hclge_dev, rst_service_task);
3837 
3838 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3839 		return;
3840 
3841 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3842 
3843 	hclge_reset_subtask(hdev);
3844 
3845 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3846 }
3847 
3848 static void hclge_mailbox_service_task(struct work_struct *work)
3849 {
3850 	struct hclge_dev *hdev =
3851 		container_of(work, struct hclge_dev, mbx_service_task);
3852 
3853 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3854 		return;
3855 
3856 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3857 
3858 	hclge_mbx_handler(hdev);
3859 
3860 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3861 }
3862 
3863 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3864 {
3865 	int i;
3866 
3867 	/* start from vport 1, because the PF (vport 0) is always alive */
3868 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3869 		struct hclge_vport *vport = &hdev->vport[i];
3870 
3871 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3872 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3873 
3874 		/* If the VF is not alive, reset its MPS to the default value */
3875 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3876 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3877 	}
3878 }
3879 
3880 static void hclge_service_task(struct work_struct *work)
3881 {
3882 	struct hclge_dev *hdev =
3883 		container_of(work, struct hclge_dev, service_task.work);
3884 
3885 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3886 
3887 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3888 		hclge_update_stats_for_all(hdev);
3889 		hdev->hw_stats.stats_timer = 0;
3890 	}
3891 
3892 	hclge_update_port_info(hdev);
3893 	hclge_update_link_status(hdev);
3894 	hclge_update_vport_alive(hdev);
3895 	hclge_sync_vlan_filter(hdev);
3896 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3897 		hclge_rfs_filter_expire(hdev);
3898 		hdev->fd_arfs_expire_timer = 0;
3899 	}
3900 
3901 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3902 }
3903 
3904 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3905 {
3906 	/* VF handle has no client */
3907 	if (!handle->client)
3908 		return container_of(handle, struct hclge_vport, nic);
3909 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3910 		return container_of(handle, struct hclge_vport, roce);
3911 	else
3912 		return container_of(handle, struct hclge_vport, nic);
3913 }
3914 
3915 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3916 			    struct hnae3_vector_info *vector_info)
3917 {
3918 	struct hclge_vport *vport = hclge_get_vport(handle);
3919 	struct hnae3_vector_info *vector = vector_info;
3920 	struct hclge_dev *hdev = vport->back;
3921 	int alloc = 0;
3922 	int i, j;
3923 
3924 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
3925 	vector_num = min(hdev->num_msi_left, vector_num);
3926 
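	/* vector 0 is used by the misc interrupt, so scan for a free entry
	 * starting from index 1; io_addr is therefore based on (i - 1).
	 */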
3927 	for (j = 0; j < vector_num; j++) {
3928 		for (i = 1; i < hdev->num_msi; i++) {
3929 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3930 				vector->vector = pci_irq_vector(hdev->pdev, i);
3931 				vector->io_addr = hdev->hw.io_base +
3932 					HCLGE_VECTOR_REG_BASE +
3933 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3934 					vport->vport_id *
3935 					HCLGE_VECTOR_VF_OFFSET;
3936 				hdev->vector_status[i] = vport->vport_id;
3937 				hdev->vector_irq[i] = vector->vector;
3938 
3939 				vector++;
3940 				alloc++;
3941 
3942 				break;
3943 			}
3944 		}
3945 	}
3946 	hdev->num_msi_left -= alloc;
3947 	hdev->num_msi_used += alloc;
3948 
3949 	return alloc;
3950 }
3951 
3952 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3953 {
3954 	int i;
3955 
3956 	for (i = 0; i < hdev->num_msi; i++)
3957 		if (vector == hdev->vector_irq[i])
3958 			return i;
3959 
3960 	return -EINVAL;
3961 }
3962 
3963 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3964 {
3965 	struct hclge_vport *vport = hclge_get_vport(handle);
3966 	struct hclge_dev *hdev = vport->back;
3967 	int vector_id;
3968 
3969 	vector_id = hclge_get_vector_index(hdev, vector);
3970 	if (vector_id < 0) {
3971 		dev_err(&hdev->pdev->dev,
3972 			"Get vector index fail. vector_id =%d\n", vector_id);
3973 		return vector_id;
3974 	}
3975 
3976 	hclge_free_vector(hdev, vector_id);
3977 
3978 	return 0;
3979 }
3980 
3981 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3982 {
3983 	return HCLGE_RSS_KEY_SIZE;
3984 }
3985 
3986 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3987 {
3988 	return HCLGE_RSS_IND_TBL_SIZE;
3989 }
3990 
3991 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3992 				  const u8 hfunc, const u8 *key)
3993 {
3994 	struct hclge_rss_config_cmd *req;
3995 	unsigned int key_offset = 0;
3996 	struct hclge_desc desc;
3997 	int key_counts;
3998 	int key_size;
3999 	int ret;
4000 
4001 	key_counts = HCLGE_RSS_KEY_SIZE;
4002 	req = (struct hclge_rss_config_cmd *)desc.data;
4003 
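	/* The hash key is longer than one descriptor can carry, so program it
	 * in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; key_offset in hash_config
	 * tells the hardware which chunk this command holds.
	 */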
4004 	while (key_counts) {
4005 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4006 					   false);
4007 
4008 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4009 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4010 
4011 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4012 		memcpy(req->hash_key,
4013 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4014 
4015 		key_counts -= key_size;
4016 		key_offset++;
4017 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4018 		if (ret) {
4019 			dev_err(&hdev->pdev->dev,
4020 				"Configure RSS config fail, status = %d\n",
4021 				ret);
4022 			return ret;
4023 		}
4024 	}
4025 	return 0;
4026 }
4027 
4028 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4029 {
4030 	struct hclge_rss_indirection_table_cmd *req;
4031 	struct hclge_desc desc;
4032 	int i, j;
4033 	int ret;
4034 
4035 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4036 
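	/* The indirection table is written HCLGE_RSS_CFG_TBL_SIZE entries per
	 * command, with start_table_index selecting the block being updated.
	 */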
4037 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4038 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4039 					   false);
4040 
4041 		req->start_table_index =
4042 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4043 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4044 
4045 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4046 			req->rss_result[j] =
4047 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4048 
4049 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4050 		if (ret) {
4051 			dev_err(&hdev->pdev->dev,
4052 				"Configure rss indir table fail,status = %d\n",
4053 				ret);
4054 			return ret;
4055 		}
4056 	}
4057 	return 0;
4058 }
4059 
4060 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4061 				 u16 *tc_size, u16 *tc_offset)
4062 {
4063 	struct hclge_rss_tc_mode_cmd *req;
4064 	struct hclge_desc desc;
4065 	int ret;
4066 	int i;
4067 
4068 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4069 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4070 
4071 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4072 		u16 mode = 0;
4073 
4074 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4075 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4076 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4077 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4078 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4079 
4080 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4081 	}
4082 
4083 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4084 	if (ret)
4085 		dev_err(&hdev->pdev->dev,
4086 			"Configure rss tc mode fail, status = %d\n", ret);
4087 
4088 	return ret;
4089 }
4090 
4091 static void hclge_get_rss_type(struct hclge_vport *vport)
4092 {
4093 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4094 	    vport->rss_tuple_sets.ipv4_udp_en ||
4095 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4096 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4097 	    vport->rss_tuple_sets.ipv6_udp_en ||
4098 	    vport->rss_tuple_sets.ipv6_sctp_en)
4099 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4100 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4101 		 vport->rss_tuple_sets.ipv6_fragment_en)
4102 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4103 	else
4104 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4105 }
4106 
4107 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4108 {
4109 	struct hclge_rss_input_tuple_cmd *req;
4110 	struct hclge_desc desc;
4111 	int ret;
4112 
4113 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4114 
4115 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4116 
4117 	/* Get the tuple cfg from the PF (vport 0) */
4118 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4119 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4120 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4121 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4122 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4123 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4124 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4125 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4126 	hclge_get_rss_type(&hdev->vport[0]);
4127 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4128 	if (ret)
4129 		dev_err(&hdev->pdev->dev,
4130 			"Configure rss input fail, status = %d\n", ret);
4131 	return ret;
4132 }
4133 
4134 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4135 			 u8 *key, u8 *hfunc)
4136 {
4137 	struct hclge_vport *vport = hclge_get_vport(handle);
4138 	int i;
4139 
4140 	/* Get hash algorithm */
4141 	if (hfunc) {
4142 		switch (vport->rss_algo) {
4143 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4144 			*hfunc = ETH_RSS_HASH_TOP;
4145 			break;
4146 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4147 			*hfunc = ETH_RSS_HASH_XOR;
4148 			break;
4149 		default:
4150 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4151 			break;
4152 		}
4153 	}
4154 
4155 	/* Get the RSS Key required by the user */
4156 	if (key)
4157 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4158 
4159 	/* Get indirect table */
4160 	if (indir)
4161 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4162 			indir[i] = vport->rss_indirection_tbl[i];
4163 
4164 	return 0;
4165 }
4166 
4167 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4168 			 const  u8 *key, const  u8 hfunc)
4169 {
4170 	struct hclge_vport *vport = hclge_get_vport(handle);
4171 	struct hclge_dev *hdev = vport->back;
4172 	u8 hash_algo;
4173 	int ret, i;
4174 
4175 	/* Set the RSS Hash Key if specified by the user */
4176 	if (key) {
4177 		switch (hfunc) {
4178 		case ETH_RSS_HASH_TOP:
4179 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4180 			break;
4181 		case ETH_RSS_HASH_XOR:
4182 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4183 			break;
4184 		case ETH_RSS_HASH_NO_CHANGE:
4185 			hash_algo = vport->rss_algo;
4186 			break;
4187 		default:
4188 			return -EINVAL;
4189 		}
4190 
4191 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4192 		if (ret)
4193 			return ret;
4194 
4195 		/* Update the shadow RSS key with the user specified key */
4196 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4197 		vport->rss_algo = hash_algo;
4198 	}
4199 
4200 	/* Update the shadow RSS table with user specified qids */
4201 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4202 		vport->rss_indirection_tbl[i] = indir[i];
4203 
4204 	/* Update the hardware */
4205 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4206 }
4207 
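/* Translate the ethtool RXH_* flags in nfc->data into the driver's
 * HCLGE_*_BIT tuple bits used for RSS hash configuration.
 */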
4208 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4209 {
4210 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4211 
4212 	if (nfc->data & RXH_L4_B_2_3)
4213 		hash_sets |= HCLGE_D_PORT_BIT;
4214 	else
4215 		hash_sets &= ~HCLGE_D_PORT_BIT;
4216 
4217 	if (nfc->data & RXH_IP_SRC)
4218 		hash_sets |= HCLGE_S_IP_BIT;
4219 	else
4220 		hash_sets &= ~HCLGE_S_IP_BIT;
4221 
4222 	if (nfc->data & RXH_IP_DST)
4223 		hash_sets |= HCLGE_D_IP_BIT;
4224 	else
4225 		hash_sets &= ~HCLGE_D_IP_BIT;
4226 
4227 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4228 		hash_sets |= HCLGE_V_TAG_BIT;
4229 
4230 	return hash_sets;
4231 }
4232 
4233 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4234 			       struct ethtool_rxnfc *nfc)
4235 {
4236 	struct hclge_vport *vport = hclge_get_vport(handle);
4237 	struct hclge_dev *hdev = vport->back;
4238 	struct hclge_rss_input_tuple_cmd *req;
4239 	struct hclge_desc desc;
4240 	u8 tuple_sets;
4241 	int ret;
4242 
4243 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4244 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4245 		return -EINVAL;
4246 
4247 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4248 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4249 
4250 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4251 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4252 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4253 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4254 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4255 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4256 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4257 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4258 
4259 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4260 	switch (nfc->flow_type) {
4261 	case TCP_V4_FLOW:
4262 		req->ipv4_tcp_en = tuple_sets;
4263 		break;
4264 	case TCP_V6_FLOW:
4265 		req->ipv6_tcp_en = tuple_sets;
4266 		break;
4267 	case UDP_V4_FLOW:
4268 		req->ipv4_udp_en = tuple_sets;
4269 		break;
4270 	case UDP_V6_FLOW:
4271 		req->ipv6_udp_en = tuple_sets;
4272 		break;
4273 	case SCTP_V4_FLOW:
4274 		req->ipv4_sctp_en = tuple_sets;
4275 		break;
4276 	case SCTP_V6_FLOW:
4277 		if ((nfc->data & RXH_L4_B_0_1) ||
4278 		    (nfc->data & RXH_L4_B_2_3))
4279 			return -EINVAL;
4280 
4281 		req->ipv6_sctp_en = tuple_sets;
4282 		break;
4283 	case IPV4_FLOW:
4284 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4285 		break;
4286 	case IPV6_FLOW:
4287 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4288 		break;
4289 	default:
4290 		return -EINVAL;
4291 	}
4292 
4293 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4294 	if (ret) {
4295 		dev_err(&hdev->pdev->dev,
4296 			"Set rss tuple fail, status = %d\n", ret);
4297 		return ret;
4298 	}
4299 
4300 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4301 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4302 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4303 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4304 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4305 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4306 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4307 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4308 	hclge_get_rss_type(vport);
4309 	return 0;
4310 }
4311 
4312 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4313 			       struct ethtool_rxnfc *nfc)
4314 {
4315 	struct hclge_vport *vport = hclge_get_vport(handle);
4316 	u8 tuple_sets;
4317 
4318 	nfc->data = 0;
4319 
4320 	switch (nfc->flow_type) {
4321 	case TCP_V4_FLOW:
4322 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4323 		break;
4324 	case UDP_V4_FLOW:
4325 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4326 		break;
4327 	case TCP_V6_FLOW:
4328 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4329 		break;
4330 	case UDP_V6_FLOW:
4331 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4332 		break;
4333 	case SCTP_V4_FLOW:
4334 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4335 		break;
4336 	case SCTP_V6_FLOW:
4337 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4338 		break;
4339 	case IPV4_FLOW:
4340 	case IPV6_FLOW:
4341 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4342 		break;
4343 	default:
4344 		return -EINVAL;
4345 	}
4346 
4347 	if (!tuple_sets)
4348 		return 0;
4349 
4350 	if (tuple_sets & HCLGE_D_PORT_BIT)
4351 		nfc->data |= RXH_L4_B_2_3;
4352 	if (tuple_sets & HCLGE_S_PORT_BIT)
4353 		nfc->data |= RXH_L4_B_0_1;
4354 	if (tuple_sets & HCLGE_D_IP_BIT)
4355 		nfc->data |= RXH_IP_DST;
4356 	if (tuple_sets & HCLGE_S_IP_BIT)
4357 		nfc->data |= RXH_IP_SRC;
4358 
4359 	return 0;
4360 }
4361 
4362 static int hclge_get_tc_size(struct hnae3_handle *handle)
4363 {
4364 	struct hclge_vport *vport = hclge_get_vport(handle);
4365 	struct hclge_dev *hdev = vport->back;
4366 
4367 	return hdev->rss_size_max;
4368 }
4369 
4370 int hclge_rss_init_hw(struct hclge_dev *hdev)
4371 {
4372 	struct hclge_vport *vport = hdev->vport;
4373 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4374 	u16 rss_size = vport[0].alloc_rss_size;
4375 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4376 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4377 	u8 *key = vport[0].rss_hash_key;
4378 	u8 hfunc = vport[0].rss_algo;
4379 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4380 	u16 roundup_size;
4381 	unsigned int i;
4382 	int ret;
4383 
4384 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4385 	if (ret)
4386 		return ret;
4387 
4388 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4389 	if (ret)
4390 		return ret;
4391 
4392 	ret = hclge_set_rss_input_tuple(hdev);
4393 	if (ret)
4394 		return ret;
4395 
4396 	/* Each TC has the same queue size, and the tc_size set to hardware is
4397 	 * the log2 of the roundup power of two of rss_size; the actual queue
4398 	 * size is limited by the indirection table.
4399 	 */
4400 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4401 		dev_err(&hdev->pdev->dev,
4402 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4403 			rss_size);
4404 		return -EINVAL;
4405 	}
4406 
4407 	roundup_size = roundup_pow_of_two(rss_size);
4408 	roundup_size = ilog2(roundup_size);
4409 
4410 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4411 		tc_valid[i] = 0;
4412 
4413 		if (!(hdev->hw_tc_map & BIT(i)))
4414 			continue;
4415 
4416 		tc_valid[i] = 1;
4417 		tc_size[i] = roundup_size;
4418 		tc_offset[i] = rss_size * i;
4419 	}
4420 
4421 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4422 }
4423 
4424 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4425 {
4426 	struct hclge_vport *vport = hdev->vport;
4427 	int i, j;
4428 
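	/* Default mapping: entry i of the indirection table points to queue
	 * (i % alloc_rss_size), spreading traffic evenly over the RSS queues.
	 */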
4429 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4430 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4431 			vport[j].rss_indirection_tbl[i] =
4432 				i % vport[j].alloc_rss_size;
4433 	}
4434 }
4435 
4436 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4437 {
4438 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4439 	struct hclge_vport *vport = hdev->vport;
4440 
4441 	if (hdev->pdev->revision >= 0x21)
4442 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4443 
4444 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4445 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4446 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4447 		vport[i].rss_tuple_sets.ipv4_udp_en =
4448 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4449 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4450 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4451 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4452 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4453 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4454 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4455 		vport[i].rss_tuple_sets.ipv6_udp_en =
4456 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4457 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4458 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4459 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4460 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4461 
4462 		vport[i].rss_algo = rss_algo;
4463 
4464 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4465 		       HCLGE_RSS_KEY_SIZE);
4466 	}
4467 
4468 	hclge_rss_indir_init_cfg(hdev);
4469 }
4470 
4471 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4472 				int vector_id, bool en,
4473 				struct hnae3_ring_chain_node *ring_chain)
4474 {
4475 	struct hclge_dev *hdev = vport->back;
4476 	struct hnae3_ring_chain_node *node;
4477 	struct hclge_desc desc;
4478 	struct hclge_ctrl_vector_chain_cmd *req =
4479 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4480 	enum hclge_cmd_status status;
4481 	enum hclge_opcode_type op;
4482 	u16 tqp_type_and_id;
4483 	int i;
4484 
4485 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4486 	hclge_cmd_setup_basic_desc(&desc, op, false);
4487 	req->int_vector_id = vector_id;
4488 
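	/* A command descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * ring entries, so send the descriptor and start a new one whenever
	 * it fills up.
	 */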
4489 	i = 0;
4490 	for (node = ring_chain; node; node = node->next) {
4491 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4492 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4493 				HCLGE_INT_TYPE_S,
4494 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4495 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4496 				HCLGE_TQP_ID_S, node->tqp_index);
4497 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4498 				HCLGE_INT_GL_IDX_S,
4499 				hnae3_get_field(node->int_gl_idx,
4500 						HNAE3_RING_GL_IDX_M,
4501 						HNAE3_RING_GL_IDX_S));
4502 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4503 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4504 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4505 			req->vfid = vport->vport_id;
4506 
4507 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4508 			if (status) {
4509 				dev_err(&hdev->pdev->dev,
4510 					"Map TQP fail, status is %d.\n",
4511 					status);
4512 				return -EIO;
4513 			}
4514 			i = 0;
4515 
4516 			hclge_cmd_setup_basic_desc(&desc,
4517 						   op,
4518 						   false);
4519 			req->int_vector_id = vector_id;
4520 		}
4521 	}
4522 
4523 	if (i > 0) {
4524 		req->int_cause_num = i;
4525 		req->vfid = vport->vport_id;
4526 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4527 		if (status) {
4528 			dev_err(&hdev->pdev->dev,
4529 				"Map TQP fail, status is %d.\n", status);
4530 			return -EIO;
4531 		}
4532 	}
4533 
4534 	return 0;
4535 }
4536 
4537 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4538 				    struct hnae3_ring_chain_node *ring_chain)
4539 {
4540 	struct hclge_vport *vport = hclge_get_vport(handle);
4541 	struct hclge_dev *hdev = vport->back;
4542 	int vector_id;
4543 
4544 	vector_id = hclge_get_vector_index(hdev, vector);
4545 	if (vector_id < 0) {
4546 		dev_err(&hdev->pdev->dev,
4547 			"Get vector index fail. vector_id =%d\n", vector_id);
4548 		return vector_id;
4549 	}
4550 
4551 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4552 }
4553 
4554 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4555 				       struct hnae3_ring_chain_node *ring_chain)
4556 {
4557 	struct hclge_vport *vport = hclge_get_vport(handle);
4558 	struct hclge_dev *hdev = vport->back;
4559 	int vector_id, ret;
4560 
4561 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4562 		return 0;
4563 
4564 	vector_id = hclge_get_vector_index(hdev, vector);
4565 	if (vector_id < 0) {
4566 		dev_err(&handle->pdev->dev,
4567 			"Get vector index fail. ret =%d\n", vector_id);
4568 		return vector_id;
4569 	}
4570 
4571 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4572 	if (ret)
4573 		dev_err(&handle->pdev->dev,
4574 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4575 			vector_id, ret);
4576 
4577 	return ret;
4578 }
4579 
4580 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4581 			       struct hclge_promisc_param *param)
4582 {
4583 	struct hclge_promisc_cfg_cmd *req;
4584 	struct hclge_desc desc;
4585 	int ret;
4586 
4587 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4588 
4589 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4590 	req->vf_id = param->vf_id;
4591 
4592 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4593 	 * pdev revision 0x20; newer revisions support them. Setting these two
4594 	 * fields does not cause the firmware to return an error when the
4595 	 * driver sends the command on revision 0x20.
4596 	 */
4597 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4598 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4599 
4600 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4601 	if (ret)
4602 		dev_err(&hdev->pdev->dev,
4603 			"Set promisc mode fail, status is %d.\n", ret);
4604 
4605 	return ret;
4606 }
4607 
4608 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4609 			      bool en_mc, bool en_bc, int vport_id)
4610 {
4611 	if (!param)
4612 		return;
4613 
4614 	memset(param, 0, sizeof(struct hclge_promisc_param));
4615 	if (en_uc)
4616 		param->enable = HCLGE_PROMISC_EN_UC;
4617 	if (en_mc)
4618 		param->enable |= HCLGE_PROMISC_EN_MC;
4619 	if (en_bc)
4620 		param->enable |= HCLGE_PROMISC_EN_BC;
4621 	param->vf_id = vport_id;
4622 }
4623 
4624 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4625 				  bool en_mc_pmc)
4626 {
4627 	struct hclge_vport *vport = hclge_get_vport(handle);
4628 	struct hclge_dev *hdev = vport->back;
4629 	struct hclge_promisc_param param;
4630 	bool en_bc_pmc = true;
4631 
4632 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4633 	 * is always bypassed. So broadcast promisc should be disabled until
4634 	 * the user enables promisc mode
4635 	 */
4636 	if (handle->pdev->revision == 0x20)
4637 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4638 
4639 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4640 				 vport->vport_id);
4641 	return hclge_cmd_set_promisc_mode(hdev, &param);
4642 }
4643 
4644 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4645 {
4646 	struct hclge_get_fd_mode_cmd *req;
4647 	struct hclge_desc desc;
4648 	int ret;
4649 
4650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4651 
4652 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4653 
4654 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4655 	if (ret) {
4656 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4657 		return ret;
4658 	}
4659 
4660 	*fd_mode = req->mode;
4661 
4662 	return ret;
4663 }
4664 
4665 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4666 				   u32 *stage1_entry_num,
4667 				   u32 *stage2_entry_num,
4668 				   u16 *stage1_counter_num,
4669 				   u16 *stage2_counter_num)
4670 {
4671 	struct hclge_get_fd_allocation_cmd *req;
4672 	struct hclge_desc desc;
4673 	int ret;
4674 
4675 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4676 
4677 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4678 
4679 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4680 	if (ret) {
4681 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4682 			ret);
4683 		return ret;
4684 	}
4685 
4686 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4687 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4688 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4689 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4690 
4691 	return ret;
4692 }
4693 
4694 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4695 {
4696 	struct hclge_set_fd_key_config_cmd *req;
4697 	struct hclge_fd_key_cfg *stage;
4698 	struct hclge_desc desc;
4699 	int ret;
4700 
4701 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4702 
4703 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4704 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4705 	req->stage = stage_num;
4706 	req->key_select = stage->key_sel;
4707 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4708 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4709 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4710 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4711 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4712 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4713 
4714 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4715 	if (ret)
4716 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4717 
4718 	return ret;
4719 }
4720 
4721 static int hclge_init_fd_config(struct hclge_dev *hdev)
4722 {
4723 #define LOW_2_WORDS		0x03
4724 	struct hclge_fd_key_cfg *key_cfg;
4725 	int ret;
4726 
4727 	if (!hnae3_dev_fd_supported(hdev))
4728 		return 0;
4729 
4730 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4731 	if (ret)
4732 		return ret;
4733 
4734 	switch (hdev->fd_cfg.fd_mode) {
4735 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4736 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4737 		break;
4738 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4739 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4740 		break;
4741 	default:
4742 		dev_err(&hdev->pdev->dev,
4743 			"Unsupported flow director mode %d\n",
4744 			hdev->fd_cfg.fd_mode);
4745 		return -EOPNOTSUPP;
4746 	}
4747 
4748 	hdev->fd_cfg.proto_support =
4749 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4750 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4751 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4752 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4753 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4754 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4755 	key_cfg->outer_sipv6_word_en = 0;
4756 	key_cfg->outer_dipv6_word_en = 0;
4757 
4758 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4759 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4760 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4761 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4762 
4763 	/* If the max 400-bit key is used, we can support tuples for ether type */
4764 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4765 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4766 		key_cfg->tuple_active |=
4767 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4768 	}
4769 
4770 	/* roce_type is used to filter RoCE frames
4771 	 * dst_vport is used to specify which vport the rule applies to
4772 	 */
4773 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4774 
4775 	ret = hclge_get_fd_allocation(hdev,
4776 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4777 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4778 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4779 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4780 	if (ret)
4781 		return ret;
4782 
4783 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4784 }
4785 
4786 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4787 				int loc, u8 *key, bool is_add)
4788 {
4789 	struct hclge_fd_tcam_config_1_cmd *req1;
4790 	struct hclge_fd_tcam_config_2_cmd *req2;
4791 	struct hclge_fd_tcam_config_3_cmd *req3;
4792 	struct hclge_desc desc[3];
4793 	int ret;
4794 
4795 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4796 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4797 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4798 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4799 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4800 
4801 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4802 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4803 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4804 
4805 	req1->stage = stage;
4806 	req1->xy_sel = sel_x ? 1 : 0;
4807 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4808 	req1->index = cpu_to_le32(loc);
4809 	req1->entry_vld = sel_x ? is_add : 0;
4810 
4811 	if (key) {
4812 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4813 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4814 		       sizeof(req2->tcam_data));
4815 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4816 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4817 	}
4818 
4819 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4820 	if (ret)
4821 		dev_err(&hdev->pdev->dev,
4822 			"config tcam key fail, ret=%d\n",
4823 			ret);
4824 
4825 	return ret;
4826 }
4827 
4828 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4829 			      struct hclge_fd_ad_data *action)
4830 {
4831 	struct hclge_fd_ad_config_cmd *req;
4832 	struct hclge_desc desc;
4833 	u64 ad_data = 0;
4834 	int ret;
4835 
4836 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4837 
4838 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4839 	req->index = cpu_to_le32(loc);
4840 	req->stage = stage;
4841 
4842 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4843 		      action->write_rule_id_to_bd);
4844 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4845 			action->rule_id);
4846 	ad_data <<= 32;
4847 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4848 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4849 		      action->forward_to_direct_queue);
4850 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4851 			action->queue_id);
4852 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4853 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4854 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4855 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4856 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4857 			action->counter_id);
4858 
4859 	req->ad_data = cpu_to_le64(ad_data);
4860 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4861 	if (ret)
4862 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4863 
4864 	return ret;
4865 }
4866 
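/* Fill in the TCAM key_x/key_y bytes for one tuple of the rule, using the
 * tuple value and its mask via calc_x()/calc_y(). Returns true if the tuple
 * occupies space in the key (the bytes stay zero when the tuple is unused),
 * false if the tuple is not part of the key at all.
 */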
4867 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4868 				   struct hclge_fd_rule *rule)
4869 {
4870 	u16 tmp_x_s, tmp_y_s;
4871 	u32 tmp_x_l, tmp_y_l;
4872 	int i;
4873 
4874 	if (rule->unused_tuple & tuple_bit)
4875 		return true;
4876 
4877 	switch (tuple_bit) {
4878 	case 0:
4879 		return false;
4880 	case BIT(INNER_DST_MAC):
4881 		for (i = 0; i < ETH_ALEN; i++) {
4882 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4883 			       rule->tuples_mask.dst_mac[i]);
4884 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4885 			       rule->tuples_mask.dst_mac[i]);
4886 		}
4887 
4888 		return true;
4889 	case BIT(INNER_SRC_MAC):
4890 		for (i = 0; i < ETH_ALEN; i++) {
4891 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4892 			       rule->tuples_mask.src_mac[i]);
4893 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4894 			       rule->tuples_mask.src_mac[i]);
4895 		}
4896 
4897 		return true;
4898 	case BIT(INNER_VLAN_TAG_FST):
4899 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4900 		       rule->tuples_mask.vlan_tag1);
4901 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4902 		       rule->tuples_mask.vlan_tag1);
4903 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4904 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4905 
4906 		return true;
4907 	case BIT(INNER_ETH_TYPE):
4908 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4909 		       rule->tuples_mask.ether_proto);
4910 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4911 		       rule->tuples_mask.ether_proto);
4912 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4913 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4914 
4915 		return true;
4916 	case BIT(INNER_IP_TOS):
4917 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4918 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4919 
4920 		return true;
4921 	case BIT(INNER_IP_PROTO):
4922 		calc_x(*key_x, rule->tuples.ip_proto,
4923 		       rule->tuples_mask.ip_proto);
4924 		calc_y(*key_y, rule->tuples.ip_proto,
4925 		       rule->tuples_mask.ip_proto);
4926 
4927 		return true;
4928 	case BIT(INNER_SRC_IP):
4929 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4930 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4931 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4932 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4933 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4934 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4935 
4936 		return true;
4937 	case BIT(INNER_DST_IP):
4938 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4939 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4940 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4941 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4942 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4943 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4944 
4945 		return true;
4946 	case BIT(INNER_SRC_PORT):
4947 		calc_x(tmp_x_s, rule->tuples.src_port,
4948 		       rule->tuples_mask.src_port);
4949 		calc_y(tmp_y_s, rule->tuples.src_port,
4950 		       rule->tuples_mask.src_port);
4951 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4952 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4953 
4954 		return true;
4955 	case BIT(INNER_DST_PORT):
4956 		calc_x(tmp_x_s, rule->tuples.dst_port,
4957 		       rule->tuples_mask.dst_port);
4958 		calc_y(tmp_y_s, rule->tuples.dst_port,
4959 		       rule->tuples_mask.dst_port);
4960 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4961 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4962 
4963 		return true;
4964 	default:
4965 		return false;
4966 	}
4967 }
4968 
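/* Build the port number used in the flow director meta data: a host port
 * encodes the pf_id and vf_id, a network port encodes the network_port_id.
 */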
4969 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4970 				 u8 vf_id, u8 network_port_id)
4971 {
4972 	u32 port_number = 0;
4973 
4974 	if (port_type == HOST_PORT) {
4975 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4976 				pf_id);
4977 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4978 				vf_id);
4979 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4980 	} else {
4981 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4982 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4983 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4984 	}
4985 
4986 	return port_number;
4987 }
4988 
4989 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4990 				       __le32 *key_x, __le32 *key_y,
4991 				       struct hclge_fd_rule *rule)
4992 {
4993 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4994 	u8 cur_pos = 0, tuple_size, shift_bits;
4995 	unsigned int i;
4996 
4997 	for (i = 0; i < MAX_META_DATA; i++) {
4998 		tuple_size = meta_data_key_info[i].key_length;
4999 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5000 
5001 		switch (tuple_bit) {
5002 		case BIT(ROCE_TYPE):
5003 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5004 			cur_pos += tuple_size;
5005 			break;
5006 		case BIT(DST_VPORT):
5007 			port_number = hclge_get_port_number(HOST_PORT, 0,
5008 							    rule->vf_id, 0);
5009 			hnae3_set_field(meta_data,
5010 					GENMASK(cur_pos + tuple_size, cur_pos),
5011 					cur_pos, port_number);
5012 			cur_pos += tuple_size;
5013 			break;
5014 		default:
5015 			break;
5016 		}
5017 	}
5018 
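	/* The meta data is packed from bit 0 upwards but lives in the MSB
	 * region of the key, so shift it left by the remaining unused bits.
	 */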
5019 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5020 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5021 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5022 
5023 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5024 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5025 }
5026 
5027 /* A complete key consists of a meta data key and a tuple key.
5028  * The meta data key is stored in the MSB region, the tuple key is stored in
5029  * the LSB region, and unused bits are filled with 0.
5030  */
5031 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5032 			    struct hclge_fd_rule *rule)
5033 {
5034 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5035 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5036 	u8 *cur_key_x, *cur_key_y;
5037 	unsigned int i;
5038 	int ret, tuple_size;
5039 	u8 meta_data_region;
5040 
5041 	memset(key_x, 0, sizeof(key_x));
5042 	memset(key_y, 0, sizeof(key_y));
5043 	cur_key_x = key_x;
5044 	cur_key_y = key_y;
5045 
5046 	for (i = 0; i < MAX_TUPLE; i++) {
5047 		bool tuple_valid;
5048 		u32 check_tuple;
5049 
5050 		tuple_size = tuple_key_info[i].key_length / 8;
5051 		check_tuple = key_cfg->tuple_active & BIT(i);
5052 
5053 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5054 						     cur_key_y, rule);
5055 		if (tuple_valid) {
5056 			cur_key_x += tuple_size;
5057 			cur_key_y += tuple_size;
5058 		}
5059 	}
5060 
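	/* The meta data key occupies the last MAX_META_DATA_LENGTH bits of
	 * the key, so its byte offset is max_key_length / 8 -
	 * MAX_META_DATA_LENGTH / 8.
	 */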
5061 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5062 			MAX_META_DATA_LENGTH / 8;
5063 
5064 	hclge_fd_convert_meta_data(key_cfg,
5065 				   (__le32 *)(key_x + meta_data_region),
5066 				   (__le32 *)(key_y + meta_data_region),
5067 				   rule);
5068 
5069 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5070 				   true);
5071 	if (ret) {
5072 		dev_err(&hdev->pdev->dev,
5073 			"fd key_y config fail, loc=%d, ret=%d\n",
5074 			rule->location, ret);
5075 		return ret;
5076 	}
5077 
5078 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5079 				   true);
5080 	if (ret)
5081 		dev_err(&hdev->pdev->dev,
5082 			"fd key_x config fail, loc=%d, ret=%d\n",
5083 			rule->location, ret);
5084 	return ret;
5085 }
5086 
5087 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5088 			       struct hclge_fd_rule *rule)
5089 {
5090 	struct hclge_fd_ad_data ad_data;
5091 
5092 	ad_data.ad_id = rule->location;
5093 
5094 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5095 		ad_data.drop_packet = true;
5096 		ad_data.forward_to_direct_queue = false;
5097 		ad_data.queue_id = 0;
5098 	} else {
5099 		ad_data.drop_packet = false;
5100 		ad_data.forward_to_direct_queue = true;
5101 		ad_data.queue_id = rule->queue_id;
5102 	}
5103 
5104 	ad_data.use_counter = false;
5105 	ad_data.counter_id = 0;
5106 
5107 	ad_data.use_next_stage = false;
5108 	ad_data.next_input_key = 0;
5109 
5110 	ad_data.write_rule_id_to_bd = true;
5111 	ad_data.rule_id = rule->location;
5112 
5113 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5114 }
5115 
5116 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5117 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5118 {
5119 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5120 	struct ethtool_usrip4_spec *usr_ip4_spec;
5121 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5122 	struct ethtool_usrip6_spec *usr_ip6_spec;
5123 	struct ethhdr *ether_spec;
5124 
5125 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5126 		return -EINVAL;
5127 
5128 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5129 		return -EOPNOTSUPP;
5130 
5131 	if ((fs->flow_type & FLOW_EXT) &&
5132 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5133 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5134 		return -EOPNOTSUPP;
5135 	}
5136 
5137 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5138 	case SCTP_V4_FLOW:
5139 	case TCP_V4_FLOW:
5140 	case UDP_V4_FLOW:
5141 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5142 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5143 
5144 		if (!tcp_ip4_spec->ip4src)
5145 			*unused |= BIT(INNER_SRC_IP);
5146 
5147 		if (!tcp_ip4_spec->ip4dst)
5148 			*unused |= BIT(INNER_DST_IP);
5149 
5150 		if (!tcp_ip4_spec->psrc)
5151 			*unused |= BIT(INNER_SRC_PORT);
5152 
5153 		if (!tcp_ip4_spec->pdst)
5154 			*unused |= BIT(INNER_DST_PORT);
5155 
5156 		if (!tcp_ip4_spec->tos)
5157 			*unused |= BIT(INNER_IP_TOS);
5158 
5159 		break;
5160 	case IP_USER_FLOW:
5161 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5162 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5163 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5164 
5165 		if (!usr_ip4_spec->ip4src)
5166 			*unused |= BIT(INNER_SRC_IP);
5167 
5168 		if (!usr_ip4_spec->ip4dst)
5169 			*unused |= BIT(INNER_DST_IP);
5170 
5171 		if (!usr_ip4_spec->tos)
5172 			*unused |= BIT(INNER_IP_TOS);
5173 
5174 		if (!usr_ip4_spec->proto)
5175 			*unused |= BIT(INNER_IP_PROTO);
5176 
5177 		if (usr_ip4_spec->l4_4_bytes)
5178 			return -EOPNOTSUPP;
5179 
5180 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5181 			return -EOPNOTSUPP;
5182 
5183 		break;
5184 	case SCTP_V6_FLOW:
5185 	case TCP_V6_FLOW:
5186 	case UDP_V6_FLOW:
5187 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5188 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5189 			BIT(INNER_IP_TOS);
5190 
5191 		/* check whether the src/dst ip address is used */
5192 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5193 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5194 			*unused |= BIT(INNER_SRC_IP);
5195 
5196 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5197 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5198 			*unused |= BIT(INNER_DST_IP);
5199 
5200 		if (!tcp_ip6_spec->psrc)
5201 			*unused |= BIT(INNER_SRC_PORT);
5202 
5203 		if (!tcp_ip6_spec->pdst)
5204 			*unused |= BIT(INNER_DST_PORT);
5205 
5206 		if (tcp_ip6_spec->tclass)
5207 			return -EOPNOTSUPP;
5208 
5209 		break;
5210 	case IPV6_USER_FLOW:
5211 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5212 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5213 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5214 			BIT(INNER_DST_PORT);
5215 
5216 		/* check whether the src/dst ip address is used */
5217 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5218 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5219 			*unused |= BIT(INNER_SRC_IP);
5220 
5221 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5222 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5223 			*unused |= BIT(INNER_DST_IP);
5224 
5225 		if (!usr_ip6_spec->l4_proto)
5226 			*unused |= BIT(INNER_IP_PROTO);
5227 
5228 		if (usr_ip6_spec->tclass)
5229 			return -EOPNOTSUPP;
5230 
5231 		if (usr_ip6_spec->l4_4_bytes)
5232 			return -EOPNOTSUPP;
5233 
5234 		break;
5235 	case ETHER_FLOW:
5236 		ether_spec = &fs->h_u.ether_spec;
5237 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5238 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5239 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5240 
5241 		if (is_zero_ether_addr(ether_spec->h_source))
5242 			*unused |= BIT(INNER_SRC_MAC);
5243 
5244 		if (is_zero_ether_addr(ether_spec->h_dest))
5245 			*unused |= BIT(INNER_DST_MAC);
5246 
5247 		if (!ether_spec->h_proto)
5248 			*unused |= BIT(INNER_ETH_TYPE);
5249 
5250 		break;
5251 	default:
5252 		return -EOPNOTSUPP;
5253 	}
5254 
5255 	if ((fs->flow_type & FLOW_EXT)) {
5256 		if (fs->h_ext.vlan_etype)
5257 			return -EOPNOTSUPP;
5258 		if (!fs->h_ext.vlan_tci)
5259 			*unused |= BIT(INNER_VLAN_TAG_FST);
5260 
5261 		if (fs->m_ext.vlan_tci) {
5262 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5263 				return -EINVAL;
5264 		}
5265 	} else {
5266 		*unused |= BIT(INNER_VLAN_TAG_FST);
5267 	}
5268 
5269 	if (fs->flow_type & FLOW_MAC_EXT) {
5270 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5271 			return -EOPNOTSUPP;
5272 
5273 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5274 			*unused |= BIT(INNER_DST_MAC);
5275 		else
5276 			*unused &= ~(BIT(INNER_DST_MAC));
5277 	}
5278 
5279 	return 0;
5280 }
5281 
5282 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5283 {
5284 	struct hclge_fd_rule *rule = NULL;
5285 	struct hlist_node *node2;
5286 
5287 	spin_lock_bh(&hdev->fd_rule_lock);
5288 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5289 		if (rule->location >= location)
5290 			break;
5291 	}
5292 
5293 	spin_unlock_bh(&hdev->fd_rule_lock);
5294 
5295 	return rule && rule->location == location;
5296 }
5297 
5298 /* the caller must hold fd_rule_lock */
5299 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5300 				     struct hclge_fd_rule *new_rule,
5301 				     u16 location,
5302 				     bool is_add)
5303 {
5304 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5305 	struct hlist_node *node2;
5306 
5307 	if (is_add && !new_rule)
5308 		return -EINVAL;
5309 
5310 	hlist_for_each_entry_safe(rule, node2,
5311 				  &hdev->fd_rule_list, rule_node) {
5312 		if (rule->location >= location)
5313 			break;
5314 		parent = rule;
5315 	}
5316 
5317 	if (rule && rule->location == location) {
5318 		hlist_del(&rule->rule_node);
5319 		kfree(rule);
5320 		hdev->hclge_fd_rule_num--;
5321 
5322 		if (!is_add) {
5323 			if (!hdev->hclge_fd_rule_num)
5324 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5325 			clear_bit(location, hdev->fd_bmap);
5326 
5327 			return 0;
5328 		}
5329 	} else if (!is_add) {
5330 		dev_err(&hdev->pdev->dev,
5331 			"delete fail, rule %d does not exist\n",
5332 			location);
5333 		return -EINVAL;
5334 	}
5335 
5336 	INIT_HLIST_NODE(&new_rule->rule_node);
5337 
5338 	if (parent)
5339 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5340 	else
5341 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5342 
5343 	set_bit(location, hdev->fd_bmap);
5344 	hdev->hclge_fd_rule_num++;
5345 	hdev->fd_active_type = new_rule->rule_type;
5346 
5347 	return 0;
5348 }
5349 
5350 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5351 			      struct ethtool_rx_flow_spec *fs,
5352 			      struct hclge_fd_rule *rule)
5353 {
5354 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5355 
5356 	switch (flow_type) {
5357 	case SCTP_V4_FLOW:
5358 	case TCP_V4_FLOW:
5359 	case UDP_V4_FLOW:
5360 		rule->tuples.src_ip[IPV4_INDEX] =
5361 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5362 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5363 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5364 
5365 		rule->tuples.dst_ip[IPV4_INDEX] =
5366 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5367 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5368 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5369 
5370 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5371 		rule->tuples_mask.src_port =
5372 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5373 
5374 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5375 		rule->tuples_mask.dst_port =
5376 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5377 
5378 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5379 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5380 
5381 		rule->tuples.ether_proto = ETH_P_IP;
5382 		rule->tuples_mask.ether_proto = 0xFFFF;
5383 
5384 		break;
5385 	case IP_USER_FLOW:
5386 		rule->tuples.src_ip[IPV4_INDEX] =
5387 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5388 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5389 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5390 
5391 		rule->tuples.dst_ip[IPV4_INDEX] =
5392 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5393 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5394 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5395 
5396 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5397 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5398 
5399 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5400 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5401 
5402 		rule->tuples.ether_proto = ETH_P_IP;
5403 		rule->tuples_mask.ether_proto = 0xFFFF;
5404 
5405 		break;
5406 	case SCTP_V6_FLOW:
5407 	case TCP_V6_FLOW:
5408 	case UDP_V6_FLOW:
5409 		be32_to_cpu_array(rule->tuples.src_ip,
5410 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5411 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5412 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5413 
5414 		be32_to_cpu_array(rule->tuples.dst_ip,
5415 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5416 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5417 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5418 
5419 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5420 		rule->tuples_mask.src_port =
5421 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5422 
5423 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5424 		rule->tuples_mask.dst_port =
5425 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5426 
5427 		rule->tuples.ether_proto = ETH_P_IPV6;
5428 		rule->tuples_mask.ether_proto = 0xFFFF;
5429 
5430 		break;
5431 	case IPV6_USER_FLOW:
5432 		be32_to_cpu_array(rule->tuples.src_ip,
5433 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5434 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5435 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5436 
5437 		be32_to_cpu_array(rule->tuples.dst_ip,
5438 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5439 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5440 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5441 
5442 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5443 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5444 
5445 		rule->tuples.ether_proto = ETH_P_IPV6;
5446 		rule->tuples_mask.ether_proto = 0xFFFF;
5447 
5448 		break;
5449 	case ETHER_FLOW:
5450 		ether_addr_copy(rule->tuples.src_mac,
5451 				fs->h_u.ether_spec.h_source);
5452 		ether_addr_copy(rule->tuples_mask.src_mac,
5453 				fs->m_u.ether_spec.h_source);
5454 
5455 		ether_addr_copy(rule->tuples.dst_mac,
5456 				fs->h_u.ether_spec.h_dest);
5457 		ether_addr_copy(rule->tuples_mask.dst_mac,
5458 				fs->m_u.ether_spec.h_dest);
5459 
5460 		rule->tuples.ether_proto =
5461 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5462 		rule->tuples_mask.ether_proto =
5463 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5464 
5465 		break;
5466 	default:
5467 		return -EOPNOTSUPP;
5468 	}
5469 
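	/* for SCTP/TCP/UDP flow types the L4 protocol is fixed, so fill in
	 * the ip_proto tuple here rather than taking it from the user spec
	 */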
5470 	switch (flow_type) {
5471 	case SCTP_V4_FLOW:
5472 	case SCTP_V6_FLOW:
5473 		rule->tuples.ip_proto = IPPROTO_SCTP;
5474 		rule->tuples_mask.ip_proto = 0xFF;
5475 		break;
5476 	case TCP_V4_FLOW:
5477 	case TCP_V6_FLOW:
5478 		rule->tuples.ip_proto = IPPROTO_TCP;
5479 		rule->tuples_mask.ip_proto = 0xFF;
5480 		break;
5481 	case UDP_V4_FLOW:
5482 	case UDP_V6_FLOW:
5483 		rule->tuples.ip_proto = IPPROTO_UDP;
5484 		rule->tuples_mask.ip_proto = 0xFF;
5485 		break;
5486 	default:
5487 		break;
5488 	}
5489 
5490 	if ((fs->flow_type & FLOW_EXT)) {
5491 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5492 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5493 	}
5494 
5495 	if (fs->flow_type & FLOW_MAC_EXT) {
5496 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5497 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5498 	}
5499 
5500 	return 0;
5501 }
5502 
5503 /* make sure to be called with the fd_rule_lock held */
5504 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5505 				struct hclge_fd_rule *rule)
5506 {
5507 	int ret;
5508 
5509 	if (!rule) {
5510 		dev_err(&hdev->pdev->dev,
5511 			"The flow director rule is NULL\n");
5512 		return -EINVAL;
5513 	}
5514 
5515 	/* it never fails here, so the return value need not be checked */
5516 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5517 
5518 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5519 	if (ret)
5520 		goto clear_rule;
5521 
5522 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5523 	if (ret)
5524 		goto clear_rule;
5525 
5526 	return 0;
5527 
5528 clear_rule:
5529 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5530 	return ret;
5531 }
5532 
5533 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5534 			      struct ethtool_rxnfc *cmd)
5535 {
5536 	struct hclge_vport *vport = hclge_get_vport(handle);
5537 	struct hclge_dev *hdev = vport->back;
5538 	u16 dst_vport_id = 0, q_index = 0;
5539 	struct ethtool_rx_flow_spec *fs;
5540 	struct hclge_fd_rule *rule;
5541 	u32 unused = 0;
5542 	u8 action;
5543 	int ret;
5544 
5545 	if (!hnae3_dev_fd_supported(hdev))
5546 		return -EOPNOTSUPP;
5547 
5548 	if (!hdev->fd_en) {
5549 		dev_warn(&hdev->pdev->dev,
5550 			 "Please enable flow director first\n");
5551 		return -EOPNOTSUPP;
5552 	}
5553 
5554 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5555 
5556 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5557 	if (ret) {
5558 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5559 		return ret;
5560 	}
5561 
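	/* RX_CLS_FLOW_DISC means drop the packet; otherwise the ring cookie
	 * encodes the target VF id and queue index
	 */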
5562 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5563 		action = HCLGE_FD_ACTION_DROP_PACKET;
5564 	} else {
5565 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5566 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5567 		u16 tqps;
5568 
5569 		if (vf > hdev->num_req_vfs) {
5570 			dev_err(&hdev->pdev->dev,
5571 				"Error: vf id (%d) > max vf num (%d)\n",
5572 				vf, hdev->num_req_vfs);
5573 			return -EINVAL;
5574 		}
5575 
5576 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5577 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5578 
5579 		if (ring >= tqps) {
5580 			dev_err(&hdev->pdev->dev,
5581 				"Error: queue id (%d) > max tqp num (%d)\n",
5582 				ring, tqps - 1);
5583 			return -EINVAL;
5584 		}
5585 
5586 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5587 		q_index = ring;
5588 	}
5589 
5590 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5591 	if (!rule)
5592 		return -ENOMEM;
5593 
5594 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5595 	if (ret) {
5596 		kfree(rule);
5597 		return ret;
5598 	}
5599 
5600 	rule->flow_type = fs->flow_type;
5601 
5602 	rule->location = fs->location;
5603 	rule->unused_tuple = unused;
5604 	rule->vf_id = dst_vport_id;
5605 	rule->queue_id = q_index;
5606 	rule->action = action;
5607 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5608 
5609 	/* to avoid rule conflict, when the user configures rules by ethtool,
5610 	 * we need to clear all arfs rules
5611 	 */
5612 	hclge_clear_arfs_rules(handle);
5613 
5614 	spin_lock_bh(&hdev->fd_rule_lock);
5615 	ret = hclge_fd_config_rule(hdev, rule);
5616 
5617 	spin_unlock_bh(&hdev->fd_rule_lock);
5618 
5619 	return ret;
5620 }
5621 
5622 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5623 			      struct ethtool_rxnfc *cmd)
5624 {
5625 	struct hclge_vport *vport = hclge_get_vport(handle);
5626 	struct hclge_dev *hdev = vport->back;
5627 	struct ethtool_rx_flow_spec *fs;
5628 	int ret;
5629 
5630 	if (!hnae3_dev_fd_supported(hdev))
5631 		return -EOPNOTSUPP;
5632 
5633 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5634 
5635 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5636 		return -EINVAL;
5637 
5638 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5639 		dev_err(&hdev->pdev->dev,
5640 			"Delete fail, rule %d does not exist\n", fs->location);
5641 		return -ENOENT;
5642 	}
5643 
5644 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5645 				   NULL, false);
5646 	if (ret)
5647 		return ret;
5648 
5649 	spin_lock_bh(&hdev->fd_rule_lock);
5650 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5651 
5652 	spin_unlock_bh(&hdev->fd_rule_lock);
5653 
5654 	return ret;
5655 }
5656 
5657 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5658 				     bool clear_list)
5659 {
5660 	struct hclge_vport *vport = hclge_get_vport(handle);
5661 	struct hclge_dev *hdev = vport->back;
5662 	struct hclge_fd_rule *rule;
5663 	struct hlist_node *node;
5664 	u16 location;
5665 
5666 	if (!hnae3_dev_fd_supported(hdev))
5667 		return;
5668 
5669 	spin_lock_bh(&hdev->fd_rule_lock);
5670 	for_each_set_bit(location, hdev->fd_bmap,
5671 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5672 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5673 				     NULL, false);
5674 
5675 	if (clear_list) {
5676 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5677 					  rule_node) {
5678 			hlist_del(&rule->rule_node);
5679 			kfree(rule);
5680 		}
5681 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5682 		hdev->hclge_fd_rule_num = 0;
5683 		bitmap_zero(hdev->fd_bmap,
5684 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5685 	}
5686 
5687 	spin_unlock_bh(&hdev->fd_rule_lock);
5688 }
5689 
5690 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5691 {
5692 	struct hclge_vport *vport = hclge_get_vport(handle);
5693 	struct hclge_dev *hdev = vport->back;
5694 	struct hclge_fd_rule *rule;
5695 	struct hlist_node *node;
5696 	int ret;
5697 
5698 	/* Return ok here, because reset error handling will check this
5699 	 * return value. If error is returned here, the reset process will
5700 	 * fail.
5701 	 */
5702 	if (!hnae3_dev_fd_supported(hdev))
5703 		return 0;
5704 
5705 	/* if fd is disabled, the rules should not be restored on reset */
5706 	if (!hdev->fd_en)
5707 		return 0;
5708 
5709 	spin_lock_bh(&hdev->fd_rule_lock);
5710 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5711 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5712 		if (!ret)
5713 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5714 
5715 		if (ret) {
5716 			dev_warn(&hdev->pdev->dev,
5717 				 "Restore rule %d failed, remove it\n",
5718 				 rule->location);
5719 			clear_bit(rule->location, hdev->fd_bmap);
5720 			hlist_del(&rule->rule_node);
5721 			kfree(rule);
5722 			hdev->hclge_fd_rule_num--;
5723 		}
5724 	}
5725 
5726 	if (hdev->hclge_fd_rule_num)
5727 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5728 
5729 	spin_unlock_bh(&hdev->fd_rule_lock);
5730 
5731 	return 0;
5732 }
5733 
5734 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5735 				 struct ethtool_rxnfc *cmd)
5736 {
5737 	struct hclge_vport *vport = hclge_get_vport(handle);
5738 	struct hclge_dev *hdev = vport->back;
5739 
5740 	if (!hnae3_dev_fd_supported(hdev))
5741 		return -EOPNOTSUPP;
5742 
5743 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5744 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5745 
5746 	return 0;
5747 }
5748 
5749 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5750 				  struct ethtool_rxnfc *cmd)
5751 {
5752 	struct hclge_vport *vport = hclge_get_vport(handle);
5753 	struct hclge_fd_rule *rule = NULL;
5754 	struct hclge_dev *hdev = vport->back;
5755 	struct ethtool_rx_flow_spec *fs;
5756 	struct hlist_node *node2;
5757 
5758 	if (!hnae3_dev_fd_supported(hdev))
5759 		return -EOPNOTSUPP;
5760 
5761 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5762 
5763 	spin_lock_bh(&hdev->fd_rule_lock);
5764 
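	/* the rule list is sorted by location, so stop at the first rule
	 * whose location is not smaller than the requested one
	 */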
5765 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5766 		if (rule->location >= fs->location)
5767 			break;
5768 	}
5769 
5770 	if (!rule || fs->location != rule->location) {
5771 		spin_unlock_bh(&hdev->fd_rule_lock);
5772 
5773 		return -ENOENT;
5774 	}
5775 
5776 	fs->flow_type = rule->flow_type;
5777 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5778 	case SCTP_V4_FLOW:
5779 	case TCP_V4_FLOW:
5780 	case UDP_V4_FLOW:
5781 		fs->h_u.tcp_ip4_spec.ip4src =
5782 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5783 		fs->m_u.tcp_ip4_spec.ip4src =
5784 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5785 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5786 
5787 		fs->h_u.tcp_ip4_spec.ip4dst =
5788 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5789 		fs->m_u.tcp_ip4_spec.ip4dst =
5790 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5791 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5792 
5793 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5794 		fs->m_u.tcp_ip4_spec.psrc =
5795 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5796 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5797 
5798 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5799 		fs->m_u.tcp_ip4_spec.pdst =
5800 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5801 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5802 
5803 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5804 		fs->m_u.tcp_ip4_spec.tos =
5805 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5806 				0 : rule->tuples_mask.ip_tos;
5807 
5808 		break;
5809 	case IP_USER_FLOW:
5810 		fs->h_u.usr_ip4_spec.ip4src =
5811 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5812 		fs->m_u.usr_ip4_spec.ip4src =
5813 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5814 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5815 
5816 		fs->h_u.usr_ip4_spec.ip4dst =
5817 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5818 		fs->m_u.usr_ip4_spec.ip4dst =
5819 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5820 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5821 
5822 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5823 		fs->m_u.usr_ip4_spec.tos =
5824 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5825 				0 : rule->tuples_mask.ip_tos;
5826 
5827 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5828 		fs->m_u.usr_ip4_spec.proto =
5829 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5830 				0 : rule->tuples_mask.ip_proto;
5831 
5832 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5833 
5834 		break;
5835 	case SCTP_V6_FLOW:
5836 	case TCP_V6_FLOW:
5837 	case UDP_V6_FLOW:
5838 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5839 				  rule->tuples.src_ip, IPV6_SIZE);
5840 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5841 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5842 			       sizeof(int) * IPV6_SIZE);
5843 		else
5844 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5845 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5846 
5847 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5848 				  rule->tuples.dst_ip, IPV6_SIZE);
5849 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5850 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5851 			       sizeof(int) * IPV6_SIZE);
5852 		else
5853 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5854 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5855 
5856 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5857 		fs->m_u.tcp_ip6_spec.psrc =
5858 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5859 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5860 
5861 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5862 		fs->m_u.tcp_ip6_spec.pdst =
5863 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5864 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5865 
5866 		break;
5867 	case IPV6_USER_FLOW:
5868 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5869 				  rule->tuples.src_ip, IPV6_SIZE);
5870 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5871 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5872 			       sizeof(int) * IPV6_SIZE);
5873 		else
5874 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5875 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5876 
5877 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5878 				  rule->tuples.dst_ip, IPV6_SIZE);
5879 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5880 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5881 			       sizeof(int) * IPV6_SIZE);
5882 		else
5883 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5884 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5885 
5886 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5887 		fs->m_u.usr_ip6_spec.l4_proto =
5888 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5889 				0 : rule->tuples_mask.ip_proto;
5890 
5891 		break;
5892 	case ETHER_FLOW:
5893 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5894 				rule->tuples.src_mac);
5895 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5896 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5897 		else
5898 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5899 					rule->tuples_mask.src_mac);
5900 
5901 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5902 				rule->tuples.dst_mac);
5903 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5904 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5905 		else
5906 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5907 					rule->tuples_mask.dst_mac);
5908 
5909 		fs->h_u.ether_spec.h_proto =
5910 				cpu_to_be16(rule->tuples.ether_proto);
5911 		fs->m_u.ether_spec.h_proto =
5912 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5913 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5914 
5915 		break;
5916 	default:
5917 		spin_unlock_bh(&hdev->fd_rule_lock);
5918 		return -EOPNOTSUPP;
5919 	}
5920 
5921 	if (fs->flow_type & FLOW_EXT) {
5922 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5923 		fs->m_ext.vlan_tci =
5924 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5925 				cpu_to_be16(VLAN_VID_MASK) :
5926 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5927 	}
5928 
5929 	if (fs->flow_type & FLOW_MAC_EXT) {
5930 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5931 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5932 			eth_zero_addr(fs->m_ext.h_dest);
5933 		else
5934 			ether_addr_copy(fs->m_ext.h_dest,
5935 					rule->tuples_mask.dst_mac);
5936 	}
5937 
5938 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5939 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5940 	} else {
5941 		u64 vf_id;
5942 
5943 		fs->ring_cookie = rule->queue_id;
5944 		vf_id = rule->vf_id;
5945 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5946 		fs->ring_cookie |= vf_id;
5947 	}
5948 
5949 	spin_unlock_bh(&hdev->fd_rule_lock);
5950 
5951 	return 0;
5952 }
5953 
5954 static int hclge_get_all_rules(struct hnae3_handle *handle,
5955 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5956 {
5957 	struct hclge_vport *vport = hclge_get_vport(handle);
5958 	struct hclge_dev *hdev = vport->back;
5959 	struct hclge_fd_rule *rule;
5960 	struct hlist_node *node2;
5961 	int cnt = 0;
5962 
5963 	if (!hnae3_dev_fd_supported(hdev))
5964 		return -EOPNOTSUPP;
5965 
5966 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5967 
5968 	spin_lock_bh(&hdev->fd_rule_lock);
5969 	hlist_for_each_entry_safe(rule, node2,
5970 				  &hdev->fd_rule_list, rule_node) {
5971 		if (cnt == cmd->rule_cnt) {
5972 			spin_unlock_bh(&hdev->fd_rule_lock);
5973 			return -EMSGSIZE;
5974 		}
5975 
5976 		rule_locs[cnt] = rule->location;
5977 		cnt++;
5978 	}
5979 
5980 	spin_unlock_bh(&hdev->fd_rule_lock);
5981 
5982 	cmd->rule_cnt = cnt;
5983 
5984 	return 0;
5985 }
5986 
5987 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5988 				     struct hclge_fd_rule_tuples *tuples)
5989 {
5990 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5991 	tuples->ip_proto = fkeys->basic.ip_proto;
5992 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5993 
5994 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
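		/* an IPv4 address only occupies the last word of the
		 * IPv6-sized src/dst ip tuple arrays
		 */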
5995 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5996 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5997 	} else {
5998 		memcpy(tuples->src_ip,
5999 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6000 		       sizeof(tuples->src_ip));
6001 		memcpy(tuples->dst_ip,
6002 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6003 		       sizeof(tuples->dst_ip));
6004 	}
6005 }
6006 
6007 /* traverse all rules, check whether an existing rule has the same tuples */
6008 static struct hclge_fd_rule *
6009 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6010 			  const struct hclge_fd_rule_tuples *tuples)
6011 {
6012 	struct hclge_fd_rule *rule = NULL;
6013 	struct hlist_node *node;
6014 
6015 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6016 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6017 			return rule;
6018 	}
6019 
6020 	return NULL;
6021 }
6022 
6023 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6024 				     struct hclge_fd_rule *rule)
6025 {
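	/* arfs rules do not match on mac address, vlan tag, ip tos or
	 * source port, so mark those tuples as unused
	 */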
6026 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6027 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6028 			     BIT(INNER_SRC_PORT);
6029 	rule->action = 0;
6030 	rule->vf_id = 0;
6031 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6032 	if (tuples->ether_proto == ETH_P_IP) {
6033 		if (tuples->ip_proto == IPPROTO_TCP)
6034 			rule->flow_type = TCP_V4_FLOW;
6035 		else
6036 			rule->flow_type = UDP_V4_FLOW;
6037 	} else {
6038 		if (tuples->ip_proto == IPPROTO_TCP)
6039 			rule->flow_type = TCP_V6_FLOW;
6040 		else
6041 			rule->flow_type = UDP_V6_FLOW;
6042 	}
6043 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6044 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6045 }
6046 
6047 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6048 				      u16 flow_id, struct flow_keys *fkeys)
6049 {
6050 	struct hclge_vport *vport = hclge_get_vport(handle);
6051 	struct hclge_fd_rule_tuples new_tuples;
6052 	struct hclge_dev *hdev = vport->back;
6053 	struct hclge_fd_rule *rule;
6054 	u16 tmp_queue_id;
6055 	u16 bit_id;
6056 	int ret;
6057 
6058 	if (!hnae3_dev_fd_supported(hdev))
6059 		return -EOPNOTSUPP;
6060 
6061 	memset(&new_tuples, 0, sizeof(new_tuples));
6062 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6063 
6064 	spin_lock_bh(&hdev->fd_rule_lock);
6065 
6066 	/* when there is already an fd rule added by the user,
6067 	 * arfs should not work
6068 	 */
6069 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6070 		spin_unlock_bh(&hdev->fd_rule_lock);
6071 
6072 		return -EOPNOTSUPP;
6073 	}
6074 
6075 	/* check whether a flow director filter already exists for this flow:
6076 	 * if not, create a new filter for it;
6077 	 * if a filter exists with a different queue id, modify the filter;
6078 	 * if a filter exists with the same queue id, do nothing
6079 	 */
6080 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6081 	if (!rule) {
6082 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6083 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6084 			spin_unlock_bh(&hdev->fd_rule_lock);
6085 
6086 			return -ENOSPC;
6087 		}
6088 
6089 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6090 		if (!rule) {
6091 			spin_unlock_bh(&hdev->fd_rule_lock);
6092 
6093 			return -ENOMEM;
6094 		}
6095 
6096 		set_bit(bit_id, hdev->fd_bmap);
6097 		rule->location = bit_id;
6098 		rule->flow_id = flow_id;
6099 		rule->queue_id = queue_id;
6100 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6101 		ret = hclge_fd_config_rule(hdev, rule);
6102 
6103 		spin_unlock_bh(&hdev->fd_rule_lock);
6104 
6105 		if (ret)
6106 			return ret;
6107 
6108 		return rule->location;
6109 	}
6110 
6111 	spin_unlock_bh(&hdev->fd_rule_lock);
6112 
6113 	if (rule->queue_id == queue_id)
6114 		return rule->location;
6115 
6116 	tmp_queue_id = rule->queue_id;
6117 	rule->queue_id = queue_id;
6118 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6119 	if (ret) {
6120 		rule->queue_id = tmp_queue_id;
6121 		return ret;
6122 	}
6123 
6124 	return rule->location;
6125 }
6126 
6127 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6128 {
6129 #ifdef CONFIG_RFS_ACCEL
6130 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6131 	struct hclge_fd_rule *rule;
6132 	struct hlist_node *node;
6133 	HLIST_HEAD(del_list);
6134 
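	/* move expired rules onto a local list while holding the lock, then
	 * remove their TCAM entries after the lock has been released
	 */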
6135 	spin_lock_bh(&hdev->fd_rule_lock);
6136 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6137 		spin_unlock_bh(&hdev->fd_rule_lock);
6138 		return;
6139 	}
6140 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6141 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6142 					rule->flow_id, rule->location)) {
6143 			hlist_del_init(&rule->rule_node);
6144 			hlist_add_head(&rule->rule_node, &del_list);
6145 			hdev->hclge_fd_rule_num--;
6146 			clear_bit(rule->location, hdev->fd_bmap);
6147 		}
6148 	}
6149 	spin_unlock_bh(&hdev->fd_rule_lock);
6150 
6151 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6152 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6153 				     rule->location, NULL, false);
6154 		kfree(rule);
6155 	}
6156 #endif
6157 }
6158 
6159 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6160 {
6161 #ifdef CONFIG_RFS_ACCEL
6162 	struct hclge_vport *vport = hclge_get_vport(handle);
6163 	struct hclge_dev *hdev = vport->back;
6164 
6165 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6166 		hclge_del_all_fd_entries(handle, true);
6167 #endif
6168 }
6169 
6170 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6171 {
6172 	struct hclge_vport *vport = hclge_get_vport(handle);
6173 	struct hclge_dev *hdev = vport->back;
6174 
6175 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6176 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6177 }
6178 
6179 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6180 {
6181 	struct hclge_vport *vport = hclge_get_vport(handle);
6182 	struct hclge_dev *hdev = vport->back;
6183 
6184 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6185 }
6186 
6187 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6188 {
6189 	struct hclge_vport *vport = hclge_get_vport(handle);
6190 	struct hclge_dev *hdev = vport->back;
6191 
6192 	return hdev->rst_stats.hw_reset_done_cnt;
6193 }
6194 
6195 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6196 {
6197 	struct hclge_vport *vport = hclge_get_vport(handle);
6198 	struct hclge_dev *hdev = vport->back;
6199 	bool clear;
6200 
6201 	hdev->fd_en = enable;
6202 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6203 	if (!enable)
6204 		hclge_del_all_fd_entries(handle, clear);
6205 	else
6206 		hclge_restore_fd_entries(handle);
6207 }
6208 
6209 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6210 {
6211 	struct hclge_desc desc;
6212 	struct hclge_config_mac_mode_cmd *req =
6213 		(struct hclge_config_mac_mode_cmd *)desc.data;
6214 	u32 loop_en = 0;
6215 	int ret;
6216 
6217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6218 
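	/* when enabling, turn on tx/rx, padding, FCS handling and oversize
	 * truncation; when disabling, leave loop_en as 0 to stop the mac
	 */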
6219 	if (enable) {
6220 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6221 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6222 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6223 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6224 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6225 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6226 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6227 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6228 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6229 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6230 	}
6231 
6232 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6233 
6234 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6235 	if (ret)
6236 		dev_err(&hdev->pdev->dev,
6237 			"mac enable fail, ret =%d.\n", ret);
6238 }
6239 
6240 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6241 				     u8 switch_param, u8 param_mask)
6242 {
6243 	struct hclge_mac_vlan_switch_cmd *req;
6244 	struct hclge_desc desc;
6245 	u32 func_id;
6246 	int ret;
6247 
6248 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6249 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6250 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6251 				   false);
6252 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6253 	req->func_id = cpu_to_le32(func_id);
6254 	req->switch_param = switch_param;
6255 	req->param_mask = param_mask;
6256 
6257 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6258 	if (ret)
6259 		dev_err(&hdev->pdev->dev,
6260 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6261 	return ret;
6262 }
6263 
6264 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6265 				       int link_ret)
6266 {
6267 #define HCLGE_PHY_LINK_STATUS_NUM  200
6268 
6269 	struct phy_device *phydev = hdev->hw.mac.phydev;
6270 	int i = 0;
6271 	int ret;
6272 
6273 	do {
6274 		ret = phy_read_status(phydev);
6275 		if (ret) {
6276 			dev_err(&hdev->pdev->dev,
6277 				"phy update link status fail, ret = %d\n", ret);
6278 			return;
6279 		}
6280 
6281 		if (phydev->link == link_ret)
6282 			break;
6283 
6284 		msleep(HCLGE_LINK_STATUS_MS);
6285 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6286 }
6287 
6288 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6289 {
6290 #define HCLGE_MAC_LINK_STATUS_NUM  100
6291 
6292 	int i = 0;
6293 	int ret;
6294 
6295 	do {
6296 		ret = hclge_get_mac_link_status(hdev);
6297 		if (ret < 0)
6298 			return ret;
6299 		else if (ret == link_ret)
6300 			return 0;
6301 
6302 		msleep(HCLGE_LINK_STATUS_MS);
6303 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6304 	return -EBUSY;
6305 }
6306 
6307 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6308 					  bool is_phy)
6309 {
6310 #define HCLGE_LINK_STATUS_DOWN 0
6311 #define HCLGE_LINK_STATUS_UP   1
6312 
6313 	int link_ret;
6314 
6315 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6316 
6317 	if (is_phy)
6318 		hclge_phy_link_status_wait(hdev, link_ret);
6319 
6320 	return hclge_mac_link_status_wait(hdev, link_ret);
6321 }
6322 
6323 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6324 {
6325 	struct hclge_config_mac_mode_cmd *req;
6326 	struct hclge_desc desc;
6327 	u32 loop_en;
6328 	int ret;
6329 
6330 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6331 	/* 1 Read out the MAC mode config first */
6332 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6333 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6334 	if (ret) {
6335 		dev_err(&hdev->pdev->dev,
6336 			"mac loopback get fail, ret =%d.\n", ret);
6337 		return ret;
6338 	}
6339 
6340 	/* 2 Then setup the loopback flag */
6341 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6342 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6343 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6344 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6345 
6346 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6347 
6348 	/* 3 Config mac work mode with loopback flag
6349 	 * and its original configuration parameters
6350 	 */
6351 	hclge_cmd_reuse_desc(&desc, false);
6352 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6353 	if (ret)
6354 		dev_err(&hdev->pdev->dev,
6355 			"mac loopback set fail, ret =%d.\n", ret);
6356 	return ret;
6357 }
6358 
6359 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6360 				     enum hnae3_loop loop_mode)
6361 {
6362 #define HCLGE_SERDES_RETRY_MS	10
6363 #define HCLGE_SERDES_RETRY_NUM	100
6364 
6365 	struct hclge_serdes_lb_cmd *req;
6366 	struct hclge_desc desc;
6367 	int ret, i = 0;
6368 	u8 loop_mode_b;
6369 
6370 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6371 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6372 
6373 	switch (loop_mode) {
6374 	case HNAE3_LOOP_SERIAL_SERDES:
6375 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6376 		break;
6377 	case HNAE3_LOOP_PARALLEL_SERDES:
6378 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6379 		break;
6380 	default:
6381 		dev_err(&hdev->pdev->dev,
6382 			"unsupported serdes loopback mode %d\n", loop_mode);
6383 		return -ENOTSUPP;
6384 	}
6385 
6386 	if (en) {
6387 		req->enable = loop_mode_b;
6388 		req->mask = loop_mode_b;
6389 	} else {
6390 		req->mask = loop_mode_b;
6391 	}
6392 
6393 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6394 	if (ret) {
6395 		dev_err(&hdev->pdev->dev,
6396 			"serdes loopback set fail, ret = %d\n", ret);
6397 		return ret;
6398 	}
6399 
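	/* poll the firmware until it reports that the serdes loopback
	 * configuration is done, or the retry limit is reached
	 */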
6400 	do {
6401 		msleep(HCLGE_SERDES_RETRY_MS);
6402 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6403 					   true);
6404 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6405 		if (ret) {
6406 			dev_err(&hdev->pdev->dev,
6407 				"serdes loopback get fail, ret = %d\n", ret);
6408 			return ret;
6409 		}
6410 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6411 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6412 
6413 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6414 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6415 		return -EBUSY;
6416 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6417 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6418 		return -EIO;
6419 	}
6420 	return ret;
6421 }
6422 
6423 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6424 				     enum hnae3_loop loop_mode)
6425 {
6426 	int ret;
6427 
6428 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6429 	if (ret)
6430 		return ret;
6431 
6432 	hclge_cfg_mac_mode(hdev, en);
6433 
6434 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6435 	if (ret)
6436 		dev_err(&hdev->pdev->dev,
6437 			"serdes loopback config mac mode timeout\n");
6438 
6439 	return ret;
6440 }
6441 
6442 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6443 				     struct phy_device *phydev)
6444 {
6445 	int ret;
6446 
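	/* suspend the phy first if it is not already suspended, then resume
	 * it, so the phy is powered up before enabling loopback
	 */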
6447 	if (!phydev->suspended) {
6448 		ret = phy_suspend(phydev);
6449 		if (ret)
6450 			return ret;
6451 	}
6452 
6453 	ret = phy_resume(phydev);
6454 	if (ret)
6455 		return ret;
6456 
6457 	return phy_loopback(phydev, true);
6458 }
6459 
6460 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6461 				      struct phy_device *phydev)
6462 {
6463 	int ret;
6464 
6465 	ret = phy_loopback(phydev, false);
6466 	if (ret)
6467 		return ret;
6468 
6469 	return phy_suspend(phydev);
6470 }
6471 
6472 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6473 {
6474 	struct phy_device *phydev = hdev->hw.mac.phydev;
6475 	int ret;
6476 
6477 	if (!phydev)
6478 		return -ENOTSUPP;
6479 
6480 	if (en)
6481 		ret = hclge_enable_phy_loopback(hdev, phydev);
6482 	else
6483 		ret = hclge_disable_phy_loopback(hdev, phydev);
6484 	if (ret) {
6485 		dev_err(&hdev->pdev->dev,
6486 			"set phy loopback fail, ret = %d\n", ret);
6487 		return ret;
6488 	}
6489 
6490 	hclge_cfg_mac_mode(hdev, en);
6491 
6492 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6493 	if (ret)
6494 		dev_err(&hdev->pdev->dev,
6495 			"phy loopback config mac mode timeout\n");
6496 
6497 	return ret;
6498 }
6499 
6500 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6501 			    int stream_id, bool enable)
6502 {
6503 	struct hclge_desc desc;
6504 	struct hclge_cfg_com_tqp_queue_cmd *req =
6505 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6506 	int ret;
6507 
6508 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6509 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6510 	req->stream_id = cpu_to_le16(stream_id);
6511 	if (enable)
6512 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6513 
6514 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6515 	if (ret)
6516 		dev_err(&hdev->pdev->dev,
6517 			"Tqp enable fail, status =%d.\n", ret);
6518 	return ret;
6519 }
6520 
6521 static int hclge_set_loopback(struct hnae3_handle *handle,
6522 			      enum hnae3_loop loop_mode, bool en)
6523 {
6524 	struct hclge_vport *vport = hclge_get_vport(handle);
6525 	struct hnae3_knic_private_info *kinfo;
6526 	struct hclge_dev *hdev = vport->back;
6527 	int i, ret;
6528 
6529 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6530 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6531 	 * the same, the packets are looped back in the SSU. If SSU loopback
6532 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6533 	 */
6534 	if (hdev->pdev->revision >= 0x21) {
6535 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6536 
6537 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6538 						HCLGE_SWITCH_ALW_LPBK_MASK);
6539 		if (ret)
6540 			return ret;
6541 	}
6542 
6543 	switch (loop_mode) {
6544 	case HNAE3_LOOP_APP:
6545 		ret = hclge_set_app_loopback(hdev, en);
6546 		break;
6547 	case HNAE3_LOOP_SERIAL_SERDES:
6548 	case HNAE3_LOOP_PARALLEL_SERDES:
6549 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6550 		break;
6551 	case HNAE3_LOOP_PHY:
6552 		ret = hclge_set_phy_loopback(hdev, en);
6553 		break;
6554 	default:
6555 		ret = -ENOTSUPP;
6556 		dev_err(&hdev->pdev->dev,
6557 			"loop_mode %d is not supported\n", loop_mode);
6558 		break;
6559 	}
6560 
6561 	if (ret)
6562 		return ret;
6563 
6564 	kinfo = &vport->nic.kinfo;
6565 	for (i = 0; i < kinfo->num_tqps; i++) {
6566 		ret = hclge_tqp_enable(hdev, i, 0, en);
6567 		if (ret)
6568 			return ret;
6569 	}
6570 
6571 	return 0;
6572 }
6573 
6574 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6575 {
6576 	int ret;
6577 
6578 	ret = hclge_set_app_loopback(hdev, false);
6579 	if (ret)
6580 		return ret;
6581 
6582 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6583 	if (ret)
6584 		return ret;
6585 
6586 	return hclge_cfg_serdes_loopback(hdev, false,
6587 					 HNAE3_LOOP_PARALLEL_SERDES);
6588 }
6589 
6590 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6591 {
6592 	struct hclge_vport *vport = hclge_get_vport(handle);
6593 	struct hnae3_knic_private_info *kinfo;
6594 	struct hnae3_queue *queue;
6595 	struct hclge_tqp *tqp;
6596 	int i;
6597 
6598 	kinfo = &vport->nic.kinfo;
6599 	for (i = 0; i < kinfo->num_tqps; i++) {
6600 		queue = handle->kinfo.tqp[i];
6601 		tqp = container_of(queue, struct hclge_tqp, q);
6602 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6603 	}
6604 }
6605 
6606 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6607 {
6608 	struct hclge_vport *vport = hclge_get_vport(handle);
6609 	struct hclge_dev *hdev = vport->back;
6610 
6611 	if (enable) {
6612 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6613 	} else {
6614 		/* Set the DOWN flag here to prevent the service task from
6615 		 * being scheduled again
6616 		 */
6617 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6618 		cancel_delayed_work_sync(&hdev->service_task);
6619 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6620 	}
6621 }
6622 
6623 static int hclge_ae_start(struct hnae3_handle *handle)
6624 {
6625 	struct hclge_vport *vport = hclge_get_vport(handle);
6626 	struct hclge_dev *hdev = vport->back;
6627 
6628 	/* mac enable */
6629 	hclge_cfg_mac_mode(hdev, true);
6630 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6631 	hdev->hw.mac.link = 0;
6632 
6633 	/* reset tqp stats */
6634 	hclge_reset_tqp_stats(handle);
6635 
6636 	hclge_mac_start_phy(hdev);
6637 
6638 	return 0;
6639 }
6640 
6641 static void hclge_ae_stop(struct hnae3_handle *handle)
6642 {
6643 	struct hclge_vport *vport = hclge_get_vport(handle);
6644 	struct hclge_dev *hdev = vport->back;
6645 	int i;
6646 
6647 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6648 
6649 	hclge_clear_arfs_rules(handle);
6650 
6651 	/* If it is not PF reset, the firmware will disable the MAC,
6652 	 * so it only needs to stop the phy here.
6653 	 */
6654 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6655 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6656 		hclge_mac_stop_phy(hdev);
6657 		hclge_update_link_status(hdev);
6658 		return;
6659 	}
6660 
6661 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6662 		hclge_reset_tqp(handle, i);
6663 
6664 	hclge_config_mac_tnl_int(hdev, false);
6665 
6666 	/* Mac disable */
6667 	hclge_cfg_mac_mode(hdev, false);
6668 
6669 	hclge_mac_stop_phy(hdev);
6670 
6671 	/* reset tqp stats */
6672 	hclge_reset_tqp_stats(handle);
6673 	hclge_update_link_status(hdev);
6674 }
6675 
6676 int hclge_vport_start(struct hclge_vport *vport)
6677 {
6678 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6679 	vport->last_active_jiffies = jiffies;
6680 	return 0;
6681 }
6682 
6683 void hclge_vport_stop(struct hclge_vport *vport)
6684 {
6685 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6686 }
6687 
6688 static int hclge_client_start(struct hnae3_handle *handle)
6689 {
6690 	struct hclge_vport *vport = hclge_get_vport(handle);
6691 
6692 	return hclge_vport_start(vport);
6693 }
6694 
6695 static void hclge_client_stop(struct hnae3_handle *handle)
6696 {
6697 	struct hclge_vport *vport = hclge_get_vport(handle);
6698 
6699 	hclge_vport_stop(vport);
6700 }
6701 
6702 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6703 					 u16 cmdq_resp, u8  resp_code,
6704 					 enum hclge_mac_vlan_tbl_opcode op)
6705 {
6706 	struct hclge_dev *hdev = vport->back;
6707 
6708 	if (cmdq_resp) {
6709 		dev_err(&hdev->pdev->dev,
6710 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
6711 			cmdq_resp);
6712 		return -EIO;
6713 	}
6714 
6715 	if (op == HCLGE_MAC_VLAN_ADD) {
6716 		if ((!resp_code) || (resp_code == 1)) {
6717 			return 0;
6718 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6719 			dev_err(&hdev->pdev->dev,
6720 				"add mac addr failed for uc_overflow.\n");
6721 			return -ENOSPC;
6722 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6723 			dev_err(&hdev->pdev->dev,
6724 				"add mac addr failed for mc_overflow.\n");
6725 			return -ENOSPC;
6726 		}
6727 
6728 		dev_err(&hdev->pdev->dev,
6729 			"add mac addr failed for undefined, code=%u.\n",
6730 			resp_code);
6731 		return -EIO;
6732 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6733 		if (!resp_code) {
6734 			return 0;
6735 		} else if (resp_code == 1) {
6736 			dev_dbg(&hdev->pdev->dev,
6737 				"remove mac addr failed for miss.\n");
6738 			return -ENOENT;
6739 		}
6740 
6741 		dev_err(&hdev->pdev->dev,
6742 			"remove mac addr failed for undefined, code=%u.\n",
6743 			resp_code);
6744 		return -EIO;
6745 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6746 		if (!resp_code) {
6747 			return 0;
6748 		} else if (resp_code == 1) {
6749 			dev_dbg(&hdev->pdev->dev,
6750 				"lookup mac addr failed for miss.\n");
6751 			return -ENOENT;
6752 		}
6753 
6754 		dev_err(&hdev->pdev->dev,
6755 			"lookup mac addr failed for undefined, code=%u.\n",
6756 			resp_code);
6757 		return -EIO;
6758 	}
6759 
6760 	dev_err(&hdev->pdev->dev,
6761 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6762 
6763 	return -EINVAL;
6764 }
6765 
6766 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6767 {
6768 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6769 
6770 	unsigned int word_num;
6771 	unsigned int bit_num;
6772 
6773 	if (vfid > 255 || vfid < 0)
6774 		return -EIO;
6775 
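	/* the function id bitmap spans two descriptors: desc[1] holds the
	 * first 192 function ids and desc[2] holds the remaining ones
	 */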
6776 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6777 		word_num = vfid / 32;
6778 		bit_num  = vfid % 32;
6779 		if (clr)
6780 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6781 		else
6782 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6783 	} else {
6784 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6785 		bit_num  = vfid % 32;
6786 		if (clr)
6787 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6788 		else
6789 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6790 	}
6791 
6792 	return 0;
6793 }
6794 
6795 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6796 {
6797 #define HCLGE_DESC_NUMBER 3
6798 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6799 	int i, j;
6800 
6801 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6802 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6803 			if (desc[i].data[j])
6804 				return false;
6805 
6806 	return true;
6807 }
6808 
6809 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6810 				   const u8 *addr, bool is_mc)
6811 {
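	/* the first four bytes of the mac address are packed into
	 * mac_addr_hi32 and the last two bytes into mac_addr_lo16,
	 * in little endian byte order
	 */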
6812 	const unsigned char *mac_addr = addr;
6813 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6814 		       (mac_addr[0]) | (mac_addr[1] << 8);
6815 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6816 
6817 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6818 	if (is_mc) {
6819 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6820 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6821 	}
6822 
6823 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6824 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6825 }
6826 
6827 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6828 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6829 {
6830 	struct hclge_dev *hdev = vport->back;
6831 	struct hclge_desc desc;
6832 	u8 resp_code;
6833 	u16 retval;
6834 	int ret;
6835 
6836 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6837 
6838 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6839 
6840 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6841 	if (ret) {
6842 		dev_err(&hdev->pdev->dev,
6843 			"del mac addr failed for cmd_send, ret =%d.\n",
6844 			ret);
6845 		return ret;
6846 	}
6847 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6848 	retval = le16_to_cpu(desc.retval);
6849 
6850 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6851 					     HCLGE_MAC_VLAN_REMOVE);
6852 }
6853 
6854 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6855 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6856 				     struct hclge_desc *desc,
6857 				     bool is_mc)
6858 {
6859 	struct hclge_dev *hdev = vport->back;
6860 	u8 resp_code;
6861 	u16 retval;
6862 	int ret;
6863 
6864 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6865 	if (is_mc) {
6866 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6867 		memcpy(desc[0].data,
6868 		       req,
6869 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6870 		hclge_cmd_setup_basic_desc(&desc[1],
6871 					   HCLGE_OPC_MAC_VLAN_ADD,
6872 					   true);
6873 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6874 		hclge_cmd_setup_basic_desc(&desc[2],
6875 					   HCLGE_OPC_MAC_VLAN_ADD,
6876 					   true);
6877 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6878 	} else {
6879 		memcpy(desc[0].data,
6880 		       req,
6881 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6882 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6883 	}
6884 	if (ret) {
6885 		dev_err(&hdev->pdev->dev,
6886 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6887 			ret);
6888 		return ret;
6889 	}
6890 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6891 	retval = le16_to_cpu(desc[0].retval);
6892 
6893 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6894 					     HCLGE_MAC_VLAN_LKUP);
6895 }
6896 
6897 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6898 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6899 				  struct hclge_desc *mc_desc)
6900 {
6901 	struct hclge_dev *hdev = vport->back;
6902 	int cfg_status;
6903 	u8 resp_code;
6904 	u16 retval;
6905 	int ret;
6906 
6907 	if (!mc_desc) {
6908 		struct hclge_desc desc;
6909 
6910 		hclge_cmd_setup_basic_desc(&desc,
6911 					   HCLGE_OPC_MAC_VLAN_ADD,
6912 					   false);
6913 		memcpy(desc.data, req,
6914 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6915 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6916 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6917 		retval = le16_to_cpu(desc.retval);
6918 
6919 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6920 							   resp_code,
6921 							   HCLGE_MAC_VLAN_ADD);
6922 	} else {
6923 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6924 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6925 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6926 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6927 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6928 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6929 		memcpy(mc_desc[0].data, req,
6930 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6931 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6932 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6933 		retval = le16_to_cpu(mc_desc[0].retval);
6934 
6935 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6936 							   resp_code,
6937 							   HCLGE_MAC_VLAN_ADD);
6938 	}
6939 
6940 	if (ret) {
6941 		dev_err(&hdev->pdev->dev,
6942 			"add mac addr failed for cmd_send, ret =%d.\n",
6943 			ret);
6944 		return ret;
6945 	}
6946 
6947 	return cfg_status;
6948 }
6949 
6950 static int hclge_init_umv_space(struct hclge_dev *hdev)
6951 {
6952 	u16 allocated_size = 0;
6953 	int ret;
6954 
6955 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6956 				  true);
6957 	if (ret)
6958 		return ret;
6959 
6960 	if (allocated_size < hdev->wanted_umv_size)
6961 		dev_warn(&hdev->pdev->dev,
6962 			 "Alloc umv space failed, want %d, get %d\n",
6963 			 hdev->wanted_umv_size, allocated_size);
6964 
6965 	mutex_init(&hdev->umv_mutex);
6966 	hdev->max_umv_size = allocated_size;
6967 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6968 	 * preserve some unicast mac vlan table entries shared by pf
6969 	 * and its vfs.
6970 	 */
6971 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6972 	hdev->share_umv_size = hdev->priv_umv_size +
6973 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6974 
6975 	return 0;
6976 }
6977 
6978 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6979 {
6980 	int ret;
6981 
6982 	if (hdev->max_umv_size > 0) {
6983 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6984 					  false);
6985 		if (ret)
6986 			return ret;
6987 		hdev->max_umv_size = 0;
6988 	}
6989 	mutex_destroy(&hdev->umv_mutex);
6990 
6991 	return 0;
6992 }
6993 
6994 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6995 			       u16 *allocated_size, bool is_alloc)
6996 {
6997 	struct hclge_umv_spc_alc_cmd *req;
6998 	struct hclge_desc desc;
6999 	int ret;
7000 
7001 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7002 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7003 	if (!is_alloc)
7004 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7005 
7006 	req->space_size = cpu_to_le32(space_size);
7007 
7008 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7009 	if (ret) {
7010 		dev_err(&hdev->pdev->dev,
7011 			"%s umv space failed for cmd_send, ret =%d\n",
7012 			is_alloc ? "allocate" : "free", ret);
7013 		return ret;
7014 	}
7015 
7016 	if (is_alloc && allocated_size)
7017 		*allocated_size = le32_to_cpu(desc.data[1]);
7018 
7019 	return 0;
7020 }
7021 
7022 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7023 {
7024 	struct hclge_vport *vport;
7025 	int i;
7026 
7027 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7028 		vport = &hdev->vport[i];
7029 		vport->used_umv_num = 0;
7030 	}
7031 
7032 	mutex_lock(&hdev->umv_mutex);
7033 	hdev->share_umv_size = hdev->priv_umv_size +
7034 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7035 	mutex_unlock(&hdev->umv_mutex);
7036 }
7037 
7038 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7039 {
7040 	struct hclge_dev *hdev = vport->back;
7041 	bool is_full;
7042 
7043 	mutex_lock(&hdev->umv_mutex);
7044 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7045 		   hdev->share_umv_size == 0);
7046 	mutex_unlock(&hdev->umv_mutex);
7047 
7048 	return is_full;
7049 }
7050 
7051 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7052 {
7053 	struct hclge_dev *hdev = vport->back;
7054 
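	/* each vport consumes its private umv quota first; once that is
	 * used up, further entries are taken from the shared pool
	 */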
7055 	mutex_lock(&hdev->umv_mutex);
7056 	if (is_free) {
7057 		if (vport->used_umv_num > hdev->priv_umv_size)
7058 			hdev->share_umv_size++;
7059 
7060 		if (vport->used_umv_num > 0)
7061 			vport->used_umv_num--;
7062 	} else {
7063 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7064 		    hdev->share_umv_size > 0)
7065 			hdev->share_umv_size--;
7066 		vport->used_umv_num++;
7067 	}
7068 	mutex_unlock(&hdev->umv_mutex);
7069 }
7070 
7071 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7072 			     const unsigned char *addr)
7073 {
7074 	struct hclge_vport *vport = hclge_get_vport(handle);
7075 
7076 	return hclge_add_uc_addr_common(vport, addr);
7077 }
7078 
7079 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7080 			     const unsigned char *addr)
7081 {
7082 	struct hclge_dev *hdev = vport->back;
7083 	struct hclge_mac_vlan_tbl_entry_cmd req;
7084 	struct hclge_desc desc;
7085 	u16 egress_port = 0;
7086 	int ret;
7087 
7088 	/* mac addr check */
7089 	if (is_zero_ether_addr(addr) ||
7090 	    is_broadcast_ether_addr(addr) ||
7091 	    is_multicast_ether_addr(addr)) {
7092 		dev_err(&hdev->pdev->dev,
7093 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7094 			 addr, is_zero_ether_addr(addr),
7095 			 is_broadcast_ether_addr(addr),
7096 			 is_multicast_ether_addr(addr));
7097 		return -EINVAL;
7098 	}
7099 
7100 	memset(&req, 0, sizeof(req));
7101 
7102 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7103 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7104 
7105 	req.egress_port = cpu_to_le16(egress_port);
7106 
7107 	hclge_prepare_mac_addr(&req, addr, false);
7108 
7109 	/* Lookup the mac address in the mac_vlan table, and add
7110 	 * it if the entry does not exist. Duplicate unicast entries
7111 	 * are not allowed in the mac vlan table.
7112 	 */
7113 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7114 	if (ret == -ENOENT) {
7115 		if (!hclge_is_umv_space_full(vport)) {
7116 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7117 			if (!ret)
7118 				hclge_update_umv_space(vport, false);
7119 			return ret;
7120 		}
7121 
7122 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7123 			hdev->priv_umv_size);
7124 
7125 		return -ENOSPC;
7126 	}
7127 
7128 	/* check if we just hit the duplicate */
7129 	if (!ret) {
7130 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7131 			 vport->vport_id, addr);
7132 		return 0;
7133 	}
7134 
7135 	dev_err(&hdev->pdev->dev,
7136 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7137 		addr);
7138 
7139 	return ret;
7140 }
7141 
7142 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7143 			    const unsigned char *addr)
7144 {
7145 	struct hclge_vport *vport = hclge_get_vport(handle);
7146 
7147 	return hclge_rm_uc_addr_common(vport, addr);
7148 }
7149 
7150 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7151 			    const unsigned char *addr)
7152 {
7153 	struct hclge_dev *hdev = vport->back;
7154 	struct hclge_mac_vlan_tbl_entry_cmd req;
7155 	int ret;
7156 
7157 	/* mac addr check */
7158 	if (is_zero_ether_addr(addr) ||
7159 	    is_broadcast_ether_addr(addr) ||
7160 	    is_multicast_ether_addr(addr)) {
7161 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7162 			addr);
7163 		return -EINVAL;
7164 	}
7165 
7166 	memset(&req, 0, sizeof(req));
7167 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7168 	hclge_prepare_mac_addr(&req, addr, false);
7169 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7170 	if (!ret)
7171 		hclge_update_umv_space(vport, true);
7172 
7173 	return ret;
7174 }
7175 
7176 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7177 			     const unsigned char *addr)
7178 {
7179 	struct hclge_vport *vport = hclge_get_vport(handle);
7180 
7181 	return hclge_add_mc_addr_common(vport, addr);
7182 }
7183 
7184 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7185 			     const unsigned char *addr)
7186 {
7187 	struct hclge_dev *hdev = vport->back;
7188 	struct hclge_mac_vlan_tbl_entry_cmd req;
7189 	struct hclge_desc desc[3];
7190 	int status;
7191 
7192 	/* mac addr check */
7193 	if (!is_multicast_ether_addr(addr)) {
7194 		dev_err(&hdev->pdev->dev,
7195 			"Add mc mac err! invalid mac:%pM.\n",
7196 			 addr);
7197 		return -EINVAL;
7198 	}
7199 	memset(&req, 0, sizeof(req));
7200 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7201 	hclge_prepare_mac_addr(&req, addr, true);
7202 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7203 	if (status) {
7204 		/* This mac addr does not exist, add a new entry for it */
7205 		memset(desc[0].data, 0, sizeof(desc[0].data));
7206 		memset(desc[1].data, 0, sizeof(desc[0].data));
7207 		memset(desc[2].data, 0, sizeof(desc[0].data));
7208 	}
7209 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7210 	if (status)
7211 		return status;
7212 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7213 
7214 	if (status == -ENOSPC)
7215 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7216 
7217 	return status;
7218 }
7219 
7220 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7221 			    const unsigned char *addr)
7222 {
7223 	struct hclge_vport *vport = hclge_get_vport(handle);
7224 
7225 	return hclge_rm_mc_addr_common(vport, addr);
7226 }
7227 
7228 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7229 			    const unsigned char *addr)
7230 {
7231 	struct hclge_dev *hdev = vport->back;
7232 	struct hclge_mac_vlan_tbl_entry_cmd req;
7233 	enum hclge_cmd_status status;
7234 	struct hclge_desc desc[3];
7235 
7236 	/* mac addr check */
7237 	if (!is_multicast_ether_addr(addr)) {
7238 		dev_dbg(&hdev->pdev->dev,
7239 			"Remove mc mac err! invalid mac:%pM.\n",
7240 			 addr);
7241 		return -EINVAL;
7242 	}
7243 
7244 	memset(&req, 0, sizeof(req));
7245 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7246 	hclge_prepare_mac_addr(&req, addr, true);
7247 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7248 	if (!status) {
7249 		/* This mac addr exists, remove this handle's VFID from it */
7250 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7251 		if (status)
7252 			return status;
7253 
7254 		if (hclge_is_all_function_id_zero(desc))
7255 			/* All the vfids are zero, so delete this entry */
7256 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7257 		else
7258 			/* Not all the vfids are zero, just update the vfids */
7259 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7260 
7261 	} else {
7262 		/* This mac address may be in the mta table, but it cannot be
7263 		 * deleted here because an mta entry represents an address
7264 		 * range rather than a specific address. The delete action for
7265 		 * all entries will take effect in update_mta_status, called
7266 		 * by hns3_nic_set_rx_mode.
7267 		 */
7268 		status = 0;
7269 	}
7270 
7271 	return status;
7272 }
7273 
7274 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7275 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7276 {
7277 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7278 	struct list_head *list;
7279 
7280 	if (!vport->vport_id)
7281 		return;
7282 
7283 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7284 	if (!mac_cfg)
7285 		return;
7286 
7287 	mac_cfg->hd_tbl_status = true;
7288 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7289 
7290 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7291 	       &vport->uc_mac_list : &vport->mc_mac_list;
7292 
7293 	list_add_tail(&mac_cfg->node, list);
7294 }
7295 
7296 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7297 			      bool is_write_tbl,
7298 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7299 {
7300 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7301 	struct list_head *list;
7302 	bool uc_flag, mc_flag;
7303 
7304 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7305 	       &vport->uc_mac_list : &vport->mc_mac_list;
7306 
7307 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7308 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7309 
7310 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7311 		if (!memcmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN)) {
7312 			if (uc_flag && mac_cfg->hd_tbl_status)
7313 				hclge_rm_uc_addr_common(vport, mac_addr);
7314 
7315 			if (mc_flag && mac_cfg->hd_tbl_status)
7316 				hclge_rm_mc_addr_common(vport, mac_addr);
7317 
7318 			list_del(&mac_cfg->node);
7319 			kfree(mac_cfg);
7320 			break;
7321 		}
7322 	}
7323 }
7324 
7325 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7326 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7327 {
7328 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7329 	struct list_head *list;
7330 
7331 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7332 	       &vport->uc_mac_list : &vport->mc_mac_list;
7333 
7334 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7335 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7336 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7337 
7338 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7339 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7340 
7341 		mac_cfg->hd_tbl_status = false;
7342 		if (is_del_list) {
7343 			list_del(&mac_cfg->node);
7344 			kfree(mac_cfg);
7345 		}
7346 	}
7347 }
7348 
7349 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7350 {
7351 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7352 	struct hclge_vport *vport;
7353 	int i;
7354 
7355 	mutex_lock(&hdev->vport_cfg_mutex);
7356 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7357 		vport = &hdev->vport[i];
7358 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7359 			list_del(&mac->node);
7360 			kfree(mac);
7361 		}
7362 
7363 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7364 			list_del(&mac->node);
7365 			kfree(mac);
7366 		}
7367 	}
7368 	mutex_unlock(&hdev->vport_cfg_mutex);
7369 }
7370 
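/* Translate the firmware completion of a MAC ethertype (manager table) add
 * command into an errno: success and "already added" are treated as 0, while
 * table overflow, key conflict and unknown codes map to -EIO.
 */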
7371 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7372 					      u16 cmdq_resp, u8 resp_code)
7373 {
7374 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7375 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7376 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7377 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7378 
7379 	int return_status;
7380 
7381 	if (cmdq_resp) {
7382 		dev_err(&hdev->pdev->dev,
7383 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7384 			cmdq_resp);
7385 		return -EIO;
7386 	}
7387 
7388 	switch (resp_code) {
7389 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7390 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7391 		return_status = 0;
7392 		break;
7393 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7394 		dev_err(&hdev->pdev->dev,
7395 			"add mac ethertype failed for manager table overflow.\n");
7396 		return_status = -EIO;
7397 		break;
7398 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7399 		dev_err(&hdev->pdev->dev,
7400 			"add mac ethertype failed for key conflict.\n");
7401 		return_status = -EIO;
7402 		break;
7403 	default:
7404 		dev_err(&hdev->pdev->dev,
7405 			"add mac ethertype failed for undefined, code=%d.\n",
7406 			resp_code);
7407 		return_status = -EIO;
7408 	}
7409 
7410 	return return_status;
7411 }
7412 
7413 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7414 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7415 {
7416 	struct hclge_desc desc;
7417 	u8 resp_code;
7418 	u16 retval;
7419 	int ret;
7420 
7421 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7422 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7423 
7424 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7425 	if (ret) {
7426 		dev_err(&hdev->pdev->dev,
7427 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7428 			ret);
7429 		return ret;
7430 	}
7431 
7432 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7433 	retval = le16_to_cpu(desc.retval);
7434 
7435 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7436 }
7437 
7438 static int init_mgr_tbl(struct hclge_dev *hdev)
7439 {
7440 	int ret;
7441 	int i;
7442 
7443 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7444 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7445 		if (ret) {
7446 			dev_err(&hdev->pdev->dev,
7447 				"add mac ethertype failed, ret =%d.\n",
7448 				ret);
7449 			return ret;
7450 		}
7451 	}
7452 
7453 	return 0;
7454 }
7455 
7456 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7457 {
7458 	struct hclge_vport *vport = hclge_get_vport(handle);
7459 	struct hclge_dev *hdev = vport->back;
7460 
7461 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7462 }
7463 
7464 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7465 			      bool is_first)
7466 {
7467 	const unsigned char *new_addr = (const unsigned char *)p;
7468 	struct hclge_vport *vport = hclge_get_vport(handle);
7469 	struct hclge_dev *hdev = vport->back;
7470 	int ret;
7471 
7472 	/* mac addr check */
7473 	if (is_zero_ether_addr(new_addr) ||
7474 	    is_broadcast_ether_addr(new_addr) ||
7475 	    is_multicast_ether_addr(new_addr)) {
7476 		dev_err(&hdev->pdev->dev,
7477 			"Change uc mac err! invalid mac:%pM.\n",
7478 			 new_addr);
7479 		return -EINVAL;
7480 	}
7481 
7482 	if ((!is_first || is_kdump_kernel()) &&
7483 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7484 		dev_warn(&hdev->pdev->dev,
7485 			 "remove old uc mac address fail.\n");
7486 
7487 	ret = hclge_add_uc_addr(handle, new_addr);
7488 	if (ret) {
7489 		dev_err(&hdev->pdev->dev,
7490 			"add uc mac address fail, ret =%d.\n",
7491 			ret);
7492 
7493 		if (!is_first &&
7494 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7495 			dev_err(&hdev->pdev->dev,
7496 				"restore uc mac address fail.\n");
7497 
7498 		return -EIO;
7499 	}
7500 
7501 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7502 	if (ret) {
7503 		dev_err(&hdev->pdev->dev,
7504 			"configure mac pause address fail, ret =%d.\n",
7505 			ret);
7506 		return -EIO;
7507 	}
7508 
7509 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7510 
7511 	return 0;
7512 }
7513 
7514 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7515 			  int cmd)
7516 {
7517 	struct hclge_vport *vport = hclge_get_vport(handle);
7518 	struct hclge_dev *hdev = vport->back;
7519 
7520 	if (!hdev->hw.mac.phydev)
7521 		return -EOPNOTSUPP;
7522 
7523 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7524 }
7525 
7526 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7527 				      u8 fe_type, bool filter_en, u8 vf_id)
7528 {
7529 	struct hclge_vlan_filter_ctrl_cmd *req;
7530 	struct hclge_desc desc;
7531 	int ret;
7532 
7533 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7534 
7535 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7536 	req->vlan_type = vlan_type;
7537 	req->vlan_fe = filter_en ? fe_type : 0;
7538 	req->vf_id = vf_id;
7539 
7540 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7541 	if (ret)
7542 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7543 			ret);
7544 
7545 	return ret;
7546 }
7547 
7548 #define HCLGE_FILTER_TYPE_VF		0
7549 #define HCLGE_FILTER_TYPE_PORT		1
7550 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7551 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7552 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7553 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7554 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7555 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7556 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7557 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7558 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7559 
7560 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7561 {
7562 	struct hclge_vport *vport = hclge_get_vport(handle);
7563 	struct hclge_dev *hdev = vport->back;
7564 
7565 	if (hdev->pdev->revision >= 0x21) {
7566 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7567 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7568 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7569 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7570 	} else {
7571 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7572 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7573 					   0);
7574 	}
7575 	if (enable)
7576 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7577 	else
7578 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7579 }
7580 
7581 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7582 				    bool is_kill, u16 vlan,
7583 				    __be16 proto)
7584 {
7585 #define HCLGE_MAX_VF_BYTES  16
7586 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7587 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7588 	struct hclge_desc desc[2];
7589 	u8 vf_byte_val;
7590 	u8 vf_byte_off;
7591 	int ret;
7592 
7593 	/* If the vf vlan table is full, firmware disables the vf vlan filter;
7594 	 * it is then unnecessary and not possible to add a new vlan id.
7595 	 */
7596 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7597 		return 0;
7598 
7599 	hclge_cmd_setup_basic_desc(&desc[0],
7600 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7601 	hclge_cmd_setup_basic_desc(&desc[1],
7602 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7603 
7604 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7605 
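	/* The VF bitmap spans both descriptors: vfid selects a byte
	 * (vfid / 8) and a bit within it (vfid % 8).  Bytes beyond
	 * HCLGE_MAX_VF_BYTES fall into the second descriptor.
	 */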
7606 	vf_byte_off = vfid / 8;
7607 	vf_byte_val = 1 << (vfid % 8);
7608 
7609 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7610 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7611 
7612 	req0->vlan_id  = cpu_to_le16(vlan);
7613 	req0->vlan_cfg = is_kill;
7614 
7615 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7616 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7617 	else
7618 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7619 
7620 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7621 	if (ret) {
7622 		dev_err(&hdev->pdev->dev,
7623 			"Send vf vlan command fail, ret =%d.\n",
7624 			ret);
7625 		return ret;
7626 	}
7627 
7628 	if (!is_kill) {
7629 #define HCLGE_VF_VLAN_NO_ENTRY	2
7630 		if (!req0->resp_code || req0->resp_code == 1)
7631 			return 0;
7632 
7633 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7634 			set_bit(vfid, hdev->vf_vlan_full);
7635 			dev_warn(&hdev->pdev->dev,
7636 				 "vf vlan table is full, vf vlan filter is disabled\n");
7637 			return 0;
7638 		}
7639 
7640 		dev_err(&hdev->pdev->dev,
7641 			"Add vf vlan filter fail, ret =%d.\n",
7642 			req0->resp_code);
7643 	} else {
7644 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7645 		if (!req0->resp_code)
7646 			return 0;
7647 
7648 		/* The vf vlan filter is disabled when the vf vlan table is
7649 		 * full, so new vlan ids are never added to the vf vlan table.
7650 		 * Just return 0 without a warning, to avoid massive verbose
7651 		 * logs on unload.
7652 		 */
7653 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7654 			return 0;
7655 
7656 		dev_err(&hdev->pdev->dev,
7657 			"Kill vf vlan filter fail, ret =%d.\n",
7658 			req0->resp_code);
7659 	}
7660 
7661 	return -EIO;
7662 }
7663 
7664 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7665 				      u16 vlan_id, bool is_kill)
7666 {
7667 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7668 	struct hclge_desc desc;
7669 	u8 vlan_offset_byte_val;
7670 	u8 vlan_offset_byte;
7671 	u8 vlan_offset_160;
7672 	int ret;
7673 
7674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7675 
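	/* The PF VLAN filter is programmed in blocks of 160 VLAN ids:
	 * vlan_offset selects the block, and the byte/bit pair below
	 * selects the VLAN id within that block.
	 */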
7676 	vlan_offset_160 = vlan_id / 160;
7677 	vlan_offset_byte = (vlan_id % 160) / 8;
7678 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7679 
7680 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7681 	req->vlan_offset = vlan_offset_160;
7682 	req->vlan_cfg = is_kill;
7683 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7684 
7685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7686 	if (ret)
7687 		dev_err(&hdev->pdev->dev,
7688 			"port vlan command, send fail, ret =%d.\n", ret);
7689 	return ret;
7690 }
7691 
7692 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7693 				    u16 vport_id, u16 vlan_id,
7694 				    bool is_kill)
7695 {
7696 	u16 vport_idx, vport_num = 0;
7697 	int ret;
7698 
7699 	if (is_kill && !vlan_id)
7700 		return 0;
7701 
7702 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7703 				       proto);
7704 	if (ret) {
7705 		dev_err(&hdev->pdev->dev,
7706 			"Set %d vport vlan filter config fail, ret =%d.\n",
7707 			vport_id, ret);
7708 		return ret;
7709 	}
7710 
7711 	/* vlan 0 may be added twice when 8021q module is enabled */
7712 	if (!is_kill && !vlan_id &&
7713 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7714 		return 0;
7715 
7716 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7717 		dev_err(&hdev->pdev->dev,
7718 			"Add port vlan failed, vport %d is already in vlan %d\n",
7719 			vport_id, vlan_id);
7720 		return -EINVAL;
7721 	}
7722 
7723 	if (is_kill &&
7724 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7725 		dev_err(&hdev->pdev->dev,
7726 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7727 			vport_id, vlan_id);
7728 		return -EINVAL;
7729 	}
7730 
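	/* The port-level filter entry is shared by all vports, so only
	 * touch it when the first vport joins this VLAN or the last
	 * vport leaves it.
	 */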
7731 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7732 		vport_num++;
7733 
7734 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7735 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7736 						 is_kill);
7737 
7738 	return ret;
7739 }
7740 
7741 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7742 {
7743 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7744 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7745 	struct hclge_dev *hdev = vport->back;
7746 	struct hclge_desc desc;
7747 	u16 bmap_index;
7748 	int status;
7749 
7750 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7751 
7752 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7753 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7754 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7755 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7756 		      vcfg->accept_tag1 ? 1 : 0);
7757 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7758 		      vcfg->accept_untag1 ? 1 : 0);
7759 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7760 		      vcfg->accept_tag2 ? 1 : 0);
7761 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7762 		      vcfg->accept_untag2 ? 1 : 0);
7763 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7764 		      vcfg->insert_tag1_en ? 1 : 0);
7765 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7766 		      vcfg->insert_tag2_en ? 1 : 0);
7767 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7768 
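	/* One command covers HCLGE_VF_NUM_PER_CMD vports: vf_offset picks
	 * the command block and the bitmap below picks the vport inside it.
	 */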
7769 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7770 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7771 			HCLGE_VF_NUM_PER_BYTE;
7772 	req->vf_bitmap[bmap_index] =
7773 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7774 
7775 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7776 	if (status)
7777 		dev_err(&hdev->pdev->dev,
7778 			"Send port txvlan cfg command fail, ret =%d\n",
7779 			status);
7780 
7781 	return status;
7782 }
7783 
7784 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7785 {
7786 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7787 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7788 	struct hclge_dev *hdev = vport->back;
7789 	struct hclge_desc desc;
7790 	u16 bmap_index;
7791 	int status;
7792 
7793 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7794 
7795 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7796 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7797 		      vcfg->strip_tag1_en ? 1 : 0);
7798 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7799 		      vcfg->strip_tag2_en ? 1 : 0);
7800 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7801 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7802 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7803 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7804 
7805 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7806 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7807 			HCLGE_VF_NUM_PER_BYTE;
7808 	req->vf_bitmap[bmap_index] =
7809 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7810 
7811 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7812 	if (status)
7813 		dev_err(&hdev->pdev->dev,
7814 			"Send port rxvlan cfg command fail, ret =%d\n",
7815 			status);
7816 
7817 	return status;
7818 }
7819 
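/* Configure tx/rx VLAN tag handling for a vport.  With port based VLAN
 * disabled, tag1 frames are accepted on tx and no default tag is inserted;
 * with it enabled, hardware inserts vlan_tag as tag1 and tag1 frames from
 * the stack are not accepted.  The rx strip configuration below mirrors the
 * same port based VLAN state.
 */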
7820 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7821 				  u16 port_base_vlan_state,
7822 				  u16 vlan_tag)
7823 {
7824 	int ret;
7825 
7826 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7827 		vport->txvlan_cfg.accept_tag1 = true;
7828 		vport->txvlan_cfg.insert_tag1_en = false;
7829 		vport->txvlan_cfg.default_tag1 = 0;
7830 	} else {
7831 		vport->txvlan_cfg.accept_tag1 = false;
7832 		vport->txvlan_cfg.insert_tag1_en = true;
7833 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7834 	}
7835 
7836 	vport->txvlan_cfg.accept_untag1 = true;
7837 
7838 	/* accept_tag2 and accept_untag2 are not supported on
7839 	 * pdev revision 0x20; newer revisions support them, but
7840 	 * these two fields cannot be configured by the user.
7841 	 */
7842 	vport->txvlan_cfg.accept_tag2 = true;
7843 	vport->txvlan_cfg.accept_untag2 = true;
7844 	vport->txvlan_cfg.insert_tag2_en = false;
7845 	vport->txvlan_cfg.default_tag2 = 0;
7846 
7847 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7848 		vport->rxvlan_cfg.strip_tag1_en = false;
7849 		vport->rxvlan_cfg.strip_tag2_en =
7850 				vport->rxvlan_cfg.rx_vlan_offload_en;
7851 	} else {
7852 		vport->rxvlan_cfg.strip_tag1_en =
7853 				vport->rxvlan_cfg.rx_vlan_offload_en;
7854 		vport->rxvlan_cfg.strip_tag2_en = true;
7855 	}
7856 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7857 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7858 
7859 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7860 	if (ret)
7861 		return ret;
7862 
7863 	return hclge_set_vlan_rx_offload_cfg(vport);
7864 }
7865 
7866 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7867 {
7868 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7869 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7870 	struct hclge_desc desc;
7871 	int status;
7872 
7873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7874 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7875 	rx_req->ot_fst_vlan_type =
7876 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7877 	rx_req->ot_sec_vlan_type =
7878 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7879 	rx_req->in_fst_vlan_type =
7880 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7881 	rx_req->in_sec_vlan_type =
7882 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7883 
7884 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7885 	if (status) {
7886 		dev_err(&hdev->pdev->dev,
7887 			"Send rxvlan protocol type command fail, ret =%d\n",
7888 			status);
7889 		return status;
7890 	}
7891 
7892 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7893 
7894 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7895 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7896 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7897 
7898 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7899 	if (status)
7900 		dev_err(&hdev->pdev->dev,
7901 			"Send txvlan protocol type command fail, ret =%d\n",
7902 			status);
7903 
7904 	return status;
7905 }
7906 
7907 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7908 {
7909 #define HCLGE_DEF_VLAN_TYPE		0x8100
7910 
7911 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7912 	struct hclge_vport *vport;
7913 	int ret;
7914 	int i;
7915 
7916 	if (hdev->pdev->revision >= 0x21) {
7917 		/* for revision 0x21, vf vlan filter is per function */
7918 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7919 			vport = &hdev->vport[i];
7920 			ret = hclge_set_vlan_filter_ctrl(hdev,
7921 							 HCLGE_FILTER_TYPE_VF,
7922 							 HCLGE_FILTER_FE_EGRESS,
7923 							 true,
7924 							 vport->vport_id);
7925 			if (ret)
7926 				return ret;
7927 		}
7928 
7929 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7930 						 HCLGE_FILTER_FE_INGRESS, true,
7931 						 0);
7932 		if (ret)
7933 			return ret;
7934 	} else {
7935 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7936 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7937 						 true, 0);
7938 		if (ret)
7939 			return ret;
7940 	}
7941 
7942 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7943 
7944 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7945 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7946 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7947 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7948 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7949 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7950 
7951 	ret = hclge_set_vlan_protocol_type(hdev);
7952 	if (ret)
7953 		return ret;
7954 
7955 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7956 		u16 vlan_tag;
7957 
7958 		vport = &hdev->vport[i];
7959 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7960 
7961 		ret = hclge_vlan_offload_cfg(vport,
7962 					     vport->port_base_vlan_cfg.state,
7963 					     vlan_tag);
7964 		if (ret)
7965 			return ret;
7966 	}
7967 
7968 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7969 }
7970 
7971 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7972 				       bool writen_to_tbl)
7973 {
7974 	struct hclge_vport_vlan_cfg *vlan;
7975 
7976 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7977 	if (!vlan)
7978 		return;
7979 
7980 	vlan->hd_tbl_status = writen_to_tbl;
7981 	vlan->vlan_id = vlan_id;
7982 
7983 	list_add_tail(&vlan->node, &vport->vlan_list);
7984 }
7985 
7986 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7987 {
7988 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7989 	struct hclge_dev *hdev = vport->back;
7990 	int ret;
7991 
7992 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7993 		if (!vlan->hd_tbl_status) {
7994 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7995 						       vport->vport_id,
7996 						       vlan->vlan_id, false);
7997 			if (ret) {
7998 				dev_err(&hdev->pdev->dev,
7999 					"restore vport vlan list failed, ret=%d\n",
8000 					ret);
8001 				return ret;
8002 			}
8003 		}
8004 		vlan->hd_tbl_status = true;
8005 	}
8006 
8007 	return 0;
8008 }
8009 
8010 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8011 				      bool is_write_tbl)
8012 {
8013 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8014 	struct hclge_dev *hdev = vport->back;
8015 
8016 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8017 		if (vlan->vlan_id == vlan_id) {
8018 			if (is_write_tbl && vlan->hd_tbl_status)
8019 				hclge_set_vlan_filter_hw(hdev,
8020 							 htons(ETH_P_8021Q),
8021 							 vport->vport_id,
8022 							 vlan_id,
8023 							 true);
8024 
8025 			list_del(&vlan->node);
8026 			kfree(vlan);
8027 			break;
8028 		}
8029 	}
8030 }
8031 
8032 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8033 {
8034 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8035 	struct hclge_dev *hdev = vport->back;
8036 
8037 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8038 		if (vlan->hd_tbl_status)
8039 			hclge_set_vlan_filter_hw(hdev,
8040 						 htons(ETH_P_8021Q),
8041 						 vport->vport_id,
8042 						 vlan->vlan_id,
8043 						 true);
8044 
8045 		vlan->hd_tbl_status = false;
8046 		if (is_del_list) {
8047 			list_del(&vlan->node);
8048 			kfree(vlan);
8049 		}
8050 	}
8051 }
8052 
8053 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8054 {
8055 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8056 	struct hclge_vport *vport;
8057 	int i;
8058 
8059 	mutex_lock(&hdev->vport_cfg_mutex);
8060 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8061 		vport = &hdev->vport[i];
8062 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8063 			list_del(&vlan->node);
8064 			kfree(vlan);
8065 		}
8066 	}
8067 	mutex_unlock(&hdev->vport_cfg_mutex);
8068 }
8069 
8070 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8071 {
8072 	struct hclge_vport *vport = hclge_get_vport(handle);
8073 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8074 	struct hclge_dev *hdev = vport->back;
8075 	u16 vlan_proto;
8076 	u16 state, vlan_id;
8077 	int i;
8078 
8079 	mutex_lock(&hdev->vport_cfg_mutex);
8080 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8081 		vport = &hdev->vport[i];
8082 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8083 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8084 		state = vport->port_base_vlan_cfg.state;
8085 
8086 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8087 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8088 						 vport->vport_id, vlan_id,
8089 						 false);
8090 			continue;
8091 		}
8092 
8093 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8094 			if (vlan->hd_tbl_status)
8095 				hclge_set_vlan_filter_hw(hdev,
8096 							 htons(ETH_P_8021Q),
8097 							 vport->vport_id,
8098 							 vlan->vlan_id,
8099 							 false);
8100 		}
8101 	}
8102 
8103 	mutex_unlock(&hdev->vport_cfg_mutex);
8104 }
8105 
8106 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8107 {
8108 	struct hclge_vport *vport = hclge_get_vport(handle);
8109 
8110 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8111 		vport->rxvlan_cfg.strip_tag1_en = false;
8112 		vport->rxvlan_cfg.strip_tag2_en = enable;
8113 	} else {
8114 		vport->rxvlan_cfg.strip_tag1_en = enable;
8115 		vport->rxvlan_cfg.strip_tag2_en = true;
8116 	}
8117 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8118 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8119 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8120 
8121 	return hclge_set_vlan_rx_offload_cfg(vport);
8122 }
8123 
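/* When port based VLAN is being enabled, drop the per-vport VLAN entries from
 * hardware and install the new port based tag instead; when it is being
 * disabled, remove the old port based tag and restore the vport VLAN list.
 */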
8124 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8125 					    u16 port_base_vlan_state,
8126 					    struct hclge_vlan_info *new_info,
8127 					    struct hclge_vlan_info *old_info)
8128 {
8129 	struct hclge_dev *hdev = vport->back;
8130 	int ret;
8131 
8132 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8133 		hclge_rm_vport_all_vlan_table(vport, false);
8134 		return hclge_set_vlan_filter_hw(hdev,
8135 						 htons(new_info->vlan_proto),
8136 						 vport->vport_id,
8137 						 new_info->vlan_tag,
8138 						 false);
8139 	}
8140 
8141 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8142 				       vport->vport_id, old_info->vlan_tag,
8143 				       true);
8144 	if (ret)
8145 		return ret;
8146 
8147 	return hclge_add_vport_all_vlan_table(vport);
8148 }
8149 
8150 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8151 				    struct hclge_vlan_info *vlan_info)
8152 {
8153 	struct hnae3_handle *nic = &vport->nic;
8154 	struct hclge_vlan_info *old_vlan_info;
8155 	struct hclge_dev *hdev = vport->back;
8156 	int ret;
8157 
8158 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8159 
8160 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8161 	if (ret)
8162 		return ret;
8163 
8164 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8165 		/* add new VLAN tag */
8166 		ret = hclge_set_vlan_filter_hw(hdev,
8167 					       htons(vlan_info->vlan_proto),
8168 					       vport->vport_id,
8169 					       vlan_info->vlan_tag,
8170 					       false);
8171 		if (ret)
8172 			return ret;
8173 
8174 		/* remove old VLAN tag */
8175 		ret = hclge_set_vlan_filter_hw(hdev,
8176 					       htons(old_vlan_info->vlan_proto),
8177 					       vport->vport_id,
8178 					       old_vlan_info->vlan_tag,
8179 					       true);
8180 		if (ret)
8181 			return ret;
8182 
8183 		goto update;
8184 	}
8185 
8186 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8187 					       old_vlan_info);
8188 	if (ret)
8189 		return ret;
8190 
8191 	/* update state only when disabling/enabling port based VLAN */
8192 	vport->port_base_vlan_cfg.state = state;
8193 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8194 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8195 	else
8196 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8197 
8198 update:
8199 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8200 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8201 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8202 
8203 	return 0;
8204 }
8205 
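/* Map the current port based VLAN state and the requested VLAN to an action:
 * currently disabled + vlan 0    -> NOCHANGE
 * currently disabled + vlan x    -> ENABLE
 * currently enabled  + vlan 0    -> DISABLE
 * currently enabled  + same vlan -> NOCHANGE
 * currently enabled  + new vlan  -> MODIFY
 */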
8206 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8207 					  enum hnae3_port_base_vlan_state state,
8208 					  u16 vlan)
8209 {
8210 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8211 		if (!vlan)
8212 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8213 		else
8214 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8215 	} else {
8216 		if (!vlan)
8217 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8218 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8219 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8220 		else
8221 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8222 	}
8223 }
8224 
8225 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8226 				    u16 vlan, u8 qos, __be16 proto)
8227 {
8228 	struct hclge_vport *vport = hclge_get_vport(handle);
8229 	struct hclge_dev *hdev = vport->back;
8230 	struct hclge_vlan_info vlan_info;
8231 	u16 state;
8232 	int ret;
8233 
8234 	if (hdev->pdev->revision == 0x20)
8235 		return -EOPNOTSUPP;
8236 
8237 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8238 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8239 		return -EINVAL;
8240 	if (proto != htons(ETH_P_8021Q))
8241 		return -EPROTONOSUPPORT;
8242 
8243 	vport = &hdev->vport[vfid];
8244 	state = hclge_get_port_base_vlan_state(vport,
8245 					       vport->port_base_vlan_cfg.state,
8246 					       vlan);
8247 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8248 		return 0;
8249 
8250 	vlan_info.vlan_tag = vlan;
8251 	vlan_info.qos = qos;
8252 	vlan_info.vlan_proto = ntohs(proto);
8253 
8254 	/* update port based VLAN for PF */
8255 	if (!vfid) {
8256 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8257 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8258 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8259 
8260 		return ret;
8261 	}
8262 
8263 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8264 		return hclge_update_port_base_vlan_cfg(vport, state,
8265 						       &vlan_info);
8266 	} else {
8267 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8268 							(u8)vfid, state,
8269 							vlan, qos,
8270 							ntohs(proto));
8271 		return ret;
8272 	}
8273 }
8274 
8275 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8276 			  u16 vlan_id, bool is_kill)
8277 {
8278 	struct hclge_vport *vport = hclge_get_vport(handle);
8279 	struct hclge_dev *hdev = vport->back;
8280 	bool writen_to_tbl = false;
8281 	int ret = 0;
8282 
8283 	/* When the device is resetting, firmware is unable to handle the
8284 	 * mailbox. Just record the vlan id, and remove it after the reset
8285 	 * has finished.
8286 	 */
8287 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8288 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8289 		return -EBUSY;
8290 	}
8291 
8292 	/* When port based vlan is enabled, it is used as the vlan filter
8293 	 * entry. In this case the vlan filter table is not updated when the
8294 	 * user adds a new vlan or removes an existing one; only the vport
8295 	 * vlan list is updated. The vlan ids in the vlan list are not
8296 	 * written to the vlan filter table until port based vlan is disabled.
8297 	 */
8298 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8299 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8300 					       vlan_id, is_kill);
8301 		writen_to_tbl = true;
8302 	}
8303 
8304 	if (!ret) {
8305 		if (is_kill)
8306 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8307 		else
8308 			hclge_add_vport_vlan_table(vport, vlan_id,
8309 						   writen_to_tbl);
8310 	} else if (is_kill) {
8311 		/* When removing the hw vlan filter failed, record the vlan
8312 		 * id and try to remove it from hw later, to stay consistent
8313 		 * with the stack.
8314 		 */
8315 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8316 	}
8317 	return ret;
8318 }
8319 
8320 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8321 {
8322 #define HCLGE_MAX_SYNC_COUNT	60
8323 
8324 	int i, ret, sync_cnt = 0;
8325 	u16 vlan_id;
8326 
8327 	/* walk all vports and retry the vlan deletions that failed earlier */
8328 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8329 		struct hclge_vport *vport = &hdev->vport[i];
8330 
8331 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8332 					 VLAN_N_VID);
8333 		while (vlan_id != VLAN_N_VID) {
8334 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8335 						       vport->vport_id, vlan_id,
8336 						       true);
8337 			if (ret && ret != -EINVAL)
8338 				return;
8339 
8340 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8341 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8342 
8343 			sync_cnt++;
8344 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8345 				return;
8346 
8347 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8348 						 VLAN_N_VID);
8349 		}
8350 	}
8351 }
8352 
8353 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8354 {
8355 	struct hclge_config_max_frm_size_cmd *req;
8356 	struct hclge_desc desc;
8357 
8358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8359 
8360 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8361 	req->max_frm_size = cpu_to_le16(new_mps);
8362 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8363 
8364 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8365 }
8366 
8367 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8368 {
8369 	struct hclge_vport *vport = hclge_get_vport(handle);
8370 
8371 	return hclge_set_vport_mtu(vport, new_mtu);
8372 }
8373 
8374 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8375 {
8376 	struct hclge_dev *hdev = vport->back;
8377 	int i, max_frm_size, ret;
8378 
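	/* Convert the MTU to a max frame size: add the Ethernet header,
	 * the FCS and room for two VLAN tags (double tagged frames).
	 */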
8379 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8380 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8381 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8382 		return -EINVAL;
8383 
8384 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8385 	mutex_lock(&hdev->vport_lock);
8386 	/* VF's mps must fit within hdev->mps */
8387 	if (vport->vport_id && max_frm_size > hdev->mps) {
8388 		mutex_unlock(&hdev->vport_lock);
8389 		return -EINVAL;
8390 	} else if (vport->vport_id) {
8391 		vport->mps = max_frm_size;
8392 		mutex_unlock(&hdev->vport_lock);
8393 		return 0;
8394 	}
8395 
8396 	/* PF's mps must not be less than any VF's mps */
8397 	for (i = 1; i < hdev->num_alloc_vport; i++)
8398 		if (max_frm_size < hdev->vport[i].mps) {
8399 			mutex_unlock(&hdev->vport_lock);
8400 			return -EINVAL;
8401 		}
8402 
8403 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8404 
8405 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8406 	if (ret) {
8407 		dev_err(&hdev->pdev->dev,
8408 			"Change mtu fail, ret =%d\n", ret);
8409 		goto out;
8410 	}
8411 
8412 	hdev->mps = max_frm_size;
8413 	vport->mps = max_frm_size;
8414 
8415 	ret = hclge_buffer_alloc(hdev);
8416 	if (ret)
8417 		dev_err(&hdev->pdev->dev,
8418 			"Allocate buffer fail, ret =%d\n", ret);
8419 
8420 out:
8421 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8422 	mutex_unlock(&hdev->vport_lock);
8423 	return ret;
8424 }
8425 
8426 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8427 				    bool enable)
8428 {
8429 	struct hclge_reset_tqp_queue_cmd *req;
8430 	struct hclge_desc desc;
8431 	int ret;
8432 
8433 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8434 
8435 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8436 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8437 	if (enable)
8438 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8439 
8440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8441 	if (ret) {
8442 		dev_err(&hdev->pdev->dev,
8443 			"Send tqp reset cmd error, status =%d\n", ret);
8444 		return ret;
8445 	}
8446 
8447 	return 0;
8448 }
8449 
8450 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8451 {
8452 	struct hclge_reset_tqp_queue_cmd *req;
8453 	struct hclge_desc desc;
8454 	int ret;
8455 
8456 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8457 
8458 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8459 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8460 
8461 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8462 	if (ret) {
8463 		dev_err(&hdev->pdev->dev,
8464 			"Get reset status error, status =%d\n", ret);
8465 		return ret;
8466 	}
8467 
8468 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8469 }
8470 
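/* Convert a queue id that is local to the handle into the global TQP index
 * used by the firmware reset commands.
 */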
8471 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8472 {
8473 	struct hnae3_queue *queue;
8474 	struct hclge_tqp *tqp;
8475 
8476 	queue = handle->kinfo.tqp[queue_id];
8477 	tqp = container_of(queue, struct hclge_tqp, q);
8478 
8479 	return tqp->index;
8480 }
8481 
8482 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8483 {
8484 	struct hclge_vport *vport = hclge_get_vport(handle);
8485 	struct hclge_dev *hdev = vport->back;
8486 	int reset_try_times = 0;
8487 	int reset_status;
8488 	u16 queue_gid;
8489 	int ret;
8490 
8491 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8492 
8493 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8494 	if (ret) {
8495 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8496 		return ret;
8497 	}
8498 
8499 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8500 	if (ret) {
8501 		dev_err(&hdev->pdev->dev,
8502 			"Send reset tqp cmd fail, ret = %d\n", ret);
8503 		return ret;
8504 	}
8505 
8506 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8507 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8508 		if (reset_status)
8509 			break;
8510 
8511 		/* Wait for tqp hw reset */
8512 		usleep_range(1000, 1200);
8513 	}
8514 
8515 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8516 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8517 		return -ETIME;
8518 	}
8519 
8520 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8521 	if (ret)
8522 		dev_err(&hdev->pdev->dev,
8523 			"Deassert the soft reset fail, ret = %d\n", ret);
8524 
8525 	return ret;
8526 }
8527 
8528 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8529 {
8530 	struct hclge_dev *hdev = vport->back;
8531 	int reset_try_times = 0;
8532 	int reset_status;
8533 	u16 queue_gid;
8534 	int ret;
8535 
8536 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8537 
8538 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8539 	if (ret) {
8540 		dev_warn(&hdev->pdev->dev,
8541 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8542 		return;
8543 	}
8544 
8545 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8546 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8547 		if (reset_status)
8548 			break;
8549 
8550 		/* Wait for tqp hw reset */
8551 		usleep_range(1000, 1200);
8552 	}
8553 
8554 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8555 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8556 		return;
8557 	}
8558 
8559 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8560 	if (ret)
8561 		dev_warn(&hdev->pdev->dev,
8562 			 "Deassert the soft reset fail, ret = %d\n", ret);
8563 }
8564 
8565 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8566 {
8567 	struct hclge_vport *vport = hclge_get_vport(handle);
8568 	struct hclge_dev *hdev = vport->back;
8569 
8570 	return hdev->fw_version;
8571 }
8572 
8573 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8574 {
8575 	struct phy_device *phydev = hdev->hw.mac.phydev;
8576 
8577 	if (!phydev)
8578 		return;
8579 
8580 	phy_set_asym_pause(phydev, rx_en, tx_en);
8581 }
8582 
8583 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8584 {
8585 	int ret;
8586 
8587 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8588 		return 0;
8589 
8590 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8591 	if (ret)
8592 		dev_err(&hdev->pdev->dev,
8593 			"configure pauseparam error, ret = %d.\n", ret);
8594 
8595 	return ret;
8596 }
8597 
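/* Resolve the pause configuration from the local and link partner autoneg
 * advertisements (via mii_resolve_flowctrl_fdx) and program the MAC
 * accordingly; pause is forced off on half duplex links.
 */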
8598 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8599 {
8600 	struct phy_device *phydev = hdev->hw.mac.phydev;
8601 	u16 remote_advertising = 0;
8602 	u16 local_advertising;
8603 	u32 rx_pause, tx_pause;
8604 	u8 flowctl;
8605 
8606 	if (!phydev->link || !phydev->autoneg)
8607 		return 0;
8608 
8609 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8610 
8611 	if (phydev->pause)
8612 		remote_advertising = LPA_PAUSE_CAP;
8613 
8614 	if (phydev->asym_pause)
8615 		remote_advertising |= LPA_PAUSE_ASYM;
8616 
8617 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8618 					   remote_advertising);
8619 	tx_pause = flowctl & FLOW_CTRL_TX;
8620 	rx_pause = flowctl & FLOW_CTRL_RX;
8621 
8622 	if (phydev->duplex == HCLGE_MAC_HALF) {
8623 		tx_pause = 0;
8624 		rx_pause = 0;
8625 	}
8626 
8627 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8628 }
8629 
8630 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8631 				 u32 *rx_en, u32 *tx_en)
8632 {
8633 	struct hclge_vport *vport = hclge_get_vport(handle);
8634 	struct hclge_dev *hdev = vport->back;
8635 	struct phy_device *phydev = hdev->hw.mac.phydev;
8636 
8637 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8638 
8639 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8640 		*rx_en = 0;
8641 		*tx_en = 0;
8642 		return;
8643 	}
8644 
8645 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8646 		*rx_en = 1;
8647 		*tx_en = 0;
8648 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8649 		*tx_en = 1;
8650 		*rx_en = 0;
8651 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8652 		*rx_en = 1;
8653 		*tx_en = 1;
8654 	} else {
8655 		*rx_en = 0;
8656 		*tx_en = 0;
8657 	}
8658 }
8659 
8660 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8661 					 u32 rx_en, u32 tx_en)
8662 {
8663 	if (rx_en && tx_en)
8664 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8665 	else if (rx_en && !tx_en)
8666 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8667 	else if (!rx_en && tx_en)
8668 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8669 	else
8670 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8671 
8672 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8673 }
8674 
8675 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8676 				u32 rx_en, u32 tx_en)
8677 {
8678 	struct hclge_vport *vport = hclge_get_vport(handle);
8679 	struct hclge_dev *hdev = vport->back;
8680 	struct phy_device *phydev = hdev->hw.mac.phydev;
8681 	u32 fc_autoneg;
8682 
8683 	if (phydev) {
8684 		fc_autoneg = hclge_get_autoneg(handle);
8685 		if (auto_neg != fc_autoneg) {
8686 			dev_info(&hdev->pdev->dev,
8687 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8688 			return -EOPNOTSUPP;
8689 		}
8690 	}
8691 
8692 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8693 		dev_info(&hdev->pdev->dev,
8694 			 "Priority flow control enabled. Cannot set link flow control.\n");
8695 		return -EOPNOTSUPP;
8696 	}
8697 
8698 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8699 
8700 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8701 
8702 	if (!auto_neg)
8703 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8704 
8705 	if (phydev)
8706 		return phy_start_aneg(phydev);
8707 
8708 	return -EOPNOTSUPP;
8709 }
8710 
8711 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8712 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8713 {
8714 	struct hclge_vport *vport = hclge_get_vport(handle);
8715 	struct hclge_dev *hdev = vport->back;
8716 
8717 	if (speed)
8718 		*speed = hdev->hw.mac.speed;
8719 	if (duplex)
8720 		*duplex = hdev->hw.mac.duplex;
8721 	if (auto_neg)
8722 		*auto_neg = hdev->hw.mac.autoneg;
8723 }
8724 
8725 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8726 				 u8 *module_type)
8727 {
8728 	struct hclge_vport *vport = hclge_get_vport(handle);
8729 	struct hclge_dev *hdev = vport->back;
8730 
8731 	if (media_type)
8732 		*media_type = hdev->hw.mac.media_type;
8733 
8734 	if (module_type)
8735 		*module_type = hdev->hw.mac.module_type;
8736 }
8737 
8738 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8739 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8740 {
8741 	struct hclge_vport *vport = hclge_get_vport(handle);
8742 	struct hclge_dev *hdev = vport->back;
8743 	struct phy_device *phydev = hdev->hw.mac.phydev;
8744 	int mdix_ctrl, mdix, is_resolved;
8745 	unsigned int retval;
8746 
8747 	if (!phydev) {
8748 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8749 		*tp_mdix = ETH_TP_MDI_INVALID;
8750 		return;
8751 	}
8752 
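	/* Switch the PHY to the MDI/MDIX page, read the control and status
	 * registers, then restore the copper page before decoding them.
	 */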
8753 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8754 
8755 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8756 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8757 				    HCLGE_PHY_MDIX_CTRL_S);
8758 
8759 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8760 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8761 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8762 
8763 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8764 
8765 	switch (mdix_ctrl) {
8766 	case 0x0:
8767 		*tp_mdix_ctrl = ETH_TP_MDI;
8768 		break;
8769 	case 0x1:
8770 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8771 		break;
8772 	case 0x3:
8773 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8774 		break;
8775 	default:
8776 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8777 		break;
8778 	}
8779 
8780 	if (!is_resolved)
8781 		*tp_mdix = ETH_TP_MDI_INVALID;
8782 	else if (mdix)
8783 		*tp_mdix = ETH_TP_MDI_X;
8784 	else
8785 		*tp_mdix = ETH_TP_MDI;
8786 }
8787 
8788 static void hclge_info_show(struct hclge_dev *hdev)
8789 {
8790 	struct device *dev = &hdev->pdev->dev;
8791 
8792 	dev_info(dev, "PF info begin:\n");
8793 
8794 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8795 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8796 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8797 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8798 	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
8799 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8800 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8801 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8802 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8803 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8804 	dev_info(dev, "This is %s PF\n",
8805 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8806 	dev_info(dev, "DCB %s\n",
8807 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8808 	dev_info(dev, "MQPRIO %s\n",
8809 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8810 
8811 	dev_info(dev, "PF info end.\n");
8812 }
8813 
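/* Initialise the NIC client instance.  If a reset started or completed while
 * the client was initialising (HCLGE_STATE_RST_HANDLING set or the reset
 * counter changed), the instance is torn down again and -EBUSY is returned.
 */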
8814 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8815 					  struct hclge_vport *vport)
8816 {
8817 	struct hnae3_client *client = vport->nic.client;
8818 	struct hclge_dev *hdev = ae_dev->priv;
8819 	int rst_cnt;
8820 	int ret;
8821 
8822 	rst_cnt = hdev->rst_stats.reset_cnt;
8823 	ret = client->ops->init_instance(&vport->nic);
8824 	if (ret)
8825 		return ret;
8826 
8827 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8828 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8829 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8830 		ret = -EBUSY;
8831 		goto init_nic_err;
8832 	}
8833 
8834 	/* Enable nic hw error interrupts */
8835 	ret = hclge_config_nic_hw_error(hdev, true);
8836 	if (ret) {
8837 		dev_err(&ae_dev->pdev->dev,
8838 			"fail(%d) to enable hw error interrupts\n", ret);
8839 		goto init_nic_err;
8840 	}
8841 
8842 	hnae3_set_client_init_flag(client, ae_dev, 1);
8843 
8844 	if (netif_msg_drv(&hdev->vport->nic))
8845 		hclge_info_show(hdev);
8846 
8847 	return ret;
8848 
8849 init_nic_err:
8850 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8851 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8852 		msleep(HCLGE_WAIT_RESET_DONE);
8853 
8854 	client->ops->uninit_instance(&vport->nic, 0);
8855 
8856 	return ret;
8857 }
8858 
8859 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8860 					   struct hclge_vport *vport)
8861 {
8862 	struct hnae3_client *client = vport->roce.client;
8863 	struct hclge_dev *hdev = ae_dev->priv;
8864 	int rst_cnt;
8865 	int ret;
8866 
8867 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8868 	    !hdev->nic_client)
8869 		return 0;
8870 
8871 	client = hdev->roce_client;
8872 	ret = hclge_init_roce_base_info(vport);
8873 	if (ret)
8874 		return ret;
8875 
8876 	rst_cnt = hdev->rst_stats.reset_cnt;
8877 	ret = client->ops->init_instance(&vport->roce);
8878 	if (ret)
8879 		return ret;
8880 
8881 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8882 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8883 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8884 		ret = -EBUSY;
8885 		goto init_roce_err;
8886 	}
8887 
8888 	/* Enable roce ras interrupts */
8889 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8890 	if (ret) {
8891 		dev_err(&ae_dev->pdev->dev,
8892 			"fail(%d) to enable roce ras interrupts\n", ret);
8893 		goto init_roce_err;
8894 	}
8895 
8896 	hnae3_set_client_init_flag(client, ae_dev, 1);
8897 
8898 	return 0;
8899 
8900 init_roce_err:
8901 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8902 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8903 		msleep(HCLGE_WAIT_RESET_DONE);
8904 
8905 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8906 
8907 	return ret;
8908 }
8909 
8910 static int hclge_init_client_instance(struct hnae3_client *client,
8911 				      struct hnae3_ae_dev *ae_dev)
8912 {
8913 	struct hclge_dev *hdev = ae_dev->priv;
8914 	struct hclge_vport *vport;
8915 	int i, ret;
8916 
8917 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8918 		vport = &hdev->vport[i];
8919 
8920 		switch (client->type) {
8921 		case HNAE3_CLIENT_KNIC:
8922 
8923 			hdev->nic_client = client;
8924 			vport->nic.client = client;
8925 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8926 			if (ret)
8927 				goto clear_nic;
8928 
8929 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8930 			if (ret)
8931 				goto clear_roce;
8932 
8933 			break;
8934 		case HNAE3_CLIENT_ROCE:
8935 			if (hnae3_dev_roce_supported(hdev)) {
8936 				hdev->roce_client = client;
8937 				vport->roce.client = client;
8938 			}
8939 
8940 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8941 			if (ret)
8942 				goto clear_roce;
8943 
8944 			break;
8945 		default:
8946 			return -EINVAL;
8947 		}
8948 	}
8949 
8950 	return 0;
8951 
8952 clear_nic:
8953 	hdev->nic_client = NULL;
8954 	vport->nic.client = NULL;
8955 	return ret;
8956 clear_roce:
8957 	hdev->roce_client = NULL;
8958 	vport->roce.client = NULL;
8959 	return ret;
8960 }
8961 
8962 static void hclge_uninit_client_instance(struct hnae3_client *client,
8963 					 struct hnae3_ae_dev *ae_dev)
8964 {
8965 	struct hclge_dev *hdev = ae_dev->priv;
8966 	struct hclge_vport *vport;
8967 	int i;
8968 
8969 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8970 		vport = &hdev->vport[i];
8971 		if (hdev->roce_client) {
8972 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8973 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8974 				msleep(HCLGE_WAIT_RESET_DONE);
8975 
8976 			hdev->roce_client->ops->uninit_instance(&vport->roce,
8977 								0);
8978 			hdev->roce_client = NULL;
8979 			vport->roce.client = NULL;
8980 		}
8981 		if (client->type == HNAE3_CLIENT_ROCE)
8982 			return;
8983 		if (hdev->nic_client && client->ops->uninit_instance) {
8984 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8985 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8986 				msleep(HCLGE_WAIT_RESET_DONE);
8987 
8988 			client->ops->uninit_instance(&vport->nic, 0);
8989 			hdev->nic_client = NULL;
8990 			vport->nic.client = NULL;
8991 		}
8992 	}
8993 }
8994 
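/* Enable the PCI device, set a 64 bit DMA mask (falling back to 32 bit),
 * request the regions and map BAR2 for register access.
 */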
8995 static int hclge_pci_init(struct hclge_dev *hdev)
8996 {
8997 	struct pci_dev *pdev = hdev->pdev;
8998 	struct hclge_hw *hw;
8999 	int ret;
9000 
9001 	ret = pci_enable_device(pdev);
9002 	if (ret) {
9003 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9004 		return ret;
9005 	}
9006 
9007 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9008 	if (ret) {
9009 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9010 		if (ret) {
9011 			dev_err(&pdev->dev,
9012 				"can't set consistent PCI DMA\n");
9013 			goto err_disable_device;
9014 		}
9015 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9016 	}
9017 
9018 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9019 	if (ret) {
9020 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9021 		goto err_disable_device;
9022 	}
9023 
9024 	pci_set_master(pdev);
9025 	hw = &hdev->hw;
9026 	hw->io_base = pcim_iomap(pdev, 2, 0);
9027 	if (!hw->io_base) {
9028 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9029 		ret = -ENOMEM;
9030 		goto err_clr_master;
9031 	}
9032 
9033 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9034 
9035 	return 0;
9036 err_clr_master:
9037 	pci_clear_master(pdev);
9038 	pci_release_regions(pdev);
9039 err_disable_device:
9040 	pci_disable_device(pdev);
9041 
9042 	return ret;
9043 }
9044 
9045 static void hclge_pci_uninit(struct hclge_dev *hdev)
9046 {
9047 	struct pci_dev *pdev = hdev->pdev;
9048 
9049 	pcim_iounmap(pdev, hdev->hw.io_base);
9050 	pci_free_irq_vectors(pdev);
9051 	pci_clear_master(pdev);
9052 	pci_release_mem_regions(pdev);
9053 	pci_disable_device(pdev);
9054 }
9055 
9056 static void hclge_state_init(struct hclge_dev *hdev)
9057 {
9058 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9059 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9060 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9061 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9062 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9063 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9064 }
9065 
9066 static void hclge_state_uninit(struct hclge_dev *hdev)
9067 {
9068 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9069 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9070 
9071 	if (hdev->reset_timer.function)
9072 		del_timer_sync(&hdev->reset_timer);
9073 	if (hdev->service_task.work.func)
9074 		cancel_delayed_work_sync(&hdev->service_task);
9075 	if (hdev->rst_service_task.func)
9076 		cancel_work_sync(&hdev->rst_service_task);
9077 	if (hdev->mbx_service_task.func)
9078 		cancel_work_sync(&hdev->mbx_service_task);
9079 }
9080 
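/* Prepare for an FLR: request a function level reset through the reset
 * framework and poll up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS ms for
 * the stack to be brought down before the FLR proceeds.
 */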
9081 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9082 {
9083 #define HCLGE_FLR_WAIT_MS	100
9084 #define HCLGE_FLR_WAIT_CNT	50
9085 	struct hclge_dev *hdev = ae_dev->priv;
9086 	int cnt = 0;
9087 
9088 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9089 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9090 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9091 	hclge_reset_event(hdev->pdev, NULL);
9092 
9093 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9094 	       cnt++ < HCLGE_FLR_WAIT_CNT)
9095 		msleep(HCLGE_FLR_WAIT_MS);
9096 
9097 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9098 		dev_err(&hdev->pdev->dev,
9099 			"flr wait down timeout: %d\n", cnt);
9100 }
9101 
9102 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9103 {
9104 	struct hclge_dev *hdev = ae_dev->priv;
9105 
9106 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9107 }
9108 
9109 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9110 {
9111 	u16 i;
9112 
9113 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9114 		struct hclge_vport *vport = &hdev->vport[i];
9115 		int ret;
9116 
9117 		/* Send command to clear the VF's FUNC_RST_ING bit */
9118 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9119 		if (ret)
9120 			dev_warn(&hdev->pdev->dev,
9121 				 "clear vf(%d) rst failed %d!\n",
9122 				 vport->vport_id, ret);
9123 	}
9124 }
9125 
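/* Bring up the PF: PCI and command queue setup, capability query,
 * MSI/MSI-X and misc vector initialization, TQP and vport allocation,
 * then MAC, VLAN, TM, RSS and flow director configuration. Hardware
 * errors that occurred before the driver loaded are logged and handled
 * via a delayed reset.
 */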
9126 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9127 {
9128 	struct pci_dev *pdev = ae_dev->pdev;
9129 	struct hclge_dev *hdev;
9130 	int ret;
9131 
9132 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9133 	if (!hdev) {
9134 		ret = -ENOMEM;
9135 		goto out;
9136 	}
9137 
9138 	hdev->pdev = pdev;
9139 	hdev->ae_dev = ae_dev;
9140 	hdev->reset_type = HNAE3_NONE_RESET;
9141 	hdev->reset_level = HNAE3_FUNC_RESET;
9142 	ae_dev->priv = hdev;
9143 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9144 
9145 	mutex_init(&hdev->vport_lock);
9146 	mutex_init(&hdev->vport_cfg_mutex);
9147 	spin_lock_init(&hdev->fd_rule_lock);
9148 
9149 	ret = hclge_pci_init(hdev);
9150 	if (ret) {
9151 		dev_err(&pdev->dev, "PCI init failed\n");
9152 		goto out;
9153 	}
9154 
9155 	/* Initialize the firmware command queue */
9156 	ret = hclge_cmd_queue_init(hdev);
9157 	if (ret) {
9158 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9159 		goto err_pci_uninit;
9160 	}
9161 
9162 	/* Initialize the firmware command interface */
9163 	ret = hclge_cmd_init(hdev);
9164 	if (ret)
9165 		goto err_cmd_uninit;
9166 
9167 	ret = hclge_get_cap(hdev);
9168 	if (ret) {
9169 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9170 			ret);
9171 		goto err_cmd_uninit;
9172 	}
9173 
9174 	ret = hclge_configure(hdev);
9175 	if (ret) {
9176 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9177 		goto err_cmd_uninit;
9178 	}
9179 
9180 	ret = hclge_init_msi(hdev);
9181 	if (ret) {
9182 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9183 		goto err_cmd_uninit;
9184 	}
9185 
9186 	ret = hclge_misc_irq_init(hdev);
9187 	if (ret) {
9188 		dev_err(&pdev->dev,
9189 			"Misc IRQ(vector0) init error, ret = %d.\n",
9190 			ret);
9191 		goto err_msi_uninit;
9192 	}
9193 
9194 	ret = hclge_alloc_tqps(hdev);
9195 	if (ret) {
9196 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9197 		goto err_msi_irq_uninit;
9198 	}
9199 
9200 	ret = hclge_alloc_vport(hdev);
9201 	if (ret) {
9202 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9203 		goto err_msi_irq_uninit;
9204 	}
9205 
9206 	ret = hclge_map_tqp(hdev);
9207 	if (ret) {
9208 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9209 		goto err_msi_irq_uninit;
9210 	}
9211 
9212 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9213 		ret = hclge_mac_mdio_config(hdev);
9214 		if (ret) {
9215 			dev_err(&hdev->pdev->dev,
9216 				"mdio config fail ret=%d\n", ret);
9217 			goto err_msi_irq_uninit;
9218 		}
9219 	}
9220 
9221 	ret = hclge_init_umv_space(hdev);
9222 	if (ret) {
9223 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9224 		goto err_mdiobus_unreg;
9225 	}
9226 
9227 	ret = hclge_mac_init(hdev);
9228 	if (ret) {
9229 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9230 		goto err_mdiobus_unreg;
9231 	}
9232 
9233 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9234 	if (ret) {
9235 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9236 		goto err_mdiobus_unreg;
9237 	}
9238 
9239 	ret = hclge_config_gro(hdev, true);
9240 	if (ret)
9241 		goto err_mdiobus_unreg;
9242 
9243 	ret = hclge_init_vlan_config(hdev);
9244 	if (ret) {
9245 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9246 		goto err_mdiobus_unreg;
9247 	}
9248 
9249 	ret = hclge_tm_schd_init(hdev);
9250 	if (ret) {
9251 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9252 		goto err_mdiobus_unreg;
9253 	}
9254 
9255 	hclge_rss_init_cfg(hdev);
9256 	ret = hclge_rss_init_hw(hdev);
9257 	if (ret) {
9258 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9259 		goto err_mdiobus_unreg;
9260 	}
9261 
9262 	ret = init_mgr_tbl(hdev);
9263 	if (ret) {
9264 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9265 		goto err_mdiobus_unreg;
9266 	}
9267 
9268 	ret = hclge_init_fd_config(hdev);
9269 	if (ret) {
9270 		dev_err(&pdev->dev,
9271 			"fd table init fail, ret=%d\n", ret);
9272 		goto err_mdiobus_unreg;
9273 	}
9274 
9275 	INIT_KFIFO(hdev->mac_tnl_log);
9276 
9277 	hclge_dcb_ops_set(hdev);
9278 
9279 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9280 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9281 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9282 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9283 
9284 	/* Set up affinity after the service timer setup because add_timer_on
9285 	 * is called in the affinity notify callback.
9286 	 */
9287 	hclge_misc_affinity_setup(hdev);
9288 
9289 	hclge_clear_all_event_cause(hdev);
9290 	hclge_clear_resetting_state(hdev);
9291 
9292 	/* Log and clear the hw errors that have already occurred */
9293 	hclge_handle_all_hns_hw_errors(ae_dev);
9294 
9295 	/* Request a delayed reset for error recovery, because an immediate
9296 	 * global reset on a PF may affect other PFs' pending initialization.
9297 	 */
9298 	if (ae_dev->hw_err_reset_req) {
9299 		enum hnae3_reset_type reset_level;
9300 
9301 		reset_level = hclge_get_reset_level(ae_dev,
9302 						    &ae_dev->hw_err_reset_req);
9303 		hclge_set_def_reset_request(ae_dev, reset_level);
9304 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9305 	}
9306 
9307 	/* Enable MISC vector(vector0) */
9308 	hclge_enable_vector(&hdev->misc_vector, true);
9309 
9310 	hclge_state_init(hdev);
9311 	hdev->last_reset_time = jiffies;
9312 
9313 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9314 		 HCLGE_DRIVER_NAME);
9315 
9316 	return 0;
9317 
9318 err_mdiobus_unreg:
9319 	if (hdev->hw.mac.phydev)
9320 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9321 err_msi_irq_uninit:
9322 	hclge_misc_irq_uninit(hdev);
9323 err_msi_uninit:
9324 	pci_free_irq_vectors(pdev);
9325 err_cmd_uninit:
9326 	hclge_cmd_uninit(hdev);
9327 err_pci_uninit:
9328 	pcim_iounmap(pdev, hdev->hw.io_base);
9329 	pci_clear_master(pdev);
9330 	pci_release_regions(pdev);
9331 	pci_disable_device(pdev);
9332 out:
9333 	return ret;
9334 }
9335 
9336 static void hclge_stats_clear(struct hclge_dev *hdev)
9337 {
9338 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9339 }
9340 
9341 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9342 {
9343 	struct hclge_vport *vport = hdev->vport;
9344 	int i;
9345 
9346 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9347 		hclge_vport_stop(vport);
9348 		vport++;
9349 	}
9350 }
9351 
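/* Re-initialize the hardware after a reset. The software resources
 * allocated in hclge_init_ae_dev() are reused; only the hardware
 * configuration (TQP mapping, MAC, VLAN, TM, RSS, flow director and
 * error interrupts) is programmed again.
 */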
9352 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9353 {
9354 	struct hclge_dev *hdev = ae_dev->priv;
9355 	struct pci_dev *pdev = ae_dev->pdev;
9356 	int ret;
9357 
9358 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9359 
9360 	hclge_stats_clear(hdev);
9361 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9362 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9363 
9364 	ret = hclge_cmd_init(hdev);
9365 	if (ret) {
9366 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9367 		return ret;
9368 	}
9369 
9370 	ret = hclge_map_tqp(hdev);
9371 	if (ret) {
9372 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9373 		return ret;
9374 	}
9375 
9376 	hclge_reset_umv_space(hdev);
9377 
9378 	ret = hclge_mac_init(hdev);
9379 	if (ret) {
9380 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9381 		return ret;
9382 	}
9383 
9384 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9385 	if (ret) {
9386 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9387 		return ret;
9388 	}
9389 
9390 	ret = hclge_config_gro(hdev, true);
9391 	if (ret)
9392 		return ret;
9393 
9394 	ret = hclge_init_vlan_config(hdev);
9395 	if (ret) {
9396 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9397 		return ret;
9398 	}
9399 
9400 	ret = hclge_tm_init_hw(hdev, true);
9401 	if (ret) {
9402 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9403 		return ret;
9404 	}
9405 
9406 	ret = hclge_rss_init_hw(hdev);
9407 	if (ret) {
9408 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9409 		return ret;
9410 	}
9411 
9412 	ret = hclge_init_fd_config(hdev);
9413 	if (ret) {
9414 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9415 		return ret;
9416 	}
9417 
9418 	/* Re-enable the hw error interrupts because
9419 	 * the interrupts get disabled on global reset.
9420 	 */
9421 	ret = hclge_config_nic_hw_error(hdev, true);
9422 	if (ret) {
9423 		dev_err(&pdev->dev,
9424 			"fail(%d) to re-enable NIC hw error interrupts\n",
9425 			ret);
9426 		return ret;
9427 	}
9428 
9429 	if (hdev->roce_client) {
9430 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9431 		if (ret) {
9432 			dev_err(&pdev->dev,
9433 				"fail(%d) to re-enable roce ras interrupts\n",
9434 				ret);
9435 			return ret;
9436 		}
9437 	}
9438 
9439 	hclge_reset_vport_state(hdev);
9440 
9441 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9442 		 HCLGE_DRIVER_NAME);
9443 
9444 	return 0;
9445 }
9446 
9447 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9448 {
9449 	struct hclge_dev *hdev = ae_dev->priv;
9450 	struct hclge_mac *mac = &hdev->hw.mac;
9451 
9452 	hclge_misc_affinity_teardown(hdev);
9453 	hclge_state_uninit(hdev);
9454 
9455 	if (mac->phydev)
9456 		mdiobus_unregister(mac->mdio_bus);
9457 
9458 	hclge_uninit_umv_space(hdev);
9459 
9460 	/* Disable MISC vector(vector0) */
9461 	hclge_enable_vector(&hdev->misc_vector, false);
9462 	synchronize_irq(hdev->misc_vector.vector_irq);
9463 
9464 	/* Disable all hw interrupts */
9465 	hclge_config_mac_tnl_int(hdev, false);
9466 	hclge_config_nic_hw_error(hdev, false);
9467 	hclge_config_rocee_ras_interrupt(hdev, false);
9468 
9469 	hclge_cmd_uninit(hdev);
9470 	hclge_misc_irq_uninit(hdev);
9471 	hclge_pci_uninit(hdev);
9472 	mutex_destroy(&hdev->vport_lock);
9473 	hclge_uninit_vport_mac_table(hdev);
9474 	hclge_uninit_vport_vlan_table(hdev);
9475 	mutex_destroy(&hdev->vport_cfg_mutex);
9476 	ae_dev->priv = NULL;
9477 }
9478 
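/* The maximum number of combined channels is bounded by both the RSS
 * size supported by hardware and the TQPs available per TC on this
 * vport.
 */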
9479 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9480 {
9481 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9482 	struct hclge_vport *vport = hclge_get_vport(handle);
9483 	struct hclge_dev *hdev = vport->back;
9484 
9485 	return min_t(u32, hdev->rss_size_max,
9486 		     vport->alloc_tqps / kinfo->num_tc);
9487 }
9488 
9489 static void hclge_get_channels(struct hnae3_handle *handle,
9490 			       struct ethtool_channels *ch)
9491 {
9492 	ch->max_combined = hclge_get_max_channels(handle);
9493 	ch->other_count = 1;
9494 	ch->max_other = 1;
9495 	ch->combined_count = handle->kinfo.rss_size;
9496 }
9497 
9498 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9499 					u16 *alloc_tqps, u16 *max_rss_size)
9500 {
9501 	struct hclge_vport *vport = hclge_get_vport(handle);
9502 	struct hclge_dev *hdev = vport->back;
9503 
9504 	*alloc_tqps = vport->alloc_tqps;
9505 	*max_rss_size = hdev->rss_size_max;
9506 }
9507 
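/* Change the number of queues used per TC: record the requested RSS
 * size, remap the vport's TQPs, reprogram the RSS TC mode and, unless
 * the user has configured the RSS indirection table, rebuild it for the
 * new RSS size.
 */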
9508 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9509 			      bool rxfh_configured)
9510 {
9511 	struct hclge_vport *vport = hclge_get_vport(handle);
9512 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9513 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9514 	struct hclge_dev *hdev = vport->back;
9515 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9516 	int cur_rss_size = kinfo->rss_size;
9517 	int cur_tqps = kinfo->num_tqps;
9518 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9519 	u16 roundup_size;
9520 	u32 *rss_indir;
9521 	unsigned int i;
9522 	int ret;
9523 
9524 	kinfo->req_rss_size = new_tqps_num;
9525 
9526 	ret = hclge_tm_vport_map_update(hdev);
9527 	if (ret) {
9528 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9529 		return ret;
9530 	}
9531 
9532 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9533 	roundup_size = ilog2(roundup_size);
9534 	/* Set the RSS TC mode according to the new RSS size */
9535 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9536 		tc_valid[i] = 0;
9537 
9538 		if (!(hdev->hw_tc_map & BIT(i)))
9539 			continue;
9540 
9541 		tc_valid[i] = 1;
9542 		tc_size[i] = roundup_size;
9543 		tc_offset[i] = kinfo->rss_size * i;
9544 	}
9545 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9546 	if (ret)
9547 		return ret;
9548 
9549 	/* RSS indirection table has been configured by the user */
9550 	if (rxfh_configured)
9551 		goto out;
9552 
9553 	/* Reinitialize the RSS indirection table for the new RSS size */
9554 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9555 	if (!rss_indir)
9556 		return -ENOMEM;
9557 
9558 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9559 		rss_indir[i] = i % kinfo->rss_size;
9560 
9561 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9562 	if (ret)
9563 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9564 			ret);
9565 
9566 	kfree(rss_indir);
9567 
9568 out:
9569 	if (!ret)
9570 		dev_info(&hdev->pdev->dev,
9571 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9572 			 cur_rss_size, kinfo->rss_size,
9573 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9574 
9575 	return ret;
9576 }
9577 
9578 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9579 			      u32 *regs_num_64_bit)
9580 {
9581 	struct hclge_desc desc;
9582 	u32 total_num;
9583 	int ret;
9584 
9585 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9586 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9587 	if (ret) {
9588 		dev_err(&hdev->pdev->dev,
9589 			"Query register number cmd failed, ret = %d.\n", ret);
9590 		return ret;
9591 	}
9592 
9593 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9594 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9595 
9596 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9597 	if (!total_num)
9598 		return -EINVAL;
9599 
9600 	return 0;
9601 }
9602 
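/* Read the 32 bit registers reported by firmware. The first descriptor
 * returns HCLGE_32_BIT_DESC_NODATA_LEN fewer register values than the
 * following ones, which is accounted for when computing cmd_num.
 */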
9603 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9604 				 void *data)
9605 {
9606 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9607 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9608 
9609 	struct hclge_desc *desc;
9610 	u32 *reg_val = data;
9611 	__le32 *desc_data;
9612 	int nodata_num;
9613 	int cmd_num;
9614 	int i, k, n;
9615 	int ret;
9616 
9617 	if (regs_num == 0)
9618 		return 0;
9619 
9620 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9621 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9622 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9623 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9624 	if (!desc)
9625 		return -ENOMEM;
9626 
9627 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9628 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9629 	if (ret) {
9630 		dev_err(&hdev->pdev->dev,
9631 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9632 		kfree(desc);
9633 		return ret;
9634 	}
9635 
9636 	for (i = 0; i < cmd_num; i++) {
9637 		if (i == 0) {
9638 			desc_data = (__le32 *)(&desc[i].data[0]);
9639 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9640 		} else {
9641 			desc_data = (__le32 *)(&desc[i]);
9642 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9643 		}
9644 		for (k = 0; k < n; k++) {
9645 			*reg_val++ = le32_to_cpu(*desc_data++);
9646 
9647 			regs_num--;
9648 			if (!regs_num)
9649 				break;
9650 		}
9651 	}
9652 
9653 	kfree(desc);
9654 	return 0;
9655 }
9656 
9657 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9658 				 void *data)
9659 {
9660 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9661 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9662 
9663 	struct hclge_desc *desc;
9664 	u64 *reg_val = data;
9665 	__le64 *desc_data;
9666 	int nodata_len;
9667 	int cmd_num;
9668 	int i, k, n;
9669 	int ret;
9670 
9671 	if (regs_num == 0)
9672 		return 0;
9673 
9674 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9675 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9676 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9677 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9678 	if (!desc)
9679 		return -ENOMEM;
9680 
9681 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9682 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9683 	if (ret) {
9684 		dev_err(&hdev->pdev->dev,
9685 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9686 		kfree(desc);
9687 		return ret;
9688 	}
9689 
9690 	for (i = 0; i < cmd_num; i++) {
9691 		if (i == 0) {
9692 			desc_data = (__le64 *)(&desc[i].data[0]);
9693 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9694 		} else {
9695 			desc_data = (__le64 *)(&desc[i]);
9696 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9697 		}
9698 		for (k = 0; k < n; k++) {
9699 			*reg_val++ = le64_to_cpu(*desc_data++);
9700 
9701 			regs_num--;
9702 			if (!regs_num)
9703 				break;
9704 		}
9705 	}
9706 
9707 	kfree(desc);
9708 	return 0;
9709 }
9710 
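/* The register dump is laid out in lines of REG_NUM_PER_LINE u32 words;
 * each register group is padded to a full line with SEPARATOR_VALUE
 * markers so the groups can be told apart in the dump.
 */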
9711 #define MAX_SEPARATE_NUM	4
9712 #define SEPARATOR_VALUE		0xFDFCFBFA
9713 #define REG_NUM_PER_LINE	4
9714 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9715 #define REG_SEPARATOR_LINE	1
9716 #define REG_NUM_REMAIN_MASK	3
9717 #define BD_LIST_MAX_NUM		30
9718 
9719 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9720 {
9721 	/* prepare 4 commands to query DFX BD number */
9722 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9723 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9724 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9725 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9726 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9727 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9728 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9729 
9730 	return hclge_cmd_send(&hdev->hw, desc, 4);
9731 }
9732 
9733 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9734 				    int *bd_num_list,
9735 				    u32 type_num)
9736 {
9737 #define HCLGE_DFX_REG_BD_NUM	4
9738 
9739 	u32 entries_per_desc, desc_index, index, offset, i;
9740 	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9741 	int ret;
9742 
9743 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
9744 	if (ret) {
9745 		dev_err(&hdev->pdev->dev,
9746 			"Get dfx bd num fail, status is %d.\n", ret);
9747 		return ret;
9748 	}
9749 
9750 	entries_per_desc = ARRAY_SIZE(desc[0].data);
9751 	for (i = 0; i < type_num; i++) {
9752 		offset = hclge_dfx_bd_offset_list[i];
9753 		index = offset % entries_per_desc;
9754 		desc_index = offset / entries_per_desc;
9755 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9756 	}
9757 
9758 	return ret;
9759 }
9760 
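/* Chain bd_num descriptors for one DFX register query: every descriptor
 * except the last one gets the NEXT flag, then the whole chain is sent
 * as a single command.
 */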
9761 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9762 				  struct hclge_desc *desc_src, int bd_num,
9763 				  enum hclge_opcode_type cmd)
9764 {
9765 	struct hclge_desc *desc = desc_src;
9766 	int i, ret;
9767 
9768 	hclge_cmd_setup_basic_desc(desc, cmd, true);
9769 	for (i = 0; i < bd_num - 1; i++) {
9770 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9771 		desc++;
9772 		hclge_cmd_setup_basic_desc(desc, cmd, true);
9773 	}
9774 
9775 	desc = desc_src;
9776 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9777 	if (ret)
9778 		dev_err(&hdev->pdev->dev,
9779 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9780 			cmd, ret);
9781 
9782 	return ret;
9783 }
9784 
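/* Copy the register values out of the descriptors into the dump buffer
 * and pad the group to a full line with SEPARATOR_VALUE. Returns the
 * number of u32 words written.
 */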
9785 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9786 				    void *data)
9787 {
9788 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9789 	struct hclge_desc *desc = desc_src;
9790 	u32 *reg = data;
9791 
9792 	entries_per_desc = ARRAY_SIZE(desc->data);
9793 	reg_num = entries_per_desc * bd_num;
9794 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9795 	for (i = 0; i < reg_num; i++) {
9796 		index = i % entries_per_desc;
9797 		desc_index = i / entries_per_desc;
9798 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
9799 	}
9800 	for (i = 0; i < separator_num; i++)
9801 		*reg++ = SEPARATOR_VALUE;
9802 
9803 	return reg_num + separator_num;
9804 }
9805 
9806 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9807 {
9808 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9809 	int data_len_per_desc, data_len, bd_num, i;
9810 	int bd_num_list[BD_LIST_MAX_NUM];
9811 	int ret;
9812 
9813 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9814 	if (ret) {
9815 		dev_err(&hdev->pdev->dev,
9816 			"Get dfx reg bd num fail, status is %d.\n", ret);
9817 		return ret;
9818 	}
9819 
9820 	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9821 	*len = 0;
9822 	for (i = 0; i < dfx_reg_type_num; i++) {
9823 		bd_num = bd_num_list[i];
9824 		data_len = data_len_per_desc * bd_num;
9825 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9826 	}
9827 
9828 	return ret;
9829 }
9830 
9831 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9832 {
9833 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9834 	int bd_num, bd_num_max, buf_len, i;
9835 	int bd_num_list[BD_LIST_MAX_NUM];
9836 	struct hclge_desc *desc_src;
9837 	u32 *reg = data;
9838 	int ret;
9839 
9840 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9841 	if (ret) {
9842 		dev_err(&hdev->pdev->dev,
9843 			"Get dfx reg bd num fail, status is %d.\n", ret);
9844 		return ret;
9845 	}
9846 
9847 	bd_num_max = bd_num_list[0];
9848 	for (i = 1; i < dfx_reg_type_num; i++)
9849 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9850 
9851 	buf_len = sizeof(*desc_src) * bd_num_max;
9852 	desc_src = kzalloc(buf_len, GFP_KERNEL);
9853 	if (!desc_src) {
9854 		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9855 		return -ENOMEM;
9856 	}
9857 
9858 	for (i = 0; i < dfx_reg_type_num; i++) {
9859 		bd_num = bd_num_list[i];
9860 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9861 					     hclge_dfx_reg_opcode_list[i]);
9862 		if (ret) {
9863 			dev_err(&hdev->pdev->dev,
9864 				"Get dfx reg fail, status is %d.\n", ret);
9865 			break;
9866 		}
9867 
9868 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9869 	}
9870 
9871 	kfree(desc_src);
9872 	return ret;
9873 }
9874 
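/* Dump the registers that are read directly from the PF PCIe register
 * space: the cmdq and common registers, the per-ring registers of every
 * TQP and the per-vector TQP interrupt registers, each group padded with
 * separator values. Returns the number of u32 words written.
 */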
9875 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9876 			      struct hnae3_knic_private_info *kinfo)
9877 {
9878 #define HCLGE_RING_REG_OFFSET		0x200
9879 #define HCLGE_RING_INT_REG_OFFSET	0x4
9880 
9881 	int i, j, reg_num, separator_num;
9882 	int data_num_sum;
9883 	u32 *reg = data;
9884 
9885 	/* fetch per-PF register values from the PF PCIe register space */
9886 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9887 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9888 	for (i = 0; i < reg_num; i++)
9889 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9890 	for (i = 0; i < separator_num; i++)
9891 		*reg++ = SEPARATOR_VALUE;
9892 	data_num_sum = reg_num + separator_num;
9893 
9894 	reg_num = ARRAY_SIZE(common_reg_addr_list);
9895 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9896 	for (i = 0; i < reg_num; i++)
9897 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9898 	for (i = 0; i < separator_num; i++)
9899 		*reg++ = SEPARATOR_VALUE;
9900 	data_num_sum += reg_num + separator_num;
9901 
9902 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
9903 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9904 	for (j = 0; j < kinfo->num_tqps; j++) {
9905 		for (i = 0; i < reg_num; i++)
9906 			*reg++ = hclge_read_dev(&hdev->hw,
9907 						ring_reg_addr_list[i] +
9908 						HCLGE_RING_REG_OFFSET * j);
9909 		for (i = 0; i < separator_num; i++)
9910 			*reg++ = SEPARATOR_VALUE;
9911 	}
9912 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9913 
9914 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9915 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9916 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9917 		for (i = 0; i < reg_num; i++)
9918 			*reg++ = hclge_read_dev(&hdev->hw,
9919 						tqp_intr_reg_addr_list[i] +
9920 						HCLGE_RING_INT_REG_OFFSET * j);
9921 		for (i = 0; i < separator_num; i++)
9922 			*reg++ = SEPARATOR_VALUE;
9923 	}
9924 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9925 
9926 	return data_num_sum;
9927 }
9928 
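/* The ethtool register dump length is the sum of the directly read
 * register groups, the 32/64 bit registers queried from firmware and
 * the DFX registers, with every group rounded up to a whole separator
 * line.
 */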
9929 static int hclge_get_regs_len(struct hnae3_handle *handle)
9930 {
9931 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9932 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9933 	struct hclge_vport *vport = hclge_get_vport(handle);
9934 	struct hclge_dev *hdev = vport->back;
9935 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9936 	int regs_lines_32_bit, regs_lines_64_bit;
9937 	int ret;
9938 
9939 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9940 	if (ret) {
9941 		dev_err(&hdev->pdev->dev,
9942 			"Get register number failed, ret = %d.\n", ret);
9943 		return ret;
9944 	}
9945 
9946 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9947 	if (ret) {
9948 		dev_err(&hdev->pdev->dev,
9949 			"Get dfx reg len failed, ret = %d.\n", ret);
9950 		return ret;
9951 	}
9952 
9953 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9954 		REG_SEPARATOR_LINE;
9955 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9956 		REG_SEPARATOR_LINE;
9957 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9958 		REG_SEPARATOR_LINE;
9959 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9960 		REG_SEPARATOR_LINE;
9961 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9962 		REG_SEPARATOR_LINE;
9963 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9964 		REG_SEPARATOR_LINE;
9965 
9966 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9967 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9968 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9969 }
9970 
9971 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9972 			   void *data)
9973 {
9974 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9975 	struct hclge_vport *vport = hclge_get_vport(handle);
9976 	struct hclge_dev *hdev = vport->back;
9977 	u32 regs_num_32_bit, regs_num_64_bit;
9978 	int i, reg_num, separator_num, ret;
9979 	u32 *reg = data;
9980 
9981 	*version = hdev->fw_version;
9982 
9983 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9984 	if (ret) {
9985 		dev_err(&hdev->pdev->dev,
9986 			"Get register number failed, ret = %d.\n", ret);
9987 		return;
9988 	}
9989 
9990 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
9991 
9992 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9993 	if (ret) {
9994 		dev_err(&hdev->pdev->dev,
9995 			"Get 32 bit register failed, ret = %d.\n", ret);
9996 		return;
9997 	}
9998 	reg_num = regs_num_32_bit;
9999 	reg += reg_num;
10000 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10001 	for (i = 0; i < separator_num; i++)
10002 		*reg++ = SEPARATOR_VALUE;
10003 
10004 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10005 	if (ret) {
10006 		dev_err(&hdev->pdev->dev,
10007 			"Get 64 bit register failed, ret = %d.\n", ret);
10008 		return;
10009 	}
10010 	reg_num = regs_num_64_bit * 2;
10011 	reg += reg_num;
10012 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10013 	for (i = 0; i < separator_num; i++)
10014 		*reg++ = SEPARATOR_VALUE;
10015 
10016 	ret = hclge_get_dfx_reg(hdev, reg);
10017 	if (ret)
10018 		dev_err(&hdev->pdev->dev,
10019 			"Get dfx register failed, ret = %d.\n", ret);
10020 }
10021 
10022 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10023 {
10024 	struct hclge_set_led_state_cmd *req;
10025 	struct hclge_desc desc;
10026 	int ret;
10027 
10028 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10029 
10030 	req = (struct hclge_set_led_state_cmd *)desc.data;
10031 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10032 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10033 
10034 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10035 	if (ret)
10036 		dev_err(&hdev->pdev->dev,
10037 			"Send set led state cmd error, ret =%d\n", ret);
10038 
10039 	return ret;
10040 }
10041 
10042 enum hclge_led_status {
10043 	HCLGE_LED_OFF,
10044 	HCLGE_LED_ON,
10045 	HCLGE_LED_NO_CHANGE = 0xFF,
10046 };
10047 
10048 static int hclge_set_led_id(struct hnae3_handle *handle,
10049 			    enum ethtool_phys_id_state status)
10050 {
10051 	struct hclge_vport *vport = hclge_get_vport(handle);
10052 	struct hclge_dev *hdev = vport->back;
10053 
10054 	switch (status) {
10055 	case ETHTOOL_ID_ACTIVE:
10056 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10057 	case ETHTOOL_ID_INACTIVE:
10058 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10059 	default:
10060 		return -EINVAL;
10061 	}
10062 }
10063 
10064 static void hclge_get_link_mode(struct hnae3_handle *handle,
10065 				unsigned long *supported,
10066 				unsigned long *advertising)
10067 {
10068 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10069 	struct hclge_vport *vport = hclge_get_vport(handle);
10070 	struct hclge_dev *hdev = vport->back;
10071 	unsigned int idx = 0;
10072 
10073 	for (; idx < size; idx++) {
10074 		supported[idx] = hdev->hw.mac.supported[idx];
10075 		advertising[idx] = hdev->hw.mac.advertising[idx];
10076 	}
10077 }
10078 
10079 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10080 {
10081 	struct hclge_vport *vport = hclge_get_vport(handle);
10082 	struct hclge_dev *hdev = vport->back;
10083 
10084 	return hclge_config_gro(hdev, enable);
10085 }
10086 
10087 static const struct hnae3_ae_ops hclge_ops = {
10088 	.init_ae_dev = hclge_init_ae_dev,
10089 	.uninit_ae_dev = hclge_uninit_ae_dev,
10090 	.flr_prepare = hclge_flr_prepare,
10091 	.flr_done = hclge_flr_done,
10092 	.init_client_instance = hclge_init_client_instance,
10093 	.uninit_client_instance = hclge_uninit_client_instance,
10094 	.map_ring_to_vector = hclge_map_ring_to_vector,
10095 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10096 	.get_vector = hclge_get_vector,
10097 	.put_vector = hclge_put_vector,
10098 	.set_promisc_mode = hclge_set_promisc_mode,
10099 	.set_loopback = hclge_set_loopback,
10100 	.start = hclge_ae_start,
10101 	.stop = hclge_ae_stop,
10102 	.client_start = hclge_client_start,
10103 	.client_stop = hclge_client_stop,
10104 	.get_status = hclge_get_status,
10105 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10106 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10107 	.get_media_type = hclge_get_media_type,
10108 	.check_port_speed = hclge_check_port_speed,
10109 	.get_fec = hclge_get_fec,
10110 	.set_fec = hclge_set_fec,
10111 	.get_rss_key_size = hclge_get_rss_key_size,
10112 	.get_rss_indir_size = hclge_get_rss_indir_size,
10113 	.get_rss = hclge_get_rss,
10114 	.set_rss = hclge_set_rss,
10115 	.set_rss_tuple = hclge_set_rss_tuple,
10116 	.get_rss_tuple = hclge_get_rss_tuple,
10117 	.get_tc_size = hclge_get_tc_size,
10118 	.get_mac_addr = hclge_get_mac_addr,
10119 	.set_mac_addr = hclge_set_mac_addr,
10120 	.do_ioctl = hclge_do_ioctl,
10121 	.add_uc_addr = hclge_add_uc_addr,
10122 	.rm_uc_addr = hclge_rm_uc_addr,
10123 	.add_mc_addr = hclge_add_mc_addr,
10124 	.rm_mc_addr = hclge_rm_mc_addr,
10125 	.set_autoneg = hclge_set_autoneg,
10126 	.get_autoneg = hclge_get_autoneg,
10127 	.restart_autoneg = hclge_restart_autoneg,
10128 	.halt_autoneg = hclge_halt_autoneg,
10129 	.get_pauseparam = hclge_get_pauseparam,
10130 	.set_pauseparam = hclge_set_pauseparam,
10131 	.set_mtu = hclge_set_mtu,
10132 	.reset_queue = hclge_reset_tqp,
10133 	.get_stats = hclge_get_stats,
10134 	.get_mac_stats = hclge_get_mac_stat,
10135 	.update_stats = hclge_update_stats,
10136 	.get_strings = hclge_get_strings,
10137 	.get_sset_count = hclge_get_sset_count,
10138 	.get_fw_version = hclge_get_fw_version,
10139 	.get_mdix_mode = hclge_get_mdix_mode,
10140 	.enable_vlan_filter = hclge_enable_vlan_filter,
10141 	.set_vlan_filter = hclge_set_vlan_filter,
10142 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10143 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10144 	.reset_event = hclge_reset_event,
10145 	.get_reset_level = hclge_get_reset_level,
10146 	.set_default_reset_request = hclge_set_def_reset_request,
10147 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10148 	.set_channels = hclge_set_channels,
10149 	.get_channels = hclge_get_channels,
10150 	.get_regs_len = hclge_get_regs_len,
10151 	.get_regs = hclge_get_regs,
10152 	.set_led_id = hclge_set_led_id,
10153 	.get_link_mode = hclge_get_link_mode,
10154 	.add_fd_entry = hclge_add_fd_entry,
10155 	.del_fd_entry = hclge_del_fd_entry,
10156 	.del_all_fd_entries = hclge_del_all_fd_entries,
10157 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10158 	.get_fd_rule_info = hclge_get_fd_rule_info,
10159 	.get_fd_all_rules = hclge_get_all_rules,
10160 	.restore_fd_rules = hclge_restore_fd_entries,
10161 	.enable_fd = hclge_enable_fd,
10162 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10163 	.dbg_run_cmd = hclge_dbg_run_cmd,
10164 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10165 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10166 	.ae_dev_resetting = hclge_ae_dev_resetting,
10167 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10168 	.set_gro_en = hclge_gro_en,
10169 	.get_global_queue_id = hclge_covert_handle_qid_global,
10170 	.set_timer_task = hclge_set_timer_task,
10171 	.mac_connect_phy = hclge_mac_connect_phy,
10172 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10173 	.restore_vlan_table = hclge_restore_vlan_table,
10174 };
10175 
10176 static struct hnae3_ae_algo ae_algo = {
10177 	.ops = &hclge_ops,
10178 	.pdev_id_table = ae_algo_pci_tbl,
10179 };
10180 
10181 static int hclge_init(void)
10182 {
10183 	pr_info("%s is initializing\n", HCLGE_NAME);
10184 
10185 	hnae3_register_ae_algo(&ae_algo);
10186 
10187 	return 0;
10188 }
10189 
10190 static void hclge_exit(void)
10191 {
10192 	hnae3_unregister_ae_algo(&ae_algo);
10193 }
10194 module_init(hclge_init);
10195 module_exit(hclge_exit);
10196 
10197 MODULE_LICENSE("GPL");
10198 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10199 MODULE_DESCRIPTION("HCLGE Driver");
10200 MODULE_VERSION(HCLGE_MOD_VERSION);
10201