1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 
74 static struct hnae3_ae_algo ae_algo;
75 
76 static struct workqueue_struct *hclge_wq;
77 
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92 
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 					 HCLGE_CMDQ_TX_ADDR_H_REG,
95 					 HCLGE_CMDQ_TX_DEPTH_REG,
96 					 HCLGE_CMDQ_TX_TAIL_REG,
97 					 HCLGE_CMDQ_TX_HEAD_REG,
98 					 HCLGE_CMDQ_RX_ADDR_L_REG,
99 					 HCLGE_CMDQ_RX_ADDR_H_REG,
100 					 HCLGE_CMDQ_RX_DEPTH_REG,
101 					 HCLGE_CMDQ_RX_TAIL_REG,
102 					 HCLGE_CMDQ_RX_HEAD_REG,
103 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 					 HCLGE_CMDQ_INTR_STS_REG,
105 					 HCLGE_CMDQ_INTR_EN_REG,
106 					 HCLGE_CMDQ_INTR_GEN_REG};
107 
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 					   HCLGE_VECTOR0_OTER_EN_REG,
110 					   HCLGE_MISC_RESET_STS_REG,
111 					   HCLGE_MISC_VECTOR_INT_STS,
112 					   HCLGE_GLOBAL_RESET_REG,
113 					   HCLGE_FUN_RST_ING,
114 					   HCLGE_GRO_EN_REG};
115 
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 					 HCLGE_RING_RX_ADDR_H_REG,
118 					 HCLGE_RING_RX_BD_NUM_REG,
119 					 HCLGE_RING_RX_BD_LENGTH_REG,
120 					 HCLGE_RING_RX_MERGE_EN_REG,
121 					 HCLGE_RING_RX_TAIL_REG,
122 					 HCLGE_RING_RX_HEAD_REG,
123 					 HCLGE_RING_RX_FBD_NUM_REG,
124 					 HCLGE_RING_RX_OFFSET_REG,
125 					 HCLGE_RING_RX_FBD_OFFSET_REG,
126 					 HCLGE_RING_RX_STASH_REG,
127 					 HCLGE_RING_RX_BD_ERR_REG,
128 					 HCLGE_RING_TX_ADDR_L_REG,
129 					 HCLGE_RING_TX_ADDR_H_REG,
130 					 HCLGE_RING_TX_BD_NUM_REG,
131 					 HCLGE_RING_TX_PRIORITY_REG,
132 					 HCLGE_RING_TX_TC_REG,
133 					 HCLGE_RING_TX_MERGE_EN_REG,
134 					 HCLGE_RING_TX_TAIL_REG,
135 					 HCLGE_RING_TX_HEAD_REG,
136 					 HCLGE_RING_TX_FBD_NUM_REG,
137 					 HCLGE_RING_TX_OFFSET_REG,
138 					 HCLGE_RING_TX_EBD_NUM_REG,
139 					 HCLGE_RING_TX_EBD_OFFSET_REG,
140 					 HCLGE_RING_TX_BD_ERR_REG,
141 					 HCLGE_RING_EN_REG};
142 
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 					     HCLGE_TQP_INTR_GL0_REG,
145 					     HCLGE_TQP_INTR_GL1_REG,
146 					     HCLGE_TQP_INTR_GL2_REG,
147 					     HCLGE_TQP_INTR_RL_REG};
148 
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150 	"App    Loopback test",
151 	"Serdes serial Loopback test",
152 	"Serdes parallel Loopback test",
153 	"Phy    Loopback test"
154 };
155 
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 	{"mac_tx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 	{"mac_rx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161 	{"mac_tx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 	{"mac_rx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 	{"mac_tx_pfc_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 	{"mac_tx_pfc_pri0_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 	{"mac_tx_pfc_pri1_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 	{"mac_tx_pfc_pri2_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 	{"mac_tx_pfc_pri3_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 	{"mac_tx_pfc_pri4_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 	{"mac_tx_pfc_pri5_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 	{"mac_tx_pfc_pri6_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 	{"mac_tx_pfc_pri7_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183 	{"mac_rx_pfc_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 	{"mac_rx_pfc_pri0_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 	{"mac_rx_pfc_pri1_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 	{"mac_rx_pfc_pri2_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 	{"mac_rx_pfc_pri3_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 	{"mac_rx_pfc_pri4_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 	{"mac_rx_pfc_pri5_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 	{"mac_rx_pfc_pri6_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 	{"mac_rx_pfc_pri7_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 	{"mac_tx_total_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 	{"mac_tx_total_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 	{"mac_tx_good_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 	{"mac_tx_bad_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 	{"mac_tx_good_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 	{"mac_tx_bad_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 	{"mac_tx_uni_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 	{"mac_tx_multi_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 	{"mac_tx_broad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 	{"mac_tx_undersize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 	{"mac_tx_oversize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 	{"mac_tx_64_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 	{"mac_tx_65_127_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 	{"mac_tx_128_255_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 	{"mac_tx_256_511_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 	{"mac_tx_512_1023_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 	{"mac_tx_1024_1518_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 	{"mac_tx_1519_2047_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 	{"mac_tx_2048_4095_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 	{"mac_tx_4096_8191_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 	{"mac_tx_8192_9216_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 	{"mac_tx_9217_12287_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 	{"mac_tx_12288_16383_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 	{"mac_tx_1519_max_good_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 	{"mac_tx_1519_max_bad_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 	{"mac_rx_total_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 	{"mac_rx_total_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 	{"mac_rx_good_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 	{"mac_rx_bad_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 	{"mac_rx_good_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 	{"mac_rx_bad_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 	{"mac_rx_uni_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 	{"mac_rx_multi_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 	{"mac_rx_broad_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 	{"mac_rx_undersize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 	{"mac_rx_oversize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 	{"mac_rx_64_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 	{"mac_rx_65_127_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 	{"mac_rx_128_255_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 	{"mac_rx_256_511_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 	{"mac_rx_512_1023_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 	{"mac_rx_1024_1518_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 	{"mac_rx_1519_2047_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 	{"mac_rx_2048_4095_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 	{"mac_rx_4096_8191_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 	{"mac_rx_8192_9216_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 	{"mac_rx_9217_12287_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 	{"mac_rx_12288_16383_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 	{"mac_rx_1519_max_good_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 	{"mac_rx_1519_max_bad_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301 
302 	{"mac_tx_fragment_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 	{"mac_tx_undermin_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 	{"mac_tx_jabber_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 	{"mac_tx_err_all_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 	{"mac_tx_from_app_good_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 	{"mac_tx_from_app_bad_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 	{"mac_rx_fragment_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 	{"mac_rx_undermin_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 	{"mac_rx_jabber_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 	{"mac_rx_fcs_err_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 	{"mac_rx_send_app_good_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 	{"mac_rx_send_app_bad_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327 
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 	{
330 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
332 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 		.i_port_bitmap = 0x1,
334 	},
335 };
336 
337 static const u8 hclge_hash_key[] = {
338 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344 
345 static const u32 hclge_dfx_bd_offset_list[] = {
346 	HCLGE_DFX_BIOS_BD_OFFSET,
347 	HCLGE_DFX_SSU_0_BD_OFFSET,
348 	HCLGE_DFX_SSU_1_BD_OFFSET,
349 	HCLGE_DFX_IGU_BD_OFFSET,
350 	HCLGE_DFX_RPU_0_BD_OFFSET,
351 	HCLGE_DFX_RPU_1_BD_OFFSET,
352 	HCLGE_DFX_NCSI_BD_OFFSET,
353 	HCLGE_DFX_RTC_BD_OFFSET,
354 	HCLGE_DFX_PPP_BD_OFFSET,
355 	HCLGE_DFX_RCB_BD_OFFSET,
356 	HCLGE_DFX_TQP_BD_OFFSET,
357 	HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359 
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 	HCLGE_OPC_DFX_SSU_REG_0,
363 	HCLGE_OPC_DFX_SSU_REG_1,
364 	HCLGE_OPC_DFX_IGU_EGU_REG,
365 	HCLGE_OPC_DFX_RPU_REG_0,
366 	HCLGE_OPC_DFX_RPU_REG_1,
367 	HCLGE_OPC_DFX_NCSI_REG,
368 	HCLGE_OPC_DFX_RTC_REG,
369 	HCLGE_OPC_DFX_PPP_REG,
370 	HCLGE_OPC_DFX_RCB_REG,
371 	HCLGE_OPC_DFX_TQP_REG,
372 	HCLGE_OPC_DFX_SSU_REG_2
373 };
374 
375 static const struct key_info meta_data_key_info[] = {
376 	{ PACKET_TYPE_ID, 6},
377 	{ IP_FRAGEMENT, 1},
378 	{ ROCE_TYPE, 1},
379 	{ NEXT_KEY, 5},
380 	{ VLAN_NUMBER, 2},
381 	{ SRC_VPORT, 12},
382 	{ DST_VPORT, 12},
383 	{ TUNNEL_PACKET, 1},
384 };
385 
386 static const struct key_info tuple_key_info[] = {
387 	{ OUTER_DST_MAC, 48},
388 	{ OUTER_SRC_MAC, 48},
389 	{ OUTER_VLAN_TAG_FST, 16},
390 	{ OUTER_VLAN_TAG_SEC, 16},
391 	{ OUTER_ETH_TYPE, 16},
392 	{ OUTER_L2_RSV, 16},
393 	{ OUTER_IP_TOS, 8},
394 	{ OUTER_IP_PROTO, 8},
395 	{ OUTER_SRC_IP, 32},
396 	{ OUTER_DST_IP, 32},
397 	{ OUTER_L3_RSV, 16},
398 	{ OUTER_SRC_PORT, 16},
399 	{ OUTER_DST_PORT, 16},
400 	{ OUTER_L4_RSV, 32},
401 	{ OUTER_TUN_VNI, 24},
402 	{ OUTER_TUN_FLOW_ID, 8},
403 	{ INNER_DST_MAC, 48},
404 	{ INNER_SRC_MAC, 48},
405 	{ INNER_VLAN_TAG_FST, 16},
406 	{ INNER_VLAN_TAG_SEC, 16},
407 	{ INNER_ETH_TYPE, 16},
408 	{ INNER_L2_RSV, 16},
409 	{ INNER_IP_TOS, 8},
410 	{ INNER_IP_PROTO, 8},
411 	{ INNER_SRC_IP, 32},
412 	{ INNER_DST_IP, 32},
413 	{ INNER_L3_RSV, 16},
414 	{ INNER_SRC_PORT, 16},
415 	{ INNER_DST_PORT, 16},
416 	{ INNER_L4_RSV, 32},
417 };
418 
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422 
423 	u64 *data = (u64 *)(&hdev->mac_stats);
424 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425 	__le64 *desc_data;
426 	int i, k, n;
427 	int ret;
428 
429 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 	if (ret) {
432 		dev_err(&hdev->pdev->dev,
433 			"Get MAC pkt stats fail, status = %d.\n", ret);
434 
435 		return ret;
436 	}
437 
438 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc contains the header */
440 		if (unlikely(i == 0)) {
441 			desc_data = (__le64 *)(&desc[i].data[0]);
442 			n = HCLGE_RD_FIRST_STATS_NUM;
443 		} else {
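			/* every subsequent descriptor is consumed entirely as
			 * statistics data, so cast from the descriptor start
			 * rather than from its data area
			 */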
444 			desc_data = (__le64 *)(&desc[i]);
445 			n = HCLGE_RD_OTHER_STATS_NUM;
446 		}
447 
448 		for (k = 0; k < n; k++) {
449 			*data += le64_to_cpu(*desc_data);
450 			data++;
451 			desc_data++;
452 		}
453 	}
454 
455 	return 0;
456 }
457 
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460 	u64 *data = (u64 *)(&hdev->mac_stats);
461 	struct hclge_desc *desc;
462 	__le64 *desc_data;
463 	u16 i, k, n;
464 	int ret;
465 
466 	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
468 	 */
469 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 	if (!desc)
471 		return -ENOMEM;
472 
473 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 	if (ret) {
476 		kfree(desc);
477 		return ret;
478 	}
479 
480 	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc contains the header */
482 		if (i == 0) {
483 			desc_data = (__le64 *)(&desc[i].data[0]);
484 			n = HCLGE_RD_FIRST_STATS_NUM;
485 		} else {
486 			desc_data = (__le64 *)(&desc[i]);
487 			n = HCLGE_RD_OTHER_STATS_NUM;
488 		}
489 
490 		for (k = 0; k < n; k++) {
491 			*data += le64_to_cpu(*desc_data);
492 			data++;
493 			desc_data++;
494 		}
495 	}
496 
497 	kfree(desc);
498 
499 	return 0;
500 }
501 
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504 	struct hclge_desc desc;
505 	__le32 *desc_data;
506 	u32 reg_num;
507 	int ret;
508 
509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 	if (ret)
512 		return ret;
513 
514 	desc_data = (__le32 *)(&desc.data[0]);
515 	reg_num = le32_to_cpu(*desc_data);
516 
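	/* The first descriptor can return three statistics registers and each
	 * additional descriptor four more (presumably matching
	 * HCLGE_RD_FIRST_STATS_NUM and HCLGE_RD_OTHER_STATS_NUM), so round
	 * the required descriptor count up accordingly.
	 */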
517 	*desc_num = 1 + ((reg_num - 3) >> 2) +
518 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519 
520 	return 0;
521 }
522 
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525 	u32 desc_num;
526 	int ret;
527 
528 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
529 
530 	/* The firmware supports the new statistics acquisition method */
531 	if (!ret)
532 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
533 	else if (ret == -EOPNOTSUPP)
534 		ret = hclge_mac_update_stats_defective(hdev);
535 	else
536 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537 
538 	return ret;
539 }
540 
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 	struct hclge_vport *vport = hclge_get_vport(handle);
545 	struct hclge_dev *hdev = vport->back;
546 	struct hnae3_queue *queue;
547 	struct hclge_desc desc[1];
548 	struct hclge_tqp *tqp;
549 	int ret, i;
550 
551 	for (i = 0; i < kinfo->num_tqps; i++) {
552 		queue = handle->kinfo.tqp[i];
553 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATS */
555 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 					   true);
557 
558 		desc[0].data[0] = cpu_to_le32(tqp->index);
559 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 		if (ret) {
561 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
563 				ret, i);
564 			return ret;
565 		}
566 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567 			le32_to_cpu(desc[0].data[1]);
568 	}
569 
570 	for (i = 0; i < kinfo->num_tqps; i++) {
571 		queue = handle->kinfo.tqp[i];
572 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATS */
574 		hclge_cmd_setup_basic_desc(&desc[0],
575 					   HCLGE_OPC_QUERY_TX_STATS,
576 					   true);
577 
578 		desc[0].data[0] = cpu_to_le32(tqp->index);
579 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 		if (ret) {
581 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
583 				ret, i);
584 			return ret;
585 		}
586 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587 			le32_to_cpu(desc[0].data[1]);
588 	}
589 
590 	return 0;
591 }
592 
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 	struct hclge_tqp *tqp;
597 	u64 *buff = data;
598 	int i;
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 	}
604 
605 	for (i = 0; i < kinfo->num_tqps; i++) {
606 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 	}
609 
610 	return buff;
611 }
612 
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616 
	/* each tqp has a TX queue and an RX queue */
	return kinfo->num_tqps * 2;
619 }
620 
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 	u8 *buff = data;
625 	int i;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 			struct hclge_tqp, q);
630 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
631 			 tqp->index);
632 		buff = buff + ETH_GSTRING_LEN;
633 	}
634 
635 	for (i = 0; i < kinfo->num_tqps; i++) {
636 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 			struct hclge_tqp, q);
638 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
639 			 tqp->index);
640 		buff = buff + ETH_GSTRING_LEN;
641 	}
642 
643 	return buff;
644 }
645 
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647 				 const struct hclge_comm_stats_str strs[],
648 				 int size, u64 *data)
649 {
650 	u64 *buf = data;
651 	u32 i;
652 
653 	for (i = 0; i < size; i++)
654 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655 
656 	return buf + size;
657 }
658 
659 static u8 *hclge_comm_get_strings(u32 stringset,
660 				  const struct hclge_comm_stats_str strs[],
661 				  int size, u8 *data)
662 {
663 	char *buff = (char *)data;
664 	u32 i;
665 
666 	if (stringset != ETH_SS_STATS)
667 		return buff;
668 
669 	for (i = 0; i < size; i++) {
670 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 		buff = buff + ETH_GSTRING_LEN;
672 	}
673 
674 	return (u8 *)buff;
675 }
676 
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679 	struct hnae3_handle *handle;
680 	int status;
681 
682 	handle = &hdev->vport[0].nic;
683 	if (handle->client) {
684 		status = hclge_tqps_update_stats(handle);
685 		if (status) {
686 			dev_err(&hdev->pdev->dev,
687 				"Update TQPS stats fail, status = %d.\n",
688 				status);
689 		}
690 	}
691 
692 	status = hclge_mac_update_stats(hdev);
693 	if (status)
694 		dev_err(&hdev->pdev->dev,
695 			"Update MAC stats fail, status = %d.\n", status);
696 }
697 
698 static void hclge_update_stats(struct hnae3_handle *handle,
699 			       struct net_device_stats *net_stats)
700 {
701 	struct hclge_vport *vport = hclge_get_vport(handle);
702 	struct hclge_dev *hdev = vport->back;
703 	int status;
704 
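	/* bail out if another statistics update is already in progress */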
705 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 		return;
707 
708 	status = hclge_mac_update_stats(hdev);
709 	if (status)
710 		dev_err(&hdev->pdev->dev,
711 			"Update MAC stats fail, status = %d.\n",
712 			status);
713 
714 	status = hclge_tqps_update_stats(handle);
715 	if (status)
716 		dev_err(&hdev->pdev->dev,
717 			"Update TQPS stats fail, status = %d.\n",
718 			status);
719 
720 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722 
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 		HNAE3_SUPPORT_PHY_LOOPBACK |\
727 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729 
730 	struct hclge_vport *vport = hclge_get_vport(handle);
731 	struct hclge_dev *hdev = vport->back;
732 	int count = 0;
733 
734 	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
738 	 */
739 	if (stringset == ETH_SS_TEST) {
740 		/* clear loopback bit flags at first */
741 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 			count += 1;
747 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748 		}
749 
750 		count += 2;
751 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753 
754 		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
755 		    hdev->hw.mac.phydev->drv->set_loopback) {
756 			count += 1;
757 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758 		}
759 
760 	} else if (stringset == ETH_SS_STATS) {
761 		count = ARRAY_SIZE(g_mac_stats_string) +
762 			hclge_tqps_get_sset_count(handle, stringset);
763 	}
764 
765 	return count;
766 }
767 
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769 			      u8 *data)
770 {
771 	u8 *p = (char *)data;
772 	int size;
773 
774 	if (stringset == ETH_SS_STATS) {
775 		size = ARRAY_SIZE(g_mac_stats_string);
776 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777 					   size, p);
778 		p = hclge_tqps_get_strings(handle, p);
779 	} else if (stringset == ETH_SS_TEST) {
780 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782 			       ETH_GSTRING_LEN);
783 			p += ETH_GSTRING_LEN;
784 		}
785 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791 			memcpy(p,
792 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793 			       ETH_GSTRING_LEN);
794 			p += ETH_GSTRING_LEN;
795 		}
796 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798 			       ETH_GSTRING_LEN);
799 			p += ETH_GSTRING_LEN;
800 		}
801 	}
802 }
803 
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806 	struct hclge_vport *vport = hclge_get_vport(handle);
807 	struct hclge_dev *hdev = vport->back;
808 	u64 *p;
809 
810 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811 				 ARRAY_SIZE(g_mac_stats_string), data);
812 	p = hclge_tqps_get_stats(handle, p);
813 }
814 
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 			       struct hns3_mac_stats *mac_stats)
817 {
818 	struct hclge_vport *vport = hclge_get_vport(handle);
819 	struct hclge_dev *hdev = vport->back;
820 
821 	hclge_update_stats(handle, NULL);
822 
823 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826 
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828 				   struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK	0xF
831 
832 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 		return -EINVAL;
834 
	/* Record whether this PF is the main PF */
836 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 		hdev->flag |= HCLGE_FLAG_MAIN;
838 	else
839 		hdev->flag &= ~HCLGE_FLAG_MAIN;
840 
841 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842 	return 0;
843 }
844 
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT	5
848 
849 	struct hclge_func_status_cmd *req;
850 	struct hclge_desc desc;
851 	int timeout = 0;
852 	int ret;
853 
854 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855 	req = (struct hclge_func_status_cmd *)desc.data;
856 
857 	do {
858 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 		if (ret) {
860 			dev_err(&hdev->pdev->dev,
861 				"query function status failed %d.\n", ret);
862 			return ret;
863 		}
864 
		/* Check whether PF reset is done */
866 		if (req->pf_state)
867 			break;
868 		usleep_range(1000, 2000);
869 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
870 
871 	return hclge_parse_func_status(hdev, req);
872 }
873 
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876 	struct hclge_pf_res_cmd *req;
877 	struct hclge_desc desc;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882 	if (ret) {
883 		dev_err(&hdev->pdev->dev,
884 			"query pf resource failed %d.\n", ret);
885 		return ret;
886 	}
887 
888 	req = (struct hclge_pf_res_cmd *)desc.data;
889 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890 			 le16_to_cpu(req->ext_tqp_num);
891 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 
893 	if (req->tx_buf_size)
894 		hdev->tx_buf_size =
895 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898 
899 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (req->dv_buf_size)
902 		hdev->dv_buf_size =
903 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 	else
905 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906 
907 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908 
909 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
910 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
911 		dev_err(&hdev->pdev->dev,
912 			"only %u msi resources available, not enough for pf(min:2).\n",
913 			hdev->num_nic_msi);
914 		return -EINVAL;
915 	}
916 
917 	if (hnae3_dev_roce_supported(hdev)) {
918 		hdev->num_roce_msi =
919 			le16_to_cpu(req->pf_intr_vector_number_roce);
920 
		/* The PF should have both NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
923 		 */
924 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
925 	} else {
926 		hdev->num_msi = hdev->num_nic_msi;
927 	}
928 
929 	return 0;
930 }
931 
932 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
933 {
934 	switch (speed_cmd) {
935 	case 6:
936 		*speed = HCLGE_MAC_SPEED_10M;
937 		break;
938 	case 7:
939 		*speed = HCLGE_MAC_SPEED_100M;
940 		break;
941 	case 0:
942 		*speed = HCLGE_MAC_SPEED_1G;
943 		break;
944 	case 1:
945 		*speed = HCLGE_MAC_SPEED_10G;
946 		break;
947 	case 2:
948 		*speed = HCLGE_MAC_SPEED_25G;
949 		break;
950 	case 3:
951 		*speed = HCLGE_MAC_SPEED_40G;
952 		break;
953 	case 4:
954 		*speed = HCLGE_MAC_SPEED_50G;
955 		break;
956 	case 5:
957 		*speed = HCLGE_MAC_SPEED_100G;
958 		break;
959 	case 8:
960 		*speed = HCLGE_MAC_SPEED_200G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_200G:
1002 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1003 		break;
1004 	default:
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (speed_bit & speed_ability)
1009 		return 0;
1010 
1011 	return -EINVAL;
1012 }
1013 
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1015 {
1016 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1054 		linkmode_set_bit(
1055 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1056 			mac->supported);
1057 }
1058 
1059 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1060 {
1061 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1078 				 mac->supported);
1079 }
1080 
1081 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1082 {
1083 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1091 				 mac->supported);
1092 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1093 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1094 				 mac->supported);
1095 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1096 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1097 				 mac->supported);
1098 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1100 				 mac->supported);
1101 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1103 				 mac->supported);
1104 }
1105 
1106 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1107 {
1108 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1109 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1110 
1111 	switch (mac->speed) {
1112 	case HCLGE_MAC_SPEED_10G:
1113 	case HCLGE_MAC_SPEED_40G:
1114 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1115 				 mac->supported);
1116 		mac->fec_ability =
1117 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	case HCLGE_MAC_SPEED_25G:
1120 	case HCLGE_MAC_SPEED_50G:
1121 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1122 				 mac->supported);
1123 		mac->fec_ability =
1124 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1125 			BIT(HNAE3_FEC_AUTO);
1126 		break;
1127 	case HCLGE_MAC_SPEED_100G:
1128 	case HCLGE_MAC_SPEED_200G:
1129 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1130 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1131 		break;
1132 	default:
1133 		mac->fec_ability = 0;
1134 		break;
1135 	}
1136 }
1137 
1138 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1139 					u16 speed_ability)
1140 {
1141 	struct hclge_mac *mac = &hdev->hw.mac;
1142 
1143 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1144 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1145 				 mac->supported);
1146 
1147 	hclge_convert_setting_sr(mac, speed_ability);
1148 	hclge_convert_setting_lr(mac, speed_ability);
1149 	hclge_convert_setting_cr(mac, speed_ability);
1150 	if (hnae3_dev_fec_supported(hdev))
1151 		hclge_convert_setting_fec(mac);
1152 
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1159 					    u16 speed_ability)
1160 {
1161 	struct hclge_mac *mac = &hdev->hw.mac;
1162 
1163 	hclge_convert_setting_kr(mac, speed_ability);
1164 	if (hnae3_dev_fec_supported(hdev))
1165 		hclge_convert_setting_fec(mac);
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1167 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1169 }
1170 
1171 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1172 					 u16 speed_ability)
1173 {
1174 	unsigned long *supported = hdev->hw.mac.supported;
1175 
	/* default to supporting all speeds for a GE port */
1177 	if (!speed_ability)
1178 		speed_ability = HCLGE_SUPPORT_GE;
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1182 				 supported);
1183 
1184 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1185 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1186 				 supported);
1187 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1188 				 supported);
1189 	}
1190 
1191 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1192 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1193 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1194 	}
1195 
1196 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1200 }
1201 
1202 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1203 {
1204 	u8 media_type = hdev->hw.mac.media_type;
1205 
1206 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1207 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1208 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1209 		hclge_parse_copper_link_mode(hdev, speed_ability);
1210 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1211 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1212 }
1213 
1214 static u32 hclge_get_max_speed(u16 speed_ability)
1215 {
1216 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1217 		return HCLGE_MAC_SPEED_200G;
1218 
1219 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1220 		return HCLGE_MAC_SPEED_100G;
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1223 		return HCLGE_MAC_SPEED_50G;
1224 
1225 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1226 		return HCLGE_MAC_SPEED_40G;
1227 
1228 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1229 		return HCLGE_MAC_SPEED_25G;
1230 
1231 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1232 		return HCLGE_MAC_SPEED_10G;
1233 
1234 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1235 		return HCLGE_MAC_SPEED_1G;
1236 
1237 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1238 		return HCLGE_MAC_SPEED_100M;
1239 
1240 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1241 		return HCLGE_MAC_SPEED_10M;
1242 
1243 	return HCLGE_MAC_SPEED_1G;
1244 }
1245 
1246 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1247 {
1248 #define SPEED_ABILITY_EXT_SHIFT			8
1249 
1250 	struct hclge_cfg_param_cmd *req;
1251 	u64 mac_addr_tmp_high;
1252 	u16 speed_ability_ext;
1253 	u64 mac_addr_tmp;
1254 	unsigned int i;
1255 
1256 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1257 
1258 	/* get the configuration */
1259 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1260 					      HCLGE_CFG_VMDQ_M,
1261 					      HCLGE_CFG_VMDQ_S);
1262 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1263 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1264 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265 					    HCLGE_CFG_TQP_DESC_N_M,
1266 					    HCLGE_CFG_TQP_DESC_N_S);
1267 
1268 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269 					HCLGE_CFG_PHY_ADDR_M,
1270 					HCLGE_CFG_PHY_ADDR_S);
1271 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272 					  HCLGE_CFG_MEDIA_TP_M,
1273 					  HCLGE_CFG_MEDIA_TP_S);
1274 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1275 					  HCLGE_CFG_RX_BUF_LEN_M,
1276 					  HCLGE_CFG_RX_BUF_LEN_S);
1277 	/* get mac_address */
1278 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1279 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1280 					    HCLGE_CFG_MAC_ADDR_H_M,
1281 					    HCLGE_CFG_MAC_ADDR_H_S);
1282 
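	/* combine the low 32 bits from param[2] with the high 16 bits from
	 * param[3] to form the 48-bit MAC address (the shift is split in two,
	 * presumably to avoid any single shift of 32 or more bits)
	 */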
1283 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1284 
1285 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286 					     HCLGE_CFG_DEFAULT_SPEED_M,
1287 					     HCLGE_CFG_DEFAULT_SPEED_S);
1288 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289 					       HCLGE_CFG_RSS_SIZE_M,
1290 					       HCLGE_CFG_RSS_SIZE_S);
1291 
1292 	for (i = 0; i < ETH_ALEN; i++)
1293 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1294 
1295 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1296 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1297 
1298 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299 					     HCLGE_CFG_SPEED_ABILITY_M,
1300 					     HCLGE_CFG_SPEED_ABILITY_S);
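	/* newer speeds are reported in a separate extension field and folded
	 * into the upper bits of speed_ability below
	 */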
1301 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1303 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1304 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1305 
1306 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1308 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1309 	if (!cfg->umv_space)
1310 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1311 
1312 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1313 					       HCLGE_CFG_PF_RSS_SIZE_M,
1314 					       HCLGE_CFG_PF_RSS_SIZE_S);
1315 
	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max RSS size as a power-of-2
	 * exponent rather than the value itself, which is more flexible for
	 * future changes and expansions.
	 * A value of 0 means the field is not provided; in that case the PF
	 * uses the same max RSS size as the VF, taken from the
	 * HCLGE_CFG_RSS_SIZE_S field.
	 */
1323 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1324 			       1U << cfg->pf_rss_size_max :
1325 			       cfg->vf_rss_size_max;
1326 }
1327 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
1331  */
1332 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1333 {
1334 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1335 	struct hclge_cfg_param_cmd *req;
1336 	unsigned int i;
1337 	int ret;
1338 
1339 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1340 		u32 offset = 0;
1341 
1342 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1343 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1344 					   true);
1345 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1346 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is given to hardware in units of 4 bytes */
1348 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1349 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1350 		req->offset = cpu_to_le32(offset);
1351 	}
1352 
1353 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1354 	if (ret) {
1355 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1356 		return ret;
1357 	}
1358 
1359 	hclge_parse_cfg(hcfg, desc);
1360 
1361 	return 0;
1362 }
1363 
1364 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1365 {
1366 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1367 
1368 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1369 
1370 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1371 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1372 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1373 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1374 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1375 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1376 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1377 }
1378 
1379 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1380 				  struct hclge_desc *desc)
1381 {
1382 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1383 	struct hclge_dev_specs_0_cmd *req0;
1384 	struct hclge_dev_specs_1_cmd *req1;
1385 
1386 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1387 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1388 
1389 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1390 	ae_dev->dev_specs.rss_ind_tbl_size =
1391 		le16_to_cpu(req0->rss_ind_tbl_size);
1392 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1393 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1394 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1395 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1396 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1397 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1398 }
1399 
1400 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1401 {
1402 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1403 
1404 	if (!dev_specs->max_non_tso_bd_num)
1405 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406 	if (!dev_specs->rss_ind_tbl_size)
1407 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1408 	if (!dev_specs->rss_key_size)
1409 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1410 	if (!dev_specs->max_tm_rate)
1411 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1412 	if (!dev_specs->max_qset_num)
1413 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1414 	if (!dev_specs->max_int_gl)
1415 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1416 	if (!dev_specs->max_frm_size)
1417 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1418 }
1419 
1420 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1421 {
1422 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1423 	int ret;
1424 	int i;
1425 
	/* set default specifications, as devices with a version lower than V3
	 * do not support querying specifications from firmware.
1428 	 */
1429 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1430 		hclge_set_default_dev_specs(hdev);
1431 		return 0;
1432 	}
1433 
1434 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1435 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1436 					   true);
1437 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1438 	}
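	/* the last descriptor is set up without HCLGE_CMD_FLAG_NEXT, which
	 * ends the descriptor chain
	 */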
1439 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1440 
1441 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1442 	if (ret)
1443 		return ret;
1444 
1445 	hclge_parse_dev_specs(hdev, desc);
1446 	hclge_check_dev_specs(hdev);
1447 
1448 	return 0;
1449 }
1450 
1451 static int hclge_get_cap(struct hclge_dev *hdev)
1452 {
1453 	int ret;
1454 
1455 	ret = hclge_query_function_status(hdev);
1456 	if (ret) {
1457 		dev_err(&hdev->pdev->dev,
1458 			"query function status error %d.\n", ret);
1459 		return ret;
1460 	}
1461 
1462 	/* get pf resource */
1463 	return hclge_query_pf_resource(hdev);
1464 }
1465 
1466 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1467 {
1468 #define HCLGE_MIN_TX_DESC	64
1469 #define HCLGE_MIN_RX_DESC	64
1470 
1471 	if (!is_kdump_kernel())
1472 		return;
1473 
1474 	dev_info(&hdev->pdev->dev,
1475 		 "Running kdump kernel. Using minimal resources\n");
1476 
	/* the minimum number of queue pairs equals the number of vports */
1478 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1479 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1480 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1481 }
1482 
1483 static int hclge_configure(struct hclge_dev *hdev)
1484 {
1485 	struct hclge_cfg cfg;
1486 	unsigned int i;
1487 	int ret;
1488 
1489 	ret = hclge_get_cfg(hdev, &cfg);
1490 	if (ret)
1491 		return ret;
1492 
1493 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1494 	hdev->base_tqp_pid = 0;
1495 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1496 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1497 	hdev->rx_buf_len = cfg.rx_buf_len;
1498 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1499 	hdev->hw.mac.media_type = cfg.media_type;
1500 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1501 	hdev->num_tx_desc = cfg.tqp_desc_num;
1502 	hdev->num_rx_desc = cfg.tqp_desc_num;
1503 	hdev->tm_info.num_pg = 1;
1504 	hdev->tc_max = cfg.tc_num;
1505 	hdev->tm_info.hw_pfc_map = 0;
1506 	hdev->wanted_umv_size = cfg.umv_space;
1507 
1508 	if (hnae3_dev_fd_supported(hdev)) {
1509 		hdev->fd_en = true;
1510 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1511 	}
1512 
1513 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1514 	if (ret) {
1515 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1516 			cfg.default_speed, ret);
1517 		return ret;
1518 	}
1519 
1520 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1521 
1522 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1523 
1524 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1525 	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
1527 			 hdev->tc_max);
1528 		hdev->tc_max = 1;
1529 	}
1530 
1531 	/* Dev does not support DCB */
1532 	if (!hnae3_dev_dcb_supported(hdev)) {
1533 		hdev->tc_max = 1;
1534 		hdev->pfc_max = 0;
1535 	} else {
1536 		hdev->pfc_max = hdev->tc_max;
1537 	}
1538 
1539 	hdev->tm_info.num_tc = 1;
1540 
	/* Non-contiguous TCs are currently not supported */
1542 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1543 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1544 
1545 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1546 
1547 	hclge_init_kdump_kernel_config(hdev);
1548 
	/* Set the initial affinity based on the PCI function number */
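	/* pick a CPU on the device's NUMA node, indexed by the PCI function
	 * number modulo the number of CPUs on that node, so that different
	 * functions prefer different local CPUs
	 */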
1550 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1551 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1552 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1553 			&hdev->affinity_mask);
1554 
1555 	return ret;
1556 }
1557 
1558 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1559 			    u16 tso_mss_max)
1560 {
1561 	struct hclge_cfg_tso_status_cmd *req;
1562 	struct hclge_desc desc;
1563 
1564 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1565 
1566 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1567 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1568 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1569 
1570 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1571 }
1572 
1573 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1574 {
1575 	struct hclge_cfg_gro_status_cmd *req;
1576 	struct hclge_desc desc;
1577 	int ret;
1578 
1579 	if (!hnae3_dev_gro_supported(hdev))
1580 		return 0;
1581 
1582 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1583 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1584 
1585 	req->gro_en = en ? 1 : 0;
1586 
1587 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1588 	if (ret)
1589 		dev_err(&hdev->pdev->dev,
1590 			"GRO hardware config cmd failed, ret = %d\n", ret);
1591 
1592 	return ret;
1593 }
1594 
1595 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1596 {
1597 	struct hclge_tqp *tqp;
1598 	int i;
1599 
1600 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1601 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1602 	if (!hdev->htqp)
1603 		return -ENOMEM;
1604 
1605 	tqp = hdev->htqp;
1606 
1607 	for (i = 0; i < hdev->num_tqps; i++) {
1608 		tqp->dev = &hdev->pdev->dev;
1609 		tqp->index = i;
1610 
1611 		tqp->q.ae_algo = &ae_algo;
1612 		tqp->q.buf_size = hdev->rx_buf_len;
1613 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1614 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1615 
1616 		/* need an extended offset to configure queues >=
1617 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1618 		 */
1619 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1620 			tqp->q.io_base = hdev->hw.io_base +
1621 					 HCLGE_TQP_REG_OFFSET +
1622 					 i * HCLGE_TQP_REG_SIZE;
1623 		else
1624 			tqp->q.io_base = hdev->hw.io_base +
1625 					 HCLGE_TQP_REG_OFFSET +
1626 					 HCLGE_TQP_EXT_REG_OFFSET +
1627 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1628 					 HCLGE_TQP_REG_SIZE;
1629 
1630 		tqp++;
1631 	}
1632 
1633 	return 0;
1634 }
1635 
1636 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1637 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1638 {
1639 	struct hclge_tqp_map_cmd *req;
1640 	struct hclge_desc desc;
1641 	int ret;
1642 
1643 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1644 
1645 	req = (struct hclge_tqp_map_cmd *)desc.data;
1646 	req->tqp_id = cpu_to_le16(tqp_pid);
1647 	req->tqp_vf = func_id;
1648 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1649 	if (!is_pf)
1650 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1651 	req->tqp_vid = cpu_to_le16(tqp_vid);
1652 
1653 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1654 	if (ret)
1655 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1656 
1657 	return ret;
1658 }
1659 
1660 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1661 {
1662 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1663 	struct hclge_dev *hdev = vport->back;
1664 	int i, alloced;
1665 
1666 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1667 	     alloced < num_tqps; i++) {
1668 		if (!hdev->htqp[i].alloced) {
1669 			hdev->htqp[i].q.handle = &vport->nic;
1670 			hdev->htqp[i].q.tqp_index = alloced;
1671 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1672 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1673 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1674 			hdev->htqp[i].alloced = true;
1675 			alloced++;
1676 		}
1677 	}
1678 	vport->alloc_tqps = alloced;
1679 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1680 				vport->alloc_tqps / hdev->tm_info.num_tc);
1681 
1682 	/* ensure one-to-one mapping between irq and queue by default */
1683 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1684 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1685 
1686 	return 0;
1687 }
1688 
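/* hclge_knic_setup: initialize the KNIC private info of a vport, allocate
 * its queue pointer array and assign TQPs to it.
 */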
1689 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1690 			    u16 num_tx_desc, u16 num_rx_desc)
1692 {
1693 	struct hnae3_handle *nic = &vport->nic;
1694 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1695 	struct hclge_dev *hdev = vport->back;
1696 	int ret;
1697 
1698 	kinfo->num_tx_desc = num_tx_desc;
1699 	kinfo->num_rx_desc = num_rx_desc;
1700 
1701 	kinfo->rx_buf_len = hdev->rx_buf_len;
1702 
1703 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1704 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1705 	if (!kinfo->tqp)
1706 		return -ENOMEM;
1707 
1708 	ret = hclge_assign_tqp(vport, num_tqps);
1709 	if (ret)
1710 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1711 
1712 	return ret;
1713 }
1714 
1715 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1716 				  struct hclge_vport *vport)
1717 {
1718 	struct hnae3_handle *nic = &vport->nic;
1719 	struct hnae3_knic_private_info *kinfo;
1720 	u16 i;
1721 
1722 	kinfo = &nic->kinfo;
1723 	for (i = 0; i < vport->alloc_tqps; i++) {
1724 		struct hclge_tqp *q =
1725 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1726 		bool is_pf;
1727 		int ret;
1728 
1729 		is_pf = !(vport->vport_id);
1730 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1731 					     i, is_pf);
1732 		if (ret)
1733 			return ret;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 static int hclge_map_tqp(struct hclge_dev *hdev)
1740 {
1741 	struct hclge_vport *vport = hdev->vport;
1742 	u16 i, num_vport;
1743 
1744 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1745 	for (i = 0; i < num_vport; i++)	{
1746 		int ret;
1747 
1748 		ret = hclge_map_tqp_to_vport(hdev, vport);
1749 		if (ret)
1750 			return ret;
1751 
1752 		vport++;
1753 	}
1754 
1755 	return 0;
1756 }
1757 
1758 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1759 {
1760 	struct hnae3_handle *nic = &vport->nic;
1761 	struct hclge_dev *hdev = vport->back;
1762 	int ret;
1763 
1764 	nic->pdev = hdev->pdev;
1765 	nic->ae_algo = &ae_algo;
1766 	nic->numa_node_mask = hdev->numa_node_mask;
1767 
1768 	ret = hclge_knic_setup(vport, num_tqps,
1769 			       hdev->num_tx_desc, hdev->num_rx_desc);
1770 	if (ret)
1771 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1772 
1773 	return ret;
1774 }
1775 
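/* hclge_alloc_vport: allocate one vport for the main NIC of the PF plus one
 * per VMDq vport and requested VF, distribute the TQPs evenly among them
 * (the remainder goes to the main vport) and set up each vport.
 */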
1776 static int hclge_alloc_vport(struct hclge_dev *hdev)
1777 {
1778 	struct pci_dev *pdev = hdev->pdev;
1779 	struct hclge_vport *vport;
1780 	u32 tqp_main_vport;
1781 	u32 tqp_per_vport;
1782 	int num_vport, i;
1783 	int ret;
1784 
1785 	/* We need to alloc a vport for the main NIC of the PF */
1786 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1787 
1788 	if (hdev->num_tqps < num_vport) {
1789 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1790 			hdev->num_tqps, num_vport);
1791 		return -EINVAL;
1792 	}
1793 
1794 	/* Alloc the same number of TQPs for every vport */
1795 	tqp_per_vport = hdev->num_tqps / num_vport;
1796 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1797 
1798 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1799 			     GFP_KERNEL);
1800 	if (!vport)
1801 		return -ENOMEM;
1802 
1803 	hdev->vport = vport;
1804 	hdev->num_alloc_vport = num_vport;
1805 
1806 	if (IS_ENABLED(CONFIG_PCI_IOV))
1807 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1808 
1809 	for (i = 0; i < num_vport; i++) {
1810 		vport->back = hdev;
1811 		vport->vport_id = i;
1812 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1813 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1814 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1815 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1816 		INIT_LIST_HEAD(&vport->vlan_list);
1817 		INIT_LIST_HEAD(&vport->uc_mac_list);
1818 		INIT_LIST_HEAD(&vport->mc_mac_list);
1819 		spin_lock_init(&vport->mac_list_lock);
1820 
1821 		if (i == 0)
1822 			ret = hclge_vport_setup(vport, tqp_main_vport);
1823 		else
1824 			ret = hclge_vport_setup(vport, tqp_per_vport);
1825 		if (ret) {
1826 			dev_err(&pdev->dev,
1827 				"vport setup failed for vport %d, %d\n",
1828 				i, ret);
1829 			return ret;
1830 		}
1831 
1832 		vport++;
1833 	}
1834 
1835 	return 0;
1836 }
1837 
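/* hclge_cmd_alloc_tx_buff: program the per-TC TX packet buffer sizes,
 * expressed in 128-byte units with the update-enable bit set.
 */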
1838 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1839 				    struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 /* TX buffer size is in units of 128 bytes */
1842 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1843 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1844 	struct hclge_tx_buff_alloc_cmd *req;
1845 	struct hclge_desc desc;
1846 	int ret;
1847 	u8 i;
1848 
1849 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1850 
1851 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1852 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1853 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1854 
1855 		req->tx_pkt_buff[i] =
1856 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1857 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1858 	}
1859 
1860 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1861 	if (ret)
1862 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1863 			ret);
1864 
1865 	return ret;
1866 }
1867 
1868 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1869 				 struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1872 
1873 	if (ret)
1874 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1875 
1876 	return ret;
1877 }
1878 
1879 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1880 {
1881 	unsigned int i;
1882 	u32 cnt = 0;
1883 
1884 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1885 		if (hdev->hw_tc_map & BIT(i))
1886 			cnt++;
1887 	return cnt;
1888 }
1889 
1890 /* Get the number of pfc enabled TCs, which have private buffer */
1891 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1892 				  struct hclge_pkt_buf_alloc *buf_alloc)
1893 {
1894 	struct hclge_priv_buf *priv;
1895 	unsigned int i;
1896 	int cnt = 0;
1897 
1898 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899 		priv = &buf_alloc->priv_buf[i];
1900 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1901 		    priv->enable)
1902 			cnt++;
1903 	}
1904 
1905 	return cnt;
1906 }
1907 
1908 /* Get the number of pfc disabled TCs, which have private buffer */
1909 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1910 				     struct hclge_pkt_buf_alloc *buf_alloc)
1911 {
1912 	struct hclge_priv_buf *priv;
1913 	unsigned int i;
1914 	int cnt = 0;
1915 
1916 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1917 		priv = &buf_alloc->priv_buf[i];
1918 		if (hdev->hw_tc_map & BIT(i) &&
1919 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1920 		    priv->enable)
1921 			cnt++;
1922 	}
1923 
1924 	return cnt;
1925 }
1926 
1927 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1928 {
1929 	struct hclge_priv_buf *priv;
1930 	u32 rx_priv = 0;
1931 	int i;
1932 
1933 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1934 		priv = &buf_alloc->priv_buf[i];
1935 		if (priv->enable)
1936 			rx_priv += priv->buf_size;
1937 	}
1938 	return rx_priv;
1939 }
1940 
1941 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1942 {
1943 	u32 i, total_tx_size = 0;
1944 
1945 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1946 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1947 
1948 	return total_tx_size;
1949 }
1950 
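/* hclge_is_rx_buf_ok: check whether rx_all can hold both the private
 * buffers already calculated and the required shared buffer. If it can,
 * fill in the shared buffer size, its waterlines and the per-TC thresholds.
 * @return: true if the rx buffer fits, false otherwise
 */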
1951 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1952 				struct hclge_pkt_buf_alloc *buf_alloc,
1953 				u32 rx_all)
1954 {
1955 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1956 	u32 tc_num = hclge_get_tc_num(hdev);
1957 	u32 shared_buf, aligned_mps;
1958 	u32 rx_priv;
1959 	int i;
1960 
1961 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1962 
1963 	if (hnae3_dev_dcb_supported(hdev))
1964 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1965 					hdev->dv_buf_size;
1966 	else
1967 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1968 					+ hdev->dv_buf_size;
1969 
1970 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1971 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1972 			     HCLGE_BUF_SIZE_UNIT);
1973 
1974 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1975 	if (rx_all < rx_priv + shared_std)
1976 		return false;
1977 
1978 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1979 	buf_alloc->s_buf.buf_size = shared_buf;
1980 	if (hnae3_dev_dcb_supported(hdev)) {
1981 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1982 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1983 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1984 				  HCLGE_BUF_SIZE_UNIT);
1985 	} else {
1986 		buf_alloc->s_buf.self.high = aligned_mps +
1987 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1988 		buf_alloc->s_buf.self.low = aligned_mps;
1989 	}
1990 
1991 	if (hnae3_dev_dcb_supported(hdev)) {
1992 		hi_thrd = shared_buf - hdev->dv_buf_size;
1993 
1994 		if (tc_num <= NEED_RESERVE_TC_NUM)
1995 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1996 					/ BUF_MAX_PERCENT;
1997 
1998 		if (tc_num)
1999 			hi_thrd = hi_thrd / tc_num;
2000 
2001 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2002 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2003 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2004 	} else {
2005 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2006 		lo_thrd = aligned_mps;
2007 	}
2008 
2009 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2010 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2011 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2012 	}
2013 
2014 	return true;
2015 }
2016 
2017 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018 				struct hclge_pkt_buf_alloc *buf_alloc)
2019 {
2020 	u32 i, total_size;
2021 
2022 	total_size = hdev->pkt_buf_size;
2023 
2024 	/* alloc tx buffer for all enabled tc */
2025 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2026 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2027 
2028 		if (hdev->hw_tc_map & BIT(i)) {
2029 			if (total_size < hdev->tx_buf_size)
2030 				return -ENOMEM;
2031 
2032 			priv->tx_buf_size = hdev->tx_buf_size;
2033 		} else {
2034 			priv->tx_buf_size = 0;
2035 		}
2036 
2037 		total_size -= priv->tx_buf_size;
2038 	}
2039 
2040 	return 0;
2041 }
2042 
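/* hclge_rx_buf_calc_all: calculate the private buffer size and waterlines
 * for every enabled TC, using larger watermarks when @max is true, then
 * check whether the result fits into the available rx buffer.
 */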
2043 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044 				  struct hclge_pkt_buf_alloc *buf_alloc)
2045 {
2046 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2048 	unsigned int i;
2049 
2050 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2051 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2052 
2053 		priv->enable = 0;
2054 		priv->wl.low = 0;
2055 		priv->wl.high = 0;
2056 		priv->buf_size = 0;
2057 
2058 		if (!(hdev->hw_tc_map & BIT(i)))
2059 			continue;
2060 
2061 		priv->enable = 1;
2062 
2063 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2064 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2065 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066 						HCLGE_BUF_SIZE_UNIT);
2067 		} else {
2068 			priv->wl.low = 0;
2069 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2070 					aligned_mps;
2071 		}
2072 
2073 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2074 	}
2075 
2076 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078 
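/* hclge_drop_nopfc_buf_till_fit: starting from the highest TC, clear the
 * private buffer of TCs without PFC enabled until the remaining buffers
 * fit into the available rx buffer.
 */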
2079 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080 					  struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2084 	int i;
2085 
2086 	/* let the last one be cleared first */
2087 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089 		unsigned int mask = BIT((unsigned int)i);
2090 
2091 		if (hdev->hw_tc_map & mask &&
2092 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2093 			/* Clear the no pfc TC private buffer */
2094 			priv->wl.low = 0;
2095 			priv->wl.high = 0;
2096 			priv->buf_size = 0;
2097 			priv->enable = 0;
2098 			no_pfc_priv_num--;
2099 		}
2100 
2101 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102 		    no_pfc_priv_num == 0)
2103 			break;
2104 	}
2105 
2106 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108 
2109 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110 					struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2114 	int i;
2115 
2116 	/* let the last one be cleared first */
2117 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 		unsigned int mask = BIT((unsigned int)i);
2120 
2121 		if (hdev->hw_tc_map & mask &&
2122 		    hdev->tm_info.hw_pfc_map & mask) {
2123 			/* Reduce the number of pfc TC with private buffer */
2124 			priv->wl.low = 0;
2125 			priv->enable = 0;
2126 			priv->wl.high = 0;
2127 			priv->buf_size = 0;
2128 			pfc_priv_num--;
2129 		}
2130 
2131 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 		    pfc_priv_num == 0)
2133 			break;
2134 	}
2135 
2136 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138 
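/* hclge_only_alloc_priv_buff: try to split the whole rx buffer into per-TC
 * private buffers with no shared buffer; fails when the per-TC share would
 * drop below the minimum required size.
 */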
2139 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140 				      struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 #define COMPENSATE_BUFFER	0x3C00
2143 #define COMPENSATE_HALF_MPS_NUM	5
2144 #define PRIV_WL_GAP		0x1800
2145 
2146 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147 	u32 tc_num = hclge_get_tc_num(hdev);
2148 	u32 half_mps = hdev->mps >> 1;
2149 	u32 min_rx_priv;
2150 	unsigned int i;
2151 
2152 	if (tc_num)
2153 		rx_priv = rx_priv / tc_num;
2154 
2155 	if (tc_num <= NEED_RESERVE_TC_NUM)
2156 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2157 
2158 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159 			COMPENSATE_HALF_MPS_NUM * half_mps;
2160 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2162 
2163 	if (rx_priv < min_rx_priv)
2164 		return false;
2165 
2166 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2168 
2169 		priv->enable = 0;
2170 		priv->wl.low = 0;
2171 		priv->wl.high = 0;
2172 		priv->buf_size = 0;
2173 
2174 		if (!(hdev->hw_tc_map & BIT(i)))
2175 			continue;
2176 
2177 		priv->enable = 1;
2178 		priv->buf_size = rx_priv;
2179 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2180 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2181 	}
2182 
2183 	buf_alloc->s_buf.buf_size = 0;
2184 
2185 	return true;
2186 }
2187 
2188 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189  * @hdev: pointer to struct hclge_dev
2190  * @buf_alloc: pointer to buffer calculation data
2191  * @return: 0: calculated successfully, negative: fail
2192  */
2193 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194 				struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 	/* When DCB is not supported, rx private buffer is not allocated. */
2197 	if (!hnae3_dev_dcb_supported(hdev)) {
2198 		u32 rx_all = hdev->pkt_buf_size;
2199 
2200 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2202 			return -ENOMEM;
2203 
2204 		return 0;
2205 	}
2206 
2207 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2208 		return 0;
2209 
2210 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2211 		return 0;
2212 
2213 	/* try to decrease the buffer size */
2214 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2215 		return 0;
2216 
2217 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2218 		return 0;
2219 
2220 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2221 		return 0;
2222 
2223 	return -ENOMEM;
2224 }
2225 
2226 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227 				   struct hclge_pkt_buf_alloc *buf_alloc)
2228 {
2229 	struct hclge_rx_priv_buff_cmd *req;
2230 	struct hclge_desc desc;
2231 	int ret;
2232 	int i;
2233 
2234 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2235 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2236 
2237 	/* Alloc private buffer TCs */
2238 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2239 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2240 
2241 		req->buf_num[i] =
2242 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2243 		req->buf_num[i] |=
2244 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2245 	}
2246 
2247 	req->shared_buf =
2248 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2249 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2250 
2251 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2252 	if (ret)
2253 		dev_err(&hdev->pdev->dev,
2254 			"rx private buffer alloc cmd failed %d\n", ret);
2255 
2256 	return ret;
2257 }
2258 
2259 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260 				   struct hclge_pkt_buf_alloc *buf_alloc)
2261 {
2262 	struct hclge_rx_priv_wl_buf *req;
2263 	struct hclge_priv_buf *priv;
2264 	struct hclge_desc desc[2];
2265 	int i, j;
2266 	int ret;
2267 
2268 	for (i = 0; i < 2; i++) {
2269 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2270 					   false);
2271 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2272 
2273 		/* The first descriptor sets the NEXT bit to 1 */
2274 		if (i == 0)
2275 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2276 		else
2277 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2278 
2279 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2280 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2281 
2282 			priv = &buf_alloc->priv_buf[idx];
2283 			req->tc_wl[j].high =
2284 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285 			req->tc_wl[j].high |=
2286 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2287 			req->tc_wl[j].low =
2288 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289 			req->tc_wl[j].low |=
2290 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2291 		}
2292 	}
2293 
2294 	/* Send 2 descriptors at one time */
2295 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2296 	if (ret)
2297 		dev_err(&hdev->pdev->dev,
2298 			"rx private waterline config cmd failed %d\n",
2299 			ret);
2300 	return ret;
2301 }
2302 
2303 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304 				    struct hclge_pkt_buf_alloc *buf_alloc)
2305 {
2306 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2307 	struct hclge_rx_com_thrd *req;
2308 	struct hclge_desc desc[2];
2309 	struct hclge_tc_thrd *tc;
2310 	int i, j;
2311 	int ret;
2312 
2313 	for (i = 0; i < 2; i++) {
2314 		hclge_cmd_setup_basic_desc(&desc[i],
2315 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2317 
2318 		/* The first descriptor sets the NEXT bit to 1 */
2319 		if (i == 0)
2320 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2321 		else
2322 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2323 
2324 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2326 
2327 			req->com_thrd[j].high =
2328 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329 			req->com_thrd[j].high |=
2330 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 			req->com_thrd[j].low =
2332 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333 			req->com_thrd[j].low |=
2334 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2335 		}
2336 	}
2337 
2338 	/* Send 2 descriptors at one time */
2339 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2340 	if (ret)
2341 		dev_err(&hdev->pdev->dev,
2342 			"common threshold config cmd failed %d\n", ret);
2343 	return ret;
2344 }
2345 
2346 static int hclge_common_wl_config(struct hclge_dev *hdev,
2347 				  struct hclge_pkt_buf_alloc *buf_alloc)
2348 {
2349 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2350 	struct hclge_rx_com_wl *req;
2351 	struct hclge_desc desc;
2352 	int ret;
2353 
2354 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2355 
2356 	req = (struct hclge_rx_com_wl *)desc.data;
2357 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2358 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2359 
2360 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2361 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2362 
2363 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2364 	if (ret)
2365 		dev_err(&hdev->pdev->dev,
2366 			"common waterline config cmd failed %d\n", ret);
2367 
2368 	return ret;
2369 }
2370 
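/* hclge_buffer_alloc: calculate and program the tx/rx packet buffer layout:
 * tx buffers, rx private buffers and, when DCB is supported, the rx private
 * waterlines and common thresholds, followed by the common waterline.
 */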
2371 int hclge_buffer_alloc(struct hclge_dev *hdev)
2372 {
2373 	struct hclge_pkt_buf_alloc *pkt_buf;
2374 	int ret;
2375 
2376 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2377 	if (!pkt_buf)
2378 		return -ENOMEM;
2379 
2380 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2381 	if (ret) {
2382 		dev_err(&hdev->pdev->dev,
2383 			"could not calc tx buffer size for all TCs %d\n", ret);
2384 		goto out;
2385 	}
2386 
2387 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2388 	if (ret) {
2389 		dev_err(&hdev->pdev->dev,
2390 			"could not alloc tx buffers %d\n", ret);
2391 		goto out;
2392 	}
2393 
2394 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2395 	if (ret) {
2396 		dev_err(&hdev->pdev->dev,
2397 			"could not calc rx priv buffer size for all TCs %d\n",
2398 			ret);
2399 		goto out;
2400 	}
2401 
2402 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2403 	if (ret) {
2404 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2405 			ret);
2406 		goto out;
2407 	}
2408 
2409 	if (hnae3_dev_dcb_supported(hdev)) {
2410 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2411 		if (ret) {
2412 			dev_err(&hdev->pdev->dev,
2413 				"could not configure rx private waterline %d\n",
2414 				ret);
2415 			goto out;
2416 		}
2417 
2418 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2419 		if (ret) {
2420 			dev_err(&hdev->pdev->dev,
2421 				"could not configure common threshold %d\n",
2422 				ret);
2423 			goto out;
2424 		}
2425 	}
2426 
2427 	ret = hclge_common_wl_config(hdev, pkt_buf);
2428 	if (ret)
2429 		dev_err(&hdev->pdev->dev,
2430 			"could not configure common waterline %d\n", ret);
2431 
2432 out:
2433 	kfree(pkt_buf);
2434 	return ret;
2435 }
2436 
2437 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2438 {
2439 	struct hnae3_handle *roce = &vport->roce;
2440 	struct hnae3_handle *nic = &vport->nic;
2441 	struct hclge_dev *hdev = vport->back;
2442 
2443 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2444 
2445 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2446 		return -EINVAL;
2447 
2448 	roce->rinfo.base_vector = hdev->roce_base_vector;
2449 
2450 	roce->rinfo.netdev = nic->kinfo.netdev;
2451 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2452 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2453 
2454 	roce->pdev = nic->pdev;
2455 	roce->ae_algo = nic->ae_algo;
2456 	roce->numa_node_mask = nic->numa_node_mask;
2457 
2458 	return 0;
2459 }
2460 
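/* hclge_init_msi: allocate MSI/MSI-X vectors for the PF and set up the
 * vector status and vector irq tracking arrays.
 */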
2461 static int hclge_init_msi(struct hclge_dev *hdev)
2462 {
2463 	struct pci_dev *pdev = hdev->pdev;
2464 	int vectors;
2465 	int i;
2466 
2467 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2468 					hdev->num_msi,
2469 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2470 	if (vectors < 0) {
2471 		dev_err(&pdev->dev,
2472 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2473 			vectors);
2474 		return vectors;
2475 	}
2476 	if (vectors < hdev->num_msi)
2477 		dev_warn(&hdev->pdev->dev,
2478 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2479 			 hdev->num_msi, vectors);
2480 
2481 	hdev->num_msi = vectors;
2482 	hdev->num_msi_left = vectors;
2483 
2484 	hdev->base_msi_vector = pdev->irq;
2485 	hdev->roce_base_vector = hdev->base_msi_vector +
2486 				hdev->num_nic_msi;
2487 
2488 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489 					   sizeof(u16), GFP_KERNEL);
2490 	if (!hdev->vector_status) {
2491 		pci_free_irq_vectors(pdev);
2492 		return -ENOMEM;
2493 	}
2494 
2495 	for (i = 0; i < hdev->num_msi; i++)
2496 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2497 
2498 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499 					sizeof(int), GFP_KERNEL);
2500 	if (!hdev->vector_irq) {
2501 		pci_free_irq_vectors(pdev);
2502 		return -ENOMEM;
2503 	}
2504 
2505 	return 0;
2506 }
2507 
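/* hclge_check_speed_dup: half duplex is only valid for 10M/100M, force
 * full duplex for any other speed.
 */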
2508 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2509 {
2510 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511 		duplex = HCLGE_MAC_FULL;
2512 
2513 	return duplex;
2514 }
2515 
2516 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2517 				      u8 duplex)
2518 {
2519 	struct hclge_config_mac_speed_dup_cmd *req;
2520 	struct hclge_desc desc;
2521 	int ret;
2522 
2523 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2524 
2525 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2526 
2527 	if (duplex)
2528 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2529 
2530 	switch (speed) {
2531 	case HCLGE_MAC_SPEED_10M:
2532 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 				HCLGE_CFG_SPEED_S, 6);
2534 		break;
2535 	case HCLGE_MAC_SPEED_100M:
2536 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 				HCLGE_CFG_SPEED_S, 7);
2538 		break;
2539 	case HCLGE_MAC_SPEED_1G:
2540 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 				HCLGE_CFG_SPEED_S, 0);
2542 		break;
2543 	case HCLGE_MAC_SPEED_10G:
2544 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 				HCLGE_CFG_SPEED_S, 1);
2546 		break;
2547 	case HCLGE_MAC_SPEED_25G:
2548 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 				HCLGE_CFG_SPEED_S, 2);
2550 		break;
2551 	case HCLGE_MAC_SPEED_40G:
2552 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 				HCLGE_CFG_SPEED_S, 3);
2554 		break;
2555 	case HCLGE_MAC_SPEED_50G:
2556 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 				HCLGE_CFG_SPEED_S, 4);
2558 		break;
2559 	case HCLGE_MAC_SPEED_100G:
2560 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561 				HCLGE_CFG_SPEED_S, 5);
2562 		break;
2563 	case HCLGE_MAC_SPEED_200G:
2564 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565 				HCLGE_CFG_SPEED_S, 8);
2566 		break;
2567 	default:
2568 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2569 		return -EINVAL;
2570 	}
2571 
2572 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2573 		      1);
2574 
2575 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576 	if (ret) {
2577 		dev_err(&hdev->pdev->dev,
2578 			"mac speed/duplex config cmd failed %d.\n", ret);
2579 		return ret;
2580 	}
2581 
2582 	return 0;
2583 }
2584 
2585 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2586 {
2587 	struct hclge_mac *mac = &hdev->hw.mac;
2588 	int ret;
2589 
2590 	duplex = hclge_check_speed_dup(duplex, speed);
2591 	if (!mac->support_autoneg && mac->speed == speed &&
2592 	    mac->duplex == duplex)
2593 		return 0;
2594 
2595 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2596 	if (ret)
2597 		return ret;
2598 
2599 	hdev->hw.mac.speed = speed;
2600 	hdev->hw.mac.duplex = duplex;
2601 
2602 	return 0;
2603 }
2604 
2605 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2606 				     u8 duplex)
2607 {
2608 	struct hclge_vport *vport = hclge_get_vport(handle);
2609 	struct hclge_dev *hdev = vport->back;
2610 
2611 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2612 }
2613 
2614 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2615 {
2616 	struct hclge_config_auto_neg_cmd *req;
2617 	struct hclge_desc desc;
2618 	u32 flag = 0;
2619 	int ret;
2620 
2621 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2622 
2623 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2624 	if (enable)
2625 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2626 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2627 
2628 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629 	if (ret)
2630 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2631 			ret);
2632 
2633 	return ret;
2634 }
2635 
2636 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2637 {
2638 	struct hclge_vport *vport = hclge_get_vport(handle);
2639 	struct hclge_dev *hdev = vport->back;
2640 
2641 	if (!hdev->hw.mac.support_autoneg) {
2642 		if (enable) {
2643 			dev_err(&hdev->pdev->dev,
2644 				"autoneg is not supported by current port\n");
2645 			return -EOPNOTSUPP;
2646 		} else {
2647 			return 0;
2648 		}
2649 	}
2650 
2651 	return hclge_set_autoneg_en(hdev, enable);
2652 }
2653 
2654 static int hclge_get_autoneg(struct hnae3_handle *handle)
2655 {
2656 	struct hclge_vport *vport = hclge_get_vport(handle);
2657 	struct hclge_dev *hdev = vport->back;
2658 	struct phy_device *phydev = hdev->hw.mac.phydev;
2659 
2660 	if (phydev)
2661 		return phydev->autoneg;
2662 
2663 	return hdev->hw.mac.autoneg;
2664 }
2665 
2666 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2667 {
2668 	struct hclge_vport *vport = hclge_get_vport(handle);
2669 	struct hclge_dev *hdev = vport->back;
2670 	int ret;
2671 
2672 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2673 
2674 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2675 	if (ret)
2676 		return ret;
2677 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2678 }
2679 
2680 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2681 {
2682 	struct hclge_vport *vport = hclge_get_vport(handle);
2683 	struct hclge_dev *hdev = vport->back;
2684 
2685 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686 		return hclge_set_autoneg_en(hdev, !halt);
2687 
2688 	return 0;
2689 }
2690 
2691 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2692 {
2693 	struct hclge_config_fec_cmd *req;
2694 	struct hclge_desc desc;
2695 	int ret;
2696 
2697 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2698 
2699 	req = (struct hclge_config_fec_cmd *)desc.data;
2700 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702 	if (fec_mode & BIT(HNAE3_FEC_RS))
2703 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2706 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2708 
2709 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2710 	if (ret)
2711 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2712 
2713 	return ret;
2714 }
2715 
2716 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2717 {
2718 	struct hclge_vport *vport = hclge_get_vport(handle);
2719 	struct hclge_dev *hdev = vport->back;
2720 	struct hclge_mac *mac = &hdev->hw.mac;
2721 	int ret;
2722 
2723 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2725 		return -EINVAL;
2726 	}
2727 
2728 	ret = hclge_set_fec_hw(hdev, fec_mode);
2729 	if (ret)
2730 		return ret;
2731 
2732 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2733 	return 0;
2734 }
2735 
2736 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2737 			  u8 *fec_mode)
2738 {
2739 	struct hclge_vport *vport = hclge_get_vport(handle);
2740 	struct hclge_dev *hdev = vport->back;
2741 	struct hclge_mac *mac = &hdev->hw.mac;
2742 
2743 	if (fec_ability)
2744 		*fec_ability = mac->fec_ability;
2745 	if (fec_mode)
2746 		*fec_mode = mac->fec_mode;
2747 }
2748 
2749 static int hclge_mac_init(struct hclge_dev *hdev)
2750 {
2751 	struct hclge_mac *mac = &hdev->hw.mac;
2752 	int ret;
2753 
2754 	hdev->support_sfp_query = true;
2755 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757 					 hdev->hw.mac.duplex);
2758 	if (ret)
2759 		return ret;
2760 
2761 	if (hdev->hw.mac.support_autoneg) {
2762 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2763 		if (ret)
2764 			return ret;
2765 	}
2766 
2767 	mac->link = 0;
2768 
2769 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2771 		if (ret)
2772 			return ret;
2773 	}
2774 
2775 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2776 	if (ret) {
2777 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2778 		return ret;
2779 	}
2780 
2781 	ret = hclge_set_default_loopback(hdev);
2782 	if (ret)
2783 		return ret;
2784 
2785 	ret = hclge_buffer_alloc(hdev);
2786 	if (ret)
2787 		dev_err(&hdev->pdev->dev,
2788 			"allocate buffer fail, ret=%d\n", ret);
2789 
2790 	return ret;
2791 }
2792 
2793 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2794 {
2795 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2797 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798 				    hclge_wq, &hdev->service_task, 0);
2799 }
2800 
2801 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2802 {
2803 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2805 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806 				    hclge_wq, &hdev->service_task, 0);
2807 }
2808 
2809 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2810 {
2811 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2813 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814 				    hclge_wq, &hdev->service_task,
2815 				    delay_time);
2816 }
2817 
2818 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2819 {
2820 	struct hclge_link_status_cmd *req;
2821 	struct hclge_desc desc;
2822 	int ret;
2823 
2824 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2826 	if (ret) {
2827 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2828 			ret);
2829 		return ret;
2830 	}
2831 
2832 	req = (struct hclge_link_status_cmd *)desc.data;
2833 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2835 
2836 	return 0;
2837 }
2838 
2839 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2840 {
2841 	struct phy_device *phydev = hdev->hw.mac.phydev;
2842 
2843 	*link_status = HCLGE_LINK_STATUS_DOWN;
2844 
2845 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2846 		return 0;
2847 
2848 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2849 		return 0;
2850 
2851 	return hclge_get_mac_link_status(hdev, link_status);
2852 }
2853 
2854 static void hclge_update_link_status(struct hclge_dev *hdev)
2855 {
2856 	struct hnae3_client *rclient = hdev->roce_client;
2857 	struct hnae3_client *client = hdev->nic_client;
2858 	struct hnae3_handle *rhandle;
2859 	struct hnae3_handle *handle;
2860 	int state;
2861 	int ret;
2862 	int i;
2863 
2864 	if (!client)
2865 		return;
2866 
2867 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2868 		return;
2869 
2870 	ret = hclge_get_mac_phy_link(hdev, &state);
2871 	if (ret) {
2872 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2873 		return;
2874 	}
2875 
2876 	if (state != hdev->hw.mac.link) {
2877 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878 			handle = &hdev->vport[i].nic;
2879 			client->ops->link_status_change(handle, state);
2880 			hclge_config_mac_tnl_int(hdev, state);
2881 			rhandle = &hdev->vport[i].roce;
2882 			if (rclient && rclient->ops->link_status_change)
2883 				rclient->ops->link_status_change(rhandle,
2884 								 state);
2885 		}
2886 		hdev->hw.mac.link = state;
2887 	}
2888 
2889 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2890 }
2891 
2892 static void hclge_update_port_capability(struct hclge_mac *mac)
2893 {
2894 	/* update fec ability by speed */
2895 	hclge_convert_setting_fec(mac);
2896 
2897 	/* firmware cannot identify backplane type, the media type
2898 	 * read from configuration can help to deal with it
2899 	 */
2900 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2901 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2902 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2903 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2904 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2905 
2906 	if (mac->support_autoneg) {
2907 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2908 		linkmode_copy(mac->advertising, mac->supported);
2909 	} else {
2910 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2911 				   mac->supported);
2912 		linkmode_zero(mac->advertising);
2913 	}
2914 }
2915 
2916 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2917 {
2918 	struct hclge_sfp_info_cmd *resp;
2919 	struct hclge_desc desc;
2920 	int ret;
2921 
2922 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2923 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2924 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2925 	if (ret == -EOPNOTSUPP) {
2926 		dev_warn(&hdev->pdev->dev,
2927 			 "IMP does not support getting SFP speed %d\n", ret);
2928 		return ret;
2929 	} else if (ret) {
2930 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2931 		return ret;
2932 	}
2933 
2934 	*speed = le32_to_cpu(resp->speed);
2935 
2936 	return 0;
2937 }
2938 
2939 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2940 {
2941 	struct hclge_sfp_info_cmd *resp;
2942 	struct hclge_desc desc;
2943 	int ret;
2944 
2945 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2946 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2947 
2948 	resp->query_type = QUERY_ACTIVE_SPEED;
2949 
2950 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951 	if (ret == -EOPNOTSUPP) {
2952 		dev_warn(&hdev->pdev->dev,
2953 			 "IMP does not support getting SFP info %d\n", ret);
2954 		return ret;
2955 	} else if (ret) {
2956 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2957 		return ret;
2958 	}
2959 
2960 	/* In some cases, the mac speed fetched from IMP may be 0, it
2961 	 * shouldn't be set to mac->speed.
2962 	 */
2963 	if (!le32_to_cpu(resp->speed))
2964 		return 0;
2965 
2966 	mac->speed = le32_to_cpu(resp->speed);
2967 	/* if resp->speed_ability is 0, it means the firmware is an old
2968 	 * version, so do not update these params
2969 	 */
2970 	if (resp->speed_ability) {
2971 		mac->module_type = le32_to_cpu(resp->module_type);
2972 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2973 		mac->autoneg = resp->autoneg;
2974 		mac->support_autoneg = resp->autoneg_ability;
2975 		mac->speed_type = QUERY_ACTIVE_SPEED;
2976 		if (!resp->active_fec)
2977 			mac->fec_mode = 0;
2978 		else
2979 			mac->fec_mode = BIT(resp->active_fec);
2980 	} else {
2981 		mac->speed_type = QUERY_SFP_SPEED;
2982 	}
2983 
2984 	return 0;
2985 }
2986 
2987 static int hclge_update_port_info(struct hclge_dev *hdev)
2988 {
2989 	struct hclge_mac *mac = &hdev->hw.mac;
2990 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2991 	int ret;
2992 
2993 	/* get the port info from SFP cmd if not copper port */
2994 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2995 		return 0;
2996 
2997 	/* if IMP does not support get SFP/qSFP info, return directly */
2998 	if (!hdev->support_sfp_query)
2999 		return 0;
3000 
3001 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3002 		ret = hclge_get_sfp_info(hdev, mac);
3003 	else
3004 		ret = hclge_get_sfp_speed(hdev, &speed);
3005 
3006 	if (ret == -EOPNOTSUPP) {
3007 		hdev->support_sfp_query = false;
3008 		return ret;
3009 	} else if (ret) {
3010 		return ret;
3011 	}
3012 
3013 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3014 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3015 			hclge_update_port_capability(mac);
3016 			return 0;
3017 		}
3018 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3019 					       HCLGE_MAC_FULL);
3020 	} else {
3021 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3022 			return 0; /* do nothing if no SFP */
3023 
3024 		/* must config full duplex for SFP */
3025 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3026 	}
3027 }
3028 
3029 static int hclge_get_status(struct hnae3_handle *handle)
3030 {
3031 	struct hclge_vport *vport = hclge_get_vport(handle);
3032 	struct hclge_dev *hdev = vport->back;
3033 
3034 	hclge_update_link_status(hdev);
3035 
3036 	return hdev->hw.mac.link;
3037 }
3038 
3039 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3040 {
3041 	if (!pci_num_vf(hdev->pdev)) {
3042 		dev_err(&hdev->pdev->dev,
3043 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3044 		return NULL;
3045 	}
3046 
3047 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3048 		dev_err(&hdev->pdev->dev,
3049 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3050 			vf, pci_num_vf(hdev->pdev));
3051 		return NULL;
3052 	}
3053 
3054 	/* VFs start from 1 in vport */
3055 	vf += HCLGE_VF_VPORT_START_NUM;
3056 	return &hdev->vport[vf];
3057 }
3058 
3059 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3060 			       struct ifla_vf_info *ivf)
3061 {
3062 	struct hclge_vport *vport = hclge_get_vport(handle);
3063 	struct hclge_dev *hdev = vport->back;
3064 
3065 	vport = hclge_get_vf_vport(hdev, vf);
3066 	if (!vport)
3067 		return -EINVAL;
3068 
3069 	ivf->vf = vf;
3070 	ivf->linkstate = vport->vf_info.link_state;
3071 	ivf->spoofchk = vport->vf_info.spoofchk;
3072 	ivf->trusted = vport->vf_info.trusted;
3073 	ivf->min_tx_rate = 0;
3074 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3075 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3076 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3077 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3078 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3079 
3080 	return 0;
3081 }
3082 
3083 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3084 				   int link_state)
3085 {
3086 	struct hclge_vport *vport = hclge_get_vport(handle);
3087 	struct hclge_dev *hdev = vport->back;
3088 
3089 	vport = hclge_get_vf_vport(hdev, vf);
3090 	if (!vport)
3091 		return -EINVAL;
3092 
3093 	vport->vf_info.link_state = link_state;
3094 
3095 	return 0;
3096 }
3097 
3098 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3099 {
3100 	u32 cmdq_src_reg, msix_src_reg;
3101 
3102 	/* fetch the events from their corresponding regs */
3103 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3104 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3105 
3106 	/* Assumption: If by any chance reset and mailbox events are reported
3107 	 * together then we will only process the reset event in this go and
3108 	 * will defer the processing of the mailbox events. Since we would not
3109 	 * have cleared the RX CMDQ event this time, we would receive another
3110 	 * interrupt from H/W just for the mailbox.
3111 	 *
3112 	 * check for vector0 reset event sources
3113 	 */
3114 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3115 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3116 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3117 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3118 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3119 		hdev->rst_stats.imp_rst_cnt++;
3120 		return HCLGE_VECTOR0_EVENT_RST;
3121 	}
3122 
3123 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3124 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3125 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3126 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3127 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3128 		hdev->rst_stats.global_rst_cnt++;
3129 		return HCLGE_VECTOR0_EVENT_RST;
3130 	}
3131 
3132 	/* check for vector0 msix event source */
3133 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3134 		*clearval = msix_src_reg;
3135 		return HCLGE_VECTOR0_EVENT_ERR;
3136 	}
3137 
3138 	/* check for vector0 mailbox(=CMDQ RX) event source */
3139 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3140 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3141 		*clearval = cmdq_src_reg;
3142 		return HCLGE_VECTOR0_EVENT_MBX;
3143 	}
3144 
3145 	/* print other vector0 event source */
3146 	dev_info(&hdev->pdev->dev,
3147 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3148 		 cmdq_src_reg, msix_src_reg);
3149 	*clearval = msix_src_reg;
3150 
3151 	return HCLGE_VECTOR0_EVENT_OTHER;
3152 }
3153 
3154 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3155 				    u32 regclr)
3156 {
3157 	switch (event_type) {
3158 	case HCLGE_VECTOR0_EVENT_RST:
3159 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3160 		break;
3161 	case HCLGE_VECTOR0_EVENT_MBX:
3162 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3163 		break;
3164 	default:
3165 		break;
3166 	}
3167 }
3168 
3169 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3170 {
3171 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3172 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3173 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3174 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3175 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3176 }
3177 
3178 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3179 {
3180 	writel(enable ? 1 : 0, vector->addr);
3181 }
3182 
3183 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3184 {
3185 	struct hclge_dev *hdev = data;
3186 	u32 clearval = 0;
3187 	u32 event_cause;
3188 
3189 	hclge_enable_vector(&hdev->misc_vector, false);
3190 	event_cause = hclge_check_event_cause(hdev, &clearval);
3191 
3192 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3193 	switch (event_cause) {
3194 	case HCLGE_VECTOR0_EVENT_ERR:
3195 		/* we do not know what type of reset is required now. This could
3196 		 * only be decided after we fetch the type of errors which
3197 		 * caused this event. Therefore, we will do below for now:
3198 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3199 		 *    have deferred the type of reset to be used.
3200 		 * 2. Schedule the reset service task.
3201 		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3202 		 *    will fetch the correct type of reset. This would be done
3203 		 *    by first decoding the types of errors.
3204 		 */
3205 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3206 		fallthrough;
3207 	case HCLGE_VECTOR0_EVENT_RST:
3208 		hclge_reset_task_schedule(hdev);
3209 		break;
3210 	case HCLGE_VECTOR0_EVENT_MBX:
3211 		/* If we are here then,
3212 		 * 1. Either we are not handling any mbx task and we are not
3213 		 *    scheduled as well
3214 		 *                        OR
3215 		 * 2. We could be handling a mbx task but nothing more is
3216 		 *    scheduled.
3217 		 * In both cases, we should schedule mbx task as there are more
3218 		 * mbx messages reported by this interrupt.
3219 		 */
3220 		hclge_mbx_task_schedule(hdev);
3221 		break;
3222 	default:
3223 		dev_warn(&hdev->pdev->dev,
3224 			 "received unknown or unhandled event of vector0\n");
3225 		break;
3226 	}
3227 
3228 	hclge_clear_event_cause(hdev, event_cause, clearval);
3229 
3230 	/* Enable interrupt if it is not caused by reset. And when
3231 	 * clearval is equal to 0, it means the interrupt status may be
3232 	 * cleared by hardware before the driver reads the status register.
3233 	 * For this case, the vector0 interrupt should also be enabled.
3234 	 */
3235 	if (!clearval ||
3236 	    event_cause == HCLGE_VECTOR0_EVENT_MBX)
3237 		hclge_enable_vector(&hdev->misc_vector, true);
3239 
3240 	return IRQ_HANDLED;
3241 }
3242 
3243 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3244 {
3245 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3246 		dev_warn(&hdev->pdev->dev,
3247 			 "vector(vector_id %d) has been freed.\n", vector_id);
3248 		return;
3249 	}
3250 
3251 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3252 	hdev->num_msi_left += 1;
3253 	hdev->num_msi_used -= 1;
3254 }
3255 
3256 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3257 {
3258 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3259 
3260 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3261 
3262 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3263 	hdev->vector_status[0] = 0;
3264 
3265 	hdev->num_msi_left -= 1;
3266 	hdev->num_msi_used += 1;
3267 }
3268 
3269 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3270 				      const cpumask_t *mask)
3271 {
3272 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3273 					      affinity_notify);
3274 
3275 	cpumask_copy(&hdev->affinity_mask, mask);
3276 }
3277 
3278 static void hclge_irq_affinity_release(struct kref *ref)
3279 {
3280 }
3281 
3282 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3283 {
3284 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3285 			      &hdev->affinity_mask);
3286 
3287 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3288 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3289 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3290 				  &hdev->affinity_notify);
3291 }
3292 
3293 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3294 {
3295 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3296 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3297 }
3298 
3299 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3300 {
3301 	int ret;
3302 
3303 	hclge_get_misc_vector(hdev);
3304 
3305 	/* this would be explicitly freed in the end */
3306 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3307 		 HCLGE_NAME, pci_name(hdev->pdev));
3308 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3309 			  0, hdev->misc_vector.name, hdev);
3310 	if (ret) {
3311 		hclge_free_vector(hdev, 0);
3312 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3313 			hdev->misc_vector.vector_irq);
3314 	}
3315 
3316 	return ret;
3317 }
3318 
3319 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3320 {
3321 	free_irq(hdev->misc_vector.vector_irq, hdev);
3322 	hclge_free_vector(hdev, 0);
3323 }
3324 
3325 int hclge_notify_client(struct hclge_dev *hdev,
3326 			enum hnae3_reset_notify_type type)
3327 {
3328 	struct hnae3_client *client = hdev->nic_client;
3329 	u16 i;
3330 
3331 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3332 		return 0;
3333 
3334 	if (!client->ops->reset_notify)
3335 		return -EOPNOTSUPP;
3336 
3337 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3338 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3339 		int ret;
3340 
3341 		ret = client->ops->reset_notify(handle, type);
3342 		if (ret) {
3343 			dev_err(&hdev->pdev->dev,
3344 				"notify nic client failed %d(%d)\n", type, ret);
3345 			return ret;
3346 		}
3347 	}
3348 
3349 	return 0;
3350 }
3351 
3352 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3353 				    enum hnae3_reset_notify_type type)
3354 {
3355 	struct hnae3_client *client = hdev->roce_client;
3356 	int ret;
3357 	u16 i;
3358 
3359 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3360 		return 0;
3361 
3362 	if (!client->ops->reset_notify)
3363 		return -EOPNOTSUPP;
3364 
3365 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3366 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3367 
3368 		ret = client->ops->reset_notify(handle, type);
3369 		if (ret) {
3370 			dev_err(&hdev->pdev->dev,
3371 				"notify roce client failed %d(%d)",
3372 				type, ret);
3373 			return ret;
3374 		}
3375 	}
3376 
3377 	return ret;
3378 }
3379 
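/* hclge_reset_wait: poll the reset status register of the current reset
 * type until the reset-in-progress bit is cleared or the wait times out.
 */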
3380 static int hclge_reset_wait(struct hclge_dev *hdev)
3381 {
3382 #define HCLGE_RESET_WATI_MS	100
3383 #define HCLGE_RESET_WAIT_CNT	350
3384 
3385 	u32 val, reg, reg_bit;
3386 	u32 cnt = 0;
3387 
3388 	switch (hdev->reset_type) {
3389 	case HNAE3_IMP_RESET:
3390 		reg = HCLGE_GLOBAL_RESET_REG;
3391 		reg_bit = HCLGE_IMP_RESET_BIT;
3392 		break;
3393 	case HNAE3_GLOBAL_RESET:
3394 		reg = HCLGE_GLOBAL_RESET_REG;
3395 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3396 		break;
3397 	case HNAE3_FUNC_RESET:
3398 		reg = HCLGE_FUN_RST_ING;
3399 		reg_bit = HCLGE_FUN_RST_ING_B;
3400 		break;
3401 	default:
3402 		dev_err(&hdev->pdev->dev,
3403 			"Wait for unsupported reset type: %d\n",
3404 			hdev->reset_type);
3405 		return -EINVAL;
3406 	}
3407 
3408 	val = hclge_read_dev(&hdev->hw, reg);
3409 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3410 		msleep(HCLGE_RESET_WATI_MS);
3411 		val = hclge_read_dev(&hdev->hw, reg);
3412 		cnt++;
3413 	}
3414 
3415 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3416 		dev_warn(&hdev->pdev->dev,
3417 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3418 		return -EBUSY;
3419 	}
3420 
3421 	return 0;
3422 }
3423 
3424 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3425 {
3426 	struct hclge_vf_rst_cmd *req;
3427 	struct hclge_desc desc;
3428 
3429 	req = (struct hclge_vf_rst_cmd *)desc.data;
3430 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3431 	req->dest_vfid = func_id;
3432 
3433 	if (reset)
3434 		req->vf_rst = 0x1;
3435 
3436 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3437 }
3438 
3439 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3440 {
3441 	int i;
3442 
3443 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3444 		struct hclge_vport *vport = &hdev->vport[i];
3445 		int ret;
3446 
3447 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3448 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3449 		if (ret) {
3450 			dev_err(&hdev->pdev->dev,
3451 				"set vf(%u) rst failed %d!\n",
3452 				vport->vport_id, ret);
3453 			return ret;
3454 		}
3455 
3456 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3457 			continue;
3458 
3459 		/* Inform VF to process the reset.
3460 		 * hclge_inform_reset_assert_to_vf may fail if VF
3461 		 * driver is not loaded.
3462 		 */
3463 		ret = hclge_inform_reset_assert_to_vf(vport);
3464 		if (ret)
3465 			dev_warn(&hdev->pdev->dev,
3466 				 "inform reset to vf(%u) failed %d!\n",
3467 				 vport->vport_id, ret);
3468 	}
3469 
3470 	return 0;
3471 }
3472 
3473 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3474 {
3475 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3476 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3477 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3478 		return;
3479 
3480 	hclge_mbx_handler(hdev);
3481 
3482 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3483 }
3484 
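/* hclge_func_reset_sync_vf: poll the VF reset ready status (while still
 * servicing mailbox events) until all VFs report ready or the sync
 * times out.
 */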
3485 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3486 {
3487 	struct hclge_pf_rst_sync_cmd *req;
3488 	struct hclge_desc desc;
3489 	int cnt = 0;
3490 	int ret;
3491 
3492 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3493 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3494 
3495 	do {
3496 		/* VF needs to down its netdev by mbx during PF or FLR reset */
3497 		hclge_mailbox_service_task(hdev);
3498 
3499 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3500 		/* for compatibility with old firmware, wait
3501 		 * 100 ms for the VF to stop IO
3502 		 */
3503 		if (ret == -EOPNOTSUPP) {
3504 			msleep(HCLGE_RESET_SYNC_TIME);
3505 			return;
3506 		} else if (ret) {
3507 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3508 				 ret);
3509 			return;
3510 		} else if (req->all_vf_ready) {
3511 			return;
3512 		}
3513 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3514 		hclge_cmd_reuse_desc(&desc, true);
3515 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3516 
3517 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3518 }
3519 
3520 void hclge_report_hw_error(struct hclge_dev *hdev,
3521 			   enum hnae3_hw_error_type type)
3522 {
3523 	struct hnae3_client *client = hdev->nic_client;
3524 	u16 i;
3525 
3526 	if (!client || !client->ops->process_hw_error ||
3527 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3528 		return;
3529 
3530 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3531 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3532 }
3533 
3534 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3535 {
3536 	u32 reg_val;
3537 
3538 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3540 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3541 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3542 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3543 	}
3544 
3545 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3546 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3547 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3548 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3549 	}
3550 }
3551 
3552 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3553 {
3554 	struct hclge_desc desc;
3555 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3556 	int ret;
3557 
3558 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3559 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3560 	req->fun_reset_vfid = func_id;
3561 
3562 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3563 	if (ret)
3564 		dev_err(&hdev->pdev->dev,
3565 			"send function reset cmd fail, status = %d\n", ret);
3566 
3567 	return ret;
3568 }
3569 
3570 static void hclge_do_reset(struct hclge_dev *hdev)
3571 {
3572 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3573 	struct pci_dev *pdev = hdev->pdev;
3574 	u32 val;
3575 
3576 	if (hclge_get_hw_reset_stat(handle)) {
3577 		dev_info(&pdev->dev, "hardware reset not finished\n");
3578 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3579 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3580 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3581 		return;
3582 	}
3583 
3584 	switch (hdev->reset_type) {
3585 	case HNAE3_GLOBAL_RESET:
3586 		dev_info(&pdev->dev, "global reset requested\n");
3587 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3588 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3589 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3590 		break;
3591 	case HNAE3_FUNC_RESET:
3592 		dev_info(&pdev->dev, "PF reset requested\n");
3593 		/* schedule again to check later */
3594 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3595 		hclge_reset_task_schedule(hdev);
3596 		break;
3597 	default:
3598 		dev_warn(&pdev->dev,
3599 			 "unsupported reset type: %d\n", hdev->reset_type);
3600 		break;
3601 	}
3602 }
3603 
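/* Reset levels are prioritized IMP > GLOBAL > FUNC > FLR: the highest
 * pending level is returned and the GLOBAL/FUNC bits it supersedes are
 * cleared along with it. A newly derived level lower than the reset
 * currently being handled is reported as HNAE3_NONE_RESET.
 */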
3604 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3605 						   unsigned long *addr)
3606 {
3607 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3608 	struct hclge_dev *hdev = ae_dev->priv;
3609 
3610 	/* first, resolve any unknown reset type to the known type(s) */
3611 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3612 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3613 					HCLGE_MISC_VECTOR_INT_STS);
3614 		/* we will intentionally ignore any errors from this function
3615 		 * as we will end up in *some* reset request in any case
3616 		 */
3617 		if (hclge_handle_hw_msix_error(hdev, addr))
3618 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3619 				 msix_sts_reg);
3620 
3621 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3622 		/* We deferred the clearing of the error event which caused
3623 		 * the interrupt, since it was not possible to do that in
3624 		 * interrupt context (and this is the reason we introduced the
3625 		 * new UNKNOWN reset type). Now that the errors have been
3626 		 * handled and cleared in hardware, we can safely re-enable
3627 		 * interrupts. This is an exception to the norm.
3628 		 */
3629 		hclge_enable_vector(&hdev->misc_vector, true);
3630 	}
3631 
3632 	/* return the highest priority reset level amongst all */
3633 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3634 		rst_level = HNAE3_IMP_RESET;
3635 		clear_bit(HNAE3_IMP_RESET, addr);
3636 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3637 		clear_bit(HNAE3_FUNC_RESET, addr);
3638 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3639 		rst_level = HNAE3_GLOBAL_RESET;
3640 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3641 		clear_bit(HNAE3_FUNC_RESET, addr);
3642 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3643 		rst_level = HNAE3_FUNC_RESET;
3644 		clear_bit(HNAE3_FUNC_RESET, addr);
3645 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3646 		rst_level = HNAE3_FLR_RESET;
3647 		clear_bit(HNAE3_FLR_RESET, addr);
3648 	}
3649 
3650 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3651 	    rst_level < hdev->reset_type)
3652 		return HNAE3_NONE_RESET;
3653 
3654 	return rst_level;
3655 }
3656 
3657 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3658 {
3659 	u32 clearval = 0;
3660 
3661 	switch (hdev->reset_type) {
3662 	case HNAE3_IMP_RESET:
3663 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3664 		break;
3665 	case HNAE3_GLOBAL_RESET:
3666 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3667 		break;
3668 	default:
3669 		break;
3670 	}
3671 
3672 	if (!clearval)
3673 		return;
3674 
3675 	/* For revision 0x20, the reset interrupt source
3676 	 * can only be cleared after the hardware reset is done
3677 	 */
3678 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3679 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3680 				clearval);
3681 
3682 	hclge_enable_vector(&hdev->misc_vector, true);
3683 }
3684 
3685 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3686 {
3687 	u32 reg_val;
3688 
3689 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3690 	if (enable)
3691 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3692 	else
3693 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3694 
3695 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3696 }
3697 
3698 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3699 {
3700 	int ret;
3701 
3702 	ret = hclge_set_all_vf_rst(hdev, true);
3703 	if (ret)
3704 		return ret;
3705 
3706 	hclge_func_reset_sync_vf(hdev);
3707 
3708 	return 0;
3709 }
3710 
3711 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3712 {
3713 	u32 reg_val;
3714 	int ret = 0;
3715 
3716 	switch (hdev->reset_type) {
3717 	case HNAE3_FUNC_RESET:
3718 		ret = hclge_func_reset_notify_vf(hdev);
3719 		if (ret)
3720 			return ret;
3721 
3722 		ret = hclge_func_reset_cmd(hdev, 0);
3723 		if (ret) {
3724 			dev_err(&hdev->pdev->dev,
3725 				"asserting function reset fail %d!\n", ret);
3726 			return ret;
3727 		}
3728 
3729 		/* After performing PF reset, it is not necessary to do the
3730 		 * mailbox handling or send any command to firmware, because
3731 		 * any mailbox handling or command to firmware is only valid
3732 		 * after hclge_cmd_init is called.
3733 		 */
3734 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3735 		hdev->rst_stats.pf_rst_cnt++;
3736 		break;
3737 	case HNAE3_FLR_RESET:
3738 		ret = hclge_func_reset_notify_vf(hdev);
3739 		if (ret)
3740 			return ret;
3741 		break;
3742 	case HNAE3_IMP_RESET:
3743 		hclge_handle_imp_error(hdev);
3744 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3745 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3746 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3747 		break;
3748 	default:
3749 		break;
3750 	}
3751 
3752 	/* inform hardware that preparatory work is done */
3753 	msleep(HCLGE_RESET_SYNC_TIME);
3754 	hclge_reset_handshake(hdev, true);
3755 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3756 
3757 	return ret;
3758 }
3759 
3760 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3761 {
3762 #define MAX_RESET_FAIL_CNT 5
3763 
3764 	if (hdev->reset_pending) {
3765 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3766 			 hdev->reset_pending);
3767 		return true;
3768 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3769 		   HCLGE_RESET_INT_M) {
3770 		dev_info(&hdev->pdev->dev,
3771 			 "reset failed because new reset interrupt\n");
3772 		hclge_clear_reset_cause(hdev);
3773 		return false;
3774 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3775 		hdev->rst_stats.reset_fail_cnt++;
3776 		set_bit(hdev->reset_type, &hdev->reset_pending);
3777 		dev_info(&hdev->pdev->dev,
3778 			 "re-schedule reset task(%u)\n",
3779 			 hdev->rst_stats.reset_fail_cnt);
3780 		return true;
3781 	}
3782 
3783 	hclge_clear_reset_cause(hdev);
3784 
3785 	/* recover the handshake status when the reset fails */
3786 	hclge_reset_handshake(hdev, true);
3787 
3788 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3789 
3790 	hclge_dbg_dump_rst_info(hdev);
3791 
3792 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3793 
3794 	return false;
3795 }
3796 
3797 static int hclge_set_rst_done(struct hclge_dev *hdev)
3798 {
3799 	struct hclge_pf_rst_done_cmd *req;
3800 	struct hclge_desc desc;
3801 	int ret;
3802 
3803 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3804 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3805 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3806 
3807 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3808 	/* To be compatible with the old firmware, which does not support
3809 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3810 	 * return success
3811 	 */
3812 	if (ret == -EOPNOTSUPP) {
3813 		dev_warn(&hdev->pdev->dev,
3814 			 "current firmware does not support command(0x%x)!\n",
3815 			 HCLGE_OPC_PF_RST_DONE);
3816 		return 0;
3817 	} else if (ret) {
3818 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3819 			ret);
3820 	}
3821 
3822 	return ret;
3823 }
3824 
3825 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3826 {
3827 	int ret = 0;
3828 
3829 	switch (hdev->reset_type) {
3830 	case HNAE3_FUNC_RESET:
3831 	case HNAE3_FLR_RESET:
3832 		ret = hclge_set_all_vf_rst(hdev, false);
3833 		break;
3834 	case HNAE3_GLOBAL_RESET:
3835 	case HNAE3_IMP_RESET:
3836 		ret = hclge_set_rst_done(hdev);
3837 		break;
3838 	default:
3839 		break;
3840 	}
3841 
3842 	/* clear the handshake status after re-initialization is done */
3843 	hclge_reset_handshake(hdev, false);
3844 
3845 	return ret;
3846 }
3847 
3848 static int hclge_reset_stack(struct hclge_dev *hdev)
3849 {
3850 	int ret;
3851 
3852 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3853 	if (ret)
3854 		return ret;
3855 
3856 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3857 	if (ret)
3858 		return ret;
3859 
3860 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3861 }
3862 
3863 static int hclge_reset_prepare(struct hclge_dev *hdev)
3864 {
3865 	int ret;
3866 
3867 	hdev->rst_stats.reset_cnt++;
3868 	/* perform reset of the stack & ae device for a client */
3869 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3870 	if (ret)
3871 		return ret;
3872 
3873 	rtnl_lock();
3874 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3875 	rtnl_unlock();
3876 	if (ret)
3877 		return ret;
3878 
3879 	return hclge_reset_prepare_wait(hdev);
3880 }
3881 
3882 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3883 {
3884 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3885 	enum hnae3_reset_type reset_level;
3886 	int ret;
3887 
3888 	hdev->rst_stats.hw_reset_done_cnt++;
3889 
3890 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3891 	if (ret)
3892 		return ret;
3893 
3894 	rtnl_lock();
3895 	ret = hclge_reset_stack(hdev);
3896 	rtnl_unlock();
3897 	if (ret)
3898 		return ret;
3899 
3900 	hclge_clear_reset_cause(hdev);
3901 
3902 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3903 	/* ignore the RoCE notify error if it has already failed
3904 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3905 	 */
3906 	if (ret &&
3907 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3908 		return ret;
3909 
3910 	ret = hclge_reset_prepare_up(hdev);
3911 	if (ret)
3912 		return ret;
3913 
3914 	rtnl_lock();
3915 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3916 	rtnl_unlock();
3917 	if (ret)
3918 		return ret;
3919 
3920 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3921 	if (ret)
3922 		return ret;
3923 
3924 	hdev->last_reset_time = jiffies;
3925 	hdev->rst_stats.reset_fail_cnt = 0;
3926 	hdev->rst_stats.reset_done_cnt++;
3927 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3928 
3929 	/* if default_reset_request has a higher level reset request,
3930 	 * it should be handled as soon as possible, since some errors
3931 	 * need this kind of reset to be fixed.
3932 	 */
3933 	reset_level = hclge_get_reset_level(ae_dev,
3934 					    &hdev->default_reset_request);
3935 	if (reset_level != HNAE3_NONE_RESET)
3936 		set_bit(reset_level, &hdev->reset_request);
3937 
3938 	return 0;
3939 }
3940 
3941 static void hclge_reset(struct hclge_dev *hdev)
3942 {
3943 	if (hclge_reset_prepare(hdev))
3944 		goto err_reset;
3945 
3946 	if (hclge_reset_wait(hdev))
3947 		goto err_reset;
3948 
3949 	if (hclge_reset_rebuild(hdev))
3950 		goto err_reset;
3951 
3952 	return;
3953 
3954 err_reset:
3955 	if (hclge_reset_err_handle(hdev))
3956 		hclge_reset_task_schedule(hdev);
3957 }
3958 
3959 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3960 {
3961 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3962 	struct hclge_dev *hdev = ae_dev->priv;
3963 
3964 	/* We might end up getting called broadly because of the two cases below:
3965 	 * 1. A recoverable error was conveyed through APEI and the only way to
3966 	 *    bring back normalcy is to reset.
3967 	 * 2. A new reset request from the stack due to a timeout.
3968 	 *
3969 	 * Check if this is a new reset request and we are not here just because
3970 	 * the last reset attempt did not succeed and the watchdog hit us again.
3971 	 * We will know this if the last reset request did not occur very
3972 	 * recently (watchdog timer = 5*HZ; let us check after a sufficiently
3973 	 * large time, say 4*5*HZ). In case of a new request we reset the
3974 	 * "reset level" to PF reset. And if it is a repeat reset request of the
3975 	 * most recent one, then we want to make sure we throttle the reset
3976 	 * request. Therefore, we will not allow it again before 3*HZ has passed.
3977 	 */
3978 
3979 	if (time_before(jiffies, (hdev->last_reset_time +
3980 				  HCLGE_RESET_INTERVAL))) {
3981 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3982 		return;
3983 	}
3984 
3985 	if (hdev->default_reset_request) {
3986 		hdev->reset_level =
3987 			hclge_get_reset_level(ae_dev,
3988 					      &hdev->default_reset_request);
3989 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3990 		hdev->reset_level = HNAE3_FUNC_RESET;
3991 	}
3992 
3993 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3994 		 hdev->reset_level);
3995 
3996 	/* request reset & schedule reset task */
3997 	set_bit(hdev->reset_level, &hdev->reset_request);
3998 	hclge_reset_task_schedule(hdev);
3999 
4000 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4001 		hdev->reset_level++;
4002 }
4003 
4004 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4005 					enum hnae3_reset_type rst_type)
4006 {
4007 	struct hclge_dev *hdev = ae_dev->priv;
4008 
4009 	set_bit(rst_type, &hdev->default_reset_request);
4010 }
4011 
4012 static void hclge_reset_timer(struct timer_list *t)
4013 {
4014 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4015 
4016 	/* if default_reset_request has no value, it means that this reset
4017 	 * request has already been handled, so just return here
4018 	 */
4019 	if (!hdev->default_reset_request)
4020 		return;
4021 
4022 	dev_info(&hdev->pdev->dev,
4023 		 "triggering reset in reset timer\n");
4024 	hclge_reset_event(hdev->pdev, NULL);
4025 }
4026 
4027 static void hclge_reset_subtask(struct hclge_dev *hdev)
4028 {
4029 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4030 
4031 	/* check if there is any ongoing reset in the hardware. This status can
4032 	 * be checked from reset_pending. If there is, then we need to wait for
4033 	 * the hardware to complete the reset.
4034 	 *    a. If we are able to figure out in reasonable time that the
4035 	 *       hardware has fully reset, then we can proceed with the driver
4036 	 *       and client reset.
4037 	 *    b. else, we can come back later to check this status, so
4038 	 *       reschedule now.
4039 	 */
4040 	hdev->last_reset_time = jiffies;
4041 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4042 	if (hdev->reset_type != HNAE3_NONE_RESET)
4043 		hclge_reset(hdev);
4044 
4045 	/* check if we got any *new* reset requests to be honored */
4046 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4047 	if (hdev->reset_type != HNAE3_NONE_RESET)
4048 		hclge_do_reset(hdev);
4049 
4050 	hdev->reset_type = HNAE3_NONE_RESET;
4051 }
4052 
4053 static void hclge_reset_service_task(struct hclge_dev *hdev)
4054 {
4055 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4056 		return;
4057 
4058 	down(&hdev->reset_sem);
4059 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4060 
4061 	hclge_reset_subtask(hdev);
4062 
4063 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4064 	up(&hdev->reset_sem);
4065 }
4066 
4067 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4068 {
4069 	int i;
4070 
4071 	/* start from vport 1, since the PF (vport 0) is always alive */
4072 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4073 		struct hclge_vport *vport = &hdev->vport[i];
4074 
4075 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4076 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4077 
4078 		/* If vf is not alive, set to default value */
4079 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4080 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4081 	}
4082 }
4083 
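/* Runs roughly once per second: the link, MAC table and promisc mode are
 * synced on every pass, while the heavier work (vport alive check, stats,
 * port info, vlan and aRFS expiry) is skipped and the task rescheduled for
 * the remaining time when the previous pass finished less than HZ ago.
 */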
4084 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4085 {
4086 	unsigned long delta = round_jiffies_relative(HZ);
4087 
4088 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4089 		return;
4090 
4091 	/* Always handle the link updating to make sure link state is
4092 	 * updated when it is triggered by mbx.
4093 	 */
4094 	hclge_update_link_status(hdev);
4095 	hclge_sync_mac_table(hdev);
4096 	hclge_sync_promisc_mode(hdev);
4097 
4098 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4099 		delta = jiffies - hdev->last_serv_processed;
4100 
4101 		if (delta < round_jiffies_relative(HZ)) {
4102 			delta = round_jiffies_relative(HZ) - delta;
4103 			goto out;
4104 		}
4105 	}
4106 
4107 	hdev->serv_processed_cnt++;
4108 	hclge_update_vport_alive(hdev);
4109 
4110 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4111 		hdev->last_serv_processed = jiffies;
4112 		goto out;
4113 	}
4114 
4115 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4116 		hclge_update_stats_for_all(hdev);
4117 
4118 	hclge_update_port_info(hdev);
4119 	hclge_sync_vlan_filter(hdev);
4120 
4121 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4122 		hclge_rfs_filter_expire(hdev);
4123 
4124 	hdev->last_serv_processed = jiffies;
4125 
4126 out:
4127 	hclge_task_schedule(hdev, delta);
4128 }
4129 
4130 static void hclge_service_task(struct work_struct *work)
4131 {
4132 	struct hclge_dev *hdev =
4133 		container_of(work, struct hclge_dev, service_task.work);
4134 
4135 	hclge_reset_service_task(hdev);
4136 	hclge_mailbox_service_task(hdev);
4137 	hclge_periodic_service_task(hdev);
4138 
4139 	/* Handle reset and mbx again in case periodical task delays the
4140 	 * handling by calling hclge_task_schedule() in
4141 	 * hclge_periodic_service_task().
4142 	 */
4143 	hclge_reset_service_task(hdev);
4144 	hclge_mailbox_service_task(hdev);
4145 }
4146 
4147 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4148 {
4149 	/* VF handle has no client */
4150 	if (!handle->client)
4151 		return container_of(handle, struct hclge_vport, nic);
4152 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4153 		return container_of(handle, struct hclge_vport, roce);
4154 	else
4155 		return container_of(handle, struct hclge_vport, nic);
4156 }
4157 
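/* Fill in the irq number and the IO address of the interrupt control
 * register for the given vector index: the first
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 vectors are addressed linearly from
 * HCLGE_VECTOR_REG_BASE, while higher vectors live in the extended region
 * at HCLGE_VECTOR_EXT_REG_BASE, addressed by the quotient and remainder of
 * (idx - 1) divided by HCLGE_PF_MAX_VECTOR_NUM_DEV_V2; e.g. idx 65 maps to
 * the first slot of the extended region.
 */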
4158 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4159 				  struct hnae3_vector_info *vector_info)
4160 {
4161 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4162 
4163 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4164 
4165 	/* an extended offset is needed to configure vectors >= 64 */
4166 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4167 		vector_info->io_addr = hdev->hw.io_base +
4168 				HCLGE_VECTOR_REG_BASE +
4169 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4170 	else
4171 		vector_info->io_addr = hdev->hw.io_base +
4172 				HCLGE_VECTOR_EXT_REG_BASE +
4173 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4174 				HCLGE_VECTOR_REG_OFFSET_H +
4175 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4176 				HCLGE_VECTOR_REG_OFFSET;
4177 
4178 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4179 	hdev->vector_irq[idx] = vector_info->vector;
4180 }
4181 
4182 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4183 			    struct hnae3_vector_info *vector_info)
4184 {
4185 	struct hclge_vport *vport = hclge_get_vport(handle);
4186 	struct hnae3_vector_info *vector = vector_info;
4187 	struct hclge_dev *hdev = vport->back;
4188 	int alloc = 0;
4189 	u16 i = 0;
4190 	u16 j;
4191 
4192 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4193 	vector_num = min(hdev->num_msi_left, vector_num);
4194 
4195 	for (j = 0; j < vector_num; j++) {
4196 		while (++i < hdev->num_nic_msi) {
4197 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4198 				hclge_get_vector_info(hdev, i, vector);
4199 				vector++;
4200 				alloc++;
4201 
4202 				break;
4203 			}
4204 		}
4205 	}
4206 	hdev->num_msi_left -= alloc;
4207 	hdev->num_msi_used += alloc;
4208 
4209 	return alloc;
4210 }
4211 
4212 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4213 {
4214 	int i;
4215 
4216 	for (i = 0; i < hdev->num_msi; i++)
4217 		if (vector == hdev->vector_irq[i])
4218 			return i;
4219 
4220 	return -EINVAL;
4221 }
4222 
4223 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4224 {
4225 	struct hclge_vport *vport = hclge_get_vport(handle);
4226 	struct hclge_dev *hdev = vport->back;
4227 	int vector_id;
4228 
4229 	vector_id = hclge_get_vector_index(hdev, vector);
4230 	if (vector_id < 0) {
4231 		dev_err(&hdev->pdev->dev,
4232 			"Get vector index fail. vector = %d\n", vector);
4233 		return vector_id;
4234 	}
4235 
4236 	hclge_free_vector(hdev, vector_id);
4237 
4238 	return 0;
4239 }
4240 
4241 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4242 {
4243 	return HCLGE_RSS_KEY_SIZE;
4244 }
4245 
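/* The RSS hash key is longer than a single descriptor can carry, so it is
 * programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; hash_config carries
 * both the hash algorithm and the chunk offset of each piece.
 */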
4246 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4247 				  const u8 hfunc, const u8 *key)
4248 {
4249 	struct hclge_rss_config_cmd *req;
4250 	unsigned int key_offset = 0;
4251 	struct hclge_desc desc;
4252 	int key_counts;
4253 	int key_size;
4254 	int ret;
4255 
4256 	key_counts = HCLGE_RSS_KEY_SIZE;
4257 	req = (struct hclge_rss_config_cmd *)desc.data;
4258 
4259 	while (key_counts) {
4260 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4261 					   false);
4262 
4263 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4264 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4265 
4266 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4267 		memcpy(req->hash_key,
4268 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4269 
4270 		key_counts -= key_size;
4271 		key_offset++;
4272 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4273 		if (ret) {
4274 			dev_err(&hdev->pdev->dev,
4275 				"Configure RSS config fail, status = %d\n",
4276 				ret);
4277 			return ret;
4278 		}
4279 	}
4280 	return 0;
4281 }
4282 
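/* Each descriptor programs HCLGE_RSS_CFG_TBL_SIZE indirection entries: the
 * low 8 bits of every queue id go into rss_qid_l[], and the single bit
 * above HCLGE_RSS_CFG_TBL_BW_L is packed into the rss_qid_h[] bitmap so
 * that queue ids wider than one byte can be expressed (e.g., assuming
 * HCLGE_RSS_CFG_TBL_BW_L is 8, queue id 260 stores 0x04 in rss_qid_l[] and
 * sets its bit in rss_qid_h[]).
 */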
4283 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4284 {
4285 	struct hclge_rss_indirection_table_cmd *req;
4286 	struct hclge_desc desc;
4287 	int rss_cfg_tbl_num;
4288 	u8 rss_msb_oft;
4289 	u8 rss_msb_val;
4290 	int ret;
4291 	u16 qid;
4292 	int i;
4293 	u32 j;
4294 
4295 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4296 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4297 			  HCLGE_RSS_CFG_TBL_SIZE;
4298 
4299 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4300 		hclge_cmd_setup_basic_desc
4301 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4302 
4303 		req->start_table_index =
4304 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4305 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4306 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4307 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4308 			req->rss_qid_l[j] = qid & 0xff;
4309 			rss_msb_oft =
4310 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4311 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4312 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4313 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4314 		}
4315 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4316 		if (ret) {
4317 			dev_err(&hdev->pdev->dev,
4318 				"Configure rss indir table fail, status = %d\n",
4319 				ret);
4320 			return ret;
4321 		}
4322 	}
4323 	return 0;
4324 }
4325 
4326 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4327 				 u16 *tc_size, u16 *tc_offset)
4328 {
4329 	struct hclge_rss_tc_mode_cmd *req;
4330 	struct hclge_desc desc;
4331 	int ret;
4332 	int i;
4333 
4334 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4335 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4336 
4337 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4338 		u16 mode = 0;
4339 
4340 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4341 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4342 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4343 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4344 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4345 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4346 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4347 
4348 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4349 	}
4350 
4351 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4352 	if (ret)
4353 		dev_err(&hdev->pdev->dev,
4354 			"Configure rss tc mode fail, status = %d\n", ret);
4355 
4356 	return ret;
4357 }
4358 
4359 static void hclge_get_rss_type(struct hclge_vport *vport)
4360 {
4361 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4362 	    vport->rss_tuple_sets.ipv4_udp_en ||
4363 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4364 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4365 	    vport->rss_tuple_sets.ipv6_udp_en ||
4366 	    vport->rss_tuple_sets.ipv6_sctp_en)
4367 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4368 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4369 		 vport->rss_tuple_sets.ipv6_fragment_en)
4370 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4371 	else
4372 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4373 }
4374 
4375 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4376 {
4377 	struct hclge_rss_input_tuple_cmd *req;
4378 	struct hclge_desc desc;
4379 	int ret;
4380 
4381 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4382 
4383 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4384 
4385 	/* Get the tuple cfg from pf */
4386 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4387 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4388 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4389 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4390 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4391 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4392 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4393 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4394 	hclge_get_rss_type(&hdev->vport[0]);
4395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4396 	if (ret)
4397 		dev_err(&hdev->pdev->dev,
4398 			"Configure rss input fail, status = %d\n", ret);
4399 	return ret;
4400 }
4401 
4402 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4403 			 u8 *key, u8 *hfunc)
4404 {
4405 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4406 	struct hclge_vport *vport = hclge_get_vport(handle);
4407 	int i;
4408 
4409 	/* Get hash algorithm */
4410 	if (hfunc) {
4411 		switch (vport->rss_algo) {
4412 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4413 			*hfunc = ETH_RSS_HASH_TOP;
4414 			break;
4415 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4416 			*hfunc = ETH_RSS_HASH_XOR;
4417 			break;
4418 		default:
4419 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4420 			break;
4421 		}
4422 	}
4423 
4424 	/* Get the RSS Key required by the user */
4425 	if (key)
4426 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4427 
4428 	/* Get indirect table */
4429 	if (indir)
4430 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4431 			indir[i] =  vport->rss_indirection_tbl[i];
4432 
4433 	return 0;
4434 }
4435 
4436 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4437 			 const  u8 *key, const  u8 hfunc)
4438 {
4439 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4440 	struct hclge_vport *vport = hclge_get_vport(handle);
4441 	struct hclge_dev *hdev = vport->back;
4442 	u8 hash_algo;
4443 	int ret, i;
4444 
4445 	/* Set the RSS hash key if specified by the user */
4446 	if (key) {
4447 		switch (hfunc) {
4448 		case ETH_RSS_HASH_TOP:
4449 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4450 			break;
4451 		case ETH_RSS_HASH_XOR:
4452 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4453 			break;
4454 		case ETH_RSS_HASH_NO_CHANGE:
4455 			hash_algo = vport->rss_algo;
4456 			break;
4457 		default:
4458 			return -EINVAL;
4459 		}
4460 
4461 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4462 		if (ret)
4463 			return ret;
4464 
4465 		/* Update the shadow RSS key with the user specified key */
4466 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4467 		vport->rss_algo = hash_algo;
4468 	}
4469 
4470 	/* Update the shadow RSS table with user specified qids */
4471 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4472 		vport->rss_indirection_tbl[i] = indir[i];
4473 
4474 	/* Update the hardware */
4475 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4476 }
4477 
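/* Translate the ethtool RXH_* flags into the hardware tuple bits, e.g.
 * RXH_IP_SRC -> HCLGE_S_IP_BIT and RXH_L4_B_0_1 -> HCLGE_S_PORT_BIT;
 * SCTP flows additionally enable HCLGE_V_TAG_BIT.
 */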
4478 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4479 {
4480 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4481 
4482 	if (nfc->data & RXH_L4_B_2_3)
4483 		hash_sets |= HCLGE_D_PORT_BIT;
4484 	else
4485 		hash_sets &= ~HCLGE_D_PORT_BIT;
4486 
4487 	if (nfc->data & RXH_IP_SRC)
4488 		hash_sets |= HCLGE_S_IP_BIT;
4489 	else
4490 		hash_sets &= ~HCLGE_S_IP_BIT;
4491 
4492 	if (nfc->data & RXH_IP_DST)
4493 		hash_sets |= HCLGE_D_IP_BIT;
4494 	else
4495 		hash_sets &= ~HCLGE_D_IP_BIT;
4496 
4497 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4498 		hash_sets |= HCLGE_V_TAG_BIT;
4499 
4500 	return hash_sets;
4501 }
4502 
4503 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4504 				    struct ethtool_rxnfc *nfc,
4505 				    struct hclge_rss_input_tuple_cmd *req)
4506 {
4507 	struct hclge_dev *hdev = vport->back;
4508 	u8 tuple_sets;
4509 
4510 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4511 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4512 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4513 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4514 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4515 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4516 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4517 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4518 
4519 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4520 	switch (nfc->flow_type) {
4521 	case TCP_V4_FLOW:
4522 		req->ipv4_tcp_en = tuple_sets;
4523 		break;
4524 	case TCP_V6_FLOW:
4525 		req->ipv6_tcp_en = tuple_sets;
4526 		break;
4527 	case UDP_V4_FLOW:
4528 		req->ipv4_udp_en = tuple_sets;
4529 		break;
4530 	case UDP_V6_FLOW:
4531 		req->ipv6_udp_en = tuple_sets;
4532 		break;
4533 	case SCTP_V4_FLOW:
4534 		req->ipv4_sctp_en = tuple_sets;
4535 		break;
4536 	case SCTP_V6_FLOW:
4537 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4538 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4539 			return -EINVAL;
4540 
4541 		req->ipv6_sctp_en = tuple_sets;
4542 		break;
4543 	case IPV4_FLOW:
4544 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4545 		break;
4546 	case IPV6_FLOW:
4547 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4548 		break;
4549 	default:
4550 		return -EINVAL;
4551 	}
4552 
4553 	return 0;
4554 }
4555 
4556 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4557 			       struct ethtool_rxnfc *nfc)
4558 {
4559 	struct hclge_vport *vport = hclge_get_vport(handle);
4560 	struct hclge_dev *hdev = vport->back;
4561 	struct hclge_rss_input_tuple_cmd *req;
4562 	struct hclge_desc desc;
4563 	int ret;
4564 
4565 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4566 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4567 		return -EINVAL;
4568 
4569 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4570 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4571 
4572 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4573 	if (ret) {
4574 		dev_err(&hdev->pdev->dev,
4575 			"failed to init rss tuple cmd, ret = %d\n", ret);
4576 		return ret;
4577 	}
4578 
4579 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4580 	if (ret) {
4581 		dev_err(&hdev->pdev->dev,
4582 			"Set rss tuple fail, status = %d\n", ret);
4583 		return ret;
4584 	}
4585 
4586 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4587 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4588 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4589 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4590 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4591 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4592 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4593 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4594 	hclge_get_rss_type(vport);
4595 	return 0;
4596 }
4597 
4598 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4599 				     u8 *tuple_sets)
4600 {
4601 	switch (flow_type) {
4602 	case TCP_V4_FLOW:
4603 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4604 		break;
4605 	case UDP_V4_FLOW:
4606 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4607 		break;
4608 	case TCP_V6_FLOW:
4609 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4610 		break;
4611 	case UDP_V6_FLOW:
4612 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4613 		break;
4614 	case SCTP_V4_FLOW:
4615 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4616 		break;
4617 	case SCTP_V6_FLOW:
4618 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4619 		break;
4620 	case IPV4_FLOW:
4621 	case IPV6_FLOW:
4622 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4623 		break;
4624 	default:
4625 		return -EINVAL;
4626 	}
4627 
4628 	return 0;
4629 }
4630 
4631 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4632 {
4633 	u64 tuple_data = 0;
4634 
4635 	if (tuple_sets & HCLGE_D_PORT_BIT)
4636 		tuple_data |= RXH_L4_B_2_3;
4637 	if (tuple_sets & HCLGE_S_PORT_BIT)
4638 		tuple_data |= RXH_L4_B_0_1;
4639 	if (tuple_sets & HCLGE_D_IP_BIT)
4640 		tuple_data |= RXH_IP_DST;
4641 	if (tuple_sets & HCLGE_S_IP_BIT)
4642 		tuple_data |= RXH_IP_SRC;
4643 
4644 	return tuple_data;
4645 }
4646 
4647 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4648 			       struct ethtool_rxnfc *nfc)
4649 {
4650 	struct hclge_vport *vport = hclge_get_vport(handle);
4651 	u8 tuple_sets;
4652 	int ret;
4653 
4654 	nfc->data = 0;
4655 
4656 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4657 	if (ret || !tuple_sets)
4658 		return ret;
4659 
4660 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4661 
4662 	return 0;
4663 }
4664 
4665 static int hclge_get_tc_size(struct hnae3_handle *handle)
4666 {
4667 	struct hclge_vport *vport = hclge_get_vport(handle);
4668 	struct hclge_dev *hdev = vport->back;
4669 
4670 	return hdev->pf_rss_size_max;
4671 }
4672 
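/* Program the per-TC RSS mode. The tc_size written to hardware is
 * log2(roundup_pow_of_two(rss_size)); for example, an rss_size of 10
 * rounds up to 16 and is programmed as tc_size = 4.
 */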
4673 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4674 {
4675 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4676 	struct hclge_vport *vport = hdev->vport;
4677 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4678 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4679 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4680 	struct hnae3_tc_info *tc_info;
4681 	u16 roundup_size;
4682 	u16 rss_size;
4683 	int i;
4684 
4685 	tc_info = &vport->nic.kinfo.tc_info;
4686 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4687 		rss_size = tc_info->tqp_count[i];
4688 		tc_valid[i] = 0;
4689 
4690 		if (!(hdev->hw_tc_map & BIT(i)))
4691 			continue;
4692 
4693 		/* tc_size set to hardware is the log2 of the roundup power of
4694 		 * two of rss_size; the actual queue size is limited by the
4695 		 * indirection table.
4696 		 */
4697 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4698 		    rss_size == 0) {
4699 			dev_err(&hdev->pdev->dev,
4700 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4701 				rss_size);
4702 			return -EINVAL;
4703 		}
4704 
4705 		roundup_size = roundup_pow_of_two(rss_size);
4706 		roundup_size = ilog2(roundup_size);
4707 
4708 		tc_valid[i] = 1;
4709 		tc_size[i] = roundup_size;
4710 		tc_offset[i] = tc_info->tqp_offset[i];
4711 	}
4712 
4713 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4714 }
4715 
4716 int hclge_rss_init_hw(struct hclge_dev *hdev)
4717 {
4718 	struct hclge_vport *vport = hdev->vport;
4719 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4720 	u8 *key = vport[0].rss_hash_key;
4721 	u8 hfunc = vport[0].rss_algo;
4722 	int ret;
4723 
4724 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4725 	if (ret)
4726 		return ret;
4727 
4728 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4729 	if (ret)
4730 		return ret;
4731 
4732 	ret = hclge_set_rss_input_tuple(hdev);
4733 	if (ret)
4734 		return ret;
4735 
4736 	return hclge_init_rss_tc_mode(hdev);
4737 }
4738 
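/* Initialize the RSS indirection table of every vport round-robin, so that
 * entry i maps to queue (i % alloc_rss_size); e.g. with an alloc_rss_size
 * of 4 the table repeats the pattern 0, 1, 2, 3.
 */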
4739 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4740 {
4741 	struct hclge_vport *vport = hdev->vport;
4742 	int i, j;
4743 
4744 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4745 		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4746 			vport[j].rss_indirection_tbl[i] =
4747 				i % vport[j].alloc_rss_size;
4748 	}
4749 }
4750 
4751 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4752 {
4753 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4754 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4755 	struct hclge_vport *vport = hdev->vport;
4756 
4757 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4758 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4759 
4760 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4761 		u16 *rss_ind_tbl;
4762 
4763 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4764 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4765 		vport[i].rss_tuple_sets.ipv4_udp_en =
4766 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4767 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4768 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4769 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4770 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4771 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4772 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4773 		vport[i].rss_tuple_sets.ipv6_udp_en =
4774 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4775 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4776 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4777 			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4778 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4779 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4780 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4781 
4782 		vport[i].rss_algo = rss_algo;
4783 
4784 		rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4785 					   sizeof(*rss_ind_tbl), GFP_KERNEL);
4786 		if (!rss_ind_tbl)
4787 			return -ENOMEM;
4788 
4789 		vport[i].rss_indirection_tbl = rss_ind_tbl;
4790 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4791 		       HCLGE_RSS_KEY_SIZE);
4792 	}
4793 
4794 	hclge_rss_indir_init_cfg(hdev);
4795 
4796 	return 0;
4797 }
4798 
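/* Map (or unmap, when en is false) a chain of rings to an interrupt
 * vector. The ring chain is packed into the command descriptor
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries at a time; any partially filled
 * descriptor left over at the end is flushed separately.
 */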
4799 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4800 				int vector_id, bool en,
4801 				struct hnae3_ring_chain_node *ring_chain)
4802 {
4803 	struct hclge_dev *hdev = vport->back;
4804 	struct hnae3_ring_chain_node *node;
4805 	struct hclge_desc desc;
4806 	struct hclge_ctrl_vector_chain_cmd *req =
4807 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4808 	enum hclge_cmd_status status;
4809 	enum hclge_opcode_type op;
4810 	u16 tqp_type_and_id;
4811 	int i;
4812 
4813 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4814 	hclge_cmd_setup_basic_desc(&desc, op, false);
4815 	req->int_vector_id_l = hnae3_get_field(vector_id,
4816 					       HCLGE_VECTOR_ID_L_M,
4817 					       HCLGE_VECTOR_ID_L_S);
4818 	req->int_vector_id_h = hnae3_get_field(vector_id,
4819 					       HCLGE_VECTOR_ID_H_M,
4820 					       HCLGE_VECTOR_ID_H_S);
4821 
4822 	i = 0;
4823 	for (node = ring_chain; node; node = node->next) {
4824 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4825 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4826 				HCLGE_INT_TYPE_S,
4827 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4828 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4829 				HCLGE_TQP_ID_S, node->tqp_index);
4830 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4831 				HCLGE_INT_GL_IDX_S,
4832 				hnae3_get_field(node->int_gl_idx,
4833 						HNAE3_RING_GL_IDX_M,
4834 						HNAE3_RING_GL_IDX_S));
4835 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4836 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4837 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4838 			req->vfid = vport->vport_id;
4839 
4840 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4841 			if (status) {
4842 				dev_err(&hdev->pdev->dev,
4843 					"Map TQP fail, status is %d.\n",
4844 					status);
4845 				return -EIO;
4846 			}
4847 			i = 0;
4848 
4849 			hclge_cmd_setup_basic_desc(&desc,
4850 						   op,
4851 						   false);
4852 			req->int_vector_id_l =
4853 				hnae3_get_field(vector_id,
4854 						HCLGE_VECTOR_ID_L_M,
4855 						HCLGE_VECTOR_ID_L_S);
4856 			req->int_vector_id_h =
4857 				hnae3_get_field(vector_id,
4858 						HCLGE_VECTOR_ID_H_M,
4859 						HCLGE_VECTOR_ID_H_S);
4860 		}
4861 	}
4862 
4863 	if (i > 0) {
4864 		req->int_cause_num = i;
4865 		req->vfid = vport->vport_id;
4866 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4867 		if (status) {
4868 			dev_err(&hdev->pdev->dev,
4869 				"Map TQP fail, status is %d.\n", status);
4870 			return -EIO;
4871 		}
4872 	}
4873 
4874 	return 0;
4875 }
4876 
4877 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4878 				    struct hnae3_ring_chain_node *ring_chain)
4879 {
4880 	struct hclge_vport *vport = hclge_get_vport(handle);
4881 	struct hclge_dev *hdev = vport->back;
4882 	int vector_id;
4883 
4884 	vector_id = hclge_get_vector_index(hdev, vector);
4885 	if (vector_id < 0) {
4886 		dev_err(&hdev->pdev->dev,
4887 			"failed to get vector index. vector=%d\n", vector);
4888 		return vector_id;
4889 	}
4890 
4891 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4892 }
4893 
4894 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4895 				       struct hnae3_ring_chain_node *ring_chain)
4896 {
4897 	struct hclge_vport *vport = hclge_get_vport(handle);
4898 	struct hclge_dev *hdev = vport->back;
4899 	int vector_id, ret;
4900 
4901 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4902 		return 0;
4903 
4904 	vector_id = hclge_get_vector_index(hdev, vector);
4905 	if (vector_id < 0) {
4906 		dev_err(&handle->pdev->dev,
4907 			"Get vector index fail. ret = %d\n", vector_id);
4908 		return vector_id;
4909 	}
4910 
4911 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4912 	if (ret)
4913 		dev_err(&handle->pdev->dev,
4914 			"Unmap ring from vector fail. vectorid=%d, ret = %d\n",
4915 			vector_id, ret);
4916 
4917 	return ret;
4918 }
4919 
4920 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4921 				      bool en_uc, bool en_mc, bool en_bc)
4922 {
4923 	struct hclge_vport *vport = &hdev->vport[vf_id];
4924 	struct hnae3_handle *handle = &vport->nic;
4925 	struct hclge_promisc_cfg_cmd *req;
4926 	struct hclge_desc desc;
4927 	bool uc_tx_en = en_uc;
4928 	u8 promisc_cfg = 0;
4929 	int ret;
4930 
4931 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4932 
4933 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4934 	req->vf_id = vf_id;
4935 
4936 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4937 		uc_tx_en = false;
4938 
4939 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4940 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4941 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4942 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4943 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4944 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4945 	req->extend_promisc = promisc_cfg;
4946 
4947 	/* to be compatible with DEVICE_VERSION_V1/2 */
4948 	promisc_cfg = 0;
4949 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4950 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4951 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4952 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4953 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4954 	req->promisc = promisc_cfg;
4955 
4956 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4957 	if (ret)
4958 		dev_err(&hdev->pdev->dev,
4959 			"failed to set vport %u promisc mode, ret = %d.\n",
4960 			vf_id, ret);
4961 
4962 	return ret;
4963 }
4964 
4965 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4966 				 bool en_mc_pmc, bool en_bc_pmc)
4967 {
4968 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4969 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
4970 }
4971 
4972 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4973 				  bool en_mc_pmc)
4974 {
4975 	struct hclge_vport *vport = hclge_get_vport(handle);
4976 	struct hclge_dev *hdev = vport->back;
4977 	bool en_bc_pmc = true;
4978 
4979 	/* For devices whose version is below V2, if broadcast promisc is
4980 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
4981 	 * should be disabled until the user enables promisc mode.
4982 	 */
4983 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4984 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4985 
4986 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4987 					    en_bc_pmc);
4988 }
4989 
4990 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4991 {
4992 	struct hclge_vport *vport = hclge_get_vport(handle);
4993 	struct hclge_dev *hdev = vport->back;
4994 
4995 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4996 }
4997 
4998 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4999 {
5000 	struct hclge_get_fd_mode_cmd *req;
5001 	struct hclge_desc desc;
5002 	int ret;
5003 
5004 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5005 
5006 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5007 
5008 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5009 	if (ret) {
5010 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5011 		return ret;
5012 	}
5013 
5014 	*fd_mode = req->mode;
5015 
5016 	return ret;
5017 }
5018 
5019 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5020 				   u32 *stage1_entry_num,
5021 				   u32 *stage2_entry_num,
5022 				   u16 *stage1_counter_num,
5023 				   u16 *stage2_counter_num)
5024 {
5025 	struct hclge_get_fd_allocation_cmd *req;
5026 	struct hclge_desc desc;
5027 	int ret;
5028 
5029 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5030 
5031 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5032 
5033 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5034 	if (ret) {
5035 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5036 			ret);
5037 		return ret;
5038 	}
5039 
5040 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5041 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5042 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5043 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5044 
5045 	return ret;
5046 }
5047 
5048 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5049 				   enum HCLGE_FD_STAGE stage_num)
5050 {
5051 	struct hclge_set_fd_key_config_cmd *req;
5052 	struct hclge_fd_key_cfg *stage;
5053 	struct hclge_desc desc;
5054 	int ret;
5055 
5056 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5057 
5058 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5059 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5060 	req->stage = stage_num;
5061 	req->key_select = stage->key_sel;
5062 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5063 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5064 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5065 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5066 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5067 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5068 
5069 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5070 	if (ret)
5071 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5072 
5073 	return ret;
5074 }
5075 
5076 static int hclge_init_fd_config(struct hclge_dev *hdev)
5077 {
5078 #define LOW_2_WORDS		0x03
5079 	struct hclge_fd_key_cfg *key_cfg;
5080 	int ret;
5081 
5082 	if (!hnae3_dev_fd_supported(hdev))
5083 		return 0;
5084 
5085 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5086 	if (ret)
5087 		return ret;
5088 
5089 	switch (hdev->fd_cfg.fd_mode) {
5090 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5091 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5092 		break;
5093 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5094 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5095 		break;
5096 	default:
5097 		dev_err(&hdev->pdev->dev,
5098 			"Unsupported flow director mode %u\n",
5099 			hdev->fd_cfg.fd_mode);
5100 		return -EOPNOTSUPP;
5101 	}
5102 
5103 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5104 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5105 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5106 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5107 	key_cfg->outer_sipv6_word_en = 0;
5108 	key_cfg->outer_dipv6_word_en = 0;
5109 
5110 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5111 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5112 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5113 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5114 
5115 	/* If the max 400 bit key is used, MAC address tuples are also supported */
5116 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5117 		key_cfg->tuple_active |=
5118 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5119 
5120 	/* roce_type is used to filter roce frames;
5121 	 * dst_vport is used to specify the rule's destination vport
5122 	 */
5123 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5124 
5125 	ret = hclge_get_fd_allocation(hdev,
5126 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5127 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5128 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5129 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5130 	if (ret)
5131 		return ret;
5132 
5133 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5134 }
5135 
5136 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5137 				int loc, u8 *key, bool is_add)
5138 {
5139 	struct hclge_fd_tcam_config_1_cmd *req1;
5140 	struct hclge_fd_tcam_config_2_cmd *req2;
5141 	struct hclge_fd_tcam_config_3_cmd *req3;
5142 	struct hclge_desc desc[3];
5143 	int ret;
5144 
5145 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5146 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5147 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5148 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5149 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5150 
5151 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5152 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5153 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5154 
5155 	req1->stage = stage;
5156 	req1->xy_sel = sel_x ? 1 : 0;
5157 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5158 	req1->index = cpu_to_le32(loc);
5159 	req1->entry_vld = sel_x ? is_add : 0;
5160 
5161 	if (key) {
5162 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5163 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5164 		       sizeof(req2->tcam_data));
5165 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5166 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5167 	}
5168 
5169 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5170 	if (ret)
5171 		dev_err(&hdev->pdev->dev,
5172 			"config tcam key fail, ret=%d\n",
5173 			ret);
5174 
5175 	return ret;
5176 }
5177 
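/* Program the action data for a flow director rule. The fields set before
 * the "ad_data <<= 32" shift (rule id write-back and TC override) end up in
 * the upper 32 bits, while the drop/queue/counter/next-stage fields occupy
 * the lower 32 bits of the 64-bit action word.
 */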
5178 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5179 			      struct hclge_fd_ad_data *action)
5180 {
5181 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5182 	struct hclge_fd_ad_config_cmd *req;
5183 	struct hclge_desc desc;
5184 	u64 ad_data = 0;
5185 	int ret;
5186 
5187 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5188 
5189 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5190 	req->index = cpu_to_le32(loc);
5191 	req->stage = stage;
5192 
5193 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5194 		      action->write_rule_id_to_bd);
5195 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5196 			action->rule_id);
5197 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5198 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5199 			      action->override_tc);
5200 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5201 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5202 	}
5203 	ad_data <<= 32;
5204 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5205 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5206 		      action->forward_to_direct_queue);
5207 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5208 			action->queue_id);
5209 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5210 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5211 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5212 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5213 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5214 			action->counter_id);
5215 
5216 	req->ad_data = cpu_to_le64(ad_data);
5217 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5218 	if (ret)
5219 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5220 
5221 	return ret;
5222 }
5223 
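/* Convert one tuple of a flow director rule into its TCAM key pair:
 * key_x/key_y are derived from the rule value and its mask via the
 * calc_x()/calc_y() helpers, with MAC addresses stored byte-reversed and
 * 16/32 bit fields stored little-endian. Tuples marked unused are skipped
 * (the function returns true without writing the key).
 */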
5224 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5225 				   struct hclge_fd_rule *rule)
5226 {
5227 	u16 tmp_x_s, tmp_y_s;
5228 	u32 tmp_x_l, tmp_y_l;
5229 	int i;
5230 
5231 	if (rule->unused_tuple & tuple_bit)
5232 		return true;
5233 
5234 	switch (tuple_bit) {
5235 	case BIT(INNER_DST_MAC):
5236 		for (i = 0; i < ETH_ALEN; i++) {
5237 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5238 			       rule->tuples_mask.dst_mac[i]);
5239 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5240 			       rule->tuples_mask.dst_mac[i]);
5241 		}
5242 
5243 		return true;
5244 	case BIT(INNER_SRC_MAC):
5245 		for (i = 0; i < ETH_ALEN; i++) {
5246 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5247 			       rule->tuples_mask.src_mac[i]);
5248 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5249 			       rule->tuples_mask.src_mac[i]);
5250 		}
5251 
5252 		return true;
5253 	case BIT(INNER_VLAN_TAG_FST):
5254 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5255 		       rule->tuples_mask.vlan_tag1);
5256 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5257 		       rule->tuples_mask.vlan_tag1);
5258 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5259 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5260 
5261 		return true;
5262 	case BIT(INNER_ETH_TYPE):
5263 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5264 		       rule->tuples_mask.ether_proto);
5265 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5266 		       rule->tuples_mask.ether_proto);
5267 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5268 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5269 
5270 		return true;
5271 	case BIT(INNER_IP_TOS):
5272 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5273 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5274 
5275 		return true;
5276 	case BIT(INNER_IP_PROTO):
5277 		calc_x(*key_x, rule->tuples.ip_proto,
5278 		       rule->tuples_mask.ip_proto);
5279 		calc_y(*key_y, rule->tuples.ip_proto,
5280 		       rule->tuples_mask.ip_proto);
5281 
5282 		return true;
5283 	case BIT(INNER_SRC_IP):
5284 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5285 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5286 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5287 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5288 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5289 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5290 
5291 		return true;
5292 	case BIT(INNER_DST_IP):
5293 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5294 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5295 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5296 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5297 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5298 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5299 
5300 		return true;
5301 	case BIT(INNER_SRC_PORT):
5302 		calc_x(tmp_x_s, rule->tuples.src_port,
5303 		       rule->tuples_mask.src_port);
5304 		calc_y(tmp_y_s, rule->tuples.src_port,
5305 		       rule->tuples_mask.src_port);
5306 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5307 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5308 
5309 		return true;
5310 	case BIT(INNER_DST_PORT):
5311 		calc_x(tmp_x_s, rule->tuples.dst_port,
5312 		       rule->tuples_mask.dst_port);
5313 		calc_y(tmp_y_s, rule->tuples.dst_port,
5314 		       rule->tuples_mask.dst_port);
5315 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5316 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5317 
5318 		return true;
5319 	default:
5320 		return false;
5321 	}
5322 }
5323 
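/* hclge_get_port_number - compose the port number field used by the flow
 * director meta data and the mac vlan switch parameter command: either a
 * host port identified by pf/vf id, or a physical network port.
 */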
5324 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5325 				 u8 vf_id, u8 network_port_id)
5326 {
5327 	u32 port_number = 0;
5328 
5329 	if (port_type == HOST_PORT) {
5330 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5331 				pf_id);
5332 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5333 				vf_id);
5334 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5335 	} else {
5336 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5337 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5338 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5339 	}
5340 
5341 	return port_number;
5342 }
5343 
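/* hclge_fd_convert_meta_data - build the meta data part of the key (packet
 * type and destination vport) and convert it into the tcam key pair; the
 * meta data is left aligned within its 32-bit region.
 */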
5344 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5345 				       __le32 *key_x, __le32 *key_y,
5346 				       struct hclge_fd_rule *rule)
5347 {
5348 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5349 	u8 cur_pos = 0, tuple_size, shift_bits;
5350 	unsigned int i;
5351 
5352 	for (i = 0; i < MAX_META_DATA; i++) {
5353 		tuple_size = meta_data_key_info[i].key_length;
5354 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5355 
5356 		switch (tuple_bit) {
5357 		case BIT(ROCE_TYPE):
5358 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5359 			cur_pos += tuple_size;
5360 			break;
5361 		case BIT(DST_VPORT):
5362 			port_number = hclge_get_port_number(HOST_PORT, 0,
5363 							    rule->vf_id, 0);
5364 			hnae3_set_field(meta_data,
5365 					GENMASK(cur_pos + tuple_size, cur_pos),
5366 					cur_pos, port_number);
5367 			cur_pos += tuple_size;
5368 			break;
5369 		default:
5370 			break;
5371 		}
5372 	}
5373 
5374 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5375 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5376 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5377 
5378 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5379 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5380 }
5381 
5382 /* A complete key consists of a meta data key and a tuple key.
5383  * The meta data key is stored in the MSB region, the tuple key is stored
5384  * in the LSB region, and unused bits are filled with 0.
5385  */
5386 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5387 			    struct hclge_fd_rule *rule)
5388 {
5389 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5390 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5391 	u8 *cur_key_x, *cur_key_y;
5392 	u8 meta_data_region;
5393 	u8 tuple_size;
5394 	int ret;
5395 	u32 i;
5396 
5397 	memset(key_x, 0, sizeof(key_x));
5398 	memset(key_y, 0, sizeof(key_y));
5399 	cur_key_x = key_x;
5400 	cur_key_y = key_y;
5401 
5402 	for (i = 0; i < MAX_TUPLE; i++) {
5403 		bool tuple_valid;
5404 		u32 check_tuple;
5405 
5406 		tuple_size = tuple_key_info[i].key_length / 8;
5407 		check_tuple = key_cfg->tuple_active & BIT(i);
5408 
5409 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5410 						     cur_key_y, rule);
5411 		if (tuple_valid) {
5412 			cur_key_x += tuple_size;
5413 			cur_key_y += tuple_size;
5414 		}
5415 	}
5416 
5417 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5418 			MAX_META_DATA_LENGTH / 8;
5419 
5420 	hclge_fd_convert_meta_data(key_cfg,
5421 				   (__le32 *)(key_x + meta_data_region),
5422 				   (__le32 *)(key_y + meta_data_region),
5423 				   rule);
5424 
5425 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5426 				   true);
5427 	if (ret) {
5428 		dev_err(&hdev->pdev->dev,
5429 			"fd key_y config fail, loc=%u, ret=%d\n",
5430 			rule->location, ret);
5431 		return ret;
5432 	}
5433 
5434 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5435 				   true);
5436 	if (ret)
5437 		dev_err(&hdev->pdev->dev,
5438 			"fd key_x config fail, loc=%u, ret=%d\n",
5439 			rule->location, ret);
5440 	return ret;
5441 }
5442 
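/* hclge_config_action - translate the rule action (drop, select tc or
 * forward to a queue) into hclge_fd_ad_data and write it to hardware.
 */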
5443 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5444 			       struct hclge_fd_rule *rule)
5445 {
5446 	struct hclge_vport *vport = hdev->vport;
5447 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5448 	struct hclge_fd_ad_data ad_data;
5449 
5450 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5451 	ad_data.ad_id = rule->location;
5452 
5453 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5454 		ad_data.drop_packet = true;
5455 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5456 		ad_data.override_tc = true;
5457 		ad_data.queue_id =
5458 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5459 		ad_data.tc_size =
5460 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5461 	} else {
5462 		ad_data.forward_to_direct_queue = true;
5463 		ad_data.queue_id = rule->queue_id;
5464 	}
5465 
5466 	ad_data.use_counter = false;
5467 	ad_data.counter_id = 0;
5468 
5469 	ad_data.use_next_stage = false;
5470 	ad_data.next_input_key = 0;
5471 
5472 	ad_data.write_rule_id_to_bd = true;
5473 	ad_data.rule_id = rule->location;
5474 
5475 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5476 }
5477 
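/* The hclge_fd_check_*_tuple helpers below record the tuples that are left
 * unspecified by the ethtool flow spec in @unused_tuple, so they can be
 * masked out when building the tcam key. They return -EOPNOTSUPP when the
 * spec uses a field the hardware cannot match on.
 */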
5478 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5479 				       u32 *unused_tuple)
5480 {
5481 	if (!spec || !unused_tuple)
5482 		return -EINVAL;
5483 
5484 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5485 
5486 	if (!spec->ip4src)
5487 		*unused_tuple |= BIT(INNER_SRC_IP);
5488 
5489 	if (!spec->ip4dst)
5490 		*unused_tuple |= BIT(INNER_DST_IP);
5491 
5492 	if (!spec->psrc)
5493 		*unused_tuple |= BIT(INNER_SRC_PORT);
5494 
5495 	if (!spec->pdst)
5496 		*unused_tuple |= BIT(INNER_DST_PORT);
5497 
5498 	if (!spec->tos)
5499 		*unused_tuple |= BIT(INNER_IP_TOS);
5500 
5501 	return 0;
5502 }
5503 
5504 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5505 				    u32 *unused_tuple)
5506 {
5507 	if (!spec || !unused_tuple)
5508 		return -EINVAL;
5509 
5510 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5511 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5512 
5513 	if (!spec->ip4src)
5514 		*unused_tuple |= BIT(INNER_SRC_IP);
5515 
5516 	if (!spec->ip4dst)
5517 		*unused_tuple |= BIT(INNER_DST_IP);
5518 
5519 	if (!spec->tos)
5520 		*unused_tuple |= BIT(INNER_IP_TOS);
5521 
5522 	if (!spec->proto)
5523 		*unused_tuple |= BIT(INNER_IP_PROTO);
5524 
5525 	if (spec->l4_4_bytes)
5526 		return -EOPNOTSUPP;
5527 
5528 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5529 		return -EOPNOTSUPP;
5530 
5531 	return 0;
5532 }
5533 
5534 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5535 				       u32 *unused_tuple)
5536 {
5537 	if (!spec || !unused_tuple)
5538 		return -EINVAL;
5539 
5540 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5541 		BIT(INNER_IP_TOS);
5542 
5543 	/* check whether src/dst ip address is used */
5544 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5545 		*unused_tuple |= BIT(INNER_SRC_IP);
5546 
5547 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5548 		*unused_tuple |= BIT(INNER_DST_IP);
5549 
5550 	if (!spec->psrc)
5551 		*unused_tuple |= BIT(INNER_SRC_PORT);
5552 
5553 	if (!spec->pdst)
5554 		*unused_tuple |= BIT(INNER_DST_PORT);
5555 
5556 	if (spec->tclass)
5557 		return -EOPNOTSUPP;
5558 
5559 	return 0;
5560 }
5561 
5562 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5563 				    u32 *unused_tuple)
5564 {
5565 	if (!spec || !unused_tuple)
5566 		return -EINVAL;
5567 
5568 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5569 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5570 
5571 	/* check whether src/dst ip address is used */
5572 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5573 		*unused_tuple |= BIT(INNER_SRC_IP);
5574 
5575 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5576 		*unused_tuple |= BIT(INNER_DST_IP);
5577 
5578 	if (!spec->l4_proto)
5579 		*unused_tuple |= BIT(INNER_IP_PROTO);
5580 
5581 	if (spec->tclass)
5582 		return -EOPNOTSUPP;
5583 
5584 	if (spec->l4_4_bytes)
5585 		return -EOPNOTSUPP;
5586 
5587 	return 0;
5588 }
5589 
5590 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5591 {
5592 	if (!spec || !unused_tuple)
5593 		return -EINVAL;
5594 
5595 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5596 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5597 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5598 
5599 	if (is_zero_ether_addr(spec->h_source))
5600 		*unused_tuple |= BIT(INNER_SRC_MAC);
5601 
5602 	if (is_zero_ether_addr(spec->h_dest))
5603 		*unused_tuple |= BIT(INNER_DST_MAC);
5604 
5605 	if (!spec->h_proto)
5606 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5607 
5608 	return 0;
5609 }
5610 
5611 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5612 				    struct ethtool_rx_flow_spec *fs,
5613 				    u32 *unused_tuple)
5614 {
5615 	if (fs->flow_type & FLOW_EXT) {
5616 		if (fs->h_ext.vlan_etype) {
5617 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5618 			return -EOPNOTSUPP;
5619 		}
5620 
5621 		if (!fs->h_ext.vlan_tci)
5622 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5623 
5624 		if (fs->m_ext.vlan_tci &&
5625 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5626 			dev_err(&hdev->pdev->dev,
5627 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5628 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5629 			return -EINVAL;
5630 		}
5631 	} else {
5632 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5633 	}
5634 
5635 	if (fs->flow_type & FLOW_MAC_EXT) {
5636 		if (hdev->fd_cfg.fd_mode !=
5637 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5638 			dev_err(&hdev->pdev->dev,
5639 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5640 			return -EOPNOTSUPP;
5641 		}
5642 
5643 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5644 			*unused_tuple |= BIT(INNER_DST_MAC);
5645 		else
5646 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5647 	}
5648 
5649 	return 0;
5650 }
5651 
5652 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5653 			       struct ethtool_rx_flow_spec *fs,
5654 			       u32 *unused_tuple)
5655 {
5656 	u32 flow_type;
5657 	int ret;
5658 
5659 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5660 		dev_err(&hdev->pdev->dev,
5661 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5662 			fs->location,
5663 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5664 		return -EINVAL;
5665 	}
5666 
5667 	if ((fs->flow_type & FLOW_EXT) &&
5668 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5669 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5670 		return -EOPNOTSUPP;
5671 	}
5672 
5673 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5674 	switch (flow_type) {
5675 	case SCTP_V4_FLOW:
5676 	case TCP_V4_FLOW:
5677 	case UDP_V4_FLOW:
5678 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5679 						  unused_tuple);
5680 		break;
5681 	case IP_USER_FLOW:
5682 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5683 					       unused_tuple);
5684 		break;
5685 	case SCTP_V6_FLOW:
5686 	case TCP_V6_FLOW:
5687 	case UDP_V6_FLOW:
5688 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5689 						  unused_tuple);
5690 		break;
5691 	case IPV6_USER_FLOW:
5692 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5693 					       unused_tuple);
5694 		break;
5695 	case ETHER_FLOW:
5696 		if (hdev->fd_cfg.fd_mode !=
5697 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5698 			dev_err(&hdev->pdev->dev,
5699 				"ETHER_FLOW is not supported in current fd mode!\n");
5700 			return -EOPNOTSUPP;
5701 		}
5702 
5703 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5704 						 unused_tuple);
5705 		break;
5706 	default:
5707 		dev_err(&hdev->pdev->dev,
5708 			"unsupported protocol type, protocol type = %#x\n",
5709 			flow_type);
5710 		return -EOPNOTSUPP;
5711 	}
5712 
5713 	if (ret) {
5714 		dev_err(&hdev->pdev->dev,
5715 			"failed to check flow union tuple, ret = %d\n",
5716 			ret);
5717 		return ret;
5718 	}
5719 
5720 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5721 }
5722 
5723 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5724 {
5725 	struct hclge_fd_rule *rule = NULL;
5726 	struct hlist_node *node2;
5727 
5728 	spin_lock_bh(&hdev->fd_rule_lock);
5729 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5730 		if (rule->location >= location)
5731 			break;
5732 	}
5733 
5734 	spin_unlock_bh(&hdev->fd_rule_lock);
5735 
5736 	return rule && rule->location == location;
5737 }
5738 
5739 /* the caller must hold fd_rule_lock */
5740 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5741 				     struct hclge_fd_rule *new_rule,
5742 				     u16 location,
5743 				     bool is_add)
5744 {
5745 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5746 	struct hlist_node *node2;
5747 
5748 	if (is_add && !new_rule)
5749 		return -EINVAL;
5750 
5751 	hlist_for_each_entry_safe(rule, node2,
5752 				  &hdev->fd_rule_list, rule_node) {
5753 		if (rule->location >= location)
5754 			break;
5755 		parent = rule;
5756 	}
5757 
5758 	if (rule && rule->location == location) {
5759 		hlist_del(&rule->rule_node);
5760 		kfree(rule);
5761 		hdev->hclge_fd_rule_num--;
5762 
5763 		if (!is_add) {
5764 			if (!hdev->hclge_fd_rule_num)
5765 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5766 			clear_bit(location, hdev->fd_bmap);
5767 
5768 			return 0;
5769 		}
5770 	} else if (!is_add) {
5771 		dev_err(&hdev->pdev->dev,
5772 			"delete fail, rule %u does not exist\n",
5773 			location);
5774 		return -EINVAL;
5775 	}
5776 
5777 	INIT_HLIST_NODE(&new_rule->rule_node);
5778 
5779 	if (parent)
5780 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5781 	else
5782 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5783 
5784 	set_bit(location, hdev->fd_bmap);
5785 	hdev->hclge_fd_rule_num++;
5786 	hdev->fd_active_type = new_rule->rule_type;
5787 
5788 	return 0;
5789 }
5790 
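/* hclge_fd_get_tuple - translate an ethtool rx flow spec into the rule
 * tuples and tuple masks used to build the tcam key.
 */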
5791 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5792 			      struct ethtool_rx_flow_spec *fs,
5793 			      struct hclge_fd_rule *rule)
5794 {
5795 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5796 
5797 	switch (flow_type) {
5798 	case SCTP_V4_FLOW:
5799 	case TCP_V4_FLOW:
5800 	case UDP_V4_FLOW:
5801 		rule->tuples.src_ip[IPV4_INDEX] =
5802 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5803 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5804 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5805 
5806 		rule->tuples.dst_ip[IPV4_INDEX] =
5807 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5808 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5809 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5810 
5811 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5812 		rule->tuples_mask.src_port =
5813 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5814 
5815 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5816 		rule->tuples_mask.dst_port =
5817 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5818 
5819 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5820 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5821 
5822 		rule->tuples.ether_proto = ETH_P_IP;
5823 		rule->tuples_mask.ether_proto = 0xFFFF;
5824 
5825 		break;
5826 	case IP_USER_FLOW:
5827 		rule->tuples.src_ip[IPV4_INDEX] =
5828 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5829 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5830 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5831 
5832 		rule->tuples.dst_ip[IPV4_INDEX] =
5833 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5834 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5835 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5836 
5837 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5838 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5839 
5840 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5841 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5842 
5843 		rule->tuples.ether_proto = ETH_P_IP;
5844 		rule->tuples_mask.ether_proto = 0xFFFF;
5845 
5846 		break;
5847 	case SCTP_V6_FLOW:
5848 	case TCP_V6_FLOW:
5849 	case UDP_V6_FLOW:
5850 		be32_to_cpu_array(rule->tuples.src_ip,
5851 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5852 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5853 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5854 
5855 		be32_to_cpu_array(rule->tuples.dst_ip,
5856 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5857 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5858 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5859 
5860 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5861 		rule->tuples_mask.src_port =
5862 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5863 
5864 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5865 		rule->tuples_mask.dst_port =
5866 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5867 
5868 		rule->tuples.ether_proto = ETH_P_IPV6;
5869 		rule->tuples_mask.ether_proto = 0xFFFF;
5870 
5871 		break;
5872 	case IPV6_USER_FLOW:
5873 		be32_to_cpu_array(rule->tuples.src_ip,
5874 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5875 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5876 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5877 
5878 		be32_to_cpu_array(rule->tuples.dst_ip,
5879 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5880 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5881 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5882 
5883 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5884 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5885 
5886 		rule->tuples.ether_proto = ETH_P_IPV6;
5887 		rule->tuples_mask.ether_proto = 0xFFFF;
5888 
5889 		break;
5890 	case ETHER_FLOW:
5891 		ether_addr_copy(rule->tuples.src_mac,
5892 				fs->h_u.ether_spec.h_source);
5893 		ether_addr_copy(rule->tuples_mask.src_mac,
5894 				fs->m_u.ether_spec.h_source);
5895 
5896 		ether_addr_copy(rule->tuples.dst_mac,
5897 				fs->h_u.ether_spec.h_dest);
5898 		ether_addr_copy(rule->tuples_mask.dst_mac,
5899 				fs->m_u.ether_spec.h_dest);
5900 
5901 		rule->tuples.ether_proto =
5902 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5903 		rule->tuples_mask.ether_proto =
5904 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5905 
5906 		break;
5907 	default:
5908 		return -EOPNOTSUPP;
5909 	}
5910 
5911 	switch (flow_type) {
5912 	case SCTP_V4_FLOW:
5913 	case SCTP_V6_FLOW:
5914 		rule->tuples.ip_proto = IPPROTO_SCTP;
5915 		rule->tuples_mask.ip_proto = 0xFF;
5916 		break;
5917 	case TCP_V4_FLOW:
5918 	case TCP_V6_FLOW:
5919 		rule->tuples.ip_proto = IPPROTO_TCP;
5920 		rule->tuples_mask.ip_proto = 0xFF;
5921 		break;
5922 	case UDP_V4_FLOW:
5923 	case UDP_V6_FLOW:
5924 		rule->tuples.ip_proto = IPPROTO_UDP;
5925 		rule->tuples_mask.ip_proto = 0xFF;
5926 		break;
5927 	default:
5928 		break;
5929 	}
5930 
5931 	if (fs->flow_type & FLOW_EXT) {
5932 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5933 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5934 	}
5935 
5936 	if (fs->flow_type & FLOW_MAC_EXT) {
5937 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5938 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5939 	}
5940 
5941 	return 0;
5942 }
5943 
5944 /* the caller must hold fd_rule_lock */
5945 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5946 				struct hclge_fd_rule *rule)
5947 {
5948 	int ret;
5949 
5950 	if (!rule) {
5951 		dev_err(&hdev->pdev->dev,
5952 			"The flow director rule is NULL\n");
5953 		return -EINVAL;
5954 	}
5955 
5956 	/* it never fails here, so there is no need to check the return value */
5957 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5958 
5959 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5960 	if (ret)
5961 		goto clear_rule;
5962 
5963 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5964 	if (ret)
5965 		goto clear_rule;
5966 
5967 	return 0;
5968 
5969 clear_rule:
5970 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5971 	return ret;
5972 }
5973 
5974 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5975 {
5976 	struct hclge_vport *vport = hclge_get_vport(handle);
5977 	struct hclge_dev *hdev = vport->back;
5978 
5979 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5980 }
5981 
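/* hclge_add_fd_entry - handle ETHTOOL_SRXCLSRLINS: validate the flow spec,
 * resolve the destination vport and queue, then program the rule. Any
 * existing arfs rules are cleared first to avoid conflicts.
 */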
5982 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5983 			      struct ethtool_rxnfc *cmd)
5984 {
5985 	struct hclge_vport *vport = hclge_get_vport(handle);
5986 	struct hclge_dev *hdev = vport->back;
5987 	u16 dst_vport_id = 0, q_index = 0;
5988 	struct ethtool_rx_flow_spec *fs;
5989 	struct hclge_fd_rule *rule;
5990 	u32 unused = 0;
5991 	u8 action;
5992 	int ret;
5993 
5994 	if (!hnae3_dev_fd_supported(hdev)) {
5995 		dev_err(&hdev->pdev->dev,
5996 			"flow director is not supported\n");
5997 		return -EOPNOTSUPP;
5998 	}
5999 
6000 	if (!hdev->fd_en) {
6001 		dev_err(&hdev->pdev->dev,
6002 			"please enable flow director first\n");
6003 		return -EOPNOTSUPP;
6004 	}
6005 
6006 	if (hclge_is_cls_flower_active(handle)) {
6007 		dev_err(&hdev->pdev->dev,
6008 			"please delete all existing cls flower rules first\n");
6009 		return -EINVAL;
6010 	}
6011 
6012 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6013 
6014 	ret = hclge_fd_check_spec(hdev, fs, &unused);
6015 	if (ret)
6016 		return ret;
6017 
6018 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6019 		action = HCLGE_FD_ACTION_DROP_PACKET;
6020 	} else {
6021 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6022 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6023 		u16 tqps;
6024 
6025 		if (vf > hdev->num_req_vfs) {
6026 			dev_err(&hdev->pdev->dev,
6027 				"Error: vf id (%u) > max vf num (%u)\n",
6028 				vf, hdev->num_req_vfs);
6029 			return -EINVAL;
6030 		}
6031 
6032 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6033 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6034 
6035 		if (ring >= tqps) {
6036 			dev_err(&hdev->pdev->dev,
6037 				"Error: queue id (%u) > max queue id (%u)\n",
6038 				ring, tqps - 1);
6039 			return -EINVAL;
6040 		}
6041 
6042 		action = HCLGE_FD_ACTION_SELECT_QUEUE;
6043 		q_index = ring;
6044 	}
6045 
6046 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6047 	if (!rule)
6048 		return -ENOMEM;
6049 
6050 	ret = hclge_fd_get_tuple(hdev, fs, rule);
6051 	if (ret) {
6052 		kfree(rule);
6053 		return ret;
6054 	}
6055 
6056 	rule->flow_type = fs->flow_type;
6057 	rule->location = fs->location;
6058 	rule->unused_tuple = unused;
6059 	rule->vf_id = dst_vport_id;
6060 	rule->queue_id = q_index;
6061 	rule->action = action;
6062 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6063 
6064 	/* to avoid rule conflict, when the user configures a rule via
6065 	 * ethtool, we need to clear all arfs rules first
6066 	 */
6067 	spin_lock_bh(&hdev->fd_rule_lock);
6068 	hclge_clear_arfs_rules(handle);
6069 
6070 	ret = hclge_fd_config_rule(hdev, rule);
6071 
6072 	spin_unlock_bh(&hdev->fd_rule_lock);
6073 
6074 	return ret;
6075 }
6076 
6077 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6078 			      struct ethtool_rxnfc *cmd)
6079 {
6080 	struct hclge_vport *vport = hclge_get_vport(handle);
6081 	struct hclge_dev *hdev = vport->back;
6082 	struct ethtool_rx_flow_spec *fs;
6083 	int ret;
6084 
6085 	if (!hnae3_dev_fd_supported(hdev))
6086 		return -EOPNOTSUPP;
6087 
6088 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6089 
6090 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6091 		return -EINVAL;
6092 
6093 	if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6094 	    !hclge_fd_rule_exist(hdev, fs->location)) {
6095 		dev_err(&hdev->pdev->dev,
6096 			"Delete fail, rule %u does not exist\n", fs->location);
6097 		return -ENOENT;
6098 	}
6099 
6100 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6101 				   NULL, false);
6102 	if (ret)
6103 		return ret;
6104 
6105 	spin_lock_bh(&hdev->fd_rule_lock);
6106 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6107 
6108 	spin_unlock_bh(&hdev->fd_rule_lock);
6109 
6110 	return ret;
6111 }
6112 
6113 /* the caller must hold fd_rule_lock */
6114 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6115 				     bool clear_list)
6116 {
6117 	struct hclge_vport *vport = hclge_get_vport(handle);
6118 	struct hclge_dev *hdev = vport->back;
6119 	struct hclge_fd_rule *rule;
6120 	struct hlist_node *node;
6121 	u16 location;
6122 
6123 	if (!hnae3_dev_fd_supported(hdev))
6124 		return;
6125 
6126 	for_each_set_bit(location, hdev->fd_bmap,
6127 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6128 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6129 				     NULL, false);
6130 
6131 	if (clear_list) {
6132 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6133 					  rule_node) {
6134 			hlist_del(&rule->rule_node);
6135 			kfree(rule);
6136 		}
6137 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6138 		hdev->hclge_fd_rule_num = 0;
6139 		bitmap_zero(hdev->fd_bmap,
6140 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6141 	}
6142 }
6143 
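/* hclge_restore_fd_entries - reprogram all flow director rules to hardware
 * after a reset; rules that fail to restore are dropped from the list.
 */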
6144 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6145 {
6146 	struct hclge_vport *vport = hclge_get_vport(handle);
6147 	struct hclge_dev *hdev = vport->back;
6148 	struct hclge_fd_rule *rule;
6149 	struct hlist_node *node;
6150 	int ret;
6151 
6152 	/* Return ok here, because reset error handling will check this
6153 	 * return value. If error is returned here, the reset process will
6154 	 * fail.
6155 	 */
6156 	if (!hnae3_dev_fd_supported(hdev))
6157 		return 0;
6158 
6159 	/* if fd is disabled, the rules should not be restored during reset */
6160 	if (!hdev->fd_en)
6161 		return 0;
6162 
6163 	spin_lock_bh(&hdev->fd_rule_lock);
6164 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6165 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6166 		if (!ret)
6167 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6168 
6169 		if (ret) {
6170 			dev_warn(&hdev->pdev->dev,
6171 				 "Restore rule %u failed, remove it\n",
6172 				 rule->location);
6173 			clear_bit(rule->location, hdev->fd_bmap);
6174 			hlist_del(&rule->rule_node);
6175 			kfree(rule);
6176 			hdev->hclge_fd_rule_num--;
6177 		}
6178 	}
6179 
6180 	if (hdev->hclge_fd_rule_num)
6181 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6182 
6183 	spin_unlock_bh(&hdev->fd_rule_lock);
6184 
6185 	return 0;
6186 }
6187 
6188 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6189 				 struct ethtool_rxnfc *cmd)
6190 {
6191 	struct hclge_vport *vport = hclge_get_vport(handle);
6192 	struct hclge_dev *hdev = vport->back;
6193 
6194 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6195 		return -EOPNOTSUPP;
6196 
6197 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6198 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6199 
6200 	return 0;
6201 }
6202 
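/* The hclge_fd_get_*_info helpers below fill the ethtool spec and mask from
 * a stored rule when reporting it back to user space; tuples marked unused
 * get an all-zero mask.
 */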
6203 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6204 				     struct ethtool_tcpip4_spec *spec,
6205 				     struct ethtool_tcpip4_spec *spec_mask)
6206 {
6207 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6208 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6209 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6210 
6211 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6212 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6213 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6214 
6215 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6216 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6217 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6218 
6219 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6220 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6221 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6222 
6223 	spec->tos = rule->tuples.ip_tos;
6224 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6225 			0 : rule->tuples_mask.ip_tos;
6226 }
6227 
6228 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6229 				  struct ethtool_usrip4_spec *spec,
6230 				  struct ethtool_usrip4_spec *spec_mask)
6231 {
6232 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6233 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6234 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6235 
6236 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6237 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6238 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6239 
6240 	spec->tos = rule->tuples.ip_tos;
6241 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6242 			0 : rule->tuples_mask.ip_tos;
6243 
6244 	spec->proto = rule->tuples.ip_proto;
6245 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6246 			0 : rule->tuples_mask.ip_proto;
6247 
6248 	spec->ip_ver = ETH_RX_NFC_IP4;
6249 }
6250 
6251 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6252 				     struct ethtool_tcpip6_spec *spec,
6253 				     struct ethtool_tcpip6_spec *spec_mask)
6254 {
6255 	cpu_to_be32_array(spec->ip6src,
6256 			  rule->tuples.src_ip, IPV6_SIZE);
6257 	cpu_to_be32_array(spec->ip6dst,
6258 			  rule->tuples.dst_ip, IPV6_SIZE);
6259 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6260 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6261 	else
6262 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6263 				  IPV6_SIZE);
6264 
6265 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6266 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6267 	else
6268 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6269 				  IPV6_SIZE);
6270 
6271 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6272 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6273 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6274 
6275 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6276 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6277 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6278 }
6279 
6280 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6281 				  struct ethtool_usrip6_spec *spec,
6282 				  struct ethtool_usrip6_spec *spec_mask)
6283 {
6284 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6285 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6286 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6287 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6288 	else
6289 		cpu_to_be32_array(spec_mask->ip6src,
6290 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6291 
6292 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6293 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6294 	else
6295 		cpu_to_be32_array(spec_mask->ip6dst,
6296 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6297 
6298 	spec->l4_proto = rule->tuples.ip_proto;
6299 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6300 			0 : rule->tuples_mask.ip_proto;
6301 }
6302 
6303 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6304 				    struct ethhdr *spec,
6305 				    struct ethhdr *spec_mask)
6306 {
6307 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6308 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6309 
6310 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6311 		eth_zero_addr(spec_mask->h_source);
6312 	else
6313 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6314 
6315 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6316 		eth_zero_addr(spec_mask->h_dest);
6317 	else
6318 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6319 
6320 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6321 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6322 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6323 }
6324 
6325 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6326 				  struct hclge_fd_rule *rule)
6327 {
6328 	if (fs->flow_type & FLOW_EXT) {
6329 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6330 		fs->m_ext.vlan_tci =
6331 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6332 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6333 	}
6334 
6335 	if (fs->flow_type & FLOW_MAC_EXT) {
6336 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6337 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6338 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6339 		else
6340 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6341 					rule->tuples_mask.dst_mac);
6342 	}
6343 }
6344 
6345 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6346 				  struct ethtool_rxnfc *cmd)
6347 {
6348 	struct hclge_vport *vport = hclge_get_vport(handle);
6349 	struct hclge_fd_rule *rule = NULL;
6350 	struct hclge_dev *hdev = vport->back;
6351 	struct ethtool_rx_flow_spec *fs;
6352 	struct hlist_node *node2;
6353 
6354 	if (!hnae3_dev_fd_supported(hdev))
6355 		return -EOPNOTSUPP;
6356 
6357 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6358 
6359 	spin_lock_bh(&hdev->fd_rule_lock);
6360 
6361 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6362 		if (rule->location >= fs->location)
6363 			break;
6364 	}
6365 
6366 	if (!rule || fs->location != rule->location) {
6367 		spin_unlock_bh(&hdev->fd_rule_lock);
6368 
6369 		return -ENOENT;
6370 	}
6371 
6372 	fs->flow_type = rule->flow_type;
6373 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6374 	case SCTP_V4_FLOW:
6375 	case TCP_V4_FLOW:
6376 	case UDP_V4_FLOW:
6377 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6378 					 &fs->m_u.tcp_ip4_spec);
6379 		break;
6380 	case IP_USER_FLOW:
6381 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6382 				      &fs->m_u.usr_ip4_spec);
6383 		break;
6384 	case SCTP_V6_FLOW:
6385 	case TCP_V6_FLOW:
6386 	case UDP_V6_FLOW:
6387 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6388 					 &fs->m_u.tcp_ip6_spec);
6389 		break;
6390 	case IPV6_USER_FLOW:
6391 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6392 				      &fs->m_u.usr_ip6_spec);
6393 		break;
6394 	/* The flow type of the fd rule has been checked before it was added
6395 	 * to the rule list. As the other flow types have been handled above,
6396 	 * it must be ETHER_FLOW in the default case.
6397 	 */
6398 	default:
6399 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6400 					&fs->m_u.ether_spec);
6401 		break;
6402 	}
6403 
6404 	hclge_fd_get_ext_info(fs, rule);
6405 
6406 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6407 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6408 	} else {
6409 		u64 vf_id;
6410 
6411 		fs->ring_cookie = rule->queue_id;
6412 		vf_id = rule->vf_id;
6413 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6414 		fs->ring_cookie |= vf_id;
6415 	}
6416 
6417 	spin_unlock_bh(&hdev->fd_rule_lock);
6418 
6419 	return 0;
6420 }
6421 
6422 static int hclge_get_all_rules(struct hnae3_handle *handle,
6423 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6424 {
6425 	struct hclge_vport *vport = hclge_get_vport(handle);
6426 	struct hclge_dev *hdev = vport->back;
6427 	struct hclge_fd_rule *rule;
6428 	struct hlist_node *node2;
6429 	int cnt = 0;
6430 
6431 	if (!hnae3_dev_fd_supported(hdev))
6432 		return -EOPNOTSUPP;
6433 
6434 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6435 
6436 	spin_lock_bh(&hdev->fd_rule_lock);
6437 	hlist_for_each_entry_safe(rule, node2,
6438 				  &hdev->fd_rule_list, rule_node) {
6439 		if (cnt == cmd->rule_cnt) {
6440 			spin_unlock_bh(&hdev->fd_rule_lock);
6441 			return -EMSGSIZE;
6442 		}
6443 
6444 		rule_locs[cnt] = rule->location;
6445 		cnt++;
6446 	}
6447 
6448 	spin_unlock_bh(&hdev->fd_rule_lock);
6449 
6450 	cmd->rule_cnt = cnt;
6451 
6452 	return 0;
6453 }
6454 
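/* hclge_fd_get_flow_tuples - extract the rule tuples of an arfs flow from
 * the dissected flow keys.
 */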
6455 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6456 				     struct hclge_fd_rule_tuples *tuples)
6457 {
6458 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6459 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6460 
6461 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6462 	tuples->ip_proto = fkeys->basic.ip_proto;
6463 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6464 
6465 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6466 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6467 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6468 	} else {
6469 		int i;
6470 
6471 		for (i = 0; i < IPV6_SIZE; i++) {
6472 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6473 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6474 		}
6475 	}
6476 }
6477 
6478 /* traverse all rules, check whether an existing rule has the same tuples */
6479 static struct hclge_fd_rule *
6480 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6481 			  const struct hclge_fd_rule_tuples *tuples)
6482 {
6483 	struct hclge_fd_rule *rule = NULL;
6484 	struct hlist_node *node;
6485 
6486 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6487 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6488 			return rule;
6489 	}
6490 
6491 	return NULL;
6492 }
6493 
6494 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6495 				     struct hclge_fd_rule *rule)
6496 {
6497 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6498 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6499 			     BIT(INNER_SRC_PORT);
6500 	rule->action = 0;
6501 	rule->vf_id = 0;
6502 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6503 	if (tuples->ether_proto == ETH_P_IP) {
6504 		if (tuples->ip_proto == IPPROTO_TCP)
6505 			rule->flow_type = TCP_V4_FLOW;
6506 		else
6507 			rule->flow_type = UDP_V4_FLOW;
6508 	} else {
6509 		if (tuples->ip_proto == IPPROTO_TCP)
6510 			rule->flow_type = TCP_V6_FLOW;
6511 		else
6512 			rule->flow_type = UDP_V6_FLOW;
6513 	}
6514 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6515 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6516 }
6517 
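/* hclge_add_fd_entry_by_arfs - arfs (ndo_rx_flow_steer) handler: add a new
 * rule steering the flow to @queue_id, or update the queue of an existing
 * rule. Returns the rule location (used as the filter id) or a negative
 * errno.
 */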
6518 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6519 				      u16 flow_id, struct flow_keys *fkeys)
6520 {
6521 	struct hclge_vport *vport = hclge_get_vport(handle);
6522 	struct hclge_fd_rule_tuples new_tuples = {};
6523 	struct hclge_dev *hdev = vport->back;
6524 	struct hclge_fd_rule *rule;
6525 	u16 tmp_queue_id;
6526 	u16 bit_id;
6527 	int ret;
6528 
6529 	if (!hnae3_dev_fd_supported(hdev))
6530 		return -EOPNOTSUPP;
6531 
6532 	/* when there is already an fd rule added by the user,
6533 	 * arfs should not work
6534 	 */
6535 	spin_lock_bh(&hdev->fd_rule_lock);
6536 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6537 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6538 		spin_unlock_bh(&hdev->fd_rule_lock);
6539 		return -EOPNOTSUPP;
6540 	}
6541 
6542 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6543 
6544 	/* check whether a flow director filter exists for this flow:
6545 	 * if not, create a new filter for it;
6546 	 * if a filter exists with a different queue id, modify the filter;
6547 	 * if a filter exists with the same queue id, do nothing
6548 	 */
6549 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6550 	if (!rule) {
6551 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6552 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6553 			spin_unlock_bh(&hdev->fd_rule_lock);
6554 			return -ENOSPC;
6555 		}
6556 
6557 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6558 		if (!rule) {
6559 			spin_unlock_bh(&hdev->fd_rule_lock);
6560 			return -ENOMEM;
6561 		}
6562 
6563 		set_bit(bit_id, hdev->fd_bmap);
6564 		rule->location = bit_id;
6565 		rule->arfs.flow_id = flow_id;
6566 		rule->queue_id = queue_id;
6567 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6568 		ret = hclge_fd_config_rule(hdev, rule);
6569 
6570 		spin_unlock_bh(&hdev->fd_rule_lock);
6571 
6572 		if (ret)
6573 			return ret;
6574 
6575 		return rule->location;
6576 	}
6577 
6578 	spin_unlock_bh(&hdev->fd_rule_lock);
6579 
6580 	if (rule->queue_id == queue_id)
6581 		return rule->location;
6582 
6583 	tmp_queue_id = rule->queue_id;
6584 	rule->queue_id = queue_id;
6585 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6586 	if (ret) {
6587 		rule->queue_id = tmp_queue_id;
6588 		return ret;
6589 	}
6590 
6591 	return rule->location;
6592 }
6593 
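/* hclge_rfs_filter_expire - remove arfs rules that the stack reports as
 * expired (rps_may_expire_flow) from both the rule list and the hardware.
 */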
6594 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6595 {
6596 #ifdef CONFIG_RFS_ACCEL
6597 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6598 	struct hclge_fd_rule *rule;
6599 	struct hlist_node *node;
6600 	HLIST_HEAD(del_list);
6601 
6602 	spin_lock_bh(&hdev->fd_rule_lock);
6603 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6604 		spin_unlock_bh(&hdev->fd_rule_lock);
6605 		return;
6606 	}
6607 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6608 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6609 					rule->arfs.flow_id, rule->location)) {
6610 			hlist_del_init(&rule->rule_node);
6611 			hlist_add_head(&rule->rule_node, &del_list);
6612 			hdev->hclge_fd_rule_num--;
6613 			clear_bit(rule->location, hdev->fd_bmap);
6614 		}
6615 	}
6616 	spin_unlock_bh(&hdev->fd_rule_lock);
6617 
6618 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6619 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6620 				     rule->location, NULL, false);
6621 		kfree(rule);
6622 	}
6623 #endif
6624 }
6625 
6626 /* the caller must hold fd_rule_lock */
6627 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6628 {
6629 #ifdef CONFIG_RFS_ACCEL
6630 	struct hclge_vport *vport = hclge_get_vport(handle);
6631 	struct hclge_dev *hdev = vport->back;
6632 
6633 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6634 		hclge_del_all_fd_entries(handle, true);
6635 #endif
6636 }
6637 
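/* The hclge_get_cls_key_* helpers below translate tc flower match keys into
 * rule tuples and masks; keys that are not present in the match are marked
 * as unused tuples.
 */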
6638 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6639 				    struct hclge_fd_rule *rule)
6640 {
6641 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6642 		struct flow_match_basic match;
6643 		u16 ethtype_key, ethtype_mask;
6644 
6645 		flow_rule_match_basic(flow, &match);
6646 		ethtype_key = ntohs(match.key->n_proto);
6647 		ethtype_mask = ntohs(match.mask->n_proto);
6648 
6649 		if (ethtype_key == ETH_P_ALL) {
6650 			ethtype_key = 0;
6651 			ethtype_mask = 0;
6652 		}
6653 		rule->tuples.ether_proto = ethtype_key;
6654 		rule->tuples_mask.ether_proto = ethtype_mask;
6655 		rule->tuples.ip_proto = match.key->ip_proto;
6656 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
6657 	} else {
6658 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
6659 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6660 	}
6661 }
6662 
6663 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6664 				  struct hclge_fd_rule *rule)
6665 {
6666 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6667 		struct flow_match_eth_addrs match;
6668 
6669 		flow_rule_match_eth_addrs(flow, &match);
6670 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6671 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6672 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
6673 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6674 	} else {
6675 		rule->unused_tuple |= BIT(INNER_DST_MAC);
6676 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
6677 	}
6678 }
6679 
6680 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6681 				   struct hclge_fd_rule *rule)
6682 {
6683 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6684 		struct flow_match_vlan match;
6685 
6686 		flow_rule_match_vlan(flow, &match);
6687 		rule->tuples.vlan_tag1 = match.key->vlan_id |
6688 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
6689 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6690 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6691 	} else {
6692 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6693 	}
6694 }
6695 
6696 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6697 				 struct hclge_fd_rule *rule)
6698 {
6699 	u16 addr_type = 0;
6700 
6701 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6702 		struct flow_match_control match;
6703 
6704 		flow_rule_match_control(flow, &match);
6705 		addr_type = match.key->addr_type;
6706 	}
6707 
6708 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6709 		struct flow_match_ipv4_addrs match;
6710 
6711 		flow_rule_match_ipv4_addrs(flow, &match);
6712 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6713 		rule->tuples_mask.src_ip[IPV4_INDEX] =
6714 						be32_to_cpu(match.mask->src);
6715 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6716 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
6717 						be32_to_cpu(match.mask->dst);
6718 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6719 		struct flow_match_ipv6_addrs match;
6720 
6721 		flow_rule_match_ipv6_addrs(flow, &match);
6722 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6723 				  IPV6_SIZE);
6724 		be32_to_cpu_array(rule->tuples_mask.src_ip,
6725 				  match.mask->src.s6_addr32, IPV6_SIZE);
6726 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6727 				  IPV6_SIZE);
6728 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
6729 				  match.mask->dst.s6_addr32, IPV6_SIZE);
6730 	} else {
6731 		rule->unused_tuple |= BIT(INNER_SRC_IP);
6732 		rule->unused_tuple |= BIT(INNER_DST_IP);
6733 	}
6734 }
6735 
6736 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6737 				   struct hclge_fd_rule *rule)
6738 {
6739 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6740 		struct flow_match_ports match;
6741 
6742 		flow_rule_match_ports(flow, &match);
6743 
6744 		rule->tuples.src_port = be16_to_cpu(match.key->src);
6745 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6746 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6747 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6748 	} else {
6749 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
6750 		rule->unused_tuple |= BIT(INNER_DST_PORT);
6751 	}
6752 }
6753 
6754 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6755 				  struct flow_cls_offload *cls_flower,
6756 				  struct hclge_fd_rule *rule)
6757 {
6758 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6759 	struct flow_dissector *dissector = flow->match.dissector;
6760 
6761 	if (dissector->used_keys &
6762 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6763 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
6764 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6765 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
6766 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6767 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6768 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6769 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6770 			dissector->used_keys);
6771 		return -EOPNOTSUPP;
6772 	}
6773 
6774 	hclge_get_cls_key_basic(flow, rule);
6775 	hclge_get_cls_key_mac(flow, rule);
6776 	hclge_get_cls_key_vlan(flow, rule);
6777 	hclge_get_cls_key_ip(flow, rule);
6778 	hclge_get_cls_key_port(flow, rule);
6779 
6780 	return 0;
6781 }
6782 
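/* hclge_check_cls_flower - validate the target tc and the flower priority;
 * prio - 1 is used as the rule location, so it must fit in stage 1 and must
 * not be in use already.
 */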
6783 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6784 				  struct flow_cls_offload *cls_flower, int tc)
6785 {
6786 	u32 prio = cls_flower->common.prio;
6787 
6788 	if (tc < 0 || tc > hdev->tc_max) {
6789 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6790 		return -EINVAL;
6791 	}
6792 
6793 	if (prio == 0 ||
6794 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6795 		dev_err(&hdev->pdev->dev,
6796 			"prio %u should be in range [1, %u]\n",
6797 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6798 		return -EINVAL;
6799 	}
6800 
6801 	if (test_bit(prio - 1, hdev->fd_bmap)) {
6802 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6803 		return -EINVAL;
6804 	}
6805 	return 0;
6806 }
6807 
6808 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6809 				struct flow_cls_offload *cls_flower,
6810 				int tc)
6811 {
6812 	struct hclge_vport *vport = hclge_get_vport(handle);
6813 	struct hclge_dev *hdev = vport->back;
6814 	struct hclge_fd_rule *rule;
6815 	int ret;
6816 
6817 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6818 		dev_err(&hdev->pdev->dev,
6819 			"please remove all existing fd rules via ethtool first\n");
6820 		return -EINVAL;
6821 	}
6822 
6823 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6824 	if (ret) {
6825 		dev_err(&hdev->pdev->dev,
6826 			"failed to check cls flower params, ret = %d\n", ret);
6827 		return ret;
6828 	}
6829 
6830 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6831 	if (!rule)
6832 		return -ENOMEM;
6833 
6834 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6835 	if (ret)
6836 		goto err;
6837 
6838 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
6839 	rule->cls_flower.tc = tc;
6840 	rule->location = cls_flower->common.prio - 1;
6841 	rule->vf_id = 0;
6842 	rule->cls_flower.cookie = cls_flower->cookie;
6843 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6844 
6845 	spin_lock_bh(&hdev->fd_rule_lock);
6846 	hclge_clear_arfs_rules(handle);
6847 
6848 	ret = hclge_fd_config_rule(hdev, rule);
6849 
6850 	spin_unlock_bh(&hdev->fd_rule_lock);
6851 
6852 	if (ret) {
6853 		dev_err(&hdev->pdev->dev,
6854 			"failed to add cls flower rule, ret = %d\n", ret);
6855 		goto err;
6856 	}
6857 
6858 	return 0;
6859 err:
6860 	kfree(rule);
6861 	return ret;
6862 }
6863 
6864 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6865 						   unsigned long cookie)
6866 {
6867 	struct hclge_fd_rule *rule;
6868 	struct hlist_node *node;
6869 
6870 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6871 		if (rule->cls_flower.cookie == cookie)
6872 			return rule;
6873 	}
6874 
6875 	return NULL;
6876 }
6877 
6878 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6879 				struct flow_cls_offload *cls_flower)
6880 {
6881 	struct hclge_vport *vport = hclge_get_vport(handle);
6882 	struct hclge_dev *hdev = vport->back;
6883 	struct hclge_fd_rule *rule;
6884 	int ret;
6885 
6886 	spin_lock_bh(&hdev->fd_rule_lock);
6887 
6888 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6889 	if (!rule) {
6890 		spin_unlock_bh(&hdev->fd_rule_lock);
6891 		return -EINVAL;
6892 	}
6893 
6894 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6895 				   NULL, false);
6896 	if (ret) {
6897 		dev_err(&hdev->pdev->dev,
6898 			"failed to delete cls flower rule %u, ret = %d\n",
6899 			rule->location, ret);
6900 		spin_unlock_bh(&hdev->fd_rule_lock);
6901 		return ret;
6902 	}
6903 
6904 	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6905 	if (ret) {
6906 		dev_err(&hdev->pdev->dev,
6907 			"failed to delete cls flower rule %u in list, ret = %d\n",
6908 			rule->location, ret);
6909 		spin_unlock_bh(&hdev->fd_rule_lock);
6910 		return ret;
6911 	}
6912 
6913 	spin_unlock_bh(&hdev->fd_rule_lock);
6914 
6915 	return 0;
6916 }
6917 
6918 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6919 {
6920 	struct hclge_vport *vport = hclge_get_vport(handle);
6921 	struct hclge_dev *hdev = vport->back;
6922 
6923 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6924 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6925 }
6926 
6927 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6928 {
6929 	struct hclge_vport *vport = hclge_get_vport(handle);
6930 	struct hclge_dev *hdev = vport->back;
6931 
6932 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6933 }
6934 
6935 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6936 {
6937 	struct hclge_vport *vport = hclge_get_vport(handle);
6938 	struct hclge_dev *hdev = vport->back;
6939 
6940 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6941 }
6942 
6943 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6944 {
6945 	struct hclge_vport *vport = hclge_get_vport(handle);
6946 	struct hclge_dev *hdev = vport->back;
6947 
6948 	return hdev->rst_stats.hw_reset_done_cnt;
6949 }
6950 
6951 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6952 {
6953 	struct hclge_vport *vport = hclge_get_vport(handle);
6954 	struct hclge_dev *hdev = vport->back;
6955 	bool clear;
6956 
6957 	hdev->fd_en = enable;
6958 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6959 
6960 	if (!enable) {
6961 		spin_lock_bh(&hdev->fd_rule_lock);
6962 		hclge_del_all_fd_entries(handle, clear);
6963 		spin_unlock_bh(&hdev->fd_rule_lock);
6964 	} else {
6965 		hclge_restore_fd_entries(handle);
6966 	}
6967 }
6968 
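/* hclge_cfg_mac_mode - enable or disable MAC TX/RX together with the
 * padding, FCS and oversize/undersize handling bits.
 */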
6969 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6970 {
6971 	struct hclge_desc desc;
6972 	struct hclge_config_mac_mode_cmd *req =
6973 		(struct hclge_config_mac_mode_cmd *)desc.data;
6974 	u32 loop_en = 0;
6975 	int ret;
6976 
6977 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6978 
6979 	if (enable) {
6980 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6981 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6982 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6983 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6984 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6985 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6986 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6987 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6988 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6989 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6990 	}
6991 
6992 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6993 
6994 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6995 	if (ret)
6996 		dev_err(&hdev->pdev->dev,
6997 			"mac enable fail, ret = %d.\n", ret);
6998 }
6999 
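/* hclge_config_switch_param - read the current mac vlan switch parameter of
 * the function selected by @vfid, apply @switch_param under @param_mask and
 * write it back.
 */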
7000 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7001 				     u8 switch_param, u8 param_mask)
7002 {
7003 	struct hclge_mac_vlan_switch_cmd *req;
7004 	struct hclge_desc desc;
7005 	u32 func_id;
7006 	int ret;
7007 
7008 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7009 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7010 
7011 	/* read current config parameter */
7012 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7013 				   true);
7014 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7015 	req->func_id = cpu_to_le32(func_id);
7016 
7017 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7018 	if (ret) {
7019 		dev_err(&hdev->pdev->dev,
7020 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7021 		return ret;
7022 	}
7023 
7024 	/* modify and write new config parameter */
7025 	hclge_cmd_reuse_desc(&desc, false);
7026 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7027 	req->param_mask = param_mask;
7028 
7029 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7030 	if (ret)
7031 		dev_err(&hdev->pdev->dev,
7032 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7033 	return ret;
7034 }
7035 
7036 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7037 				       int link_ret)
7038 {
7039 #define HCLGE_PHY_LINK_STATUS_NUM  200
7040 
7041 	struct phy_device *phydev = hdev->hw.mac.phydev;
7042 	int i = 0;
7043 	int ret;
7044 
7045 	do {
7046 		ret = phy_read_status(phydev);
7047 		if (ret) {
7048 			dev_err(&hdev->pdev->dev,
7049 				"phy update link status fail, ret = %d\n", ret);
7050 			return;
7051 		}
7052 
7053 		if (phydev->link == link_ret)
7054 			break;
7055 
7056 		msleep(HCLGE_LINK_STATUS_MS);
7057 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7058 }
7059 
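/* Poll the MAC link status until it matches @link_ret: check every
 * HCLGE_LINK_STATUS_MS milliseconds, up to HCLGE_MAC_LINK_STATUS_NUM
 * times, and return -EBUSY on timeout.
 */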
7060 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7061 {
7062 #define HCLGE_MAC_LINK_STATUS_NUM  100
7063 
7064 	int link_status;
7065 	int i = 0;
7066 	int ret;
7067 
7068 	do {
7069 		ret = hclge_get_mac_link_status(hdev, &link_status);
7070 		if (ret)
7071 			return ret;
7072 		if (link_status == link_ret)
7073 			return 0;
7074 
7075 		msleep(HCLGE_LINK_STATUS_MS);
7076 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7077 	return -EBUSY;
7078 }
7079 
7080 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7081 					  bool is_phy)
7082 {
7083 	int link_ret;
7084 
7085 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7086 
7087 	if (is_phy)
7088 		hclge_phy_link_status_wait(hdev, link_ret);
7089 
7090 	return hclge_mac_link_status_wait(hdev, link_ret);
7091 }
7092 
7093 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7094 {
7095 	struct hclge_config_mac_mode_cmd *req;
7096 	struct hclge_desc desc;
7097 	u32 loop_en;
7098 	int ret;
7099 
7100 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7101 	/* 1 Read out the MAC mode config first */
7102 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7103 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7104 	if (ret) {
7105 		dev_err(&hdev->pdev->dev,
7106 			"mac loopback get fail, ret =%d.\n", ret);
7107 		return ret;
7108 	}
7109 
7110 	/* 2 Then set up the loopback flag */
7111 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7112 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7113 
7114 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7115 
7116 	/* 3 Configure the mac work mode with the loopback flag
7117 	 * and its original configuration parameters
7118 	 */
7119 	hclge_cmd_reuse_desc(&desc, false);
7120 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7121 	if (ret)
7122 		dev_err(&hdev->pdev->dev,
7123 			"mac loopback set fail, ret =%d.\n", ret);
7124 	return ret;
7125 }
7126 
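/* Configure serdes loopback via HCLGE_OPC_SERDES_LOOPBACK, then poll the
 * same opcode in read mode every HCLGE_SERDES_RETRY_MS milliseconds (up
 * to HCLGE_SERDES_RETRY_NUM times) until the done flag appears in
 * req->result, and finally check the success flag to see whether the
 * firmware applied the configuration.
 */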
7127 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7128 				     enum hnae3_loop loop_mode)
7129 {
7130 #define HCLGE_SERDES_RETRY_MS	10
7131 #define HCLGE_SERDES_RETRY_NUM	100
7132 
7133 	struct hclge_serdes_lb_cmd *req;
7134 	struct hclge_desc desc;
7135 	int ret, i = 0;
7136 	u8 loop_mode_b;
7137 
7138 	req = (struct hclge_serdes_lb_cmd *)desc.data;
7139 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7140 
7141 	switch (loop_mode) {
7142 	case HNAE3_LOOP_SERIAL_SERDES:
7143 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7144 		break;
7145 	case HNAE3_LOOP_PARALLEL_SERDES:
7146 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7147 		break;
7148 	default:
7149 		dev_err(&hdev->pdev->dev,
7150 			"unsupported serdes loopback mode %d\n", loop_mode);
7151 		return -ENOTSUPP;
7152 	}
7153 
7154 	if (en) {
7155 		req->enable = loop_mode_b;
7156 		req->mask = loop_mode_b;
7157 	} else {
7158 		req->mask = loop_mode_b;
7159 	}
7160 
7161 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7162 	if (ret) {
7163 		dev_err(&hdev->pdev->dev,
7164 			"serdes loopback set fail, ret = %d\n", ret);
7165 		return ret;
7166 	}
7167 
7168 	do {
7169 		msleep(HCLGE_SERDES_RETRY_MS);
7170 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7171 					   true);
7172 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7173 		if (ret) {
7174 			dev_err(&hdev->pdev->dev,
7175 				"serdes loopback get fail, ret = %d\n", ret);
7176 			return ret;
7177 		}
7178 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
7179 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7180 
7181 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7182 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7183 		return -EBUSY;
7184 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7185 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7186 		return -EIO;
7187 	}
7188 	return ret;
7189 }
7190 
7191 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7192 				     enum hnae3_loop loop_mode)
7193 {
7194 	int ret;
7195 
7196 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7197 	if (ret)
7198 		return ret;
7199 
7200 	hclge_cfg_mac_mode(hdev, en);
7201 
7202 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7203 	if (ret)
7204 		dev_err(&hdev->pdev->dev,
7205 			"serdes loopback config mac mode timeout\n");
7206 
7207 	return ret;
7208 }
7209 
7210 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7211 				     struct phy_device *phydev)
7212 {
7213 	int ret;
7214 
7215 	if (!phydev->suspended) {
7216 		ret = phy_suspend(phydev);
7217 		if (ret)
7218 			return ret;
7219 	}
7220 
7221 	ret = phy_resume(phydev);
7222 	if (ret)
7223 		return ret;
7224 
7225 	return phy_loopback(phydev, true);
7226 }
7227 
7228 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7229 				      struct phy_device *phydev)
7230 {
7231 	int ret;
7232 
7233 	ret = phy_loopback(phydev, false);
7234 	if (ret)
7235 		return ret;
7236 
7237 	return phy_suspend(phydev);
7238 }
7239 
7240 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7241 {
7242 	struct phy_device *phydev = hdev->hw.mac.phydev;
7243 	int ret;
7244 
7245 	if (!phydev)
7246 		return -ENOTSUPP;
7247 
7248 	if (en)
7249 		ret = hclge_enable_phy_loopback(hdev, phydev);
7250 	else
7251 		ret = hclge_disable_phy_loopback(hdev, phydev);
7252 	if (ret) {
7253 		dev_err(&hdev->pdev->dev,
7254 			"set phy loopback fail, ret = %d\n", ret);
7255 		return ret;
7256 	}
7257 
7258 	hclge_cfg_mac_mode(hdev, en);
7259 
7260 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7261 	if (ret)
7262 		dev_err(&hdev->pdev->dev,
7263 			"phy loopback config mac mode timeout\n");
7264 
7265 	return ret;
7266 }
7267 
7268 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7269 			    int stream_id, bool enable)
7270 {
7271 	struct hclge_desc desc;
7272 	struct hclge_cfg_com_tqp_queue_cmd *req =
7273 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7274 	int ret;
7275 
7276 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7277 	req->tqp_id = cpu_to_le16(tqp_id);
7278 	req->stream_id = cpu_to_le16(stream_id);
7279 	if (enable)
7280 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7281 
7282 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7283 	if (ret)
7284 		dev_err(&hdev->pdev->dev,
7285 			"Tqp enable fail, status =%d.\n", ret);
7286 	return ret;
7287 }
7288 
7289 static int hclge_set_loopback(struct hnae3_handle *handle,
7290 			      enum hnae3_loop loop_mode, bool en)
7291 {
7292 	struct hclge_vport *vport = hclge_get_vport(handle);
7293 	struct hnae3_knic_private_info *kinfo;
7294 	struct hclge_dev *hdev = vport->back;
7295 	int i, ret;
7296 
7297 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7298 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7299 	 * the same, the packets are looped back in the SSU. If SSU loopback
7300 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7301 	 */
7302 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7303 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7304 
7305 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7306 						HCLGE_SWITCH_ALW_LPBK_MASK);
7307 		if (ret)
7308 			return ret;
7309 	}
7310 
7311 	switch (loop_mode) {
7312 	case HNAE3_LOOP_APP:
7313 		ret = hclge_set_app_loopback(hdev, en);
7314 		break;
7315 	case HNAE3_LOOP_SERIAL_SERDES:
7316 	case HNAE3_LOOP_PARALLEL_SERDES:
7317 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7318 		break;
7319 	case HNAE3_LOOP_PHY:
7320 		ret = hclge_set_phy_loopback(hdev, en);
7321 		break;
7322 	default:
7323 		ret = -ENOTSUPP;
7324 		dev_err(&hdev->pdev->dev,
7325 			"loop_mode %d is not supported\n", loop_mode);
7326 		break;
7327 	}
7328 
7329 	if (ret)
7330 		return ret;
7331 
7332 	kinfo = &vport->nic.kinfo;
7333 	for (i = 0; i < kinfo->num_tqps; i++) {
7334 		ret = hclge_tqp_enable(hdev, i, 0, en);
7335 		if (ret)
7336 			return ret;
7337 	}
7338 
7339 	return 0;
7340 }
7341 
7342 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7343 {
7344 	int ret;
7345 
7346 	ret = hclge_set_app_loopback(hdev, false);
7347 	if (ret)
7348 		return ret;
7349 
7350 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7351 	if (ret)
7352 		return ret;
7353 
7354 	return hclge_cfg_serdes_loopback(hdev, false,
7355 					 HNAE3_LOOP_PARALLEL_SERDES);
7356 }
7357 
7358 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7359 {
7360 	struct hclge_vport *vport = hclge_get_vport(handle);
7361 	struct hnae3_knic_private_info *kinfo;
7362 	struct hnae3_queue *queue;
7363 	struct hclge_tqp *tqp;
7364 	int i;
7365 
7366 	kinfo = &vport->nic.kinfo;
7367 	for (i = 0; i < kinfo->num_tqps; i++) {
7368 		queue = handle->kinfo.tqp[i];
7369 		tqp = container_of(queue, struct hclge_tqp, q);
7370 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7371 	}
7372 }
7373 
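/* Wait for an in-flight link update in the service task to finish: spin
 * for at most HCLGE_FLUSH_LINK_TIMEOUT iterations, exiting early once
 * HCLGE_STATE_LINK_UPDATING is cleared or the service task has processed
 * another round (serv_processed_cnt changed).
 */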
7374 static void hclge_flush_link_update(struct hclge_dev *hdev)
7375 {
7376 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7377 
7378 	unsigned long last = hdev->serv_processed_cnt;
7379 	int i = 0;
7380 
7381 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7382 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7383 	       last == hdev->serv_processed_cnt)
7384 		usleep_range(1, 1);
7385 }
7386 
7387 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7388 {
7389 	struct hclge_vport *vport = hclge_get_vport(handle);
7390 	struct hclge_dev *hdev = vport->back;
7391 
7392 	if (enable) {
7393 		hclge_task_schedule(hdev, 0);
7394 	} else {
7395 		/* Set the DOWN flag here to disable link updating */
7396 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7397 
7398 		/* flush memory to make sure DOWN is seen by service task */
7399 		smp_mb__before_atomic();
7400 		hclge_flush_link_update(hdev);
7401 	}
7402 }
7403 
7404 static int hclge_ae_start(struct hnae3_handle *handle)
7405 {
7406 	struct hclge_vport *vport = hclge_get_vport(handle);
7407 	struct hclge_dev *hdev = vport->back;
7408 
7409 	/* mac enable */
7410 	hclge_cfg_mac_mode(hdev, true);
7411 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7412 	hdev->hw.mac.link = 0;
7413 
7414 	/* reset tqp stats */
7415 	hclge_reset_tqp_stats(handle);
7416 
7417 	hclge_mac_start_phy(hdev);
7418 
7419 	return 0;
7420 }
7421 
7422 static void hclge_ae_stop(struct hnae3_handle *handle)
7423 {
7424 	struct hclge_vport *vport = hclge_get_vport(handle);
7425 	struct hclge_dev *hdev = vport->back;
7426 	int i;
7427 
7428 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7429 	spin_lock_bh(&hdev->fd_rule_lock);
7430 	hclge_clear_arfs_rules(handle);
7431 	spin_unlock_bh(&hdev->fd_rule_lock);
7432 
7433 	/* If this is not a PF reset, the firmware will disable the MAC,
7434 	 * so we only need to stop the phy here.
7435 	 */
7436 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7437 	    hdev->reset_type != HNAE3_FUNC_RESET) {
7438 		hclge_mac_stop_phy(hdev);
7439 		hclge_update_link_status(hdev);
7440 		return;
7441 	}
7442 
7443 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7444 		hclge_reset_tqp(handle, i);
7445 
7446 	hclge_config_mac_tnl_int(hdev, false);
7447 
7448 	/* Mac disable */
7449 	hclge_cfg_mac_mode(hdev, false);
7450 
7451 	hclge_mac_stop_phy(hdev);
7452 
7453 	/* reset tqp stats */
7454 	hclge_reset_tqp_stats(handle);
7455 	hclge_update_link_status(hdev);
7456 }
7457 
7458 int hclge_vport_start(struct hclge_vport *vport)
7459 {
7460 	struct hclge_dev *hdev = vport->back;
7461 
7462 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7463 	vport->last_active_jiffies = jiffies;
7464 
7465 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7466 		if (vport->vport_id) {
7467 			hclge_restore_mac_table_common(vport);
7468 			hclge_restore_vport_vlan_table(vport);
7469 		} else {
7470 			hclge_restore_hw_table(hdev);
7471 		}
7472 	}
7473 
7474 	clear_bit(vport->vport_id, hdev->vport_config_block);
7475 
7476 	return 0;
7477 }
7478 
7479 void hclge_vport_stop(struct hclge_vport *vport)
7480 {
7481 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7482 }
7483 
7484 static int hclge_client_start(struct hnae3_handle *handle)
7485 {
7486 	struct hclge_vport *vport = hclge_get_vport(handle);
7487 
7488 	return hclge_vport_start(vport);
7489 }
7490 
7491 static void hclge_client_stop(struct hnae3_handle *handle)
7492 {
7493 	struct hclge_vport *vport = hclge_get_vport(handle);
7494 
7495 	hclge_vport_stop(vport);
7496 }
7497 
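/* Translate the cmdq return value and the per-operation response code of
 * a mac_vlan table command into an errno. For ADD, response codes 0 and 1
 * mean success and the overflow codes map to -ENOSPC; for REMOVE and
 * LOOKUP, response code 1 means the entry was not found (-ENOENT).
 */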
7498 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7499 					 u16 cmdq_resp, u8  resp_code,
7500 					 enum hclge_mac_vlan_tbl_opcode op)
7501 {
7502 	struct hclge_dev *hdev = vport->back;
7503 
7504 	if (cmdq_resp) {
7505 		dev_err(&hdev->pdev->dev,
7506 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7507 			cmdq_resp);
7508 		return -EIO;
7509 	}
7510 
7511 	if (op == HCLGE_MAC_VLAN_ADD) {
7512 		if (!resp_code || resp_code == 1)
7513 			return 0;
7514 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7515 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7516 			return -ENOSPC;
7517 
7518 		dev_err(&hdev->pdev->dev,
7519 			"add mac addr failed for undefined, code=%u.\n",
7520 			resp_code);
7521 		return -EIO;
7522 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7523 		if (!resp_code) {
7524 			return 0;
7525 		} else if (resp_code == 1) {
7526 			dev_dbg(&hdev->pdev->dev,
7527 				"remove mac addr failed for miss.\n");
7528 			return -ENOENT;
7529 		}
7530 
7531 		dev_err(&hdev->pdev->dev,
7532 			"remove mac addr failed for undefined, code=%u.\n",
7533 			resp_code);
7534 		return -EIO;
7535 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7536 		if (!resp_code) {
7537 			return 0;
7538 		} else if (resp_code == 1) {
7539 			dev_dbg(&hdev->pdev->dev,
7540 				"lookup mac addr failed for miss.\n");
7541 			return -ENOENT;
7542 		}
7543 
7544 		dev_err(&hdev->pdev->dev,
7545 			"lookup mac addr failed for undefined, code=%u.\n",
7546 			resp_code);
7547 		return -EIO;
7548 	}
7549 
7550 	dev_err(&hdev->pdev->dev,
7551 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7552 
7553 	return -EINVAL;
7554 }
7555 
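/* Set or clear the bit for @vfid in the VF bitmap carried by a
 * multi-descriptor mac_vlan command: function ids 0..191 live in
 * desc[1].data and ids 192..255 in desc[2].data, 32 ids per 32-bit word.
 */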
7556 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7557 {
7558 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7559 
7560 	unsigned int word_num;
7561 	unsigned int bit_num;
7562 
7563 	if (vfid > 255 || vfid < 0)
7564 		return -EIO;
7565 
7566 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7567 		word_num = vfid / 32;
7568 		bit_num  = vfid % 32;
7569 		if (clr)
7570 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7571 		else
7572 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7573 	} else {
7574 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7575 		bit_num  = vfid % 32;
7576 		if (clr)
7577 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7578 		else
7579 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7580 	}
7581 
7582 	return 0;
7583 }
7584 
7585 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7586 {
7587 #define HCLGE_DESC_NUMBER 3
7588 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7589 	int i, j;
7590 
7591 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7592 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7593 			if (desc[i].data[j])
7594 				return false;
7595 
7596 	return true;
7597 }
7598 
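/* Pack a MAC address into a mac_vlan table entry: bytes 0..3 go into
 * mac_addr_hi32 (byte 0 in the lowest 8 bits) and bytes 4..5 into
 * mac_addr_lo16. HCLGE_MAC_VLAN_BIT0_EN_B is set in flags, and for
 * multicast addresses the entry_type and mc_mac_en bits are set as well.
 */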
7599 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7600 				   const u8 *addr, bool is_mc)
7601 {
7602 	const unsigned char *mac_addr = addr;
7603 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7604 		       (mac_addr[0]) | (mac_addr[1] << 8);
7605 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7606 
7607 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7608 	if (is_mc) {
7609 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7610 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7611 	}
7612 
7613 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7614 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7615 }
7616 
7617 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7618 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7619 {
7620 	struct hclge_dev *hdev = vport->back;
7621 	struct hclge_desc desc;
7622 	u8 resp_code;
7623 	u16 retval;
7624 	int ret;
7625 
7626 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7627 
7628 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7629 
7630 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7631 	if (ret) {
7632 		dev_err(&hdev->pdev->dev,
7633 			"del mac addr failed for cmd_send, ret =%d.\n",
7634 			ret);
7635 		return ret;
7636 	}
7637 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7638 	retval = le16_to_cpu(desc.retval);
7639 
7640 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7641 					     HCLGE_MAC_VLAN_REMOVE);
7642 }
7643 
7644 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7645 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7646 				     struct hclge_desc *desc,
7647 				     bool is_mc)
7648 {
7649 	struct hclge_dev *hdev = vport->back;
7650 	u8 resp_code;
7651 	u16 retval;
7652 	int ret;
7653 
7654 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7655 	if (is_mc) {
7656 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7657 		memcpy(desc[0].data,
7658 		       req,
7659 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7660 		hclge_cmd_setup_basic_desc(&desc[1],
7661 					   HCLGE_OPC_MAC_VLAN_ADD,
7662 					   true);
7663 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7664 		hclge_cmd_setup_basic_desc(&desc[2],
7665 					   HCLGE_OPC_MAC_VLAN_ADD,
7666 					   true);
7667 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7668 	} else {
7669 		memcpy(desc[0].data,
7670 		       req,
7671 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7672 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7673 	}
7674 	if (ret) {
7675 		dev_err(&hdev->pdev->dev,
7676 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7677 			ret);
7678 		return ret;
7679 	}
7680 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7681 	retval = le16_to_cpu(desc[0].retval);
7682 
7683 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7684 					     HCLGE_MAC_VLAN_LKUP);
7685 }
7686 
7687 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7688 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7689 				  struct hclge_desc *mc_desc)
7690 {
7691 	struct hclge_dev *hdev = vport->back;
7692 	int cfg_status;
7693 	u8 resp_code;
7694 	u16 retval;
7695 	int ret;
7696 
7697 	if (!mc_desc) {
7698 		struct hclge_desc desc;
7699 
7700 		hclge_cmd_setup_basic_desc(&desc,
7701 					   HCLGE_OPC_MAC_VLAN_ADD,
7702 					   false);
7703 		memcpy(desc.data, req,
7704 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7705 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7707 		retval = le16_to_cpu(desc.retval);
7708 
7709 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7710 							   resp_code,
7711 							   HCLGE_MAC_VLAN_ADD);
7712 	} else {
7713 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7714 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7715 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7716 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7717 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7718 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7719 		memcpy(mc_desc[0].data, req,
7720 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7721 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7722 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7723 		retval = le16_to_cpu(mc_desc[0].retval);
7724 
7725 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7726 							   resp_code,
7727 							   HCLGE_MAC_VLAN_ADD);
7728 	}
7729 
7730 	if (ret) {
7731 		dev_err(&hdev->pdev->dev,
7732 			"add mac addr failed for cmd_send, ret =%d.\n",
7733 			ret);
7734 		return ret;
7735 	}
7736 
7737 	return cfg_status;
7738 }
7739 
7740 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7741 			       u16 *allocated_size)
7742 {
7743 	struct hclge_umv_spc_alc_cmd *req;
7744 	struct hclge_desc desc;
7745 	int ret;
7746 
7747 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7748 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7749 
7750 	req->space_size = cpu_to_le32(space_size);
7751 
7752 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7753 	if (ret) {
7754 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7755 			ret);
7756 		return ret;
7757 	}
7758 
7759 	*allocated_size = le32_to_cpu(desc.data[1]);
7760 
7761 	return 0;
7762 }
7763 
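/* Request unicast MAC-VLAN (UMV) space from the firmware and split it:
 * each vport gets a private quota of max_umv_size / (num_alloc_vport + 1)
 * entries, and the remaining quota plus the division remainder forms the
 * pool shared by all vports.
 */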
7764 static int hclge_init_umv_space(struct hclge_dev *hdev)
7765 {
7766 	u16 allocated_size = 0;
7767 	int ret;
7768 
7769 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7770 	if (ret)
7771 		return ret;
7772 
7773 	if (allocated_size < hdev->wanted_umv_size)
7774 		dev_warn(&hdev->pdev->dev,
7775 			 "failed to alloc umv space, want %u, get %u\n",
7776 			 hdev->wanted_umv_size, allocated_size);
7777 
7778 	hdev->max_umv_size = allocated_size;
7779 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7780 	hdev->share_umv_size = hdev->priv_umv_size +
7781 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7782 
7783 	return 0;
7784 }
7785 
7786 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7787 {
7788 	struct hclge_vport *vport;
7789 	int i;
7790 
7791 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7792 		vport = &hdev->vport[i];
7793 		vport->used_umv_num = 0;
7794 	}
7795 
7796 	mutex_lock(&hdev->vport_lock);
7797 	hdev->share_umv_size = hdev->priv_umv_size +
7798 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7799 	mutex_unlock(&hdev->vport_lock);
7800 }
7801 
7802 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7803 {
7804 	struct hclge_dev *hdev = vport->back;
7805 	bool is_full;
7806 
7807 	if (need_lock)
7808 		mutex_lock(&hdev->vport_lock);
7809 
7810 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7811 		   hdev->share_umv_size == 0);
7812 
7813 	if (need_lock)
7814 		mutex_unlock(&hdev->vport_lock);
7815 
7816 	return is_full;
7817 }
7818 
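/* Account for one UMV entry being added or freed by @vport. Entries
 * beyond the vport's private quota are charged to the shared pool:
 * freeing an entry while over quota returns one slot to the pool, and
 * adding an entry while at or over quota consumes one from it.
 */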
7819 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7820 {
7821 	struct hclge_dev *hdev = vport->back;
7822 
7823 	if (is_free) {
7824 		if (vport->used_umv_num > hdev->priv_umv_size)
7825 			hdev->share_umv_size++;
7826 
7827 		if (vport->used_umv_num > 0)
7828 			vport->used_umv_num--;
7829 	} else {
7830 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7831 		    hdev->share_umv_size > 0)
7832 			hdev->share_umv_size--;
7833 		vport->used_umv_num++;
7834 	}
7835 }
7836 
7837 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7838 						  const u8 *mac_addr)
7839 {
7840 	struct hclge_mac_node *mac_node, *tmp;
7841 
7842 	list_for_each_entry_safe(mac_node, tmp, list, node)
7843 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7844 			return mac_node;
7845 
7846 	return NULL;
7847 }
7848 
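/* Merge a new request into an existing mac node. A TO_ADD request on a
 * node pending deletion makes it ACTIVE again; a TO_DEL request frees a
 * node that was only pending addition and otherwise marks it TO_DEL; an
 * ACTIVE update promotes a TO_ADD node to ACTIVE.
 */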
7849 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7850 				  enum HCLGE_MAC_NODE_STATE state)
7851 {
7852 	switch (state) {
7853 	/* from set_rx_mode or tmp_add_list */
7854 	case HCLGE_MAC_TO_ADD:
7855 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7856 			mac_node->state = HCLGE_MAC_ACTIVE;
7857 		break;
7858 	/* only from set_rx_mode */
7859 	case HCLGE_MAC_TO_DEL:
7860 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7861 			list_del(&mac_node->node);
7862 			kfree(mac_node);
7863 		} else {
7864 			mac_node->state = HCLGE_MAC_TO_DEL;
7865 		}
7866 		break;
7867 	/* only from tmp_add_list, the mac_node->state won't be
7868 	 * ACTIVE.
7869 	 */
7870 	case HCLGE_MAC_ACTIVE:
7871 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7872 			mac_node->state = HCLGE_MAC_ACTIVE;
7873 
7874 		break;
7875 	}
7876 }
7877 
7878 int hclge_update_mac_list(struct hclge_vport *vport,
7879 			  enum HCLGE_MAC_NODE_STATE state,
7880 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7881 			  const unsigned char *addr)
7882 {
7883 	struct hclge_dev *hdev = vport->back;
7884 	struct hclge_mac_node *mac_node;
7885 	struct list_head *list;
7886 
7887 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7888 		&vport->uc_mac_list : &vport->mc_mac_list;
7889 
7890 	spin_lock_bh(&vport->mac_list_lock);
7891 
7892 	/* If the mac addr is already in the mac list, there is no need to
7893 	 * add a new entry; just update the state of the existing node,
7894 	 * remove it, or do nothing, as appropriate.
7895 	 */
7896 	mac_node = hclge_find_mac_node(list, addr);
7897 	if (mac_node) {
7898 		hclge_update_mac_node(mac_node, state);
7899 		spin_unlock_bh(&vport->mac_list_lock);
7900 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7901 		return 0;
7902 	}
7903 
7904 	/* if this address was never added, there is nothing to delete */
7905 	if (state == HCLGE_MAC_TO_DEL) {
7906 		spin_unlock_bh(&vport->mac_list_lock);
7907 		dev_err(&hdev->pdev->dev,
7908 			"failed to delete address %pM from mac list\n",
7909 			addr);
7910 		return -ENOENT;
7911 	}
7912 
7913 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7914 	if (!mac_node) {
7915 		spin_unlock_bh(&vport->mac_list_lock);
7916 		return -ENOMEM;
7917 	}
7918 
7919 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7920 
7921 	mac_node->state = state;
7922 	ether_addr_copy(mac_node->mac_addr, addr);
7923 	list_add_tail(&mac_node->node, list);
7924 
7925 	spin_unlock_bh(&vport->mac_list_lock);
7926 
7927 	return 0;
7928 }
7929 
7930 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7931 			     const unsigned char *addr)
7932 {
7933 	struct hclge_vport *vport = hclge_get_vport(handle);
7934 
7935 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7936 				     addr);
7937 }
7938 
7939 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7940 			     const unsigned char *addr)
7941 {
7942 	struct hclge_dev *hdev = vport->back;
7943 	struct hclge_mac_vlan_tbl_entry_cmd req;
7944 	struct hclge_desc desc;
7945 	u16 egress_port = 0;
7946 	int ret;
7947 
7948 	/* mac addr check */
7949 	if (is_zero_ether_addr(addr) ||
7950 	    is_broadcast_ether_addr(addr) ||
7951 	    is_multicast_ether_addr(addr)) {
7952 		dev_err(&hdev->pdev->dev,
7953 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7954 			 addr, is_zero_ether_addr(addr),
7955 			 is_broadcast_ether_addr(addr),
7956 			 is_multicast_ether_addr(addr));
7957 		return -EINVAL;
7958 	}
7959 
7960 	memset(&req, 0, sizeof(req));
7961 
7962 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7963 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7964 
7965 	req.egress_port = cpu_to_le16(egress_port);
7966 
7967 	hclge_prepare_mac_addr(&req, addr, false);
7968 
7969 	/* Look up the mac address in the mac_vlan table, and add
7970 	 * it if the entry does not exist. Duplicate unicast entries
7971 	 * are not allowed in the mac vlan table.
7972 	 */
7973 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7974 	if (ret == -ENOENT) {
7975 		mutex_lock(&hdev->vport_lock);
7976 		if (!hclge_is_umv_space_full(vport, false)) {
7977 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7978 			if (!ret)
7979 				hclge_update_umv_space(vport, false);
7980 			mutex_unlock(&hdev->vport_lock);
7981 			return ret;
7982 		}
7983 		mutex_unlock(&hdev->vport_lock);
7984 
7985 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7986 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7987 				hdev->priv_umv_size);
7988 
7989 		return -ENOSPC;
7990 	}
7991 
7992 	/* check if we just hit the duplicate */
7993 	if (!ret) {
7994 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7995 			 vport->vport_id, addr);
7996 		return 0;
7997 	}
7998 
7999 	dev_err(&hdev->pdev->dev,
8000 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8001 		addr);
8002 
8003 	return ret;
8004 }
8005 
8006 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8007 			    const unsigned char *addr)
8008 {
8009 	struct hclge_vport *vport = hclge_get_vport(handle);
8010 
8011 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8012 				     addr);
8013 }
8014 
8015 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8016 			    const unsigned char *addr)
8017 {
8018 	struct hclge_dev *hdev = vport->back;
8019 	struct hclge_mac_vlan_tbl_entry_cmd req;
8020 	int ret;
8021 
8022 	/* mac addr check */
8023 	if (is_zero_ether_addr(addr) ||
8024 	    is_broadcast_ether_addr(addr) ||
8025 	    is_multicast_ether_addr(addr)) {
8026 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8027 			addr);
8028 		return -EINVAL;
8029 	}
8030 
8031 	memset(&req, 0, sizeof(req));
8032 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8033 	hclge_prepare_mac_addr(&req, addr, false);
8034 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8035 	if (!ret) {
8036 		mutex_lock(&hdev->vport_lock);
8037 		hclge_update_umv_space(vport, true);
8038 		mutex_unlock(&hdev->vport_lock);
8039 	} else if (ret == -ENOENT) {
8040 		ret = 0;
8041 	}
8042 
8043 	return ret;
8044 }
8045 
8046 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8047 			     const unsigned char *addr)
8048 {
8049 	struct hclge_vport *vport = hclge_get_vport(handle);
8050 
8051 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8052 				     addr);
8053 }
8054 
8055 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8056 			     const unsigned char *addr)
8057 {
8058 	struct hclge_dev *hdev = vport->back;
8059 	struct hclge_mac_vlan_tbl_entry_cmd req;
8060 	struct hclge_desc desc[3];
8061 	int status;
8062 
8063 	/* mac addr check */
8064 	if (!is_multicast_ether_addr(addr)) {
8065 		dev_err(&hdev->pdev->dev,
8066 			"Add mc mac err! invalid mac:%pM.\n",
8067 			 addr);
8068 		return -EINVAL;
8069 	}
8070 	memset(&req, 0, sizeof(req));
8071 	hclge_prepare_mac_addr(&req, addr, true);
8072 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8073 	if (status) {
8074 		/* This mac addr does not exist, add a new entry for it */
8075 		memset(desc[0].data, 0, sizeof(desc[0].data));
8076 		memset(desc[1].data, 0, sizeof(desc[1].data));
8077 		memset(desc[2].data, 0, sizeof(desc[2].data));
8078 	}
8079 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8080 	if (status)
8081 		return status;
8082 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8083 
8084 	/* if the table already overflowed, do not print the error each time */
8085 	if (status == -ENOSPC &&
8086 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8087 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8088 
8089 	return status;
8090 }
8091 
8092 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8093 			    const unsigned char *addr)
8094 {
8095 	struct hclge_vport *vport = hclge_get_vport(handle);
8096 
8097 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8098 				     addr);
8099 }
8100 
8101 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8102 			    const unsigned char *addr)
8103 {
8104 	struct hclge_dev *hdev = vport->back;
8105 	struct hclge_mac_vlan_tbl_entry_cmd req;
8106 	enum hclge_cmd_status status;
8107 	struct hclge_desc desc[3];
8108 
8109 	/* mac addr check */
8110 	if (!is_multicast_ether_addr(addr)) {
8111 		dev_dbg(&hdev->pdev->dev,
8112 			"Remove mc mac err! invalid mac:%pM.\n",
8113 			 addr);
8114 		return -EINVAL;
8115 	}
8116 
8117 	memset(&req, 0, sizeof(req));
8118 	hclge_prepare_mac_addr(&req, addr, true);
8119 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8120 	if (!status) {
8121 		/* This mac addr exists, remove this vport's VFID from its entry */
8122 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8123 		if (status)
8124 			return status;
8125 
8126 		if (hclge_is_all_function_id_zero(desc))
8127 			/* All the vfid bits are zero, so delete this entry */
8128 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8129 		else
8130 			/* Some vfid bits are still set, so just update the entry */
8131 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8132 
8133 	} else if (status == -ENOENT) {
8134 		status = 0;
8135 	}
8136 
8137 	return status;
8138 }
8139 
8140 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8141 				      struct list_head *list,
8142 				      int (*sync)(struct hclge_vport *,
8143 						  const unsigned char *))
8144 {
8145 	struct hclge_mac_node *mac_node, *tmp;
8146 	int ret;
8147 
8148 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8149 		ret = sync(vport, mac_node->mac_addr);
8150 		if (!ret) {
8151 			mac_node->state = HCLGE_MAC_ACTIVE;
8152 		} else {
8153 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8154 				&vport->state);
8155 			break;
8156 		}
8157 	}
8158 }
8159 
8160 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8161 					struct list_head *list,
8162 					int (*unsync)(struct hclge_vport *,
8163 						      const unsigned char *))
8164 {
8165 	struct hclge_mac_node *mac_node, *tmp;
8166 	int ret;
8167 
8168 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8169 		ret = unsync(vport, mac_node->mac_addr);
8170 		if (!ret || ret == -ENOENT) {
8171 			list_del(&mac_node->node);
8172 			kfree(mac_node);
8173 		} else {
8174 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8175 				&vport->state);
8176 			break;
8177 		}
8178 	}
8179 }
8180 
8181 static bool hclge_sync_from_add_list(struct list_head *add_list,
8182 				     struct list_head *mac_list)
8183 {
8184 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8185 	bool all_added = true;
8186 
8187 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8188 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8189 			all_added = false;
8190 
8191 		/* If the mac address from tmp_add_list is not in the
8192 		 * uc/mc_mac_list, a TO_DEL request was received while the
8193 		 * address was being added to the mac table. If the mac_node
8194 		 * state is ACTIVE, change it to TO_DEL so the address is
8195 		 * removed on the next sync. Otherwise the state must be
8196 		 * TO_ADD, meaning the address was never written to the mac
8197 		 * table, so just free the mac node.
8198 		 */
8199 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8200 		if (new_node) {
8201 			hclge_update_mac_node(new_node, mac_node->state);
8202 			list_del(&mac_node->node);
8203 			kfree(mac_node);
8204 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8205 			mac_node->state = HCLGE_MAC_TO_DEL;
8206 			list_del(&mac_node->node);
8207 			list_add_tail(&mac_node->node, mac_list);
8208 		} else {
8209 			list_del(&mac_node->node);
8210 			kfree(mac_node);
8211 		}
8212 	}
8213 
8214 	return all_added;
8215 }
8216 
8217 static void hclge_sync_from_del_list(struct list_head *del_list,
8218 				     struct list_head *mac_list)
8219 {
8220 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8221 
8222 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8223 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8224 		if (new_node) {
8225 			/* If the mac addr exists in the mac list, a new TO_ADD
8226 			 * request was received while the mac address was being
8227 			 * configured. The mac node state is TO_ADD and the
8228 			 * address is still present in hardware (because the
8229 			 * delete failed), so just change the mac node state
8230 			 * to ACTIVE.
8231 			 */
8232 			new_node->state = HCLGE_MAC_ACTIVE;
8233 			list_del(&mac_node->node);
8234 			kfree(mac_node);
8235 		} else {
8236 			list_del(&mac_node->node);
8237 			list_add_tail(&mac_node->node, mac_list);
8238 		}
8239 	}
8240 }
8241 
8242 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8243 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8244 					bool is_all_added)
8245 {
8246 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8247 		if (is_all_added)
8248 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8249 		else
8250 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8251 	} else {
8252 		if (is_all_added)
8253 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8254 		else
8255 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8256 	}
8257 }
8258 
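/* Synchronize one vport's software mac list with the hardware table.
 * Under the list lock, TO_DEL nodes are moved to a temporary delete list
 * and TO_ADD nodes are copied to a temporary add list; the hardware is
 * then updated outside the lock (deletions first, to free table space),
 * the results are merged back, and the overflow promisc flags updated.
 */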
8259 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8260 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8261 {
8262 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8263 	struct list_head tmp_add_list, tmp_del_list;
8264 	struct list_head *list;
8265 	bool all_added;
8266 
8267 	INIT_LIST_HEAD(&tmp_add_list);
8268 	INIT_LIST_HEAD(&tmp_del_list);
8269 
8270 	/* move the mac addrs to tmp_add_list and tmp_del_list, so that
8271 	 * they can be added/deleted outside the spin lock
8272 	 */
8273 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8274 		&vport->uc_mac_list : &vport->mc_mac_list;
8275 
8276 	spin_lock_bh(&vport->mac_list_lock);
8277 
8278 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8279 		switch (mac_node->state) {
8280 		case HCLGE_MAC_TO_DEL:
8281 			list_del(&mac_node->node);
8282 			list_add_tail(&mac_node->node, &tmp_del_list);
8283 			break;
8284 		case HCLGE_MAC_TO_ADD:
8285 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8286 			if (!new_node)
8287 				goto stop_traverse;
8288 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8289 			new_node->state = mac_node->state;
8290 			list_add_tail(&new_node->node, &tmp_add_list);
8291 			break;
8292 		default:
8293 			break;
8294 		}
8295 	}
8296 
8297 stop_traverse:
8298 	spin_unlock_bh(&vport->mac_list_lock);
8299 
8300 	/* delete first, so the additions get the maximum mac table space */
8301 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8302 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8303 					    hclge_rm_uc_addr_common);
8304 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8305 					  hclge_add_uc_addr_common);
8306 	} else {
8307 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8308 					    hclge_rm_mc_addr_common);
8309 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8310 					  hclge_add_mc_addr_common);
8311 	}
8312 
8313 	/* if some mac addresses failed to be added/deleted, move them back
8314 	 * to the mac_list and retry on the next sync.
8315 	 */
8316 	spin_lock_bh(&vport->mac_list_lock);
8317 
8318 	hclge_sync_from_del_list(&tmp_del_list, list);
8319 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8320 
8321 	spin_unlock_bh(&vport->mac_list_lock);
8322 
8323 	hclge_update_overflow_flags(vport, mac_type, all_added);
8324 }
8325 
8326 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8327 {
8328 	struct hclge_dev *hdev = vport->back;
8329 
8330 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8331 		return false;
8332 
8333 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8334 		return true;
8335 
8336 	return false;
8337 }
8338 
8339 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8340 {
8341 	int i;
8342 
8343 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8344 		struct hclge_vport *vport = &hdev->vport[i];
8345 
8346 		if (!hclge_need_sync_mac_table(vport))
8347 			continue;
8348 
8349 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8350 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8351 	}
8352 }
8353 
8354 static void hclge_build_del_list(struct list_head *list,
8355 				 bool is_del_list,
8356 				 struct list_head *tmp_del_list)
8357 {
8358 	struct hclge_mac_node *mac_cfg, *tmp;
8359 
8360 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8361 		switch (mac_cfg->state) {
8362 		case HCLGE_MAC_TO_DEL:
8363 		case HCLGE_MAC_ACTIVE:
8364 			list_del(&mac_cfg->node);
8365 			list_add_tail(&mac_cfg->node, tmp_del_list);
8366 			break;
8367 		case HCLGE_MAC_TO_ADD:
8368 			if (is_del_list) {
8369 				list_del(&mac_cfg->node);
8370 				kfree(mac_cfg);
8371 			}
8372 			break;
8373 		}
8374 	}
8375 }
8376 
8377 static void hclge_unsync_del_list(struct hclge_vport *vport,
8378 				  int (*unsync)(struct hclge_vport *vport,
8379 						const unsigned char *addr),
8380 				  bool is_del_list,
8381 				  struct list_head *tmp_del_list)
8382 {
8383 	struct hclge_mac_node *mac_cfg, *tmp;
8384 	int ret;
8385 
8386 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8387 		ret = unsync(vport, mac_cfg->mac_addr);
8388 		if (!ret || ret == -ENOENT) {
8389 			/* clear all mac addrs from hardware, but keep them in
8390 			 * the mac list so they can be restored after the vf
8391 			 * reset finishes.
8392 			 */
8393 			if (!is_del_list &&
8394 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8395 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8396 			} else {
8397 				list_del(&mac_cfg->node);
8398 				kfree(mac_cfg);
8399 			}
8400 		} else if (is_del_list) {
8401 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8402 		}
8403 	}
8404 }
8405 
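/* Remove all of a vport's uc or mc mac addresses from hardware. When
 * @is_del_list is false (e.g. across a vf reset), active addresses are
 * kept in the software list in TO_ADD state so they can be restored
 * later, and the vport is marked in vport_config_block.
 */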
8406 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8407 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8408 {
8409 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8410 	struct hclge_dev *hdev = vport->back;
8411 	struct list_head tmp_del_list, *list;
8412 
8413 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8414 		list = &vport->uc_mac_list;
8415 		unsync = hclge_rm_uc_addr_common;
8416 	} else {
8417 		list = &vport->mc_mac_list;
8418 		unsync = hclge_rm_mc_addr_common;
8419 	}
8420 
8421 	INIT_LIST_HEAD(&tmp_del_list);
8422 
8423 	if (!is_del_list)
8424 		set_bit(vport->vport_id, hdev->vport_config_block);
8425 
8426 	spin_lock_bh(&vport->mac_list_lock);
8427 
8428 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
8429 
8430 	spin_unlock_bh(&vport->mac_list_lock);
8431 
8432 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8433 
8434 	spin_lock_bh(&vport->mac_list_lock);
8435 
8436 	hclge_sync_from_del_list(&tmp_del_list, list);
8437 
8438 	spin_unlock_bh(&vport->mac_list_lock);
8439 }
8440 
8441 /* remove all mac addresses when uninitializing */
8442 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8443 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8444 {
8445 	struct hclge_mac_node *mac_node, *tmp;
8446 	struct hclge_dev *hdev = vport->back;
8447 	struct list_head tmp_del_list, *list;
8448 
8449 	INIT_LIST_HEAD(&tmp_del_list);
8450 
8451 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8452 		&vport->uc_mac_list : &vport->mc_mac_list;
8453 
8454 	spin_lock_bh(&vport->mac_list_lock);
8455 
8456 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8457 		switch (mac_node->state) {
8458 		case HCLGE_MAC_TO_DEL:
8459 		case HCLGE_MAC_ACTIVE:
8460 			list_del(&mac_node->node);
8461 			list_add_tail(&mac_node->node, &tmp_del_list);
8462 			break;
8463 		case HCLGE_MAC_TO_ADD:
8464 			list_del(&mac_node->node);
8465 			kfree(mac_node);
8466 			break;
8467 		}
8468 	}
8469 
8470 	spin_unlock_bh(&vport->mac_list_lock);
8471 
8472 	if (mac_type == HCLGE_MAC_ADDR_UC)
8473 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8474 					    hclge_rm_uc_addr_common);
8475 	else
8476 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8477 					    hclge_rm_mc_addr_common);
8478 
8479 	if (!list_empty(&tmp_del_list))
8480 		dev_warn(&hdev->pdev->dev,
8481 			 "failed to completely uninit %s mac list for vport %u\n",
8482 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8483 			 vport->vport_id);
8484 
8485 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8486 		list_del(&mac_node->node);
8487 		kfree(mac_node);
8488 	}
8489 }
8490 
8491 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8492 {
8493 	struct hclge_vport *vport;
8494 	int i;
8495 
8496 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8497 		vport = &hdev->vport[i];
8498 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8499 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8500 	}
8501 }
8502 
8503 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8504 					      u16 cmdq_resp, u8 resp_code)
8505 {
8506 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8507 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8508 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8509 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8510 
8511 	int return_status;
8512 
8513 	if (cmdq_resp) {
8514 		dev_err(&hdev->pdev->dev,
8515 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8516 			cmdq_resp);
8517 		return -EIO;
8518 	}
8519 
8520 	switch (resp_code) {
8521 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8522 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8523 		return_status = 0;
8524 		break;
8525 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8526 		dev_err(&hdev->pdev->dev,
8527 			"add mac ethertype failed for manager table overflow.\n");
8528 		return_status = -EIO;
8529 		break;
8530 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8531 		dev_err(&hdev->pdev->dev,
8532 			"add mac ethertype failed for key conflict.\n");
8533 		return_status = -EIO;
8534 		break;
8535 	default:
8536 		dev_err(&hdev->pdev->dev,
8537 			"add mac ethertype failed for undefined, code=%u.\n",
8538 			resp_code);
8539 		return_status = -EIO;
8540 	}
8541 
8542 	return return_status;
8543 }
8544 
8545 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8546 				     u8 *mac_addr)
8547 {
8548 	struct hclge_mac_vlan_tbl_entry_cmd req;
8549 	struct hclge_dev *hdev = vport->back;
8550 	struct hclge_desc desc;
8551 	u16 egress_port = 0;
8552 	int i;
8553 
8554 	if (is_zero_ether_addr(mac_addr))
8555 		return false;
8556 
8557 	memset(&req, 0, sizeof(req));
8558 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8559 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8560 	req.egress_port = cpu_to_le16(egress_port);
8561 	hclge_prepare_mac_addr(&req, mac_addr, false);
8562 
8563 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8564 		return true;
8565 
8566 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8567 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8568 		if (i != vf_idx &&
8569 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8570 			return true;
8571 
8572 	return false;
8573 }
8574 
8575 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8576 			    u8 *mac_addr)
8577 {
8578 	struct hclge_vport *vport = hclge_get_vport(handle);
8579 	struct hclge_dev *hdev = vport->back;
8580 
8581 	vport = hclge_get_vf_vport(hdev, vf);
8582 	if (!vport)
8583 		return -EINVAL;
8584 
8585 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8586 		dev_info(&hdev->pdev->dev,
8587 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8588 			 mac_addr);
8589 		return 0;
8590 	}
8591 
8592 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8593 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8594 			mac_addr);
8595 		return -EEXIST;
8596 	}
8597 
8598 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8599 
8600 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8601 		dev_info(&hdev->pdev->dev,
8602 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8603 			 vf, mac_addr);
8604 		return hclge_inform_reset_assert_to_vf(vport);
8605 	}
8606 
8607 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8608 		 vf, mac_addr);
8609 	return 0;
8610 }
8611 
8612 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8613 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8614 {
8615 	struct hclge_desc desc;
8616 	u8 resp_code;
8617 	u16 retval;
8618 	int ret;
8619 
8620 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8621 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8622 
8623 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8624 	if (ret) {
8625 		dev_err(&hdev->pdev->dev,
8626 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8627 			ret);
8628 		return ret;
8629 	}
8630 
8631 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8632 	retval = le16_to_cpu(desc.retval);
8633 
8634 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8635 }
8636 
8637 static int init_mgr_tbl(struct hclge_dev *hdev)
8638 {
8639 	int ret;
8640 	int i;
8641 
8642 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8643 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8644 		if (ret) {
8645 			dev_err(&hdev->pdev->dev,
8646 				"add mac ethertype failed, ret =%d.\n",
8647 				ret);
8648 			return ret;
8649 		}
8650 	}
8651 
8652 	return 0;
8653 }
8654 
8655 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8656 {
8657 	struct hclge_vport *vport = hclge_get_vport(handle);
8658 	struct hclge_dev *hdev = vport->back;
8659 
8660 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8661 }
8662 
8663 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8664 				       const u8 *old_addr, const u8 *new_addr)
8665 {
8666 	struct list_head *list = &vport->uc_mac_list;
8667 	struct hclge_mac_node *old_node, *new_node;
8668 
8669 	new_node = hclge_find_mac_node(list, new_addr);
8670 	if (!new_node) {
8671 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8672 		if (!new_node)
8673 			return -ENOMEM;
8674 
8675 		new_node->state = HCLGE_MAC_TO_ADD;
8676 		ether_addr_copy(new_node->mac_addr, new_addr);
8677 		list_add(&new_node->node, list);
8678 	} else {
8679 		if (new_node->state == HCLGE_MAC_TO_DEL)
8680 			new_node->state = HCLGE_MAC_ACTIVE;
8681 
8682 		/* keep the new addr at the head of the list, otherwise the
8683 		 * dev addr may fail to be re-added to the mac table due to
8684 		 * the umv space limitation after a global/imp reset, which
8685 		 * clears the mac table in hardware.
8686 		 */
8687 		list_move(&new_node->node, list);
8688 	}
8689 
8690 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8691 		old_node = hclge_find_mac_node(list, old_addr);
8692 		if (old_node) {
8693 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8694 				list_del(&old_node->node);
8695 				kfree(old_node);
8696 			} else {
8697 				old_node->state = HCLGE_MAC_TO_DEL;
8698 			}
8699 		}
8700 	}
8701 
8702 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8703 
8704 	return 0;
8705 }
8706 
8707 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8708 			      bool is_first)
8709 {
8710 	const unsigned char *new_addr = (const unsigned char *)p;
8711 	struct hclge_vport *vport = hclge_get_vport(handle);
8712 	struct hclge_dev *hdev = vport->back;
8713 	unsigned char *old_addr = NULL;
8714 	int ret;
8715 
8716 	/* mac addr check */
8717 	if (is_zero_ether_addr(new_addr) ||
8718 	    is_broadcast_ether_addr(new_addr) ||
8719 	    is_multicast_ether_addr(new_addr)) {
8720 		dev_err(&hdev->pdev->dev,
8721 			"change uc mac err! invalid mac: %pM.\n",
8722 			 new_addr);
8723 		return -EINVAL;
8724 	}
8725 
8726 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8727 	if (ret) {
8728 		dev_err(&hdev->pdev->dev,
8729 			"failed to configure mac pause address, ret = %d\n",
8730 			ret);
8731 		return ret;
8732 	}
8733 
8734 	if (!is_first)
8735 		old_addr = hdev->hw.mac.mac_addr;
8736 
8737 	spin_lock_bh(&vport->mac_list_lock);
8738 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8739 	if (ret) {
8740 		dev_err(&hdev->pdev->dev,
8741 			"failed to change the mac addr:%pM, ret = %d\n",
8742 			new_addr, ret);
8743 		spin_unlock_bh(&vport->mac_list_lock);
8744 
8745 		if (!is_first)
8746 			hclge_pause_addr_cfg(hdev, old_addr);
8747 
8748 		return ret;
8749 	}
8750 	/* the dev addr must be updated under the spin lock to prevent it
8751 	 * from being removed by the set_rx_mode path.
8752 	 */
8753 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8754 	spin_unlock_bh(&vport->mac_list_lock);
8755 
8756 	hclge_task_schedule(hdev, 0);
8757 
8758 	return 0;
8759 }
8760 
8761 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8762 			  int cmd)
8763 {
8764 	struct hclge_vport *vport = hclge_get_vport(handle);
8765 	struct hclge_dev *hdev = vport->back;
8766 
8767 	if (!hdev->hw.mac.phydev)
8768 		return -EOPNOTSUPP;
8769 
8770 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8771 }
8772 
8773 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8774 				      u8 fe_type, bool filter_en, u8 vf_id)
8775 {
8776 	struct hclge_vlan_filter_ctrl_cmd *req;
8777 	struct hclge_desc desc;
8778 	int ret;
8779 
8780 	/* read current vlan filter parameter */
8781 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8782 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8783 	req->vlan_type = vlan_type;
8784 	req->vf_id = vf_id;
8785 
8786 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8787 	if (ret) {
8788 		dev_err(&hdev->pdev->dev,
8789 			"failed to get vlan filter config, ret = %d.\n", ret);
8790 		return ret;
8791 	}
8792 
8793 	/* modify and write new config parameter */
8794 	hclge_cmd_reuse_desc(&desc, false);
8795 	req->vlan_fe = filter_en ?
8796 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8797 
8798 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8799 	if (ret)
8800 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8801 			ret);
8802 
8803 	return ret;
8804 }
8805 
8806 #define HCLGE_FILTER_TYPE_VF		0
8807 #define HCLGE_FILTER_TYPE_PORT		1
8808 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8809 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8810 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8811 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8812 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8813 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8814 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8815 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8816 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8817 
8818 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8819 {
8820 	struct hclge_vport *vport = hclge_get_vport(handle);
8821 	struct hclge_dev *hdev = vport->back;
8822 
8823 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8824 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8825 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8826 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8827 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8828 	} else {
8829 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8830 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8831 					   0);
8832 	}
8833 	if (enable)
8834 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8835 	else
8836 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8837 }
8838 
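/* Build and send the VF vlan filter command pair. The per-VF bitmap is
 * split across two descriptors of HCLGE_MAX_VF_BYTES each; @vfid selects
 * the byte (vfid / 8) and bit (vfid % 8) to set, and vlan_cfg carries
 * the add/kill flag for @vlan.
 */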
8839 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8840 					bool is_kill, u16 vlan,
8841 					struct hclge_desc *desc)
8842 {
8843 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8844 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8845 	u8 vf_byte_val;
8846 	u8 vf_byte_off;
8847 	int ret;
8848 
8849 	hclge_cmd_setup_basic_desc(&desc[0],
8850 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8851 	hclge_cmd_setup_basic_desc(&desc[1],
8852 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8853 
8854 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8855 
8856 	vf_byte_off = vfid / 8;
8857 	vf_byte_val = 1 << (vfid % 8);
8858 
8859 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8860 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8861 
8862 	req0->vlan_id  = cpu_to_le16(vlan);
8863 	req0->vlan_cfg = is_kill;
8864 
8865 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8866 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8867 	else
8868 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8869 
8870 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8871 	if (ret) {
8872 		dev_err(&hdev->pdev->dev,
8873 			"Send vf vlan command fail, ret =%d.\n",
8874 			ret);
8875 		return ret;
8876 	}
8877 
8878 	return 0;
8879 }
8880 
8881 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
8882 					  bool is_kill, struct hclge_desc *desc)
8883 {
8884 	struct hclge_vlan_filter_vf_cfg_cmd *req;
8885 
8886 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8887 
8888 	if (!is_kill) {
8889 #define HCLGE_VF_VLAN_NO_ENTRY	2
8890 		if (!req->resp_code || req->resp_code == 1)
8891 			return 0;
8892 
8893 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8894 			set_bit(vfid, hdev->vf_vlan_full);
8895 			dev_warn(&hdev->pdev->dev,
8896 				 "vf vlan table is full, vf vlan filter is disabled\n");
8897 			return 0;
8898 		}
8899 
8900 		dev_err(&hdev->pdev->dev,
8901 			"Add vf vlan filter fail, ret =%u.\n",
8902 			req->resp_code);
8903 	} else {
8904 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8905 		if (!req->resp_code)
8906 			return 0;
8907 
		/* When the vf vlan table is full, the vf vlan filter is
		 * disabled and new vlan ids are no longer added to the
		 * table, so a delete may legitimately find no entry.
		 * Return 0 without warning to avoid flooding the log on
		 * unload.
		 */
8913 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8914 			return 0;
8915 
8916 		dev_err(&hdev->pdev->dev,
8917 			"Kill vf vlan filter fail, ret =%u.\n",
8918 			req->resp_code);
8919 	}
8920 
8921 	return -EIO;
8922 }
8923 
8924 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8925 				    bool is_kill, u16 vlan,
8926 				    __be16 proto)
8927 {
8928 	struct hclge_vport *vport = &hdev->vport[vfid];
8929 	struct hclge_desc desc[2];
8930 	int ret;
8931 
	/* If the vf vlan table is full, firmware disables the vf vlan
	 * filter, so adding new vlan ids to it is neither possible nor
	 * necessary. However, when spoof check is enabled and the table is
	 * full, refuse to add a new vlan, because tx packets carrying that
	 * vlan id would be dropped.
	 */
8937 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8938 		if (vport->vf_info.spoofchk && vlan) {
8939 			dev_err(&hdev->pdev->dev,
8940 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8941 			return -EPERM;
8942 		}
8943 		return 0;
8944 	}
8945 
8946 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8947 	if (ret)
8948 		return ret;
8949 
8950 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8951 }
8952 
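/* Program one vlan id in the port (PF) vlan filter table. The table is
 * written HCLGE_VLAN_ID_OFFSET_STEP ids at a time: vlan_offset selects the
 * window, and a single bit in vlan_offset_bitmap marks the id within it
 * (HCLGE_VLAN_BYTE_SIZE bits per bitmap byte).
 */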
8953 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8954 				      u16 vlan_id, bool is_kill)
8955 {
8956 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8957 	struct hclge_desc desc;
8958 	u8 vlan_offset_byte_val;
8959 	u8 vlan_offset_byte;
8960 	u8 vlan_offset_160;
8961 	int ret;
8962 
8963 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8964 
8965 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8966 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8967 			   HCLGE_VLAN_BYTE_SIZE;
8968 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8969 
8970 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8971 	req->vlan_offset = vlan_offset_160;
8972 	req->vlan_cfg = is_kill;
8973 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8974 
8975 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8976 	if (ret)
8977 		dev_err(&hdev->pdev->dev,
8978 			"port vlan command, send fail, ret =%d.\n", ret);
8979 	return ret;
8980 }
8981 
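/* Update the hardware vlan filter for a vport. Deleting vlan 0 is a no-op.
 * The per-VF filter is always updated; the port filter is only touched when
 * this vport is the first one to add the vlan or the last one to remove it,
 * which is tracked by the hdev->vlan_table bitmaps.
 */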
8982 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8983 				    u16 vport_id, u16 vlan_id,
8984 				    bool is_kill)
8985 {
8986 	u16 vport_idx, vport_num = 0;
8987 	int ret;
8988 
8989 	if (is_kill && !vlan_id)
8990 		return 0;
8991 
8992 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8993 				       proto);
8994 	if (ret) {
8995 		dev_err(&hdev->pdev->dev,
8996 			"Set %u vport vlan filter config fail, ret =%d.\n",
8997 			vport_id, ret);
8998 		return ret;
8999 	}
9000 
9001 	/* vlan 0 may be added twice when 8021q module is enabled */
9002 	if (!is_kill && !vlan_id &&
9003 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9004 		return 0;
9005 
9006 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9007 		dev_err(&hdev->pdev->dev,
9008 			"Add port vlan failed, vport %u is already in vlan %u\n",
9009 			vport_id, vlan_id);
9010 		return -EINVAL;
9011 	}
9012 
9013 	if (is_kill &&
9014 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9015 		dev_err(&hdev->pdev->dev,
9016 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9017 			vport_id, vlan_id);
9018 		return -EINVAL;
9019 	}
9020 
9021 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9022 		vport_num++;
9023 
9024 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9025 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9026 						 is_kill);
9027 
9028 	return ret;
9029 }
9030 
9031 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9032 {
9033 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9034 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9035 	struct hclge_dev *hdev = vport->back;
9036 	struct hclge_desc desc;
9037 	u16 bmap_index;
9038 	int status;
9039 
9040 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9041 
9042 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9043 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9044 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9045 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9046 		      vcfg->accept_tag1 ? 1 : 0);
9047 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9048 		      vcfg->accept_untag1 ? 1 : 0);
9049 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9050 		      vcfg->accept_tag2 ? 1 : 0);
9051 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9052 		      vcfg->accept_untag2 ? 1 : 0);
9053 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9054 		      vcfg->insert_tag1_en ? 1 : 0);
9055 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9056 		      vcfg->insert_tag2_en ? 1 : 0);
9057 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9058 		      vcfg->tag_shift_mode_en ? 1 : 0);
9059 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9060 
9061 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9062 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9063 			HCLGE_VF_NUM_PER_BYTE;
9064 	req->vf_bitmap[bmap_index] =
9065 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9066 
9067 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9068 	if (status)
9069 		dev_err(&hdev->pdev->dev,
9070 			"Send port txvlan cfg command fail, ret =%d\n",
9071 			status);
9072 
9073 	return status;
9074 }
9075 
9076 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9077 {
9078 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9079 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9080 	struct hclge_dev *hdev = vport->back;
9081 	struct hclge_desc desc;
9082 	u16 bmap_index;
9083 	int status;
9084 
9085 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9086 
9087 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9088 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9089 		      vcfg->strip_tag1_en ? 1 : 0);
9090 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9091 		      vcfg->strip_tag2_en ? 1 : 0);
9092 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9093 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9094 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9095 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9096 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9097 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9098 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9099 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9100 
9101 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9102 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9103 			HCLGE_VF_NUM_PER_BYTE;
9104 	req->vf_bitmap[bmap_index] =
9105 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9106 
9107 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9108 	if (status)
9109 		dev_err(&hdev->pdev->dev,
9110 			"Send port rxvlan cfg command fail, ret =%d\n",
9111 			status);
9112 
9113 	return status;
9114 }
9115 
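/* Configure TX/RX vlan tag offload for a vport according to the port based
 * vlan state. With port based vlan disabled, no tag is inserted on TX and
 * only tag2 stripping follows rx_vlan_offload_en. With it enabled, the port
 * based vlan is inserted as tag1 on TX, tag1 stripping follows
 * rx_vlan_offload_en, and tag2 is always stripped and discarded on RX.
 */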
9116 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9117 				  u16 port_base_vlan_state,
9118 				  u16 vlan_tag)
9119 {
9120 	int ret;
9121 
9122 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9123 		vport->txvlan_cfg.accept_tag1 = true;
9124 		vport->txvlan_cfg.insert_tag1_en = false;
9125 		vport->txvlan_cfg.default_tag1 = 0;
9126 	} else {
9127 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9128 
9129 		vport->txvlan_cfg.accept_tag1 =
9130 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9131 		vport->txvlan_cfg.insert_tag1_en = true;
9132 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9133 	}
9134 
9135 	vport->txvlan_cfg.accept_untag1 = true;
9136 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision 0x20; newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
9141 	vport->txvlan_cfg.accept_tag2 = true;
9142 	vport->txvlan_cfg.accept_untag2 = true;
9143 	vport->txvlan_cfg.insert_tag2_en = false;
9144 	vport->txvlan_cfg.default_tag2 = 0;
9145 	vport->txvlan_cfg.tag_shift_mode_en = true;
9146 
9147 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9148 		vport->rxvlan_cfg.strip_tag1_en = false;
9149 		vport->rxvlan_cfg.strip_tag2_en =
9150 				vport->rxvlan_cfg.rx_vlan_offload_en;
9151 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9152 	} else {
9153 		vport->rxvlan_cfg.strip_tag1_en =
9154 				vport->rxvlan_cfg.rx_vlan_offload_en;
9155 		vport->rxvlan_cfg.strip_tag2_en = true;
9156 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9157 	}
9158 
9159 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9160 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9161 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9162 
9163 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9164 	if (ret)
9165 		return ret;
9166 
9167 	return hclge_set_vlan_rx_offload_cfg(vport);
9168 }
9169 
9170 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9171 {
9172 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9173 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9174 	struct hclge_desc desc;
9175 	int status;
9176 
9177 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9178 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9179 	rx_req->ot_fst_vlan_type =
9180 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9181 	rx_req->ot_sec_vlan_type =
9182 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9183 	rx_req->in_fst_vlan_type =
9184 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9185 	rx_req->in_sec_vlan_type =
9186 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9187 
9188 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9189 	if (status) {
9190 		dev_err(&hdev->pdev->dev,
9191 			"Send rxvlan protocol type command fail, ret =%d\n",
9192 			status);
9193 		return status;
9194 	}
9195 
9196 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9197 
9198 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9199 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9200 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9201 
9202 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9203 	if (status)
9204 		dev_err(&hdev->pdev->dev,
9205 			"Send txvlan protocol type command fail, ret =%d\n",
9206 			status);
9207 
9208 	return status;
9209 }
9210 
9211 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9212 {
9213 #define HCLGE_DEF_VLAN_TYPE		0x8100
9214 
9215 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9216 	struct hclge_vport *vport;
9217 	int ret;
9218 	int i;
9219 
9220 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9221 		/* for revision 0x21, vf vlan filter is per function */
9222 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9223 			vport = &hdev->vport[i];
9224 			ret = hclge_set_vlan_filter_ctrl(hdev,
9225 							 HCLGE_FILTER_TYPE_VF,
9226 							 HCLGE_FILTER_FE_EGRESS,
9227 							 true,
9228 							 vport->vport_id);
9229 			if (ret)
9230 				return ret;
9231 		}
9232 
9233 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9234 						 HCLGE_FILTER_FE_INGRESS, true,
9235 						 0);
9236 		if (ret)
9237 			return ret;
9238 	} else {
9239 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9240 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9241 						 true, 0);
9242 		if (ret)
9243 			return ret;
9244 	}
9245 
9246 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9247 
9248 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9249 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9250 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9251 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254 
9255 	ret = hclge_set_vlan_protocol_type(hdev);
9256 	if (ret)
9257 		return ret;
9258 
9259 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9260 		u16 vlan_tag;
9261 
9262 		vport = &hdev->vport[i];
9263 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9264 
9265 		ret = hclge_vlan_offload_cfg(vport,
9266 					     vport->port_base_vlan_cfg.state,
9267 					     vlan_tag);
9268 		if (ret)
9269 			return ret;
9270 	}
9271 
9272 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9273 }
9274 
9275 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9276 				       bool writen_to_tbl)
9277 {
9278 	struct hclge_vport_vlan_cfg *vlan;
9279 
9280 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9281 	if (!vlan)
9282 		return;
9283 
9284 	vlan->hd_tbl_status = writen_to_tbl;
9285 	vlan->vlan_id = vlan_id;
9286 
9287 	list_add_tail(&vlan->node, &vport->vlan_list);
9288 }
9289 
9290 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9291 {
9292 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9293 	struct hclge_dev *hdev = vport->back;
9294 	int ret;
9295 
9296 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9297 		if (!vlan->hd_tbl_status) {
9298 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9299 						       vport->vport_id,
9300 						       vlan->vlan_id, false);
9301 			if (ret) {
9302 				dev_err(&hdev->pdev->dev,
9303 					"restore vport vlan list failed, ret=%d\n",
9304 					ret);
9305 				return ret;
9306 			}
9307 		}
9308 		vlan->hd_tbl_status = true;
9309 	}
9310 
9311 	return 0;
9312 }
9313 
9314 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9315 				      bool is_write_tbl)
9316 {
9317 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9318 	struct hclge_dev *hdev = vport->back;
9319 
9320 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9321 		if (vlan->vlan_id == vlan_id) {
9322 			if (is_write_tbl && vlan->hd_tbl_status)
9323 				hclge_set_vlan_filter_hw(hdev,
9324 							 htons(ETH_P_8021Q),
9325 							 vport->vport_id,
9326 							 vlan_id,
9327 							 true);
9328 
9329 			list_del(&vlan->node);
9330 			kfree(vlan);
9331 			break;
9332 		}
9333 	}
9334 }
9335 
9336 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9337 {
9338 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9339 	struct hclge_dev *hdev = vport->back;
9340 
9341 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9342 		if (vlan->hd_tbl_status)
9343 			hclge_set_vlan_filter_hw(hdev,
9344 						 htons(ETH_P_8021Q),
9345 						 vport->vport_id,
9346 						 vlan->vlan_id,
9347 						 true);
9348 
9349 		vlan->hd_tbl_status = false;
9350 		if (is_del_list) {
9351 			list_del(&vlan->node);
9352 			kfree(vlan);
9353 		}
9354 	}
9355 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9356 }
9357 
9358 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9359 {
9360 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9361 	struct hclge_vport *vport;
9362 	int i;
9363 
9364 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9365 		vport = &hdev->vport[i];
9366 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9367 			list_del(&vlan->node);
9368 			kfree(vlan);
9369 		}
9370 	}
9371 }
9372 
9373 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9374 {
9375 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9376 	struct hclge_dev *hdev = vport->back;
9377 	u16 vlan_proto;
9378 	u16 vlan_id;
9379 	u16 state;
9380 	int ret;
9381 
9382 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9383 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9384 	state = vport->port_base_vlan_cfg.state;
9385 
9386 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9387 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9388 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9389 					 vport->vport_id, vlan_id,
9390 					 false);
9391 		return;
9392 	}
9393 
9394 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9395 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9396 					       vport->vport_id,
9397 					       vlan->vlan_id, false);
9398 		if (ret)
9399 			break;
9400 		vlan->hd_tbl_status = true;
9401 	}
9402 }
9403 
/* For global reset and IMP reset, hardware clears the mac table, so we
 * change the mac address state from ACTIVE to TO_ADD so that it can be
 * restored by the service task after the reset completes. Mac addresses
 * in the TO_DEL or DEL_FAIL state do not need to be restored after reset,
 * so simply remove these mac nodes from the mac list.
 */
9410 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9411 {
9412 	struct hclge_mac_node *mac_node, *tmp;
9413 
9414 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9415 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
9416 			mac_node->state = HCLGE_MAC_TO_ADD;
9417 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9418 			list_del(&mac_node->node);
9419 			kfree(mac_node);
9420 		}
9421 	}
9422 }
9423 
9424 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9425 {
9426 	spin_lock_bh(&vport->mac_list_lock);
9427 
9428 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9429 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9430 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9431 
9432 	spin_unlock_bh(&vport->mac_list_lock);
9433 }
9434 
9435 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9436 {
9437 	struct hclge_vport *vport = &hdev->vport[0];
9438 	struct hnae3_handle *handle = &vport->nic;
9439 
9440 	hclge_restore_mac_table_common(vport);
9441 	hclge_restore_vport_vlan_table(vport);
9442 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9443 
9444 	hclge_restore_fd_entries(handle);
9445 }
9446 
9447 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9448 {
9449 	struct hclge_vport *vport = hclge_get_vport(handle);
9450 
9451 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9452 		vport->rxvlan_cfg.strip_tag1_en = false;
9453 		vport->rxvlan_cfg.strip_tag2_en = enable;
9454 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9455 	} else {
9456 		vport->rxvlan_cfg.strip_tag1_en = enable;
9457 		vport->rxvlan_cfg.strip_tag2_en = true;
9458 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9459 	}
9460 
9461 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9462 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9463 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9464 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9465 
9466 	return hclge_set_vlan_rx_offload_cfg(vport);
9467 }
9468 
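/* Switch the hardware vlan filter entries between port based vlan mode and
 * normal mode: when enabling port based vlan, drop the per-vport vlan
 * entries from hardware (keeping the software list) and install the new
 * port based vlan; when disabling, remove the old port based vlan entry
 * and restore all vlans from the vport vlan list.
 */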
9469 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9470 					    u16 port_base_vlan_state,
9471 					    struct hclge_vlan_info *new_info,
9472 					    struct hclge_vlan_info *old_info)
9473 {
9474 	struct hclge_dev *hdev = vport->back;
9475 	int ret;
9476 
9477 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9478 		hclge_rm_vport_all_vlan_table(vport, false);
9479 		return hclge_set_vlan_filter_hw(hdev,
9480 						 htons(new_info->vlan_proto),
9481 						 vport->vport_id,
9482 						 new_info->vlan_tag,
9483 						 false);
9484 	}
9485 
9486 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9487 				       vport->vport_id, old_info->vlan_tag,
9488 				       true);
9489 	if (ret)
9490 		return ret;
9491 
9492 	return hclge_add_vport_all_vlan_table(vport);
9493 }
9494 
9495 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9496 				    struct hclge_vlan_info *vlan_info)
9497 {
9498 	struct hnae3_handle *nic = &vport->nic;
9499 	struct hclge_vlan_info *old_vlan_info;
9500 	struct hclge_dev *hdev = vport->back;
9501 	int ret;
9502 
9503 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9504 
9505 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9506 	if (ret)
9507 		return ret;
9508 
9509 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9510 		/* add new VLAN tag */
9511 		ret = hclge_set_vlan_filter_hw(hdev,
9512 					       htons(vlan_info->vlan_proto),
9513 					       vport->vport_id,
9514 					       vlan_info->vlan_tag,
9515 					       false);
9516 		if (ret)
9517 			return ret;
9518 
9519 		/* remove old VLAN tag */
9520 		ret = hclge_set_vlan_filter_hw(hdev,
9521 					       htons(old_vlan_info->vlan_proto),
9522 					       vport->vport_id,
9523 					       old_vlan_info->vlan_tag,
9524 					       true);
9525 		if (ret)
9526 			return ret;
9527 
9528 		goto update;
9529 	}
9530 
9531 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9532 					       old_vlan_info);
9533 	if (ret)
9534 		return ret;
9535 
	/* update state only when disabling/enabling port based VLAN */
9537 	vport->port_base_vlan_cfg.state = state;
9538 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9539 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9540 	else
9541 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9542 
9543 update:
9544 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9545 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9546 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9547 
9548 	return 0;
9549 }
9550 
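/* Work out the required port based vlan state transition from the current
 * state and the requested vlan: disabled + vlan 0 -> no change,
 * disabled + non-zero vlan -> enable, enabled + vlan 0 -> disable,
 * enabled + same vlan -> no change, enabled + different vlan -> modify.
 */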
9551 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9552 					  enum hnae3_port_base_vlan_state state,
9553 					  u16 vlan)
9554 {
9555 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9556 		if (!vlan)
9557 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9558 		else
9559 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9560 	} else {
9561 		if (!vlan)
9562 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9563 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9564 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9565 		else
9566 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9567 	}
9568 }
9569 
9570 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9571 				    u16 vlan, u8 qos, __be16 proto)
9572 {
9573 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9574 	struct hclge_vport *vport = hclge_get_vport(handle);
9575 	struct hclge_dev *hdev = vport->back;
9576 	struct hclge_vlan_info vlan_info;
9577 	u16 state;
9578 	int ret;
9579 
9580 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9581 		return -EOPNOTSUPP;
9582 
9583 	vport = hclge_get_vf_vport(hdev, vfid);
9584 	if (!vport)
9585 		return -EINVAL;
9586 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9588 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9589 		return -EINVAL;
9590 	if (proto != htons(ETH_P_8021Q))
9591 		return -EPROTONOSUPPORT;
9592 
9593 	state = hclge_get_port_base_vlan_state(vport,
9594 					       vport->port_base_vlan_cfg.state,
9595 					       vlan);
9596 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9597 		return 0;
9598 
9599 	vlan_info.vlan_tag = vlan;
9600 	vlan_info.qos = qos;
9601 	vlan_info.vlan_proto = ntohs(proto);
9602 
9603 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9604 	if (ret) {
9605 		dev_err(&hdev->pdev->dev,
9606 			"failed to update port base vlan for vf %d, ret = %d\n",
9607 			vfid, ret);
9608 		return ret;
9609 	}
9610 
9611 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9612 	 * VLAN state.
9613 	 */
9614 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9615 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9616 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9617 						  vport->vport_id, state,
9618 						  vlan, qos,
9619 						  ntohs(proto));
9620 
9621 	return 0;
9622 }
9623 
9624 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9625 {
9626 	struct hclge_vlan_info *vlan_info;
9627 	struct hclge_vport *vport;
9628 	int ret;
9629 	int vf;
9630 
	/* clear port based vlan for all VFs */
9632 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9633 		vport = &hdev->vport[vf];
9634 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9635 
9636 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9637 					       vport->vport_id,
9638 					       vlan_info->vlan_tag, true);
9639 		if (ret)
9640 			dev_err(&hdev->pdev->dev,
9641 				"failed to clear vf vlan for vf%d, ret = %d\n",
9642 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9643 	}
9644 }
9645 
9646 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9647 			  u16 vlan_id, bool is_kill)
9648 {
9649 	struct hclge_vport *vport = hclge_get_vport(handle);
9650 	struct hclge_dev *hdev = vport->back;
9651 	bool writen_to_tbl = false;
9652 	int ret = 0;
9653 
	/* When the device is resetting or the reset has failed, firmware is
	 * unable to handle the mailbox. Just record the vlan id and remove
	 * it after the reset has finished.
	 */
9658 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9659 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9660 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9661 		return -EBUSY;
9662 	}
9663 
	/* When port based vlan is enabled, the port based vlan is used as
	 * the vlan filter entry. In this case, the vlan filter table is not
	 * updated when the user adds or removes a vlan; only the vport vlan
	 * list is updated. The vlan ids in the list are not written to the
	 * vlan filter table until port based vlan is disabled.
	 */
9670 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9671 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9672 					       vlan_id, is_kill);
9673 		writen_to_tbl = true;
9674 	}
9675 
9676 	if (!ret) {
9677 		if (is_kill)
9678 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9679 		else
9680 			hclge_add_vport_vlan_table(vport, vlan_id,
9681 						   writen_to_tbl);
9682 	} else if (is_kill) {
		/* When removing the hw vlan filter fails, record the vlan
		 * id and try to remove it from hw later, to stay consistent
		 * with the stack.
		 */
9687 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9688 	}
9689 	return ret;
9690 }
9691 
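/* Retry vlan deletions that previously failed (recorded in each vport's
 * vlan_del_fail_bmap, e.g. while a reset was in progress). At most
 * HCLGE_MAX_SYNC_COUNT entries are handled per call to bound the time
 * spent in the service task.
 */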
9692 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9693 {
9694 #define HCLGE_MAX_SYNC_COUNT	60
9695 
9696 	int i, ret, sync_cnt = 0;
9697 	u16 vlan_id;
9698 
	/* check every vport, including the PF's vport 0 */
9700 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9701 		struct hclge_vport *vport = &hdev->vport[i];
9702 
9703 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9704 					 VLAN_N_VID);
9705 		while (vlan_id != VLAN_N_VID) {
9706 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9707 						       vport->vport_id, vlan_id,
9708 						       true);
9709 			if (ret && ret != -EINVAL)
9710 				return;
9711 
9712 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9713 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9714 
9715 			sync_cnt++;
9716 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9717 				return;
9718 
9719 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9720 						 VLAN_N_VID);
9721 		}
9722 	}
9723 }
9724 
9725 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9726 {
9727 	struct hclge_config_max_frm_size_cmd *req;
9728 	struct hclge_desc desc;
9729 
9730 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9731 
9732 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9733 	req->max_frm_size = cpu_to_le16(new_mps);
9734 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9735 
9736 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9737 }
9738 
9739 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9740 {
9741 	struct hclge_vport *vport = hclge_get_vport(handle);
9742 
9743 	return hclge_set_vport_mtu(vport, new_mtu);
9744 }
9745 
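/* Set the MTU for a vport. The maximum frame size includes the Ethernet
 * header, FCS and two vlan tags, e.g. an MTU of 1500 gives
 * 1500 + 14 + 4 + 2 * 4 = 1526 bytes. For a VF the value is only cached
 * (and must fit within the PF's mps); for the PF the new size is written
 * to the MAC and the packet buffers are reallocated with the client
 * paused.
 */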
9746 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9747 {
9748 	struct hclge_dev *hdev = vport->back;
9749 	int i, max_frm_size, ret;
9750 
	/* HW supports 2 layers of vlan tags */
9752 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9753 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9754 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9755 		return -EINVAL;
9756 
9757 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9758 	mutex_lock(&hdev->vport_lock);
9759 	/* VF's mps must fit within hdev->mps */
9760 	if (vport->vport_id && max_frm_size > hdev->mps) {
9761 		mutex_unlock(&hdev->vport_lock);
9762 		return -EINVAL;
9763 	} else if (vport->vport_id) {
9764 		vport->mps = max_frm_size;
9765 		mutex_unlock(&hdev->vport_lock);
9766 		return 0;
9767 	}
9768 
	/* PF's mps must be no less than the mps of any VF */
9770 	for (i = 1; i < hdev->num_alloc_vport; i++)
9771 		if (max_frm_size < hdev->vport[i].mps) {
9772 			mutex_unlock(&hdev->vport_lock);
9773 			return -EINVAL;
9774 		}
9775 
9776 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9777 
9778 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9779 	if (ret) {
9780 		dev_err(&hdev->pdev->dev,
9781 			"Change mtu fail, ret =%d\n", ret);
9782 		goto out;
9783 	}
9784 
9785 	hdev->mps = max_frm_size;
9786 	vport->mps = max_frm_size;
9787 
9788 	ret = hclge_buffer_alloc(hdev);
9789 	if (ret)
9790 		dev_err(&hdev->pdev->dev,
9791 			"Allocate buffer fail, ret =%d\n", ret);
9792 
9793 out:
9794 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9795 	mutex_unlock(&hdev->vport_lock);
9796 	return ret;
9797 }
9798 
9799 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9800 				    bool enable)
9801 {
9802 	struct hclge_reset_tqp_queue_cmd *req;
9803 	struct hclge_desc desc;
9804 	int ret;
9805 
9806 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9807 
9808 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9809 	req->tqp_id = cpu_to_le16(queue_id);
9810 	if (enable)
9811 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9812 
9813 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9814 	if (ret) {
9815 		dev_err(&hdev->pdev->dev,
9816 			"Send tqp reset cmd error, status =%d\n", ret);
9817 		return ret;
9818 	}
9819 
9820 	return 0;
9821 }
9822 
9823 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9824 {
9825 	struct hclge_reset_tqp_queue_cmd *req;
9826 	struct hclge_desc desc;
9827 	int ret;
9828 
9829 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9830 
9831 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9832 	req->tqp_id = cpu_to_le16(queue_id);
9833 
9834 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9835 	if (ret) {
9836 		dev_err(&hdev->pdev->dev,
9837 			"Get reset status error, status =%d\n", ret);
9838 		return ret;
9839 	}
9840 
9841 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9842 }
9843 
9844 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9845 {
9846 	struct hnae3_queue *queue;
9847 	struct hclge_tqp *tqp;
9848 
9849 	queue = handle->kinfo.tqp[queue_id];
9850 	tqp = container_of(queue, struct hclge_tqp, q);
9851 
9852 	return tqp->index;
9853 }
9854 
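/* Reset a single TQP: disable the queue, assert the TQP reset through the
 * command queue, poll the ready_to_reset status (sleeping ~1ms between
 * tries, up to HCLGE_TQP_RESET_TRY_TIMES), then deassert the reset.
 */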
9855 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9856 {
9857 	struct hclge_vport *vport = hclge_get_vport(handle);
9858 	struct hclge_dev *hdev = vport->back;
9859 	int reset_try_times = 0;
9860 	int reset_status;
9861 	u16 queue_gid;
9862 	int ret;
9863 
9864 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9865 
9866 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9867 	if (ret) {
9868 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9869 		return ret;
9870 	}
9871 
9872 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9873 	if (ret) {
9874 		dev_err(&hdev->pdev->dev,
9875 			"Send reset tqp cmd fail, ret = %d\n", ret);
9876 		return ret;
9877 	}
9878 
9879 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9880 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9881 		if (reset_status)
9882 			break;
9883 
9884 		/* Wait for tqp hw reset */
9885 		usleep_range(1000, 1200);
9886 	}
9887 
9888 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9889 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9890 		return ret;
9891 	}
9892 
9893 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9894 	if (ret)
9895 		dev_err(&hdev->pdev->dev,
9896 			"Deassert the soft reset fail, ret = %d\n", ret);
9897 
9898 	return ret;
9899 }
9900 
9901 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9902 {
9903 	struct hnae3_handle *handle = &vport->nic;
9904 	struct hclge_dev *hdev = vport->back;
9905 	int reset_try_times = 0;
9906 	int reset_status;
9907 	u16 queue_gid;
9908 	int ret;
9909 
9910 	if (queue_id >= handle->kinfo.num_tqps) {
9911 		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9912 			 queue_id);
9913 		return;
9914 	}
9915 
9916 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9917 
9918 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9919 	if (ret) {
9920 		dev_warn(&hdev->pdev->dev,
9921 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9922 		return;
9923 	}
9924 
9925 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9926 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9927 		if (reset_status)
9928 			break;
9929 
9930 		/* Wait for tqp hw reset */
9931 		usleep_range(1000, 1200);
9932 	}
9933 
9934 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9935 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9936 		return;
9937 	}
9938 
9939 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9940 	if (ret)
9941 		dev_warn(&hdev->pdev->dev,
9942 			 "Deassert the soft reset fail, ret = %d\n", ret);
9943 }
9944 
9945 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9946 {
9947 	struct hclge_vport *vport = hclge_get_vport(handle);
9948 	struct hclge_dev *hdev = vport->back;
9949 
9950 	return hdev->fw_version;
9951 }
9952 
9953 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9954 {
9955 	struct phy_device *phydev = hdev->hw.mac.phydev;
9956 
9957 	if (!phydev)
9958 		return;
9959 
9960 	phy_set_asym_pause(phydev, rx_en, tx_en);
9961 }
9962 
9963 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9964 {
9965 	int ret;
9966 
9967 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9968 		return 0;
9969 
9970 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9971 	if (ret)
9972 		dev_err(&hdev->pdev->dev,
9973 			"configure pauseparam error, ret = %d.\n", ret);
9974 
9975 	return ret;
9976 }
9977 
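/* Resolve and apply MAC pause settings from the PHY autoneg result: the
 * local and link partner pause advertisements are combined with
 * mii_resolve_flowctrl_fdx(), and pause is forced off on a half-duplex
 * link.
 */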
9978 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9979 {
9980 	struct phy_device *phydev = hdev->hw.mac.phydev;
9981 	u16 remote_advertising = 0;
9982 	u16 local_advertising;
9983 	u32 rx_pause, tx_pause;
9984 	u8 flowctl;
9985 
9986 	if (!phydev->link || !phydev->autoneg)
9987 		return 0;
9988 
9989 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9990 
9991 	if (phydev->pause)
9992 		remote_advertising = LPA_PAUSE_CAP;
9993 
9994 	if (phydev->asym_pause)
9995 		remote_advertising |= LPA_PAUSE_ASYM;
9996 
9997 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9998 					   remote_advertising);
9999 	tx_pause = flowctl & FLOW_CTRL_TX;
10000 	rx_pause = flowctl & FLOW_CTRL_RX;
10001 
10002 	if (phydev->duplex == HCLGE_MAC_HALF) {
10003 		tx_pause = 0;
10004 		rx_pause = 0;
10005 	}
10006 
10007 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10008 }
10009 
10010 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10011 				 u32 *rx_en, u32 *tx_en)
10012 {
10013 	struct hclge_vport *vport = hclge_get_vport(handle);
10014 	struct hclge_dev *hdev = vport->back;
10015 	struct phy_device *phydev = hdev->hw.mac.phydev;
10016 
10017 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10018 
10019 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10020 		*rx_en = 0;
10021 		*tx_en = 0;
10022 		return;
10023 	}
10024 
10025 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10026 		*rx_en = 1;
10027 		*tx_en = 0;
10028 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10029 		*tx_en = 1;
10030 		*rx_en = 0;
10031 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10032 		*rx_en = 1;
10033 		*tx_en = 1;
10034 	} else {
10035 		*rx_en = 0;
10036 		*tx_en = 0;
10037 	}
10038 }
10039 
10040 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10041 					 u32 rx_en, u32 tx_en)
10042 {
10043 	if (rx_en && tx_en)
10044 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10045 	else if (rx_en && !tx_en)
10046 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10047 	else if (!rx_en && tx_en)
10048 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10049 	else
10050 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10051 
10052 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10053 }
10054 
10055 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10056 				u32 rx_en, u32 tx_en)
10057 {
10058 	struct hclge_vport *vport = hclge_get_vport(handle);
10059 	struct hclge_dev *hdev = vport->back;
10060 	struct phy_device *phydev = hdev->hw.mac.phydev;
10061 	u32 fc_autoneg;
10062 
10063 	if (phydev) {
10064 		fc_autoneg = hclge_get_autoneg(handle);
10065 		if (auto_neg != fc_autoneg) {
10066 			dev_info(&hdev->pdev->dev,
10067 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10068 			return -EOPNOTSUPP;
10069 		}
10070 	}
10071 
10072 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10073 		dev_info(&hdev->pdev->dev,
10074 			 "Priority flow control enabled. Cannot set link flow control.\n");
10075 		return -EOPNOTSUPP;
10076 	}
10077 
10078 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10079 
10080 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10081 
10082 	if (!auto_neg)
10083 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10084 
10085 	if (phydev)
10086 		return phy_start_aneg(phydev);
10087 
10088 	return -EOPNOTSUPP;
10089 }
10090 
10091 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10092 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10093 {
10094 	struct hclge_vport *vport = hclge_get_vport(handle);
10095 	struct hclge_dev *hdev = vport->back;
10096 
10097 	if (speed)
10098 		*speed = hdev->hw.mac.speed;
10099 	if (duplex)
10100 		*duplex = hdev->hw.mac.duplex;
10101 	if (auto_neg)
10102 		*auto_neg = hdev->hw.mac.autoneg;
10103 }
10104 
10105 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10106 				 u8 *module_type)
10107 {
10108 	struct hclge_vport *vport = hclge_get_vport(handle);
10109 	struct hclge_dev *hdev = vport->back;
10110 
	/* When the nic is down, the service task is not running and does not
	 * refresh the port information every second. Query the port
	 * information before returning the media type to ensure the media
	 * information is up to date.
	 */
10115 	hclge_update_port_info(hdev);
10116 
10117 	if (media_type)
10118 		*media_type = hdev->hw.mac.media_type;
10119 
10120 	if (module_type)
10121 		*module_type = hdev->hw.mac.module_type;
10122 }
10123 
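/* Report the MDI/MDI-X settings of the copper PHY: switch the PHY page
 * register to the MDIX page, read the control and status registers,
 * restore the copper page, and translate the result into the ethtool
 * ETH_TP_MDI* values.
 */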
10124 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10125 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10126 {
10127 	struct hclge_vport *vport = hclge_get_vport(handle);
10128 	struct hclge_dev *hdev = vport->back;
10129 	struct phy_device *phydev = hdev->hw.mac.phydev;
10130 	int mdix_ctrl, mdix, is_resolved;
10131 	unsigned int retval;
10132 
10133 	if (!phydev) {
10134 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10135 		*tp_mdix = ETH_TP_MDI_INVALID;
10136 		return;
10137 	}
10138 
10139 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10140 
10141 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10142 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10143 				    HCLGE_PHY_MDIX_CTRL_S);
10144 
10145 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10146 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10147 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10148 
10149 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10150 
10151 	switch (mdix_ctrl) {
10152 	case 0x0:
10153 		*tp_mdix_ctrl = ETH_TP_MDI;
10154 		break;
10155 	case 0x1:
10156 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10157 		break;
10158 	case 0x3:
10159 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10160 		break;
10161 	default:
10162 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10163 		break;
10164 	}
10165 
10166 	if (!is_resolved)
10167 		*tp_mdix = ETH_TP_MDI_INVALID;
10168 	else if (mdix)
10169 		*tp_mdix = ETH_TP_MDI_X;
10170 	else
10171 		*tp_mdix = ETH_TP_MDI;
10172 }
10173 
10174 static void hclge_info_show(struct hclge_dev *hdev)
10175 {
10176 	struct device *dev = &hdev->pdev->dev;
10177 
10178 	dev_info(dev, "PF info begin:\n");
10179 
10180 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10181 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10182 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10183 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10185 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10186 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10187 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10188 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10189 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10190 	dev_info(dev, "This is %s PF\n",
10191 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10192 	dev_info(dev, "DCB %s\n",
10193 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10194 	dev_info(dev, "MQPRIO %s\n",
10195 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10196 
10197 	dev_info(dev, "PF info end.\n");
10198 }
10199 
10200 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10201 					  struct hclge_vport *vport)
10202 {
10203 	struct hnae3_client *client = vport->nic.client;
10204 	struct hclge_dev *hdev = ae_dev->priv;
10205 	int rst_cnt = hdev->rst_stats.reset_cnt;
10206 	int ret;
10207 
10208 	ret = client->ops->init_instance(&vport->nic);
10209 	if (ret)
10210 		return ret;
10211 
10212 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10213 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10214 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10215 		ret = -EBUSY;
10216 		goto init_nic_err;
10217 	}
10218 
10219 	/* Enable nic hw error interrupts */
10220 	ret = hclge_config_nic_hw_error(hdev, true);
10221 	if (ret) {
10222 		dev_err(&ae_dev->pdev->dev,
10223 			"fail(%d) to enable hw error interrupts\n", ret);
10224 		goto init_nic_err;
10225 	}
10226 
10227 	hnae3_set_client_init_flag(client, ae_dev, 1);
10228 
10229 	if (netif_msg_drv(&hdev->vport->nic))
10230 		hclge_info_show(hdev);
10231 
10232 	return ret;
10233 
10234 init_nic_err:
10235 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10236 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10237 		msleep(HCLGE_WAIT_RESET_DONE);
10238 
10239 	client->ops->uninit_instance(&vport->nic, 0);
10240 
10241 	return ret;
10242 }
10243 
10244 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10245 					   struct hclge_vport *vport)
10246 {
10247 	struct hclge_dev *hdev = ae_dev->priv;
10248 	struct hnae3_client *client;
10249 	int rst_cnt;
10250 	int ret;
10251 
10252 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10253 	    !hdev->nic_client)
10254 		return 0;
10255 
10256 	client = hdev->roce_client;
10257 	ret = hclge_init_roce_base_info(vport);
10258 	if (ret)
10259 		return ret;
10260 
10261 	rst_cnt = hdev->rst_stats.reset_cnt;
10262 	ret = client->ops->init_instance(&vport->roce);
10263 	if (ret)
10264 		return ret;
10265 
10266 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10267 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10268 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10269 		ret = -EBUSY;
10270 		goto init_roce_err;
10271 	}
10272 
10273 	/* Enable roce ras interrupts */
10274 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10275 	if (ret) {
10276 		dev_err(&ae_dev->pdev->dev,
10277 			"fail(%d) to enable roce ras interrupts\n", ret);
10278 		goto init_roce_err;
10279 	}
10280 
10281 	hnae3_set_client_init_flag(client, ae_dev, 1);
10282 
10283 	return 0;
10284 
10285 init_roce_err:
10286 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10287 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10288 		msleep(HCLGE_WAIT_RESET_DONE);
10289 
10290 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10291 
10292 	return ret;
10293 }
10294 
10295 static int hclge_init_client_instance(struct hnae3_client *client,
10296 				      struct hnae3_ae_dev *ae_dev)
10297 {
10298 	struct hclge_dev *hdev = ae_dev->priv;
10299 	struct hclge_vport *vport;
10300 	int i, ret;
10301 
10302 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10303 		vport = &hdev->vport[i];
10304 
10305 		switch (client->type) {
10306 		case HNAE3_CLIENT_KNIC:
10307 			hdev->nic_client = client;
10308 			vport->nic.client = client;
10309 			ret = hclge_init_nic_client_instance(ae_dev, vport);
10310 			if (ret)
10311 				goto clear_nic;
10312 
10313 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10314 			if (ret)
10315 				goto clear_roce;
10316 
10317 			break;
10318 		case HNAE3_CLIENT_ROCE:
10319 			if (hnae3_dev_roce_supported(hdev)) {
10320 				hdev->roce_client = client;
10321 				vport->roce.client = client;
10322 			}
10323 
10324 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10325 			if (ret)
10326 				goto clear_roce;
10327 
10328 			break;
10329 		default:
10330 			return -EINVAL;
10331 		}
10332 	}
10333 
10334 	return 0;
10335 
10336 clear_nic:
10337 	hdev->nic_client = NULL;
10338 	vport->nic.client = NULL;
10339 	return ret;
10340 clear_roce:
10341 	hdev->roce_client = NULL;
10342 	vport->roce.client = NULL;
10343 	return ret;
10344 }
10345 
10346 static void hclge_uninit_client_instance(struct hnae3_client *client,
10347 					 struct hnae3_ae_dev *ae_dev)
10348 {
10349 	struct hclge_dev *hdev = ae_dev->priv;
10350 	struct hclge_vport *vport;
10351 	int i;
10352 
10353 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10354 		vport = &hdev->vport[i];
10355 		if (hdev->roce_client) {
10356 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10357 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10358 				msleep(HCLGE_WAIT_RESET_DONE);
10359 
10360 			hdev->roce_client->ops->uninit_instance(&vport->roce,
10361 								0);
10362 			hdev->roce_client = NULL;
10363 			vport->roce.client = NULL;
10364 		}
10365 		if (client->type == HNAE3_CLIENT_ROCE)
10366 			return;
10367 		if (hdev->nic_client && client->ops->uninit_instance) {
10368 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10369 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10370 				msleep(HCLGE_WAIT_RESET_DONE);
10371 
10372 			client->ops->uninit_instance(&vport->nic, 0);
10373 			hdev->nic_client = NULL;
10374 			vport->nic.client = NULL;
10375 		}
10376 	}
10377 }
10378 
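/* Map the optional device memory BAR (HCLGE_MEM_BAR) with write-combining.
 * Devices without this BAR simply skip the mapping.
 */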
10379 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10380 {
10381 #define HCLGE_MEM_BAR		4
10382 
10383 	struct pci_dev *pdev = hdev->pdev;
10384 	struct hclge_hw *hw = &hdev->hw;
10385 
	/* if the device does not have device memory, return directly */
10387 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10388 		return 0;
10389 
10390 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10391 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10392 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10393 	if (!hw->mem_base) {
10394 		dev_err(&pdev->dev, "failed to map device memory\n");
10395 		return -EFAULT;
10396 	}
10397 
10398 	return 0;
10399 }
10400 
10401 static int hclge_pci_init(struct hclge_dev *hdev)
10402 {
10403 	struct pci_dev *pdev = hdev->pdev;
10404 	struct hclge_hw *hw;
10405 	int ret;
10406 
10407 	ret = pci_enable_device(pdev);
10408 	if (ret) {
10409 		dev_err(&pdev->dev, "failed to enable PCI device\n");
10410 		return ret;
10411 	}
10412 
10413 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10414 	if (ret) {
10415 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10416 		if (ret) {
10417 			dev_err(&pdev->dev,
10418 				"can't set consistent PCI DMA");
10419 			goto err_disable_device;
10420 		}
10421 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10422 	}
10423 
10424 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10425 	if (ret) {
10426 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10427 		goto err_disable_device;
10428 	}
10429 
10430 	pci_set_master(pdev);
10431 	hw = &hdev->hw;
10432 	hw->io_base = pcim_iomap(pdev, 2, 0);
10433 	if (!hw->io_base) {
10434 		dev_err(&pdev->dev, "Can't map configuration register space\n");
10435 		ret = -ENOMEM;
10436 		goto err_clr_master;
10437 	}
10438 
10439 	ret = hclge_dev_mem_map(hdev);
10440 	if (ret)
10441 		goto err_unmap_io_base;
10442 
10443 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10444 
10445 	return 0;
10446 
10447 err_unmap_io_base:
10448 	pcim_iounmap(pdev, hdev->hw.io_base);
10449 err_clr_master:
10450 	pci_clear_master(pdev);
10451 	pci_release_regions(pdev);
10452 err_disable_device:
10453 	pci_disable_device(pdev);
10454 
10455 	return ret;
10456 }
10457 
10458 static void hclge_pci_uninit(struct hclge_dev *hdev)
10459 {
10460 	struct pci_dev *pdev = hdev->pdev;
10461 
10462 	if (hdev->hw.mem_base)
10463 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10464 
10465 	pcim_iounmap(pdev, hdev->hw.io_base);
10466 	pci_free_irq_vectors(pdev);
10467 	pci_clear_master(pdev);
10468 	pci_release_mem_regions(pdev);
10469 	pci_disable_device(pdev);
10470 }
10471 
10472 static void hclge_state_init(struct hclge_dev *hdev)
10473 {
10474 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10475 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10476 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10477 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10478 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10479 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10480 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10481 }
10482 
10483 static void hclge_state_uninit(struct hclge_dev *hdev)
10484 {
10485 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10486 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10487 
10488 	if (hdev->reset_timer.function)
10489 		del_timer_sync(&hdev->reset_timer);
10490 	if (hdev->service_task.work.func)
10491 		cancel_delayed_work_sync(&hdev->service_task);
10492 }
10493 
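/* Prepare the device for an FLR: take the reset semaphore, run the reset
 * prepare sequence (retrying while it fails or a reset is still pending),
 * then disable the misc vector and the command queue; hclge_flr_done()
 * re-enables the misc vector and rebuilds the device after the FLR
 * completes.
 */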
10494 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10495 {
10496 #define HCLGE_FLR_RETRY_WAIT_MS	500
10497 #define HCLGE_FLR_RETRY_CNT	5
10498 
10499 	struct hclge_dev *hdev = ae_dev->priv;
10500 	int retry_cnt = 0;
10501 	int ret;
10502 
10503 retry:
10504 	down(&hdev->reset_sem);
10505 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10506 	hdev->reset_type = HNAE3_FLR_RESET;
10507 	ret = hclge_reset_prepare(hdev);
10508 	if (ret || hdev->reset_pending) {
10509 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10510 			ret);
10511 		if (hdev->reset_pending ||
10512 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10513 			dev_err(&hdev->pdev->dev,
10514 				"reset_pending:0x%lx, retry_cnt:%d\n",
10515 				hdev->reset_pending, retry_cnt);
10516 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10517 			up(&hdev->reset_sem);
10518 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10519 			goto retry;
10520 		}
10521 	}
10522 
10523 	/* disable misc vector before FLR done */
10524 	hclge_enable_vector(&hdev->misc_vector, false);
10525 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10526 	hdev->rst_stats.flr_rst_cnt++;
10527 }
10528 
10529 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10530 {
10531 	struct hclge_dev *hdev = ae_dev->priv;
10532 	int ret;
10533 
10534 	hclge_enable_vector(&hdev->misc_vector, true);
10535 
10536 	ret = hclge_reset_rebuild(hdev);
10537 	if (ret)
10538 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10539 
10540 	hdev->reset_type = HNAE3_NONE_RESET;
10541 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10542 	up(&hdev->reset_sem);
10543 }
10544 
10545 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10546 {
10547 	u16 i;
10548 
10549 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10550 		struct hclge_vport *vport = &hdev->vport[i];
10551 		int ret;
10552 
10553 		 /* Send cmd to clear VF's FUNC_RST_ING */
10554 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10555 		if (ret)
10556 			dev_warn(&hdev->pdev->dev,
10557 				 "clear vf(%u) rst failed %d!\n",
10558 				 vport->vport_id, ret);
10559 	}
10560 }
10561 
10562 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10563 {
10564 	struct pci_dev *pdev = ae_dev->pdev;
10565 	struct hclge_dev *hdev;
10566 	int ret;
10567 
10568 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10569 	if (!hdev)
10570 		return -ENOMEM;
10571 
10572 	hdev->pdev = pdev;
10573 	hdev->ae_dev = ae_dev;
10574 	hdev->reset_type = HNAE3_NONE_RESET;
10575 	hdev->reset_level = HNAE3_FUNC_RESET;
10576 	ae_dev->priv = hdev;
10577 
	/* HW supports 2 layers of vlan tags */
10579 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10580 
10581 	mutex_init(&hdev->vport_lock);
10582 	spin_lock_init(&hdev->fd_rule_lock);
10583 	sema_init(&hdev->reset_sem, 1);
10584 
10585 	ret = hclge_pci_init(hdev);
10586 	if (ret)
10587 		goto out;
10588 
	/* Initialize the firmware command queue */
10590 	ret = hclge_cmd_queue_init(hdev);
10591 	if (ret)
10592 		goto err_pci_uninit;
10593 
	/* Firmware command initialization */
10595 	ret = hclge_cmd_init(hdev);
10596 	if (ret)
10597 		goto err_cmd_uninit;
10598 
10599 	ret = hclge_get_cap(hdev);
10600 	if (ret)
10601 		goto err_cmd_uninit;
10602 
10603 	ret = hclge_query_dev_specs(hdev);
10604 	if (ret) {
10605 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10606 			ret);
10607 		goto err_cmd_uninit;
10608 	}
10609 
10610 	ret = hclge_configure(hdev);
10611 	if (ret) {
10612 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10613 		goto err_cmd_uninit;
10614 	}
10615 
10616 	ret = hclge_init_msi(hdev);
10617 	if (ret) {
10618 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10619 		goto err_cmd_uninit;
10620 	}
10621 
10622 	ret = hclge_misc_irq_init(hdev);
10623 	if (ret)
10624 		goto err_msi_uninit;
10625 
10626 	ret = hclge_alloc_tqps(hdev);
10627 	if (ret) {
10628 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10629 		goto err_msi_irq_uninit;
10630 	}
10631 
10632 	ret = hclge_alloc_vport(hdev);
10633 	if (ret)
10634 		goto err_msi_irq_uninit;
10635 
10636 	ret = hclge_map_tqp(hdev);
10637 	if (ret)
10638 		goto err_msi_irq_uninit;
10639 
10640 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10641 		ret = hclge_mac_mdio_config(hdev);
10642 		if (ret)
10643 			goto err_msi_irq_uninit;
10644 	}
10645 
10646 	ret = hclge_init_umv_space(hdev);
10647 	if (ret)
10648 		goto err_mdiobus_unreg;
10649 
10650 	ret = hclge_mac_init(hdev);
10651 	if (ret) {
10652 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10653 		goto err_mdiobus_unreg;
10654 	}
10655 
10656 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10657 	if (ret) {
10658 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10659 		goto err_mdiobus_unreg;
10660 	}
10661 
10662 	ret = hclge_config_gro(hdev, true);
10663 	if (ret)
10664 		goto err_mdiobus_unreg;
10665 
10666 	ret = hclge_init_vlan_config(hdev);
10667 	if (ret) {
10668 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10669 		goto err_mdiobus_unreg;
10670 	}
10671 
10672 	ret = hclge_tm_schd_init(hdev);
10673 	if (ret) {
10674 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10675 		goto err_mdiobus_unreg;
10676 	}
10677 
10678 	ret = hclge_rss_init_cfg(hdev);
10679 	if (ret) {
10680 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10681 		goto err_mdiobus_unreg;
10682 	}
10683 
10684 	ret = hclge_rss_init_hw(hdev);
10685 	if (ret) {
10686 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10687 		goto err_mdiobus_unreg;
10688 	}
10689 
10690 	ret = init_mgr_tbl(hdev);
10691 	if (ret) {
10692 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10693 		goto err_mdiobus_unreg;
10694 	}
10695 
10696 	ret = hclge_init_fd_config(hdev);
10697 	if (ret) {
10698 		dev_err(&pdev->dev,
10699 			"fd table init fail, ret=%d\n", ret);
10700 		goto err_mdiobus_unreg;
10701 	}
10702 
10703 	INIT_KFIFO(hdev->mac_tnl_log);
10704 
10705 	hclge_dcb_ops_set(hdev);
10706 
10707 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10708 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10709 
10710 	/* Set up affinity after the service timer is set up because
10711 	 * add_timer_on() is called from the affinity notify callback.
10712 	 */
10713 	hclge_misc_affinity_setup(hdev);
10714 
10715 	hclge_clear_all_event_cause(hdev);
10716 	hclge_clear_resetting_state(hdev);
10717 
10718 	/* Log and clear the hw errors that have already occurred */
10719 	hclge_handle_all_hns_hw_errors(ae_dev);
10720 
10721 	/* Request a delayed reset for error recovery: an immediate global
10722 	 * reset on a PF would disturb the pending initialization of other PFs
10723 	 */
10724 	if (ae_dev->hw_err_reset_req) {
10725 		enum hnae3_reset_type reset_level;
10726 
10727 		reset_level = hclge_get_reset_level(ae_dev,
10728 						    &ae_dev->hw_err_reset_req);
10729 		hclge_set_def_reset_request(ae_dev, reset_level);
10730 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10731 	}
10732 
10733 	/* Enable MISC vector(vector0) */
10734 	hclge_enable_vector(&hdev->misc_vector, true);
10735 
10736 	hclge_state_init(hdev);
10737 	hdev->last_reset_time = jiffies;
10738 
10739 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10740 		 HCLGE_DRIVER_NAME);
10741 
10742 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10743 
10744 	return 0;
10745 
10746 err_mdiobus_unreg:
10747 	if (hdev->hw.mac.phydev)
10748 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10749 err_msi_irq_uninit:
10750 	hclge_misc_irq_uninit(hdev);
10751 err_msi_uninit:
10752 	pci_free_irq_vectors(pdev);
10753 err_cmd_uninit:
10754 	hclge_cmd_uninit(hdev);
10755 err_pci_uninit:
10756 	pcim_iounmap(pdev, hdev->hw.io_base);
10757 	pci_clear_master(pdev);
10758 	pci_release_regions(pdev);
10759 	pci_disable_device(pdev);
10760 out:
10761 	mutex_destroy(&hdev->vport_lock);
10762 	return ret;
10763 }
10764 
10765 static void hclge_stats_clear(struct hclge_dev *hdev)
10766 {
10767 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10768 }
10769 
10770 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10771 {
10772 	return hclge_config_switch_param(hdev, vf, enable,
10773 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10774 }
10775 
10776 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10777 {
10778 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10779 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10780 					  enable, vf);
10781 }
10782 
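/* Program the MAC anti-spoof switch parameter and the ingress VLAN
 * filter for the given VF; a failure of either step is logged.
 */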
10783 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10784 {
10785 	int ret;
10786 
10787 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10788 	if (ret) {
10789 		dev_err(&hdev->pdev->dev,
10790 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10791 			vf, enable ? "on" : "off", ret);
10792 		return ret;
10793 	}
10794 
10795 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10796 	if (ret)
10797 		dev_err(&hdev->pdev->dev,
10798 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10799 			vf, enable ? "on" : "off", ret);
10800 
10801 	return ret;
10802 }
10803 
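/* Handler for the .set_vf_spoofchk ae_ops callback: unsupported before
 * device version V2. Warns when the VF's VLAN or MAC table is already
 * full, then updates the hardware and the cached spoof-check state.
 */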
10804 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10805 				 bool enable)
10806 {
10807 	struct hclge_vport *vport = hclge_get_vport(handle);
10808 	struct hclge_dev *hdev = vport->back;
10809 	u32 new_spoofchk = enable ? 1 : 0;
10810 	int ret;
10811 
10812 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10813 		return -EOPNOTSUPP;
10814 
10815 	vport = hclge_get_vf_vport(hdev, vf);
10816 	if (!vport)
10817 		return -EINVAL;
10818 
10819 	if (vport->vf_info.spoofchk == new_spoofchk)
10820 		return 0;
10821 
10822 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10823 		dev_warn(&hdev->pdev->dev,
10824 			 "vf %d vlan table is full, enabling spoof check may cause its packet sending to fail\n",
10825 			 vf);
10826 	else if (enable && hclge_is_umv_space_full(vport, true))
10827 		dev_warn(&hdev->pdev->dev,
10828 			 "vf %d mac table is full, enabling spoof check may cause its packet sending to fail\n",
10829 			 vf);
10830 
10831 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10832 	if (ret)
10833 		return ret;
10834 
10835 	vport->vf_info.spoofchk = new_spoofchk;
10836 	return 0;
10837 }
10838 
10839 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10840 {
10841 	struct hclge_vport *vport = hdev->vport;
10842 	int ret;
10843 	int i;
10844 
10845 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10846 		return 0;
10847 
10848 	/* resume the vf spoof check state after reset */
10849 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10850 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10851 					       vport->vf_info.spoofchk);
10852 		if (ret)
10853 			return ret;
10854 
10855 		vport++;
10856 	}
10857 
10858 	return 0;
10859 }
10860 
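/* Handler for the .set_vf_trust ae_ops callback. When trust is revoked
 * from a VF running in promiscuous mode, promiscuous mode is disabled
 * for it and the VF is informed of the promisc change.
 */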
10861 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10862 {
10863 	struct hclge_vport *vport = hclge_get_vport(handle);
10864 	struct hclge_dev *hdev = vport->back;
10865 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10866 	u32 new_trusted = enable ? 1 : 0;
10867 	bool en_bc_pmc;
10868 	int ret;
10869 
10870 	vport = hclge_get_vf_vport(hdev, vf);
10871 	if (!vport)
10872 		return -EINVAL;
10873 
10874 	if (vport->vf_info.trusted == new_trusted)
10875 		return 0;
10876 
10877 	/* Disable promisc mode for VF if it is not trusted any more. */
10878 	if (!enable && vport->vf_info.promisc_enable) {
10879 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10880 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10881 						   en_bc_pmc);
10882 		if (ret)
10883 			return ret;
10884 		vport->vf_info.promisc_enable = 0;
10885 		hclge_inform_vf_promisc_info(vport);
10886 	}
10887 
10888 	vport->vf_info.trusted = new_trusted;
10889 
10890 	return 0;
10891 }
10892 
10893 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10894 {
10895 	int ret;
10896 	int vf;
10897 
10898 	/* reset vf rate to default value */
10899 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10900 		struct hclge_vport *vport = &hdev->vport[vf];
10901 
10902 		vport->vf_info.max_tx_rate = 0;
10903 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10904 		if (ret)
10905 			dev_err(&hdev->pdev->dev,
10906 				"vf%d failed to reset to default, ret=%d\n",
10907 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10908 	}
10909 }
10910 
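/* Only max_tx_rate is configurable: min_tx_rate must be 0 and
 * max_tx_rate must not exceed the MAC's maximum speed.
 */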
10911 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10912 				     int min_tx_rate, int max_tx_rate)
10913 {
10914 	if (min_tx_rate != 0 ||
10915 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10916 		dev_err(&hdev->pdev->dev,
10917 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10918 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10919 		return -EINVAL;
10920 	}
10921 
10922 	return 0;
10923 }
10924 
10925 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10926 			     int min_tx_rate, int max_tx_rate, bool force)
10927 {
10928 	struct hclge_vport *vport = hclge_get_vport(handle);
10929 	struct hclge_dev *hdev = vport->back;
10930 	int ret;
10931 
10932 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10933 	if (ret)
10934 		return ret;
10935 
10936 	vport = hclge_get_vf_vport(hdev, vf);
10937 	if (!vport)
10938 		return -EINVAL;
10939 
10940 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10941 		return 0;
10942 
10943 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10944 	if (ret)
10945 		return ret;
10946 
10947 	vport->vf_info.max_tx_rate = max_tx_rate;
10948 
10949 	return 0;
10950 }
10951 
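/* Restore each VF's configured max_tx_rate after a reset; a value of
 * zero (no limit) is skipped since firmware already defaults to it.
 */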
10952 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10953 {
10954 	struct hnae3_handle *handle = &hdev->vport->nic;
10955 	struct hclge_vport *vport;
10956 	int ret;
10957 	int vf;
10958 
10959 	/* resume the vf max_tx_rate after reset */
10960 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10961 		vport = hclge_get_vf_vport(hdev, vf);
10962 		if (!vport)
10963 			return -EINVAL;
10964 
10965 		/* Zero means max rate; after reset the firmware has already
10966 		 * set it to max rate, so just continue.
10967 		 */
10968 		if (!vport->vf_info.max_tx_rate)
10969 			continue;
10970 
10971 		ret = hclge_set_vf_rate(handle, vf, 0,
10972 					vport->vf_info.max_tx_rate, true);
10973 		if (ret) {
10974 			dev_err(&hdev->pdev->dev,
10975 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10976 				vf, vport->vf_info.max_tx_rate, ret);
10977 			return ret;
10978 		}
10979 	}
10980 
10981 	return 0;
10982 }
10983 
10984 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10985 {
10986 	struct hclge_vport *vport = hdev->vport;
10987 	int i;
10988 
10989 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10990 		hclge_vport_stop(vport);
10991 		vport++;
10992 	}
10993 }
10994 
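/* hclge_reset_ae_dev - re-initialize hardware state after a reset
 *
 * Reprograms the TQP mapping, MAC, TSO/GRO, VLAN, TM, RSS, manager
 * table and flow director configuration, re-enables the hardware error
 * interrupts that a global reset disables, and restores the per-VF
 * spoof-check and rate settings. IMP and global resets additionally
 * clear the cached VLAN and UMV state.
 */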
10995 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10996 {
10997 	struct hclge_dev *hdev = ae_dev->priv;
10998 	struct pci_dev *pdev = ae_dev->pdev;
10999 	int ret;
11000 
11001 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11002 
11003 	hclge_stats_clear(hdev);
11004 	/* NOTE: a PF reset doesn't need to clear or restore the PF and VF
11005 	 * table entries, so don't clean the tables in memory here.
11006 	 */
11007 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11008 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11009 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11010 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11011 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11012 		hclge_reset_umv_space(hdev);
11013 	}
11014 
11015 	ret = hclge_cmd_init(hdev);
11016 	if (ret) {
11017 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11018 		return ret;
11019 	}
11020 
11021 	ret = hclge_map_tqp(hdev);
11022 	if (ret) {
11023 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11024 		return ret;
11025 	}
11026 
11027 	ret = hclge_mac_init(hdev);
11028 	if (ret) {
11029 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11030 		return ret;
11031 	}
11032 
11033 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11034 	if (ret) {
11035 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11036 		return ret;
11037 	}
11038 
11039 	ret = hclge_config_gro(hdev, true);
11040 	if (ret)
11041 		return ret;
11042 
11043 	ret = hclge_init_vlan_config(hdev);
11044 	if (ret) {
11045 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11046 		return ret;
11047 	}
11048 
11049 	ret = hclge_tm_init_hw(hdev, true);
11050 	if (ret) {
11051 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11052 		return ret;
11053 	}
11054 
11055 	ret = hclge_rss_init_hw(hdev);
11056 	if (ret) {
11057 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11058 		return ret;
11059 	}
11060 
11061 	ret = init_mgr_tbl(hdev);
11062 	if (ret) {
11063 		dev_err(&pdev->dev,
11064 			"failed to reinit manager table, ret = %d\n", ret);
11065 		return ret;
11066 	}
11067 
11068 	ret = hclge_init_fd_config(hdev);
11069 	if (ret) {
11070 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11071 		return ret;
11072 	}
11073 
11074 	/* Log and clear the hw errors that have already occurred */
11075 	hclge_handle_all_hns_hw_errors(ae_dev);
11076 
11077 	/* Re-enable the hw error interrupts because
11078 	 * the interrupts get disabled on global reset.
11079 	 */
11080 	ret = hclge_config_nic_hw_error(hdev, true);
11081 	if (ret) {
11082 		dev_err(&pdev->dev,
11083 			"fail(%d) to re-enable NIC hw error interrupts\n",
11084 			ret);
11085 		return ret;
11086 	}
11087 
11088 	if (hdev->roce_client) {
11089 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11090 		if (ret) {
11091 			dev_err(&pdev->dev,
11092 				"fail(%d) to re-enable roce ras interrupts\n",
11093 				ret);
11094 			return ret;
11095 		}
11096 	}
11097 
11098 	hclge_reset_vport_state(hdev);
11099 	ret = hclge_reset_vport_spoofchk(hdev);
11100 	if (ret)
11101 		return ret;
11102 
11103 	ret = hclge_resume_vf_rate(hdev);
11104 	if (ret)
11105 		return ret;
11106 
11107 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11108 		 HCLGE_DRIVER_NAME);
11109 
11110 	return 0;
11111 }
11112 
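/* Tear down everything set up by hclge_init_ae_dev(): reset VF rates,
 * clear VF VLAN entries, quiesce the misc vector and hardware error
 * interrupts, then release command queue, IRQ and PCI resources.
 */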
11113 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11114 {
11115 	struct hclge_dev *hdev = ae_dev->priv;
11116 	struct hclge_mac *mac = &hdev->hw.mac;
11117 
11118 	hclge_reset_vf_rate(hdev);
11119 	hclge_clear_vf_vlan(hdev);
11120 	hclge_misc_affinity_teardown(hdev);
11121 	hclge_state_uninit(hdev);
11122 	hclge_uninit_mac_table(hdev);
11123 
11124 	if (mac->phydev)
11125 		mdiobus_unregister(mac->mdio_bus);
11126 
11127 	/* Disable MISC vector(vector0) */
11128 	hclge_enable_vector(&hdev->misc_vector, false);
11129 	synchronize_irq(hdev->misc_vector.vector_irq);
11130 
11131 	/* Disable all hw interrupts */
11132 	hclge_config_mac_tnl_int(hdev, false);
11133 	hclge_config_nic_hw_error(hdev, false);
11134 	hclge_config_rocee_ras_interrupt(hdev, false);
11135 
11136 	hclge_cmd_uninit(hdev);
11137 	hclge_misc_irq_uninit(hdev);
11138 	hclge_pci_uninit(hdev);
11139 	mutex_destroy(&hdev->vport_lock);
11140 	hclge_uninit_vport_vlan_table(hdev);
11141 	ae_dev->priv = NULL;
11142 }
11143 
11144 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11145 {
11146 	struct hclge_vport *vport = hclge_get_vport(handle);
11147 	struct hclge_dev *hdev = vport->back;
11148 
11149 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11150 }
11151 
11152 static void hclge_get_channels(struct hnae3_handle *handle,
11153 			       struct ethtool_channels *ch)
11154 {
11155 	ch->max_combined = hclge_get_max_channels(handle);
11156 	ch->other_count = 1;
11157 	ch->max_other = 1;
11158 	ch->combined_count = handle->kinfo.rss_size;
11159 }
11160 
11161 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11162 					u16 *alloc_tqps, u16 *max_rss_size)
11163 {
11164 	struct hclge_vport *vport = hclge_get_vport(handle);
11165 	struct hclge_dev *hdev = vport->back;
11166 
11167 	*alloc_tqps = vport->alloc_tqps;
11168 	*max_rss_size = hdev->pf_rss_size_max;
11169 }
11170 
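/* Handler for the .set_channels ae_ops callback: updates the requested
 * RSS size, remaps the vport TQPs, reprograms the RSS TC mode and,
 * unless the user configured it explicitly, rebuilds the RSS
 * indirection table for the new queue count.
 */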
11171 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11172 			      bool rxfh_configured)
11173 {
11174 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11175 	struct hclge_vport *vport = hclge_get_vport(handle);
11176 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11177 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11178 	struct hclge_dev *hdev = vport->back;
11179 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11180 	u16 cur_rss_size = kinfo->rss_size;
11181 	u16 cur_tqps = kinfo->num_tqps;
11182 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11183 	u16 roundup_size;
11184 	u32 *rss_indir;
11185 	unsigned int i;
11186 	int ret;
11187 
11188 	kinfo->req_rss_size = new_tqps_num;
11189 
11190 	ret = hclge_tm_vport_map_update(hdev);
11191 	if (ret) {
11192 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11193 		return ret;
11194 	}
11195 
11196 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11197 	roundup_size = ilog2(roundup_size);
11198 	/* Set the RSS TC mode according to the new RSS size */
11199 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11200 		tc_valid[i] = 0;
11201 
11202 		if (!(hdev->hw_tc_map & BIT(i)))
11203 			continue;
11204 
11205 		tc_valid[i] = 1;
11206 		tc_size[i] = roundup_size;
11207 		tc_offset[i] = kinfo->rss_size * i;
11208 	}
11209 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11210 	if (ret)
11211 		return ret;
11212 
11213 	/* RSS indirection table has been configured by user */
11214 	if (rxfh_configured)
11215 		goto out;
11216 
11217 	/* Reinitialize the RSS indirection table according to the new RSS size */
11218 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11219 			    GFP_KERNEL);
11220 	if (!rss_indir)
11221 		return -ENOMEM;
11222 
11223 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11224 		rss_indir[i] = i % kinfo->rss_size;
11225 
11226 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11227 	if (ret)
11228 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11229 			ret);
11230 
11231 	kfree(rss_indir);
11232 
11233 out:
11234 	if (!ret)
11235 		dev_info(&hdev->pdev->dev,
11236 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11237 			 cur_rss_size, kinfo->rss_size,
11238 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11239 
11240 	return ret;
11241 }
11242 
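/* Query how many 32-bit and 64-bit registers the firmware can dump;
 * a total of zero is treated as an error.
 */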
11243 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11244 			      u32 *regs_num_64_bit)
11245 {
11246 	struct hclge_desc desc;
11247 	u32 total_num;
11248 	int ret;
11249 
11250 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11251 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11252 	if (ret) {
11253 		dev_err(&hdev->pdev->dev,
11254 			"Query register number cmd failed, ret = %d.\n", ret);
11255 		return ret;
11256 	}
11257 
11258 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11259 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11260 
11261 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11262 	if (!total_num)
11263 		return -EINVAL;
11264 
11265 	return 0;
11266 }
11267 
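/* Read regs_num 32-bit register values from firmware. Only the first
 * descriptor keeps its two header words; the following descriptors are
 * consumed entirely as register data.
 */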
11268 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11269 				 void *data)
11270 {
11271 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11272 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11273 
11274 	struct hclge_desc *desc;
11275 	u32 *reg_val = data;
11276 	__le32 *desc_data;
11277 	int nodata_num;
11278 	int cmd_num;
11279 	int i, k, n;
11280 	int ret;
11281 
11282 	if (regs_num == 0)
11283 		return 0;
11284 
11285 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11286 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11287 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11288 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11289 	if (!desc)
11290 		return -ENOMEM;
11291 
11292 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11293 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11294 	if (ret) {
11295 		dev_err(&hdev->pdev->dev,
11296 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11297 		kfree(desc);
11298 		return ret;
11299 	}
11300 
11301 	for (i = 0; i < cmd_num; i++) {
11302 		if (i == 0) {
11303 			desc_data = (__le32 *)(&desc[i].data[0]);
11304 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11305 		} else {
11306 			desc_data = (__le32 *)(&desc[i]);
11307 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11308 		}
11309 		for (k = 0; k < n; k++) {
11310 			*reg_val++ = le32_to_cpu(*desc_data++);
11311 
11312 			regs_num--;
11313 			if (!regs_num)
11314 				break;
11315 		}
11316 	}
11317 
11318 	kfree(desc);
11319 	return 0;
11320 }
11321 
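/* 64-bit counterpart of hclge_get_32_bit_regs(): four 64-bit values
 * fit per descriptor and only the first descriptor reserves one
 * 64-bit word of header.
 */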
11322 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11323 				 void *data)
11324 {
11325 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11326 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11327 
11328 	struct hclge_desc *desc;
11329 	u64 *reg_val = data;
11330 	__le64 *desc_data;
11331 	int nodata_len;
11332 	int cmd_num;
11333 	int i, k, n;
11334 	int ret;
11335 
11336 	if (regs_num == 0)
11337 		return 0;
11338 
11339 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11340 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11341 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11342 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11343 	if (!desc)
11344 		return -ENOMEM;
11345 
11346 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11347 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11348 	if (ret) {
11349 		dev_err(&hdev->pdev->dev,
11350 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11351 		kfree(desc);
11352 		return ret;
11353 	}
11354 
11355 	for (i = 0; i < cmd_num; i++) {
11356 		if (i == 0) {
11357 			desc_data = (__le64 *)(&desc[i].data[0]);
11358 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11359 		} else {
11360 			desc_data = (__le64 *)(&desc[i]);
11361 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11362 		}
11363 		for (k = 0; k < n; k++) {
11364 			*reg_val++ = le64_to_cpu(*desc_data++);
11365 
11366 			regs_num--;
11367 			if (!regs_num)
11368 				break;
11369 		}
11370 	}
11371 
11372 	kfree(desc);
11373 	return 0;
11374 }
11375 
11376 #define MAX_SEPARATE_NUM	4
11377 #define SEPARATOR_VALUE		0xFDFCFBFA
11378 #define REG_NUM_PER_LINE	4
11379 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11380 #define REG_SEPARATOR_LINE	1
11381 #define REG_NUM_REMAIN_MASK	3
11382 #define BD_LIST_MAX_NUM		30
11383 
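/* Query how many buffer descriptors each DFX register group needs;
 * the counts for all groups are returned by one chained command.
 */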
11384 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11385 {
11386 	int i;
11387 
11388 	/* initialize all command BDs except the last one */
11389 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11390 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11391 					   true);
11392 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11393 	}
11394 
11395 	/* initialize the last command BD */
11396 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11397 
11398 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11399 }
11400 
11401 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11402 				    int *bd_num_list,
11403 				    u32 type_num)
11404 {
11405 	u32 entries_per_desc, desc_index, index, offset, i;
11406 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11407 	int ret;
11408 
11409 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
11410 	if (ret) {
11411 		dev_err(&hdev->pdev->dev,
11412 			"Get dfx bd num fail, status is %d.\n", ret);
11413 		return ret;
11414 	}
11415 
11416 	entries_per_desc = ARRAY_SIZE(desc[0].data);
11417 	for (i = 0; i < type_num; i++) {
11418 		offset = hclge_dfx_bd_offset_list[i];
11419 		index = offset % entries_per_desc;
11420 		desc_index = offset / entries_per_desc;
11421 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11422 	}
11423 
11424 	return ret;
11425 }
11426 
11427 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11428 				  struct hclge_desc *desc_src, int bd_num,
11429 				  enum hclge_opcode_type cmd)
11430 {
11431 	struct hclge_desc *desc = desc_src;
11432 	int i, ret;
11433 
11434 	hclge_cmd_setup_basic_desc(desc, cmd, true);
11435 	for (i = 0; i < bd_num - 1; i++) {
11436 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11437 		desc++;
11438 		hclge_cmd_setup_basic_desc(desc, cmd, true);
11439 	}
11440 
11441 	desc = desc_src;
11442 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11443 	if (ret)
11444 		dev_err(&hdev->pdev->dev,
11445 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11446 			cmd, ret);
11447 
11448 	return ret;
11449 }
11450 
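/* Copy register values out of the command descriptors into the dump
 * buffer and pad with SEPARATOR_VALUE words up to a REG_NUM_PER_LINE
 * boundary; returns the number of u32 words written.
 */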
11451 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11452 				    void *data)
11453 {
11454 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11455 	struct hclge_desc *desc = desc_src;
11456 	u32 *reg = data;
11457 
11458 	entries_per_desc = ARRAY_SIZE(desc->data);
11459 	reg_num = entries_per_desc * bd_num;
11460 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11461 	for (i = 0; i < reg_num; i++) {
11462 		index = i % entries_per_desc;
11463 		desc_index = i / entries_per_desc;
11464 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
11465 	}
11466 	for (i = 0; i < separator_num; i++)
11467 		*reg++ = SEPARATOR_VALUE;
11468 
11469 	return reg_num + separator_num;
11470 }
11471 
11472 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11473 {
11474 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11475 	int data_len_per_desc, bd_num, i;
11476 	int bd_num_list[BD_LIST_MAX_NUM];
11477 	u32 data_len;
11478 	int ret;
11479 
11480 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11481 	if (ret) {
11482 		dev_err(&hdev->pdev->dev,
11483 			"Get dfx reg bd num fail, status is %d.\n", ret);
11484 		return ret;
11485 	}
11486 
11487 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11488 	*len = 0;
11489 	for (i = 0; i < dfx_reg_type_num; i++) {
11490 		bd_num = bd_num_list[i];
11491 		data_len = data_len_per_desc * bd_num;
11492 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11493 	}
11494 
11495 	return ret;
11496 }
11497 
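/* Dump all DFX register groups: query the BD count of each group,
 * allocate a descriptor buffer large enough for the biggest one, then
 * read and unpack the groups one by one into the caller's buffer.
 */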
11498 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11499 {
11500 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11501 	int bd_num, bd_num_max, buf_len, i;
11502 	int bd_num_list[BD_LIST_MAX_NUM];
11503 	struct hclge_desc *desc_src;
11504 	u32 *reg = data;
11505 	int ret;
11506 
11507 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11508 	if (ret) {
11509 		dev_err(&hdev->pdev->dev,
11510 			"Get dfx reg bd num fail, status is %d.\n", ret);
11511 		return ret;
11512 	}
11513 
11514 	bd_num_max = bd_num_list[0];
11515 	for (i = 1; i < dfx_reg_type_num; i++)
11516 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11517 
11518 	buf_len = sizeof(*desc_src) * bd_num_max;
11519 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11520 	if (!desc_src)
11521 		return -ENOMEM;
11522 
11523 	for (i = 0; i < dfx_reg_type_num; i++) {
11524 		bd_num = bd_num_list[i];
11525 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11526 					     hclge_dfx_reg_opcode_list[i]);
11527 		if (ret) {
11528 			dev_err(&hdev->pdev->dev,
11529 				"Get dfx reg fail, status is %d.\n", ret);
11530 			break;
11531 		}
11532 
11533 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11534 	}
11535 
11536 	kfree(desc_src);
11537 	return ret;
11538 }
11539 
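/* Dump the directly readable PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) into the buffer, separating each
 * block with SEPARATOR_VALUE words; returns the number of u32 words
 * written.
 */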
11540 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11541 			      struct hnae3_knic_private_info *kinfo)
11542 {
11543 #define HCLGE_RING_REG_OFFSET		0x200
11544 #define HCLGE_RING_INT_REG_OFFSET	0x4
11545 
11546 	int i, j, reg_num, separator_num;
11547 	int data_num_sum;
11548 	u32 *reg = data;
11549 
11550 	/* fetch per-PF register values from the PF PCIe register space */
11551 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11552 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11553 	for (i = 0; i < reg_num; i++)
11554 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11555 	for (i = 0; i < separator_num; i++)
11556 		*reg++ = SEPARATOR_VALUE;
11557 	data_num_sum = reg_num + separator_num;
11558 
11559 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11560 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11561 	for (i = 0; i < reg_num; i++)
11562 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11563 	for (i = 0; i < separator_num; i++)
11564 		*reg++ = SEPARATOR_VALUE;
11565 	data_num_sum += reg_num + separator_num;
11566 
11567 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11568 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11569 	for (j = 0; j < kinfo->num_tqps; j++) {
11570 		for (i = 0; i < reg_num; i++)
11571 			*reg++ = hclge_read_dev(&hdev->hw,
11572 						ring_reg_addr_list[i] +
11573 						HCLGE_RING_REG_OFFSET * j);
11574 		for (i = 0; i < separator_num; i++)
11575 			*reg++ = SEPARATOR_VALUE;
11576 	}
11577 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11578 
11579 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11580 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11581 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11582 		for (i = 0; i < reg_num; i++)
11583 			*reg++ = hclge_read_dev(&hdev->hw,
11584 						tqp_intr_reg_addr_list[i] +
11585 						HCLGE_RING_INT_REG_OFFSET * j);
11586 		for (i = 0; i < separator_num; i++)
11587 			*reg++ = SEPARATOR_VALUE;
11588 	}
11589 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11590 
11591 	return data_num_sum;
11592 }
11593 
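/* Handler for the .get_regs_len ae_ops callback: accounts for the
 * cmdq, common, per-ring and per-vector register blocks, the 32-bit
 * and 64-bit firmware register dumps and the DFX registers, each
 * padded with a separator line.
 */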
11594 static int hclge_get_regs_len(struct hnae3_handle *handle)
11595 {
11596 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11597 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11598 	struct hclge_vport *vport = hclge_get_vport(handle);
11599 	struct hclge_dev *hdev = vport->back;
11600 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11601 	int regs_lines_32_bit, regs_lines_64_bit;
11602 	int ret;
11603 
11604 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11605 	if (ret) {
11606 		dev_err(&hdev->pdev->dev,
11607 			"Get register number failed, ret = %d.\n", ret);
11608 		return ret;
11609 	}
11610 
11611 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11612 	if (ret) {
11613 		dev_err(&hdev->pdev->dev,
11614 			"Get dfx reg len failed, ret = %d.\n", ret);
11615 		return ret;
11616 	}
11617 
11618 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11619 		REG_SEPARATOR_LINE;
11620 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11621 		REG_SEPARATOR_LINE;
11622 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11623 		REG_SEPARATOR_LINE;
11624 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11625 		REG_SEPARATOR_LINE;
11626 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11627 		REG_SEPARATOR_LINE;
11628 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11629 		REG_SEPARATOR_LINE;
11630 
11631 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11632 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11633 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11634 }
11635 
11636 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11637 			   void *data)
11638 {
11639 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11640 	struct hclge_vport *vport = hclge_get_vport(handle);
11641 	struct hclge_dev *hdev = vport->back;
11642 	u32 regs_num_32_bit, regs_num_64_bit;
11643 	int i, reg_num, separator_num, ret;
11644 	u32 *reg = data;
11645 
11646 	*version = hdev->fw_version;
11647 
11648 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11649 	if (ret) {
11650 		dev_err(&hdev->pdev->dev,
11651 			"Get register number failed, ret = %d.\n", ret);
11652 		return;
11653 	}
11654 
11655 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11656 
11657 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11658 	if (ret) {
11659 		dev_err(&hdev->pdev->dev,
11660 			"Get 32 bit register failed, ret = %d.\n", ret);
11661 		return;
11662 	}
11663 	reg_num = regs_num_32_bit;
11664 	reg += reg_num;
11665 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11666 	for (i = 0; i < separator_num; i++)
11667 		*reg++ = SEPARATOR_VALUE;
11668 
11669 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11670 	if (ret) {
11671 		dev_err(&hdev->pdev->dev,
11672 			"Get 64 bit register failed, ret = %d.\n", ret);
11673 		return;
11674 	}
11675 	reg_num = regs_num_64_bit * 2;
11676 	reg += reg_num;
11677 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11678 	for (i = 0; i < separator_num; i++)
11679 		*reg++ = SEPARATOR_VALUE;
11680 
11681 	ret = hclge_get_dfx_reg(hdev, reg);
11682 	if (ret)
11683 		dev_err(&hdev->pdev->dev,
11684 			"Get dfx register failed, ret = %d.\n", ret);
11685 }
11686 
11687 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11688 {
11689 	struct hclge_set_led_state_cmd *req;
11690 	struct hclge_desc desc;
11691 	int ret;
11692 
11693 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11694 
11695 	req = (struct hclge_set_led_state_cmd *)desc.data;
11696 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11697 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11698 
11699 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11700 	if (ret)
11701 		dev_err(&hdev->pdev->dev,
11702 			"Send set led state cmd error, ret =%d\n", ret);
11703 
11704 	return ret;
11705 }
11706 
11707 enum hclge_led_status {
11708 	HCLGE_LED_OFF,
11709 	HCLGE_LED_ON,
11710 	HCLGE_LED_NO_CHANGE = 0xFF,
11711 };
11712 
11713 static int hclge_set_led_id(struct hnae3_handle *handle,
11714 			    enum ethtool_phys_id_state status)
11715 {
11716 	struct hclge_vport *vport = hclge_get_vport(handle);
11717 	struct hclge_dev *hdev = vport->back;
11718 
11719 	switch (status) {
11720 	case ETHTOOL_ID_ACTIVE:
11721 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11722 	case ETHTOOL_ID_INACTIVE:
11723 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11724 	default:
11725 		return -EINVAL;
11726 	}
11727 }
11728 
11729 static void hclge_get_link_mode(struct hnae3_handle *handle,
11730 				unsigned long *supported,
11731 				unsigned long *advertising)
11732 {
11733 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11734 	struct hclge_vport *vport = hclge_get_vport(handle);
11735 	struct hclge_dev *hdev = vport->back;
11736 	unsigned int idx = 0;
11737 
11738 	for (; idx < size; idx++) {
11739 		supported[idx] = hdev->hw.mac.supported[idx];
11740 		advertising[idx] = hdev->hw.mac.advertising[idx];
11741 	}
11742 }
11743 
11744 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11745 {
11746 	struct hclge_vport *vport = hclge_get_vport(handle);
11747 	struct hclge_dev *hdev = vport->back;
11748 
11749 	return hclge_config_gro(hdev, enable);
11750 }
11751 
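/* Sync the PF promiscuous mode with the requested state: when the
 * overflow promisc flags change or an update has been requested,
 * reprogram the promiscuous mode and VLAN filter from the netdev
 * flags.
 */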
11752 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11753 {
11754 	struct hclge_vport *vport = &hdev->vport[0];
11755 	struct hnae3_handle *handle = &vport->nic;
11756 	u8 tmp_flags;
11757 	int ret;
11758 
11759 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11760 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11761 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11762 	}
11763 
11764 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11765 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11766 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11767 					     tmp_flags & HNAE3_MPE);
11768 		if (!ret) {
11769 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11770 			hclge_enable_vlan_filter(handle,
11771 						 tmp_flags & HNAE3_VLAN_FLTR);
11772 		}
11773 	}
11774 }
11775 
11776 static bool hclge_module_existed(struct hclge_dev *hdev)
11777 {
11778 	struct hclge_desc desc;
11779 	u32 existed;
11780 	int ret;
11781 
11782 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11784 	if (ret) {
11785 		dev_err(&hdev->pdev->dev,
11786 			"failed to get SFP exist state, ret = %d\n", ret);
11787 		return false;
11788 	}
11789 
11790 	existed = le32_to_cpu(desc.data[0]);
11791 
11792 	return existed != 0;
11793 }
11794 
11795 /* One read needs 6 BDs (140 bytes in total).
11796  * Return the number of bytes actually read; 0 means the read failed.
11797  */
11798 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11799 				     u32 len, u8 *data)
11800 {
11801 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11802 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11803 	u16 read_len;
11804 	u16 copy_len;
11805 	int ret;
11806 	int i;
11807 
11808 	/* setup all 6 bds to read module eeprom info. */
11809 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11810 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11811 					   true);
11812 
11813 		/* bd0~bd4 need next flag */
11814 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11815 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11816 	}
11817 
11818 	/* set up bd0, which carries the offset and read length. */
11819 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11820 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11821 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11822 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11823 
11824 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11825 	if (ret) {
11826 		dev_err(&hdev->pdev->dev,
11827 			"failed to get SFP eeprom info, ret = %d\n", ret);
11828 		return 0;
11829 	}
11830 
11831 	/* copy sfp info from bd0 to out buffer. */
11832 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11833 	memcpy(data, sfp_info_bd0->data, copy_len);
11834 	read_len = copy_len;
11835 
11836 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11837 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11838 		if (read_len >= len)
11839 			return read_len;
11840 
11841 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11842 		memcpy(data + read_len, desc[i].data, copy_len);
11843 		read_len += copy_len;
11844 	}
11845 
11846 	return read_len;
11847 }
11848 
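/* Handler for the .get_module_eeprom ae_ops callback: only supported
 * on fibre ports with a module plugged in; reads the EEPROM in chunks
 * of up to HCLGE_SFP_INFO_MAX_LEN bytes until len bytes are copied.
 */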
11849 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11850 				   u32 len, u8 *data)
11851 {
11852 	struct hclge_vport *vport = hclge_get_vport(handle);
11853 	struct hclge_dev *hdev = vport->back;
11854 	u32 read_len = 0;
11855 	u16 data_len;
11856 
11857 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11858 		return -EOPNOTSUPP;
11859 
11860 	if (!hclge_module_existed(hdev))
11861 		return -ENXIO;
11862 
11863 	while (read_len < len) {
11864 		data_len = hclge_get_sfp_eeprom_info(hdev,
11865 						     offset + read_len,
11866 						     len - read_len,
11867 						     data + read_len);
11868 		if (!data_len)
11869 			return -EIO;
11870 
11871 		read_len += data_len;
11872 	}
11873 
11874 	return 0;
11875 }
11876 
11877 static const struct hnae3_ae_ops hclge_ops = {
11878 	.init_ae_dev = hclge_init_ae_dev,
11879 	.uninit_ae_dev = hclge_uninit_ae_dev,
11880 	.flr_prepare = hclge_flr_prepare,
11881 	.flr_done = hclge_flr_done,
11882 	.init_client_instance = hclge_init_client_instance,
11883 	.uninit_client_instance = hclge_uninit_client_instance,
11884 	.map_ring_to_vector = hclge_map_ring_to_vector,
11885 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11886 	.get_vector = hclge_get_vector,
11887 	.put_vector = hclge_put_vector,
11888 	.set_promisc_mode = hclge_set_promisc_mode,
11889 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11890 	.set_loopback = hclge_set_loopback,
11891 	.start = hclge_ae_start,
11892 	.stop = hclge_ae_stop,
11893 	.client_start = hclge_client_start,
11894 	.client_stop = hclge_client_stop,
11895 	.get_status = hclge_get_status,
11896 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11897 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11898 	.get_media_type = hclge_get_media_type,
11899 	.check_port_speed = hclge_check_port_speed,
11900 	.get_fec = hclge_get_fec,
11901 	.set_fec = hclge_set_fec,
11902 	.get_rss_key_size = hclge_get_rss_key_size,
11903 	.get_rss = hclge_get_rss,
11904 	.set_rss = hclge_set_rss,
11905 	.set_rss_tuple = hclge_set_rss_tuple,
11906 	.get_rss_tuple = hclge_get_rss_tuple,
11907 	.get_tc_size = hclge_get_tc_size,
11908 	.get_mac_addr = hclge_get_mac_addr,
11909 	.set_mac_addr = hclge_set_mac_addr,
11910 	.do_ioctl = hclge_do_ioctl,
11911 	.add_uc_addr = hclge_add_uc_addr,
11912 	.rm_uc_addr = hclge_rm_uc_addr,
11913 	.add_mc_addr = hclge_add_mc_addr,
11914 	.rm_mc_addr = hclge_rm_mc_addr,
11915 	.set_autoneg = hclge_set_autoneg,
11916 	.get_autoneg = hclge_get_autoneg,
11917 	.restart_autoneg = hclge_restart_autoneg,
11918 	.halt_autoneg = hclge_halt_autoneg,
11919 	.get_pauseparam = hclge_get_pauseparam,
11920 	.set_pauseparam = hclge_set_pauseparam,
11921 	.set_mtu = hclge_set_mtu,
11922 	.reset_queue = hclge_reset_tqp,
11923 	.get_stats = hclge_get_stats,
11924 	.get_mac_stats = hclge_get_mac_stat,
11925 	.update_stats = hclge_update_stats,
11926 	.get_strings = hclge_get_strings,
11927 	.get_sset_count = hclge_get_sset_count,
11928 	.get_fw_version = hclge_get_fw_version,
11929 	.get_mdix_mode = hclge_get_mdix_mode,
11930 	.enable_vlan_filter = hclge_enable_vlan_filter,
11931 	.set_vlan_filter = hclge_set_vlan_filter,
11932 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11933 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11934 	.reset_event = hclge_reset_event,
11935 	.get_reset_level = hclge_get_reset_level,
11936 	.set_default_reset_request = hclge_set_def_reset_request,
11937 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11938 	.set_channels = hclge_set_channels,
11939 	.get_channels = hclge_get_channels,
11940 	.get_regs_len = hclge_get_regs_len,
11941 	.get_regs = hclge_get_regs,
11942 	.set_led_id = hclge_set_led_id,
11943 	.get_link_mode = hclge_get_link_mode,
11944 	.add_fd_entry = hclge_add_fd_entry,
11945 	.del_fd_entry = hclge_del_fd_entry,
11946 	.del_all_fd_entries = hclge_del_all_fd_entries,
11947 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11948 	.get_fd_rule_info = hclge_get_fd_rule_info,
11949 	.get_fd_all_rules = hclge_get_all_rules,
11950 	.enable_fd = hclge_enable_fd,
11951 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11952 	.dbg_run_cmd = hclge_dbg_run_cmd,
11953 	.dbg_read_cmd = hclge_dbg_read_cmd,
11954 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11955 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11956 	.ae_dev_resetting = hclge_ae_dev_resetting,
11957 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11958 	.set_gro_en = hclge_gro_en,
11959 	.get_global_queue_id = hclge_covert_handle_qid_global,
11960 	.set_timer_task = hclge_set_timer_task,
11961 	.mac_connect_phy = hclge_mac_connect_phy,
11962 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11963 	.get_vf_config = hclge_get_vf_config,
11964 	.set_vf_link_state = hclge_set_vf_link_state,
11965 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11966 	.set_vf_trust = hclge_set_vf_trust,
11967 	.set_vf_rate = hclge_set_vf_rate,
11968 	.set_vf_mac = hclge_set_vf_mac,
11969 	.get_module_eeprom = hclge_get_module_eeprom,
11970 	.get_cmdq_stat = hclge_get_cmdq_stat,
11971 	.add_cls_flower = hclge_add_cls_flower,
11972 	.del_cls_flower = hclge_del_cls_flower,
11973 	.cls_flower_active = hclge_is_cls_flower_active,
11974 };
11975 
11976 static struct hnae3_ae_algo ae_algo = {
11977 	.ops = &hclge_ops,
11978 	.pdev_id_table = ae_algo_pci_tbl,
11979 };
11980 
11981 static int hclge_init(void)
11982 {
11983 	pr_info("%s is initializing\n", HCLGE_NAME);
11984 
11985 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11986 	if (!hclge_wq) {
11987 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11988 		return -ENOMEM;
11989 	}
11990 
11991 	hnae3_register_ae_algo(&ae_algo);
11992 
11993 	return 0;
11994 }
11995 
11996 static void hclge_exit(void)
11997 {
11998 	hnae3_unregister_ae_algo(&ae_algo);
11999 	destroy_workqueue(hclge_wq);
12000 }
12001 module_init(hclge_init);
12002 module_exit(hclge_exit);
12003 
12004 MODULE_LICENSE("GPL");
12005 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12006 MODULE_DESCRIPTION("HCLGE Driver");
12007 MODULE_VERSION(HCLGE_MOD_VERSION);
12008