1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27 
28 #define HCLGE_NAME			"hclge"
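
/* HCLGE_STATS_READ returns the u64 counter located @offset bytes into a
 * stats structure; HCLGE_MAC_STATS_FIELD_OFF yields such offsets for
 * struct hclge_mac_stats (see g_mac_stats_string below).
 */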
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 
32 #define HCLGE_BUF_SIZE_UNIT	256U
33 #define HCLGE_BUF_MUL_BY	2
34 #define HCLGE_BUF_DIV_BY	2
35 #define NEED_RESERVE_TC_NUM	2
36 #define BUF_MAX_PERCENT		100
37 #define BUF_RESERVE_PERCENT	90
38 
39 #define HCLGE_RESET_MAX_FAIL_CNT	5
40 #define HCLGE_RESET_SYNC_TIME		100
41 #define HCLGE_PF_RESET_SYNC_TIME	20
42 #define HCLGE_PF_RESET_SYNC_CNT		1500
43 
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57 
58 #define HCLGE_LINK_STATUS_MS	10
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 
76 static struct hnae3_ae_algo ae_algo;
77 
78 static struct workqueue_struct *hclge_wq;
79 
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 	/* required last entry */
90 	{0, }
91 };
92 
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 					 HCLGE_NIC_CSQ_DEPTH_REG,
98 					 HCLGE_NIC_CSQ_TAIL_REG,
99 					 HCLGE_NIC_CSQ_HEAD_REG,
100 					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 					 HCLGE_NIC_CRQ_DEPTH_REG,
103 					 HCLGE_NIC_CRQ_TAIL_REG,
104 					 HCLGE_NIC_CRQ_HEAD_REG,
105 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 					 HCLGE_CMDQ_INTR_STS_REG,
107 					 HCLGE_CMDQ_INTR_EN_REG,
108 					 HCLGE_CMDQ_INTR_GEN_REG};
109 
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 					   HCLGE_PF_OTHER_INT_REG,
112 					   HCLGE_MISC_RESET_STS_REG,
113 					   HCLGE_MISC_VECTOR_INT_STS,
114 					   HCLGE_GLOBAL_RESET_REG,
115 					   HCLGE_FUN_RST_ING,
116 					   HCLGE_GRO_EN_REG};
117 
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 					 HCLGE_RING_RX_ADDR_H_REG,
120 					 HCLGE_RING_RX_BD_NUM_REG,
121 					 HCLGE_RING_RX_BD_LENGTH_REG,
122 					 HCLGE_RING_RX_MERGE_EN_REG,
123 					 HCLGE_RING_RX_TAIL_REG,
124 					 HCLGE_RING_RX_HEAD_REG,
125 					 HCLGE_RING_RX_FBD_NUM_REG,
126 					 HCLGE_RING_RX_OFFSET_REG,
127 					 HCLGE_RING_RX_FBD_OFFSET_REG,
128 					 HCLGE_RING_RX_STASH_REG,
129 					 HCLGE_RING_RX_BD_ERR_REG,
130 					 HCLGE_RING_TX_ADDR_L_REG,
131 					 HCLGE_RING_TX_ADDR_H_REG,
132 					 HCLGE_RING_TX_BD_NUM_REG,
133 					 HCLGE_RING_TX_PRIORITY_REG,
134 					 HCLGE_RING_TX_TC_REG,
135 					 HCLGE_RING_TX_MERGE_EN_REG,
136 					 HCLGE_RING_TX_TAIL_REG,
137 					 HCLGE_RING_TX_HEAD_REG,
138 					 HCLGE_RING_TX_FBD_NUM_REG,
139 					 HCLGE_RING_TX_OFFSET_REG,
140 					 HCLGE_RING_TX_EBD_NUM_REG,
141 					 HCLGE_RING_TX_EBD_OFFSET_REG,
142 					 HCLGE_RING_TX_BD_ERR_REG,
143 					 HCLGE_RING_EN_REG};
144 
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 					     HCLGE_TQP_INTR_GL0_REG,
147 					     HCLGE_TQP_INTR_GL1_REG,
148 					     HCLGE_TQP_INTR_GL2_REG,
149 					     HCLGE_TQP_INTR_RL_REG};
150 
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 	"App    Loopback test",
153 	"Serdes serial Loopback test",
154 	"Serdes parallel Loopback test",
155 	"Phy    Loopback test"
156 };
157 
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 	{"mac_tx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 	{"mac_rx_mac_pause_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 	{"mac_tx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165 	{"mac_rx_control_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167 	{"mac_tx_pfc_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169 	{"mac_tx_pfc_pri0_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171 	{"mac_tx_pfc_pri1_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173 	{"mac_tx_pfc_pri2_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175 	{"mac_tx_pfc_pri3_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177 	{"mac_tx_pfc_pri4_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179 	{"mac_tx_pfc_pri5_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181 	{"mac_tx_pfc_pri6_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183 	{"mac_tx_pfc_pri7_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185 	{"mac_rx_pfc_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187 	{"mac_rx_pfc_pri0_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189 	{"mac_rx_pfc_pri1_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191 	{"mac_rx_pfc_pri2_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193 	{"mac_rx_pfc_pri3_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195 	{"mac_rx_pfc_pri4_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197 	{"mac_rx_pfc_pri5_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199 	{"mac_rx_pfc_pri6_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201 	{"mac_rx_pfc_pri7_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203 	{"mac_tx_total_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205 	{"mac_tx_total_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207 	{"mac_tx_good_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209 	{"mac_tx_bad_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211 	{"mac_tx_good_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213 	{"mac_tx_bad_oct_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215 	{"mac_tx_uni_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217 	{"mac_tx_multi_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219 	{"mac_tx_broad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221 	{"mac_tx_undersize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223 	{"mac_tx_oversize_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225 	{"mac_tx_64_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227 	{"mac_tx_65_127_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229 	{"mac_tx_128_255_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231 	{"mac_tx_256_511_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233 	{"mac_tx_512_1023_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235 	{"mac_tx_1024_1518_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237 	{"mac_tx_1519_2047_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239 	{"mac_tx_2048_4095_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241 	{"mac_tx_4096_8191_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243 	{"mac_tx_8192_9216_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245 	{"mac_tx_9217_12287_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247 	{"mac_tx_12288_16383_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249 	{"mac_tx_1519_max_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251 	{"mac_tx_1519_max_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253 	{"mac_rx_total_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255 	{"mac_rx_total_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257 	{"mac_rx_good_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259 	{"mac_rx_bad_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261 	{"mac_rx_good_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263 	{"mac_rx_bad_oct_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265 	{"mac_rx_uni_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267 	{"mac_rx_multi_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269 	{"mac_rx_broad_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271 	{"mac_rx_undersize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273 	{"mac_rx_oversize_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275 	{"mac_rx_64_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277 	{"mac_rx_65_127_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279 	{"mac_rx_128_255_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281 	{"mac_rx_256_511_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283 	{"mac_rx_512_1023_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285 	{"mac_rx_1024_1518_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287 	{"mac_rx_1519_2047_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289 	{"mac_rx_2048_4095_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291 	{"mac_rx_4096_8191_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293 	{"mac_rx_8192_9216_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295 	{"mac_rx_9217_12287_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297 	{"mac_rx_12288_16383_oct_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299 	{"mac_rx_1519_max_good_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301 	{"mac_rx_1519_max_bad_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 
304 	{"mac_tx_fragment_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306 	{"mac_tx_undermin_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308 	{"mac_tx_jabber_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310 	{"mac_tx_err_all_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312 	{"mac_tx_from_app_good_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314 	{"mac_tx_from_app_bad_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316 	{"mac_rx_fragment_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318 	{"mac_rx_undermin_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320 	{"mac_rx_jabber_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322 	{"mac_rx_fcs_err_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324 	{"mac_rx_send_app_good_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326 	{"mac_rx_send_app_bad_pkt_num",
327 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
329 
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 	{
332 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
334 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335 		.i_port_bitmap = 0x1,
336 	},
337 };
338 
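/* default 40-byte key used for RSS hashing */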
339 static const u8 hclge_hash_key[] = {
340 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
346 
347 static const u32 hclge_dfx_bd_offset_list[] = {
348 	HCLGE_DFX_BIOS_BD_OFFSET,
349 	HCLGE_DFX_SSU_0_BD_OFFSET,
350 	HCLGE_DFX_SSU_1_BD_OFFSET,
351 	HCLGE_DFX_IGU_BD_OFFSET,
352 	HCLGE_DFX_RPU_0_BD_OFFSET,
353 	HCLGE_DFX_RPU_1_BD_OFFSET,
354 	HCLGE_DFX_NCSI_BD_OFFSET,
355 	HCLGE_DFX_RTC_BD_OFFSET,
356 	HCLGE_DFX_PPP_BD_OFFSET,
357 	HCLGE_DFX_RCB_BD_OFFSET,
358 	HCLGE_DFX_TQP_BD_OFFSET,
359 	HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361 
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
364 	HCLGE_OPC_DFX_SSU_REG_0,
365 	HCLGE_OPC_DFX_SSU_REG_1,
366 	HCLGE_OPC_DFX_IGU_EGU_REG,
367 	HCLGE_OPC_DFX_RPU_REG_0,
368 	HCLGE_OPC_DFX_RPU_REG_1,
369 	HCLGE_OPC_DFX_NCSI_REG,
370 	HCLGE_OPC_DFX_RTC_REG,
371 	HCLGE_OPC_DFX_PPP_REG,
372 	HCLGE_OPC_DFX_RCB_REG,
373 	HCLGE_OPC_DFX_TQP_REG,
374 	HCLGE_OPC_DFX_SSU_REG_2
375 };
376 
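/* flow director meta data fields and their widths in bits */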
377 static const struct key_info meta_data_key_info[] = {
378 	{ PACKET_TYPE_ID, 6 },
379 	{ IP_FRAGEMENT, 1 },
380 	{ ROCE_TYPE, 1 },
381 	{ NEXT_KEY, 5 },
382 	{ VLAN_NUMBER, 2 },
383 	{ SRC_VPORT, 12 },
384 	{ DST_VPORT, 12 },
385 	{ TUNNEL_PACKET, 1 },
386 };
387 
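/* flow director tuple fields: { tuple, width in bits, key option, offset of
 * the tuple value and of its mask within struct hclge_fd_rule (-1 when the
 * tuple is not stored per rule) }
 */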
388 static const struct key_info tuple_key_info[] = {
389 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
406 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
407 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
409 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
410 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
417 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
419 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
422 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
423 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
425 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
426 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
428 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
429 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430 	{ INNER_DST_IP, 32, KEY_OPT_IP,
431 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
432 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
434 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
437 	  offsetof(struct hclge_fd_rule, tuples.src_port),
438 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
440 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
441 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
443 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
446 
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450 
451 	u64 *data = (u64 *)(&hdev->mac_stats);
452 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453 	__le64 *desc_data;
454 	int i, k, n;
455 	int ret;
456 
457 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 	if (ret) {
460 		dev_err(&hdev->pdev->dev,
461 			"Get MAC pkt stats fail, status = %d.\n", ret);
462 
463 		return ret;
464 	}
465 
466 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467 		/* for special opcode 0032, only the first desc has the head */
468 		if (unlikely(i == 0)) {
469 			desc_data = (__le64 *)(&desc[i].data[0]);
470 			n = HCLGE_RD_FIRST_STATS_NUM;
471 		} else {
472 			desc_data = (__le64 *)(&desc[i]);
473 			n = HCLGE_RD_OTHER_STATS_NUM;
474 		}
475 
476 		for (k = 0; k < n; k++) {
477 			*data += le64_to_cpu(*desc_data);
478 			data++;
479 			desc_data++;
480 		}
481 	}
482 
483 	return 0;
484 }
485 
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488 	u64 *data = (u64 *)(&hdev->mac_stats);
489 	struct hclge_desc *desc;
490 	__le64 *desc_data;
491 	u16 i, k, n;
492 	int ret;
493 
494 	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
496 	 */
497 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498 	if (!desc)
499 		return -ENOMEM;
500 
501 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503 	if (ret) {
504 		kfree(desc);
505 		return ret;
506 	}
507 
508 	for (i = 0; i < desc_num; i++) {
509 		/* for special opcode 0034, only the first desc has the head */
510 		if (i == 0) {
511 			desc_data = (__le64 *)(&desc[i].data[0]);
512 			n = HCLGE_RD_FIRST_STATS_NUM;
513 		} else {
514 			desc_data = (__le64 *)(&desc[i]);
515 			n = HCLGE_RD_OTHER_STATS_NUM;
516 		}
517 
518 		for (k = 0; k < n; k++) {
519 			*data += le64_to_cpu(*desc_data);
520 			data++;
521 			desc_data++;
522 		}
523 	}
524 
525 	kfree(desc);
526 
527 	return 0;
528 }
529 
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532 	struct hclge_desc desc;
533 	__le32 *desc_data;
534 	u32 reg_num;
535 	int ret;
536 
537 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 	if (ret)
540 		return ret;
541 
542 	desc_data = (__le32 *)(&desc.data[0]);
543 	reg_num = le32_to_cpu(*desc_data);
544 
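	/* convert the register count into a descriptor count: one descriptor
	 * plus one more for every four registers beyond the first three,
	 * rounded up
	 */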
545 	*desc_num = 1 + ((reg_num - 3) >> 2) +
546 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547 
548 	return 0;
549 }
550 
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553 	u32 desc_num;
554 	int ret;
555 
556 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* if the query succeeds, the firmware supports the new statistics
	 * acquisition method; -EOPNOTSUPP means it does not, so fall back
	 * to the defective (fixed-size) method
	 */
558 	if (!ret)
559 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 	else if (ret == -EOPNOTSUPP)
561 		ret = hclge_mac_update_stats_defective(hdev);
562 	else
563 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564 
565 	return ret;
566 }
567 
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 	struct hclge_vport *vport = hclge_get_vport(handle);
572 	struct hclge_dev *hdev = vport->back;
573 	struct hnae3_queue *queue;
574 	struct hclge_desc desc[1];
575 	struct hclge_tqp *tqp;
576 	int ret, i;
577 
578 	for (i = 0; i < kinfo->num_tqps; i++) {
579 		queue = handle->kinfo.tqp[i];
580 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATS */
582 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583 					   true);
584 
585 		desc[0].data[0] = cpu_to_le32(tqp->index);
586 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 		if (ret) {
588 			dev_err(&hdev->pdev->dev,
589 				"Query tqp stat fail, status = %d,queue = %d\n",
590 				ret, i);
591 			return ret;
592 		}
593 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 			le32_to_cpu(desc[0].data[1]);
595 	}
596 
597 	for (i = 0; i < kinfo->num_tqps; i++) {
598 		queue = handle->kinfo.tqp[i];
599 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATS */
601 		hclge_cmd_setup_basic_desc(&desc[0],
602 					   HCLGE_OPC_QUERY_TX_STATS,
603 					   true);
604 
605 		desc[0].data[0] = cpu_to_le32(tqp->index);
606 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 		if (ret) {
608 			dev_err(&hdev->pdev->dev,
609 				"Query tqp stat fail, status = %d,queue = %d\n",
610 				ret, i);
611 			return ret;
612 		}
613 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 			le32_to_cpu(desc[0].data[1]);
615 	}
616 
617 	return 0;
618 }
619 
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 	struct hclge_tqp *tqp;
624 	u64 *buff = data;
625 	int i;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630 	}
631 
632 	for (i = 0; i < kinfo->num_tqps; i++) {
633 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635 	}
636 
637 	return buff;
638 }
639 
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 
644 	/* each tqp has TX & RX two queues */
645 	return kinfo->num_tqps * (2);
646 }
647 
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651 	u8 *buff = data;
652 	int i;
653 
654 	for (i = 0; i < kinfo->num_tqps; i++) {
655 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 			struct hclge_tqp, q);
657 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 			 tqp->index);
659 		buff = buff + ETH_GSTRING_LEN;
660 	}
661 
662 	for (i = 0; i < kinfo->num_tqps; i++) {
663 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 			struct hclge_tqp, q);
665 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 			 tqp->index);
667 		buff = buff + ETH_GSTRING_LEN;
668 	}
669 
670 	return buff;
671 }
672 
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 				 const struct hclge_comm_stats_str strs[],
675 				 int size, u64 *data)
676 {
677 	u64 *buf = data;
678 	u32 i;
679 
680 	for (i = 0; i < size; i++)
681 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682 
683 	return buf + size;
684 }
685 
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 				  const struct hclge_comm_stats_str strs[],
688 				  int size, u8 *data)
689 {
690 	char *buff = (char *)data;
691 	u32 i;
692 
693 	if (stringset != ETH_SS_STATS)
694 		return buff;
695 
696 	for (i = 0; i < size; i++) {
697 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	return (u8 *)buff;
702 }
703 
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706 	struct hnae3_handle *handle;
707 	int status;
708 
709 	handle = &hdev->vport[0].nic;
710 	if (handle->client) {
711 		status = hclge_tqps_update_stats(handle);
712 		if (status) {
713 			dev_err(&hdev->pdev->dev,
714 				"Update TQPS stats fail, status = %d.\n",
715 				status);
716 		}
717 	}
718 
719 	status = hclge_mac_update_stats(hdev);
720 	if (status)
721 		dev_err(&hdev->pdev->dev,
722 			"Update MAC stats fail, status = %d.\n", status);
723 }
724 
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 			       struct net_device_stats *net_stats)
727 {
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int status;
731 
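	/* only one statistics update may run at a time; skip this one if
	 * another update is already in progress
	 */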
732 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733 		return;
734 
735 	status = hclge_mac_update_stats(hdev);
736 	if (status)
737 		dev_err(&hdev->pdev->dev,
738 			"Update MAC stats fail, status = %d.\n",
739 			status);
740 
741 	status = hclge_tqps_update_stats(handle);
742 	if (status)
743 		dev_err(&hdev->pdev->dev,
744 			"Update TQPS stats fail, status = %d.\n",
745 			status);
746 
747 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749 
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753 		HNAE3_SUPPORT_PHY_LOOPBACK | \
754 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 
757 	struct hclge_vport *vport = hclge_get_vport(handle);
758 	struct hclge_dev *hdev = vport->back;
759 	int count = 0;
760 
	/* Loopback test support rules:
	 * mac: supported only in GE mode
	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
	 * phy: supported only when a phy device exists on the board
	 */
766 	if (stringset == ETH_SS_TEST) {
767 		/* clear loopback bit flags at first */
		handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
769 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 			count += 1;
774 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775 		}
776 
777 		count += 2;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 
781 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 		     hdev->hw.mac.phydev->drv->set_loopback) ||
783 		    hnae3_dev_phy_imp_supported(hdev)) {
784 			count += 1;
785 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 		}
787 	} else if (stringset == ETH_SS_STATS) {
788 		count = ARRAY_SIZE(g_mac_stats_string) +
789 			hclge_tqps_get_sset_count(handle, stringset);
790 	}
791 
792 	return count;
793 }
794 
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796 			      u8 *data)
797 {
	u8 *p = data;
799 	int size;
800 
801 	if (stringset == ETH_SS_STATS) {
802 		size = ARRAY_SIZE(g_mac_stats_string);
803 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 					   size, p);
805 		p = hclge_tqps_get_strings(handle, p);
806 	} else if (stringset == ETH_SS_TEST) {
807 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 			       ETH_GSTRING_LEN);
810 			p += ETH_GSTRING_LEN;
811 		}
812 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 			       ETH_GSTRING_LEN);
815 			p += ETH_GSTRING_LEN;
816 		}
817 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 			memcpy(p,
819 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 			       ETH_GSTRING_LEN);
821 			p += ETH_GSTRING_LEN;
822 		}
823 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 			       ETH_GSTRING_LEN);
826 			p += ETH_GSTRING_LEN;
827 		}
828 	}
829 }
830 
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833 	struct hclge_vport *vport = hclge_get_vport(handle);
834 	struct hclge_dev *hdev = vport->back;
835 	u64 *p;
836 
837 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838 				 ARRAY_SIZE(g_mac_stats_string), data);
839 	p = hclge_tqps_get_stats(handle, p);
840 }
841 
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843 			       struct hns3_mac_stats *mac_stats)
844 {
845 	struct hclge_vport *vport = hclge_get_vport(handle);
846 	struct hclge_dev *hdev = vport->back;
847 
848 	hclge_update_stats(handle, NULL);
849 
850 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853 
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855 				   struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK	0xF
858 
859 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860 		return -EINVAL;
861 
	/* record whether this pf is the main pf */
863 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
864 		hdev->flag |= HCLGE_FLAG_MAIN;
865 	else
866 		hdev->flag &= ~HCLGE_FLAG_MAIN;
867 
868 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869 	return 0;
870 }
871 
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT	5
875 
876 	struct hclge_func_status_cmd *req;
877 	struct hclge_desc desc;
878 	int timeout = 0;
879 	int ret;
880 
881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882 	req = (struct hclge_func_status_cmd *)desc.data;
883 
884 	do {
885 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 		if (ret) {
887 			dev_err(&hdev->pdev->dev,
888 				"query function status failed %d.\n", ret);
889 			return ret;
890 		}
891 
		/* check whether pf reset is done */
893 		if (req->pf_state)
894 			break;
895 		usleep_range(1000, 2000);
896 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
897 
898 	return hclge_parse_func_status(hdev, req);
899 }
900 
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903 	struct hclge_pf_res_cmd *req;
904 	struct hclge_desc desc;
905 	int ret;
906 
907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 	if (ret) {
910 		dev_err(&hdev->pdev->dev,
911 			"query pf resource failed %d.\n", ret);
912 		return ret;
913 	}
914 
915 	req = (struct hclge_pf_res_cmd *)desc.data;
916 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917 			 le16_to_cpu(req->ext_tqp_num);
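
	/* buffer sizes are reported by firmware in units of
	 * (1 << HCLGE_BUF_UNIT_S) bytes
	 */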
918 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919 
920 	if (req->tx_buf_size)
921 		hdev->tx_buf_size =
922 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 	else
924 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925 
926 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927 
928 	if (req->dv_buf_size)
929 		hdev->dv_buf_size =
930 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 	else
932 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933 
934 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935 
936 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938 		dev_err(&hdev->pdev->dev,
939 			"only %u msi resources available, not enough for pf(min:2).\n",
940 			hdev->num_nic_msi);
941 		return -EINVAL;
942 	}
943 
944 	if (hnae3_dev_roce_supported(hdev)) {
945 		hdev->num_roce_msi =
946 			le16_to_cpu(req->pf_intr_vector_number_roce);
947 
		/* PF should have both NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
950 		 */
951 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 	} else {
953 		hdev->num_msi = hdev->num_nic_msi;
954 	}
955 
956 	return 0;
957 }
958 
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961 	switch (speed_cmd) {
962 	case HCLGE_FW_MAC_SPEED_10M:
963 		*speed = HCLGE_MAC_SPEED_10M;
964 		break;
965 	case HCLGE_FW_MAC_SPEED_100M:
966 		*speed = HCLGE_MAC_SPEED_100M;
967 		break;
968 	case HCLGE_FW_MAC_SPEED_1G:
969 		*speed = HCLGE_MAC_SPEED_1G;
970 		break;
971 	case HCLGE_FW_MAC_SPEED_10G:
972 		*speed = HCLGE_MAC_SPEED_10G;
973 		break;
974 	case HCLGE_FW_MAC_SPEED_25G:
975 		*speed = HCLGE_MAC_SPEED_25G;
976 		break;
977 	case HCLGE_FW_MAC_SPEED_40G:
978 		*speed = HCLGE_MAC_SPEED_40G;
979 		break;
980 	case HCLGE_FW_MAC_SPEED_50G:
981 		*speed = HCLGE_MAC_SPEED_50G;
982 		break;
983 	case HCLGE_FW_MAC_SPEED_100G:
984 		*speed = HCLGE_MAC_SPEED_100G;
985 		break;
986 	case HCLGE_FW_MAC_SPEED_200G:
987 		*speed = HCLGE_MAC_SPEED_200G;
988 		break;
989 	default:
990 		return -EINVAL;
991 	}
992 
993 	return 0;
994 }
995 
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
1007 
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010 	u16 i;
1011 
1012 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013 		if (speed == speed_bit_map[i].speed) {
1014 			*speed_bit = speed_bit_map[i].speed_bit;
1015 			return 0;
1016 		}
1017 	}
1018 
1019 	return -EINVAL;
1020 }
1021 
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024 	struct hclge_vport *vport = hclge_get_vport(handle);
1025 	struct hclge_dev *hdev = vport->back;
1026 	u32 speed_ability = hdev->hw.mac.speed_ability;
1027 	u32 speed_bit = 0;
1028 	int ret;
1029 
1030 	ret = hclge_get_speed_bit(speed, &speed_bit);
1031 	if (ret)
1032 		return ret;
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1283 #define SPEED_ABILITY_EXT_SHIFT			8
1284 
1285 	struct hclge_cfg_param_cmd *req;
1286 	u64 mac_addr_tmp_high;
1287 	u16 speed_ability_ext;
1288 	u64 mac_addr_tmp;
1289 	unsigned int i;
1290 
1291 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292 
1293 	/* get the configuration */
1294 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 					    HCLGE_CFG_TQP_DESC_N_M,
1298 					    HCLGE_CFG_TQP_DESC_N_S);
1299 
1300 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 					HCLGE_CFG_PHY_ADDR_M,
1302 					HCLGE_CFG_PHY_ADDR_S);
1303 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 					  HCLGE_CFG_MEDIA_TP_M,
1305 					  HCLGE_CFG_MEDIA_TP_S);
1306 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					  HCLGE_CFG_RX_BUF_LEN_M,
1308 					  HCLGE_CFG_RX_BUF_LEN_S);
1309 	/* get mac_address */
1310 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 					    HCLGE_CFG_MAC_ADDR_H_M,
1313 					    HCLGE_CFG_MAC_ADDR_H_S);
1314 
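	/* the low 32 bits of the MAC address come from param[2] and the upper
	 * bits from param[3]; the split shift (<< 31, then << 1) is simply a
	 * 32-bit left shift
	 */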
1315 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316 
1317 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 					     HCLGE_CFG_DEFAULT_SPEED_M,
1319 					     HCLGE_CFG_DEFAULT_SPEED_S);
1320 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 					       HCLGE_CFG_RSS_SIZE_M,
1322 					       HCLGE_CFG_RSS_SIZE_S);
1323 
1324 	for (i = 0; i < ETH_ALEN; i++)
1325 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326 
1327 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329 
1330 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 					     HCLGE_CFG_SPEED_ABILITY_M,
1332 					     HCLGE_CFG_SPEED_ABILITY_S);
1333 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337 
1338 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1341 
1342 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 
1346 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1347 					       HCLGE_CFG_PF_RSS_SIZE_M,
1348 					       HCLGE_CFG_PF_RSS_SIZE_S);
1349 
	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power of 2
	 * rather than the value itself, which is more flexible for future
	 * changes and expansions. A PF field of 0 is not meaningful; in that
	 * case the PF uses the same max rss size field as the VF
	 * (HCLGE_CFG_RSS_SIZE_S).
	 */
1357 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1358 			       1U << cfg->pf_rss_size_max :
1359 			       cfg->vf_rss_size_max;
1360 
	/* The tx spare buffer size queried from the configuration file is in
	 * units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so convert it here.
	 */
1365 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1366 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1367 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1368 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1369 }
1370 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1375 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1376 {
1377 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1378 	struct hclge_cfg_param_cmd *req;
1379 	unsigned int i;
1380 	int ret;
1381 
1382 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1383 		u32 offset = 0;
1384 
1385 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1386 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1387 					   true);
1388 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1389 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the length sent to hardware is expressed in units of 4 bytes */
1391 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1392 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1393 		req->offset = cpu_to_le32(offset);
1394 	}
1395 
1396 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1397 	if (ret) {
1398 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1399 		return ret;
1400 	}
1401 
1402 	hclge_parse_cfg(hcfg, desc);
1403 
1404 	return 0;
1405 }
1406 
1407 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1408 {
1409 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1410 
1411 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1412 
1413 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1414 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1415 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1416 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1417 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1418 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1419 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1420 	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1421 }
1422 
1423 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1424 				  struct hclge_desc *desc)
1425 {
1426 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1427 	struct hclge_dev_specs_0_cmd *req0;
1428 	struct hclge_dev_specs_1_cmd *req1;
1429 
1430 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1431 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1432 
1433 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1434 	ae_dev->dev_specs.rss_ind_tbl_size =
1435 		le16_to_cpu(req0->rss_ind_tbl_size);
1436 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1437 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1438 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1439 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1440 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1441 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1442 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1443 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1444 }
1445 
1446 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1447 {
1448 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1449 
1450 	if (!dev_specs->max_non_tso_bd_num)
1451 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1452 	if (!dev_specs->rss_ind_tbl_size)
1453 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1454 	if (!dev_specs->rss_key_size)
1455 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1456 	if (!dev_specs->max_tm_rate)
1457 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1458 	if (!dev_specs->max_qset_num)
1459 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1460 	if (!dev_specs->max_int_gl)
1461 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1462 	if (!dev_specs->max_frm_size)
1463 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1464 	if (!dev_specs->umv_size)
1465 		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1466 }
1467 
1468 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1469 {
1470 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1471 	int ret;
1472 	int i;
1473 
1474 	/* set default specifications as devices lower than version V3 do not
1475 	 * support querying specifications from firmware.
1476 	 */
1477 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1478 		hclge_set_default_dev_specs(hdev);
1479 		return 0;
1480 	}
1481 
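	/* every BD except the last sets the NEXT flag so the descriptors are
	 * handled as one chained command
	 */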
1482 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1483 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1484 					   true);
1485 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1486 	}
1487 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1488 
1489 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1490 	if (ret)
1491 		return ret;
1492 
1493 	hclge_parse_dev_specs(hdev, desc);
1494 	hclge_check_dev_specs(hdev);
1495 
1496 	return 0;
1497 }
1498 
1499 static int hclge_get_cap(struct hclge_dev *hdev)
1500 {
1501 	int ret;
1502 
1503 	ret = hclge_query_function_status(hdev);
1504 	if (ret) {
1505 		dev_err(&hdev->pdev->dev,
1506 			"query function status error %d.\n", ret);
1507 		return ret;
1508 	}
1509 
1510 	/* get pf resource */
1511 	return hclge_query_pf_resource(hdev);
1512 }
1513 
1514 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1515 {
1516 #define HCLGE_MIN_TX_DESC	64
1517 #define HCLGE_MIN_RX_DESC	64
1518 
1519 	if (!is_kdump_kernel())
1520 		return;
1521 
1522 	dev_info(&hdev->pdev->dev,
1523 		 "Running kdump kernel. Using minimal resources\n");
1524 
	/* the minimum number of queue pairs equals the number of vports */
1526 	hdev->num_tqps = hdev->num_req_vfs + 1;
1527 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1528 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1529 }
1530 
1531 static int hclge_configure(struct hclge_dev *hdev)
1532 {
1533 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1534 	const struct cpumask *cpumask = cpu_online_mask;
1535 	struct hclge_cfg cfg;
1536 	unsigned int i;
1537 	int node, ret;
1538 
1539 	ret = hclge_get_cfg(hdev, &cfg);
1540 	if (ret)
1541 		return ret;
1542 
1543 	hdev->base_tqp_pid = 0;
1544 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1545 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1546 	hdev->rx_buf_len = cfg.rx_buf_len;
1547 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1548 	hdev->hw.mac.media_type = cfg.media_type;
1549 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1550 	hdev->num_tx_desc = cfg.tqp_desc_num;
1551 	hdev->num_rx_desc = cfg.tqp_desc_num;
1552 	hdev->tm_info.num_pg = 1;
1553 	hdev->tc_max = cfg.tc_num;
1554 	hdev->tm_info.hw_pfc_map = 0;
1555 	if (cfg.umv_space)
1556 		hdev->wanted_umv_size = cfg.umv_space;
1557 	else
1558 		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1559 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1560 	hdev->gro_en = true;
1561 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1562 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1563 
1564 	if (hnae3_dev_fd_supported(hdev)) {
1565 		hdev->fd_en = true;
1566 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1567 	}
1568 
1569 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1570 	if (ret) {
1571 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1572 			cfg.default_speed, ret);
1573 		return ret;
1574 	}
1575 
1576 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1577 
1578 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1579 
	if (hdev->tc_max > HNAE3_MAX_TC || hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
			 hdev->tc_max);
1584 		hdev->tc_max = 1;
1585 	}
1586 
1587 	/* Dev does not support DCB */
1588 	if (!hnae3_dev_dcb_supported(hdev)) {
1589 		hdev->tc_max = 1;
1590 		hdev->pfc_max = 0;
1591 	} else {
1592 		hdev->pfc_max = hdev->tc_max;
1593 	}
1594 
1595 	hdev->tm_info.num_tc = 1;
1596 
	/* non-contiguous TCs are currently not supported */
1598 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1599 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1600 
1601 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1602 
1603 	hclge_init_kdump_kernel_config(hdev);
1604 
1605 	/* Set the affinity based on numa node */
1606 	node = dev_to_node(&hdev->pdev->dev);
1607 	if (node != NUMA_NO_NODE)
1608 		cpumask = cpumask_of_node(node);
1609 
1610 	cpumask_copy(&hdev->affinity_mask, cpumask);
1611 
1612 	return ret;
1613 }
1614 
1615 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1616 			    u16 tso_mss_max)
1617 {
1618 	struct hclge_cfg_tso_status_cmd *req;
1619 	struct hclge_desc desc;
1620 
1621 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1622 
1623 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1624 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1625 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1626 
1627 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1628 }
1629 
1630 static int hclge_config_gro(struct hclge_dev *hdev)
1631 {
1632 	struct hclge_cfg_gro_status_cmd *req;
1633 	struct hclge_desc desc;
1634 	int ret;
1635 
1636 	if (!hnae3_dev_gro_supported(hdev))
1637 		return 0;
1638 
1639 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1640 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1641 
1642 	req->gro_en = hdev->gro_en ? 1 : 0;
1643 
1644 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1645 	if (ret)
1646 		dev_err(&hdev->pdev->dev,
1647 			"GRO hardware config cmd failed, ret = %d\n", ret);
1648 
1649 	return ret;
1650 }
1651 
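/* Allocate the per-queue (TQP) bookkeeping array and bind each queue to its
 * register window: queues with an index below HCLGE_TQP_MAX_SIZE_DEV_V2 sit
 * in the base TQP register region, while higher-numbered queues are reached
 * through the extended region and are indexed relative to
 * HCLGE_TQP_MAX_SIZE_DEV_V2.
 */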
1652 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1653 {
1654 	struct hclge_tqp *tqp;
1655 	int i;
1656 
1657 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1658 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1659 	if (!hdev->htqp)
1660 		return -ENOMEM;
1661 
1662 	tqp = hdev->htqp;
1663 
1664 	for (i = 0; i < hdev->num_tqps; i++) {
1665 		tqp->dev = &hdev->pdev->dev;
1666 		tqp->index = i;
1667 
1668 		tqp->q.ae_algo = &ae_algo;
1669 		tqp->q.buf_size = hdev->rx_buf_len;
1670 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1671 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1672 
1673 		/* need an extended offset to configure queues >=
1674 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1675 		 */
1676 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1677 			tqp->q.io_base = hdev->hw.io_base +
1678 					 HCLGE_TQP_REG_OFFSET +
1679 					 i * HCLGE_TQP_REG_SIZE;
1680 		else
1681 			tqp->q.io_base = hdev->hw.io_base +
1682 					 HCLGE_TQP_REG_OFFSET +
1683 					 HCLGE_TQP_EXT_REG_OFFSET +
1684 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1685 					 HCLGE_TQP_REG_SIZE;
1686 
1687 		tqp++;
1688 	}
1689 
1690 	return 0;
1691 }
1692 
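/* Program the hardware mapping from a physical queue pair (tqp_pid) to the
 * virtual queue id (tqp_vid) of a function; the mapping type bit selects
 * whether the owning function is the PF or a VF.
 */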
1693 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1694 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1695 {
1696 	struct hclge_tqp_map_cmd *req;
1697 	struct hclge_desc desc;
1698 	int ret;
1699 
1700 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1701 
1702 	req = (struct hclge_tqp_map_cmd *)desc.data;
1703 	req->tqp_id = cpu_to_le16(tqp_pid);
1704 	req->tqp_vf = func_id;
1705 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1706 	if (!is_pf)
1707 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1708 	req->tqp_vid = cpu_to_le16(tqp_vid);
1709 
1710 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1711 	if (ret)
1712 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1713 
1714 	return ret;
1715 }
1716 
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1718 {
1719 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1720 	struct hclge_dev *hdev = vport->back;
1721 	int i, alloced;
1722 
1723 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1724 	     alloced < num_tqps; i++) {
1725 		if (!hdev->htqp[i].alloced) {
1726 			hdev->htqp[i].q.handle = &vport->nic;
1727 			hdev->htqp[i].q.tqp_index = alloced;
1728 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1729 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1730 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1731 			hdev->htqp[i].alloced = true;
1732 			alloced++;
1733 		}
1734 	}
1735 	vport->alloc_tqps = alloced;
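	/* rss_size is limited by the hardware maximum and by the number of
	 * queues available per TC on this vport
	 */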
1736 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1737 				vport->alloc_tqps / hdev->tm_info.num_tc);
1738 
	/* ensure a one-to-one mapping between irq and queue by default */
1740 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1741 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1742 
1743 	return 0;
1744 }
1745 
1746 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
1750 	struct hnae3_handle *nic = &vport->nic;
1751 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1752 	struct hclge_dev *hdev = vport->back;
1753 	int ret;
1754 
1755 	kinfo->num_tx_desc = num_tx_desc;
1756 	kinfo->num_rx_desc = num_rx_desc;
1757 
1758 	kinfo->rx_buf_len = hdev->rx_buf_len;
1759 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1760 
1761 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1762 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1763 	if (!kinfo->tqp)
1764 		return -ENOMEM;
1765 
1766 	ret = hclge_assign_tqp(vport, num_tqps);
1767 	if (ret)
1768 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1769 
1770 	return ret;
1771 }
1772 
1773 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1774 				  struct hclge_vport *vport)
1775 {
1776 	struct hnae3_handle *nic = &vport->nic;
1777 	struct hnae3_knic_private_info *kinfo;
1778 	u16 i;
1779 
1780 	kinfo = &nic->kinfo;
1781 	for (i = 0; i < vport->alloc_tqps; i++) {
1782 		struct hclge_tqp *q =
1783 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1784 		bool is_pf;
1785 		int ret;
1786 
1787 		is_pf = !(vport->vport_id);
1788 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1789 					     i, is_pf);
1790 		if (ret)
1791 			return ret;
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1797 static int hclge_map_tqp(struct hclge_dev *hdev)
1798 {
1799 	struct hclge_vport *vport = hdev->vport;
1800 	u16 i, num_vport;
1801 
1802 	num_vport = hdev->num_req_vfs + 1;
1803 	for (i = 0; i < num_vport; i++)	{
1804 		int ret;
1805 
1806 		ret = hclge_map_tqp_to_vport(hdev, vport);
1807 		if (ret)
1808 			return ret;
1809 
1810 		vport++;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
1816 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1817 {
1818 	struct hnae3_handle *nic = &vport->nic;
1819 	struct hclge_dev *hdev = vport->back;
1820 	int ret;
1821 
1822 	nic->pdev = hdev->pdev;
1823 	nic->ae_algo = &ae_algo;
1824 	nic->numa_node_mask = hdev->numa_node_mask;
1825 	nic->kinfo.io_base = hdev->hw.io_base;
1826 
1827 	ret = hclge_knic_setup(vport, num_tqps,
1828 			       hdev->num_tx_desc, hdev->num_rx_desc);
1829 	if (ret)
1830 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1831 
1832 	return ret;
1833 }
1834 
1835 static int hclge_alloc_vport(struct hclge_dev *hdev)
1836 {
1837 	struct pci_dev *pdev = hdev->pdev;
1838 	struct hclge_vport *vport;
1839 	u32 tqp_main_vport;
1840 	u32 tqp_per_vport;
1841 	int num_vport, i;
1842 	int ret;
1843 
	/* We need to alloc a vport for the main NIC of the PF */
1845 	num_vport = hdev->num_req_vfs + 1;
1846 
1847 	if (hdev->num_tqps < num_vport) {
1848 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1849 			hdev->num_tqps, num_vport);
1850 		return -EINVAL;
1851 	}
1852 
	/* Alloc the same number of TQPs for every vport; the main (PF)
	 * vport additionally takes any remainder
	 */
1854 	tqp_per_vport = hdev->num_tqps / num_vport;
1855 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1856 
1857 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1858 			     GFP_KERNEL);
1859 	if (!vport)
1860 		return -ENOMEM;
1861 
1862 	hdev->vport = vport;
1863 	hdev->num_alloc_vport = num_vport;
1864 
1865 	if (IS_ENABLED(CONFIG_PCI_IOV))
1866 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1867 
1868 	for (i = 0; i < num_vport; i++) {
1869 		vport->back = hdev;
1870 		vport->vport_id = i;
1871 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1872 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1873 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1874 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1875 		vport->req_vlan_fltr_en = true;
1876 		INIT_LIST_HEAD(&vport->vlan_list);
1877 		INIT_LIST_HEAD(&vport->uc_mac_list);
1878 		INIT_LIST_HEAD(&vport->mc_mac_list);
1879 		spin_lock_init(&vport->mac_list_lock);
1880 
1881 		if (i == 0)
1882 			ret = hclge_vport_setup(vport, tqp_main_vport);
1883 		else
1884 			ret = hclge_vport_setup(vport, tqp_per_vport);
1885 		if (ret) {
1886 			dev_err(&pdev->dev,
1887 				"vport setup failed for vport %d, %d\n",
1888 				i, ret);
1889 			return ret;
1890 		}
1891 
1892 		vport++;
1893 	}
1894 
1895 	return 0;
1896 }
1897 
1898 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1899 				    struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
/* TX buffer size is allocated in units of 128 bytes */
1902 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1903 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1904 	struct hclge_tx_buff_alloc_cmd *req;
1905 	struct hclge_desc desc;
1906 	int ret;
1907 	u8 i;
1908 
1909 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1910 
1911 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1912 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1913 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1914 
1915 		req->tx_pkt_buff[i] =
1916 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1917 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1918 	}
1919 
1920 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1921 	if (ret)
1922 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1923 			ret);
1924 
1925 	return ret;
1926 }
1927 
1928 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1929 				 struct hclge_pkt_buf_alloc *buf_alloc)
1930 {
1931 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1932 
1933 	if (ret)
1934 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1935 
1936 	return ret;
1937 }
1938 
1939 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1940 {
1941 	unsigned int i;
1942 	u32 cnt = 0;
1943 
1944 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1945 		if (hdev->hw_tc_map & BIT(i))
1946 			cnt++;
1947 	return cnt;
1948 }
1949 
/* Get the number of PFC-enabled TCs that have a private buffer */
1951 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1952 				  struct hclge_pkt_buf_alloc *buf_alloc)
1953 {
1954 	struct hclge_priv_buf *priv;
1955 	unsigned int i;
1956 	int cnt = 0;
1957 
1958 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1959 		priv = &buf_alloc->priv_buf[i];
1960 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1961 		    priv->enable)
1962 			cnt++;
1963 	}
1964 
1965 	return cnt;
1966 }
1967 
/* Get the number of PFC-disabled TCs that have a private buffer */
1969 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1970 				     struct hclge_pkt_buf_alloc *buf_alloc)
1971 {
1972 	struct hclge_priv_buf *priv;
1973 	unsigned int i;
1974 	int cnt = 0;
1975 
1976 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1977 		priv = &buf_alloc->priv_buf[i];
1978 		if (hdev->hw_tc_map & BIT(i) &&
1979 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1980 		    priv->enable)
1981 			cnt++;
1982 	}
1983 
1984 	return cnt;
1985 }
1986 
1987 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1988 {
1989 	struct hclge_priv_buf *priv;
1990 	u32 rx_priv = 0;
1991 	int i;
1992 
1993 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1994 		priv = &buf_alloc->priv_buf[i];
1995 		if (priv->enable)
1996 			rx_priv += priv->buf_size;
1997 	}
1998 	return rx_priv;
1999 }
2000 
2001 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2002 {
2003 	u32 i, total_tx_size = 0;
2004 
2005 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2006 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2007 
2008 	return total_tx_size;
2009 }
2010 
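/* Check whether the rx buffer budget (rx_all) can hold all enabled private
 * buffers plus a large enough shared buffer. On success, fill in the shared
 * buffer size and its self and per-TC high/low waterlines; the minimum
 * shared buffer requirement depends on whether DCB is supported.
 */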
2011 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2012 				struct hclge_pkt_buf_alloc *buf_alloc,
2013 				u32 rx_all)
2014 {
2015 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2016 	u32 tc_num = hclge_get_tc_num(hdev);
2017 	u32 shared_buf, aligned_mps;
2018 	u32 rx_priv;
2019 	int i;
2020 
2021 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2022 
2023 	if (hnae3_dev_dcb_supported(hdev))
2024 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2025 					hdev->dv_buf_size;
2026 	else
2027 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2028 					+ hdev->dv_buf_size;
2029 
2030 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2031 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2032 			     HCLGE_BUF_SIZE_UNIT);
2033 
2034 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2035 	if (rx_all < rx_priv + shared_std)
2036 		return false;
2037 
2038 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2039 	buf_alloc->s_buf.buf_size = shared_buf;
2040 	if (hnae3_dev_dcb_supported(hdev)) {
2041 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2042 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2043 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2044 				  HCLGE_BUF_SIZE_UNIT);
2045 	} else {
2046 		buf_alloc->s_buf.self.high = aligned_mps +
2047 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2048 		buf_alloc->s_buf.self.low = aligned_mps;
2049 	}
2050 
2051 	if (hnae3_dev_dcb_supported(hdev)) {
2052 		hi_thrd = shared_buf - hdev->dv_buf_size;
2053 
2054 		if (tc_num <= NEED_RESERVE_TC_NUM)
2055 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2056 					/ BUF_MAX_PERCENT;
2057 
2058 		if (tc_num)
2059 			hi_thrd = hi_thrd / tc_num;
2060 
2061 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2062 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2063 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2064 	} else {
2065 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2066 		lo_thrd = aligned_mps;
2067 	}
2068 
2069 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2070 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2071 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2072 	}
2073 
2074 	return true;
2075 }
2076 
2077 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2078 				struct hclge_pkt_buf_alloc *buf_alloc)
2079 {
2080 	u32 i, total_size;
2081 
2082 	total_size = hdev->pkt_buf_size;
2083 
2084 	/* alloc tx buffer for all enabled tc */
2085 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2086 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2087 
2088 		if (hdev->hw_tc_map & BIT(i)) {
2089 			if (total_size < hdev->tx_buf_size)
2090 				return -ENOMEM;
2091 
2092 			priv->tx_buf_size = hdev->tx_buf_size;
2093 		} else {
2094 			priv->tx_buf_size = 0;
2095 		}
2096 
2097 		total_size -= priv->tx_buf_size;
2098 	}
2099 
2100 	return 0;
2101 }
2102 
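/* Assign a private rx buffer to every enabled TC, using either the larger
 * ("max") or the smaller waterline set, then let hclge_is_rx_buf_ok() check
 * whether the remaining packet buffer can still back the shared buffer.
 */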
2103 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2104 				  struct hclge_pkt_buf_alloc *buf_alloc)
2105 {
2106 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2107 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2108 	unsigned int i;
2109 
2110 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2111 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2112 
2113 		priv->enable = 0;
2114 		priv->wl.low = 0;
2115 		priv->wl.high = 0;
2116 		priv->buf_size = 0;
2117 
2118 		if (!(hdev->hw_tc_map & BIT(i)))
2119 			continue;
2120 
2121 		priv->enable = 1;
2122 
2123 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2124 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2125 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2126 						HCLGE_BUF_SIZE_UNIT);
2127 		} else {
2128 			priv->wl.low = 0;
2129 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2130 					aligned_mps;
2131 		}
2132 
2133 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2134 	}
2135 
2136 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138 
2139 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2140 					  struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2144 	int i;
2145 
	/* clear private buffers starting from the last TC */
2147 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 		unsigned int mask = BIT((unsigned int)i);
2150 
2151 		if (hdev->hw_tc_map & mask &&
2152 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of this non-PFC TC */
2154 			priv->wl.low = 0;
2155 			priv->wl.high = 0;
2156 			priv->buf_size = 0;
2157 			priv->enable = 0;
2158 			no_pfc_priv_num--;
2159 		}
2160 
2161 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162 		    no_pfc_priv_num == 0)
2163 			break;
2164 	}
2165 
2166 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168 
2169 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2170 					struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2173 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2174 	int i;
2175 
	/* clear private buffers starting from the last TC */
2177 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2178 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2179 		unsigned int mask = BIT((unsigned int)i);
2180 
2181 		if (hdev->hw_tc_map & mask &&
2182 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
2184 			priv->wl.low = 0;
2185 			priv->enable = 0;
2186 			priv->wl.high = 0;
2187 			priv->buf_size = 0;
2188 			pfc_priv_num--;
2189 		}
2190 
2191 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2192 		    pfc_priv_num == 0)
2193 			break;
2194 	}
2195 
2196 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2197 }
2198 
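/* Try to carve the whole remaining rx packet buffer into equal per-TC
 * private buffers with no shared buffer at all. This only succeeds when
 * each TC's share stays above the minimum built from dv_buf_size, the
 * compensation buffer and several half-MPS units.
 */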
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2201 {
2202 #define COMPENSATE_BUFFER	0x3C00
2203 #define COMPENSATE_HALF_MPS_NUM	5
2204 #define PRIV_WL_GAP		0x1800
2205 
2206 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2207 	u32 tc_num = hclge_get_tc_num(hdev);
2208 	u32 half_mps = hdev->mps >> 1;
2209 	u32 min_rx_priv;
2210 	unsigned int i;
2211 
2212 	if (tc_num)
2213 		rx_priv = rx_priv / tc_num;
2214 
2215 	if (tc_num <= NEED_RESERVE_TC_NUM)
2216 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2217 
2218 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2219 			COMPENSATE_HALF_MPS_NUM * half_mps;
2220 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2221 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2222 	if (rx_priv < min_rx_priv)
2223 		return false;
2224 
2225 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2226 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2227 
2228 		priv->enable = 0;
2229 		priv->wl.low = 0;
2230 		priv->wl.high = 0;
2231 		priv->buf_size = 0;
2232 
2233 		if (!(hdev->hw_tc_map & BIT(i)))
2234 			continue;
2235 
2236 		priv->enable = 1;
2237 		priv->buf_size = rx_priv;
2238 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2239 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2240 	}
2241 
2242 	buf_alloc->s_buf.buf_size = 0;
2243 
2244 	return true;
2245 }
2246 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on successful calculation, negative errno on failure
 */
2252 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2253 				struct hclge_pkt_buf_alloc *buf_alloc)
2254 {
2255 	/* When DCB is not supported, rx private buffer is not allocated. */
2256 	if (!hnae3_dev_dcb_supported(hdev)) {
2257 		u32 rx_all = hdev->pkt_buf_size;
2258 
2259 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2260 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2261 			return -ENOMEM;
2262 
2263 		return 0;
2264 	}
2265 
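	/* Try the allocation strategies in order, falling back to a less
	 * demanding scheme whenever the packet buffer cannot satisfy the
	 * previous one: private buffers only, full waterlines, reduced
	 * waterlines, then dropping private buffers of non-PFC TCs and
	 * finally of PFC TCs.
	 */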
2266 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2267 		return 0;
2268 
2269 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2270 		return 0;
2271 
2272 	/* try to decrease the buffer size */
2273 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2274 		return 0;
2275 
2276 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2277 		return 0;
2278 
2279 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2280 		return 0;
2281 
2282 	return -ENOMEM;
2283 }
2284 
2285 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2286 				   struct hclge_pkt_buf_alloc *buf_alloc)
2287 {
2288 	struct hclge_rx_priv_buff_cmd *req;
2289 	struct hclge_desc desc;
2290 	int ret;
2291 	int i;
2292 
2293 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2294 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2295 
	/* Alloc the private buffer of each TC */
2297 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2298 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2299 
2300 		req->buf_num[i] =
2301 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2302 		req->buf_num[i] |=
2303 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2304 	}
2305 
2306 	req->shared_buf =
2307 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2308 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2309 
2310 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2311 	if (ret)
2312 		dev_err(&hdev->pdev->dev,
2313 			"rx private buffer alloc cmd failed %d\n", ret);
2314 
2315 	return ret;
2316 }
2317 
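/* Write the per-TC private buffer waterlines to hardware. The waterlines of
 * all TCs are carried in two descriptors, HCLGE_TC_NUM_ONE_DESC TCs per
 * descriptor, with each value expressed in HCLGE_BUF_UNIT_S units and the
 * enable bit set.
 */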
2318 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2319 				   struct hclge_pkt_buf_alloc *buf_alloc)
2320 {
2321 	struct hclge_rx_priv_wl_buf *req;
2322 	struct hclge_priv_buf *priv;
2323 	struct hclge_desc desc[2];
2324 	int i, j;
2325 	int ret;
2326 
2327 	for (i = 0; i < 2; i++) {
2328 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2329 					   false);
2330 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2331 
		/* The first descriptor sets the NEXT bit to 1 */
2333 		if (i == 0)
2334 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2335 		else
2336 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2337 
2338 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2339 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2340 
2341 			priv = &buf_alloc->priv_buf[idx];
2342 			req->tc_wl[j].high =
2343 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2344 			req->tc_wl[j].high |=
2345 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2346 			req->tc_wl[j].low =
2347 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2348 			req->tc_wl[j].low |=
2349 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2350 		}
2351 	}
2352 
	/* Send 2 descriptors at one time */
2354 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2355 	if (ret)
2356 		dev_err(&hdev->pdev->dev,
2357 			"rx private waterline config cmd failed %d\n",
2358 			ret);
2359 	return ret;
2360 }
2361 
2362 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2363 				    struct hclge_pkt_buf_alloc *buf_alloc)
2364 {
2365 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2366 	struct hclge_rx_com_thrd *req;
2367 	struct hclge_desc desc[2];
2368 	struct hclge_tc_thrd *tc;
2369 	int i, j;
2370 	int ret;
2371 
2372 	for (i = 0; i < 2; i++) {
2373 		hclge_cmd_setup_basic_desc(&desc[i],
2374 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2375 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2376 
		/* The first descriptor sets the NEXT bit to 1 */
2378 		if (i == 0)
2379 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2380 		else
2381 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2382 
2383 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2384 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2385 
2386 			req->com_thrd[j].high =
2387 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2388 			req->com_thrd[j].high |=
2389 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2390 			req->com_thrd[j].low =
2391 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2392 			req->com_thrd[j].low |=
2393 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2394 		}
2395 	}
2396 
2397 	/* Send 2 descriptors at one time */
2398 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2399 	if (ret)
2400 		dev_err(&hdev->pdev->dev,
2401 			"common threshold config cmd failed %d\n", ret);
2402 	return ret;
2403 }
2404 
2405 static int hclge_common_wl_config(struct hclge_dev *hdev,
2406 				  struct hclge_pkt_buf_alloc *buf_alloc)
2407 {
2408 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2409 	struct hclge_rx_com_wl *req;
2410 	struct hclge_desc desc;
2411 	int ret;
2412 
2413 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2414 
2415 	req = (struct hclge_rx_com_wl *)desc.data;
2416 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2417 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2418 
2419 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2420 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2421 
2422 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2423 	if (ret)
2424 		dev_err(&hdev->pdev->dev,
2425 			"common waterline config cmd failed %d\n", ret);
2426 
2427 	return ret;
2428 }
2429 
2430 int hclge_buffer_alloc(struct hclge_dev *hdev)
2431 {
2432 	struct hclge_pkt_buf_alloc *pkt_buf;
2433 	int ret;
2434 
2435 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2436 	if (!pkt_buf)
2437 		return -ENOMEM;
2438 
2439 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2440 	if (ret) {
2441 		dev_err(&hdev->pdev->dev,
2442 			"could not calc tx buffer size for all TCs %d\n", ret);
2443 		goto out;
2444 	}
2445 
2446 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2447 	if (ret) {
2448 		dev_err(&hdev->pdev->dev,
2449 			"could not alloc tx buffers %d\n", ret);
2450 		goto out;
2451 	}
2452 
2453 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2454 	if (ret) {
2455 		dev_err(&hdev->pdev->dev,
2456 			"could not calc rx priv buffer size for all TCs %d\n",
2457 			ret);
2458 		goto out;
2459 	}
2460 
2461 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2462 	if (ret) {
2463 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2464 			ret);
2465 		goto out;
2466 	}
2467 
2468 	if (hnae3_dev_dcb_supported(hdev)) {
2469 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2470 		if (ret) {
2471 			dev_err(&hdev->pdev->dev,
2472 				"could not configure rx private waterline %d\n",
2473 				ret);
2474 			goto out;
2475 		}
2476 
2477 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2478 		if (ret) {
2479 			dev_err(&hdev->pdev->dev,
2480 				"could not configure common threshold %d\n",
2481 				ret);
2482 			goto out;
2483 		}
2484 	}
2485 
2486 	ret = hclge_common_wl_config(hdev, pkt_buf);
2487 	if (ret)
2488 		dev_err(&hdev->pdev->dev,
2489 			"could not configure common waterline %d\n", ret);
2490 
2491 out:
2492 	kfree(pkt_buf);
2493 	return ret;
2494 }
2495 
2496 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2497 {
2498 	struct hnae3_handle *roce = &vport->roce;
2499 	struct hnae3_handle *nic = &vport->nic;
2500 	struct hclge_dev *hdev = vport->back;
2501 
2502 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2503 
2504 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2505 		return -EINVAL;
2506 
2507 	roce->rinfo.base_vector = hdev->roce_base_vector;
2508 
2509 	roce->rinfo.netdev = nic->kinfo.netdev;
2510 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2511 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2512 
2513 	roce->pdev = nic->pdev;
2514 	roce->ae_algo = nic->ae_algo;
2515 	roce->numa_node_mask = nic->numa_node_mask;
2516 
2517 	return 0;
2518 }
2519 
2520 static int hclge_init_msi(struct hclge_dev *hdev)
2521 {
2522 	struct pci_dev *pdev = hdev->pdev;
2523 	int vectors;
2524 	int i;
2525 
2526 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2527 					hdev->num_msi,
2528 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2529 	if (vectors < 0) {
2530 		dev_err(&pdev->dev,
2531 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2532 			vectors);
2533 		return vectors;
2534 	}
2535 	if (vectors < hdev->num_msi)
2536 		dev_warn(&hdev->pdev->dev,
2537 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2538 			 hdev->num_msi, vectors);
2539 
2540 	hdev->num_msi = vectors;
2541 	hdev->num_msi_left = vectors;
2542 
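	/* the RoCE interrupt vectors are laid out right after the NIC
	 * vectors
	 */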
2543 	hdev->base_msi_vector = pdev->irq;
2544 	hdev->roce_base_vector = hdev->base_msi_vector +
2545 				hdev->num_nic_msi;
2546 
2547 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2548 					   sizeof(u16), GFP_KERNEL);
2549 	if (!hdev->vector_status) {
2550 		pci_free_irq_vectors(pdev);
2551 		return -ENOMEM;
2552 	}
2553 
2554 	for (i = 0; i < hdev->num_msi; i++)
2555 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2556 
2557 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2558 					sizeof(int), GFP_KERNEL);
2559 	if (!hdev->vector_irq) {
2560 		pci_free_irq_vectors(pdev);
2561 		return -ENOMEM;
2562 	}
2563 
2564 	return 0;
2565 }
2566 
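/* Half duplex is only meaningful at 10M/100M; force full duplex for any
 * other speed.
 */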
2567 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2568 {
2569 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2570 		duplex = HCLGE_MAC_FULL;
2571 
2572 	return duplex;
2573 }
2574 
2575 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2576 				      u8 duplex)
2577 {
2578 	struct hclge_config_mac_speed_dup_cmd *req;
2579 	struct hclge_desc desc;
2580 	int ret;
2581 
2582 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2583 
2584 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2585 
2586 	if (duplex)
2587 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2588 
2589 	switch (speed) {
2590 	case HCLGE_MAC_SPEED_10M:
2591 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2592 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2593 		break;
2594 	case HCLGE_MAC_SPEED_100M:
2595 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2596 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2597 		break;
2598 	case HCLGE_MAC_SPEED_1G:
2599 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2600 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2601 		break;
2602 	case HCLGE_MAC_SPEED_10G:
2603 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2604 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2605 		break;
2606 	case HCLGE_MAC_SPEED_25G:
2607 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2608 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2609 		break;
2610 	case HCLGE_MAC_SPEED_40G:
2611 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2612 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2613 		break;
2614 	case HCLGE_MAC_SPEED_50G:
2615 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2616 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2617 		break;
2618 	case HCLGE_MAC_SPEED_100G:
2619 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2620 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2621 		break;
2622 	case HCLGE_MAC_SPEED_200G:
2623 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2624 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2625 		break;
2626 	default:
2627 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2628 		return -EINVAL;
2629 	}
2630 
2631 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2632 		      1);
2633 
2634 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2635 	if (ret) {
2636 		dev_err(&hdev->pdev->dev,
2637 			"mac speed/duplex config cmd failed %d.\n", ret);
2638 		return ret;
2639 	}
2640 
2641 	return 0;
2642 }
2643 
2644 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2645 {
2646 	struct hclge_mac *mac = &hdev->hw.mac;
2647 	int ret;
2648 
2649 	duplex = hclge_check_speed_dup(duplex, speed);
2650 	if (!mac->support_autoneg && mac->speed == speed &&
2651 	    mac->duplex == duplex)
2652 		return 0;
2653 
2654 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2655 	if (ret)
2656 		return ret;
2657 
2658 	hdev->hw.mac.speed = speed;
2659 	hdev->hw.mac.duplex = duplex;
2660 
2661 	return 0;
2662 }
2663 
2664 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2665 				     u8 duplex)
2666 {
2667 	struct hclge_vport *vport = hclge_get_vport(handle);
2668 	struct hclge_dev *hdev = vport->back;
2669 
2670 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2671 }
2672 
2673 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2674 {
2675 	struct hclge_config_auto_neg_cmd *req;
2676 	struct hclge_desc desc;
2677 	u32 flag = 0;
2678 	int ret;
2679 
2680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2681 
2682 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2683 	if (enable)
2684 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2685 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2686 
2687 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2688 	if (ret)
2689 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2690 			ret);
2691 
2692 	return ret;
2693 }
2694 
2695 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2696 {
2697 	struct hclge_vport *vport = hclge_get_vport(handle);
2698 	struct hclge_dev *hdev = vport->back;
2699 
2700 	if (!hdev->hw.mac.support_autoneg) {
2701 		if (enable) {
2702 			dev_err(&hdev->pdev->dev,
2703 				"autoneg is not supported by current port\n");
2704 			return -EOPNOTSUPP;
2705 		} else {
2706 			return 0;
2707 		}
2708 	}
2709 
2710 	return hclge_set_autoneg_en(hdev, enable);
2711 }
2712 
2713 static int hclge_get_autoneg(struct hnae3_handle *handle)
2714 {
2715 	struct hclge_vport *vport = hclge_get_vport(handle);
2716 	struct hclge_dev *hdev = vport->back;
2717 	struct phy_device *phydev = hdev->hw.mac.phydev;
2718 
2719 	if (phydev)
2720 		return phydev->autoneg;
2721 
2722 	return hdev->hw.mac.autoneg;
2723 }
2724 
2725 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2726 {
2727 	struct hclge_vport *vport = hclge_get_vport(handle);
2728 	struct hclge_dev *hdev = vport->back;
2729 	int ret;
2730 
2731 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2732 
2733 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2734 	if (ret)
2735 		return ret;
2736 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2737 }
2738 
2739 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2740 {
2741 	struct hclge_vport *vport = hclge_get_vport(handle);
2742 	struct hclge_dev *hdev = vport->back;
2743 
2744 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2745 		return hclge_set_autoneg_en(hdev, !halt);
2746 
2747 	return 0;
2748 }
2749 
2750 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2751 {
2752 	struct hclge_config_fec_cmd *req;
2753 	struct hclge_desc desc;
2754 	int ret;
2755 
2756 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2757 
2758 	req = (struct hclge_config_fec_cmd *)desc.data;
2759 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2760 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2761 	if (fec_mode & BIT(HNAE3_FEC_RS))
2762 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2763 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2764 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2765 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2766 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2767 
2768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2769 	if (ret)
2770 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2771 
2772 	return ret;
2773 }
2774 
2775 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2776 {
2777 	struct hclge_vport *vport = hclge_get_vport(handle);
2778 	struct hclge_dev *hdev = vport->back;
2779 	struct hclge_mac *mac = &hdev->hw.mac;
2780 	int ret;
2781 
2782 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2783 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2784 		return -EINVAL;
2785 	}
2786 
2787 	ret = hclge_set_fec_hw(hdev, fec_mode);
2788 	if (ret)
2789 		return ret;
2790 
2791 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2792 	return 0;
2793 }
2794 
2795 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2796 			  u8 *fec_mode)
2797 {
2798 	struct hclge_vport *vport = hclge_get_vport(handle);
2799 	struct hclge_dev *hdev = vport->back;
2800 	struct hclge_mac *mac = &hdev->hw.mac;
2801 
2802 	if (fec_ability)
2803 		*fec_ability = mac->fec_ability;
2804 	if (fec_mode)
2805 		*fec_mode = mac->fec_mode;
2806 }
2807 
2808 static int hclge_mac_init(struct hclge_dev *hdev)
2809 {
2810 	struct hclge_mac *mac = &hdev->hw.mac;
2811 	int ret;
2812 
2813 	hdev->support_sfp_query = true;
2814 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2815 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2816 					 hdev->hw.mac.duplex);
2817 	if (ret)
2818 		return ret;
2819 
2820 	if (hdev->hw.mac.support_autoneg) {
2821 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2822 		if (ret)
2823 			return ret;
2824 	}
2825 
2826 	mac->link = 0;
2827 
2828 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2829 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2830 		if (ret)
2831 			return ret;
2832 	}
2833 
2834 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2835 	if (ret) {
2836 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2837 		return ret;
2838 	}
2839 
2840 	ret = hclge_set_default_loopback(hdev);
2841 	if (ret)
2842 		return ret;
2843 
2844 	ret = hclge_buffer_alloc(hdev);
2845 	if (ret)
2846 		dev_err(&hdev->pdev->dev,
2847 			"allocate buffer fail, ret=%d\n", ret);
2848 
2849 	return ret;
2850 }
2851 
2852 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2853 {
2854 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2856 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2857 				    hclge_wq, &hdev->service_task, 0);
2858 }
2859 
2860 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2861 {
2862 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2863 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2864 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2865 				    hclge_wq, &hdev->service_task, 0);
2866 }
2867 
2868 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2869 {
2870 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2872 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2873 				    hclge_wq, &hdev->service_task, 0);
2874 }
2875 
2876 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2877 {
2878 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2879 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2880 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2881 				    hclge_wq, &hdev->service_task,
2882 				    delay_time);
2883 }
2884 
2885 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2886 {
2887 	struct hclge_link_status_cmd *req;
2888 	struct hclge_desc desc;
2889 	int ret;
2890 
2891 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2892 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2893 	if (ret) {
2894 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2895 			ret);
2896 		return ret;
2897 	}
2898 
2899 	req = (struct hclge_link_status_cmd *)desc.data;
2900 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2901 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2902 
2903 	return 0;
2904 }
2905 
2906 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2907 {
2908 	struct phy_device *phydev = hdev->hw.mac.phydev;
2909 
2910 	*link_status = HCLGE_LINK_STATUS_DOWN;
2911 
2912 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2913 		return 0;
2914 
2915 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2916 		return 0;
2917 
2918 	return hclge_get_mac_link_status(hdev, link_status);
2919 }
2920 
2921 static void hclge_push_link_status(struct hclge_dev *hdev)
2922 {
2923 	struct hclge_vport *vport;
2924 	int ret;
2925 	u16 i;
2926 
2927 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2928 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2929 
2930 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2931 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2932 			continue;
2933 
2934 		ret = hclge_push_vf_link_status(vport);
2935 		if (ret) {
2936 			dev_err(&hdev->pdev->dev,
2937 				"failed to push link status to vf%u, ret = %d\n",
2938 				i, ret);
2939 		}
2940 	}
2941 }
2942 
2943 static void hclge_update_link_status(struct hclge_dev *hdev)
2944 {
2945 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2946 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2947 	struct hnae3_client *rclient = hdev->roce_client;
2948 	struct hnae3_client *client = hdev->nic_client;
2949 	int state;
2950 	int ret;
2951 
2952 	if (!client)
2953 		return;
2954 
2955 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2956 		return;
2957 
2958 	ret = hclge_get_mac_phy_link(hdev, &state);
2959 	if (ret) {
2960 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2961 		return;
2962 	}
2963 
2964 	if (state != hdev->hw.mac.link) {
2965 		hdev->hw.mac.link = state;
2966 		client->ops->link_status_change(handle, state);
2967 		hclge_config_mac_tnl_int(hdev, state);
2968 		if (rclient && rclient->ops->link_status_change)
2969 			rclient->ops->link_status_change(rhandle, state);
2970 
2971 		hclge_push_link_status(hdev);
2972 	}
2973 
2974 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2975 }
2976 
2977 static void hclge_update_port_capability(struct hclge_dev *hdev,
2978 					 struct hclge_mac *mac)
2979 {
2980 	if (hnae3_dev_fec_supported(hdev))
2981 		/* update fec ability by speed */
2982 		hclge_convert_setting_fec(mac);
2983 
	/* the firmware cannot identify the backplane type; the media type
	 * read from the configuration helps to determine it
	 */
2987 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2988 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2989 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2990 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2991 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2992 
2993 	if (mac->support_autoneg) {
2994 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2995 		linkmode_copy(mac->advertising, mac->supported);
2996 	} else {
2997 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2998 				   mac->supported);
2999 		linkmode_zero(mac->advertising);
3000 	}
3001 }
3002 
3003 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3004 {
3005 	struct hclge_sfp_info_cmd *resp;
3006 	struct hclge_desc desc;
3007 	int ret;
3008 
3009 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3010 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3011 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3012 	if (ret == -EOPNOTSUPP) {
3013 		dev_warn(&hdev->pdev->dev,
3014 			 "IMP do not support get SFP speed %d\n", ret);
3015 		return ret;
3016 	} else if (ret) {
3017 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3018 		return ret;
3019 	}
3020 
3021 	*speed = le32_to_cpu(resp->speed);
3022 
3023 	return 0;
3024 }
3025 
3026 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3027 {
3028 	struct hclge_sfp_info_cmd *resp;
3029 	struct hclge_desc desc;
3030 	int ret;
3031 
3032 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3033 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3034 
3035 	resp->query_type = QUERY_ACTIVE_SPEED;
3036 
3037 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3038 	if (ret == -EOPNOTSUPP) {
3039 		dev_warn(&hdev->pdev->dev,
3040 			 "IMP does not support get SFP info %d\n", ret);
3041 		return ret;
3042 	} else if (ret) {
3043 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3044 		return ret;
3045 	}
3046 
	/* In some cases, the MAC speed reported by the IMP may be 0; it
	 * should not be written to mac->speed.
	 */
3050 	if (!le32_to_cpu(resp->speed))
3051 		return 0;
3052 
3053 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version and
	 * these params should not be updated
	 */
3057 	if (resp->speed_ability) {
3058 		mac->module_type = le32_to_cpu(resp->module_type);
3059 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3060 		mac->autoneg = resp->autoneg;
3061 		mac->support_autoneg = resp->autoneg_ability;
3062 		mac->speed_type = QUERY_ACTIVE_SPEED;
3063 		if (!resp->active_fec)
3064 			mac->fec_mode = 0;
3065 		else
3066 			mac->fec_mode = BIT(resp->active_fec);
3067 	} else {
3068 		mac->speed_type = QUERY_SFP_SPEED;
3069 	}
3070 
3071 	return 0;
3072 }
3073 
3074 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3075 					struct ethtool_link_ksettings *cmd)
3076 {
3077 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3078 	struct hclge_vport *vport = hclge_get_vport(handle);
3079 	struct hclge_phy_link_ksetting_0_cmd *req0;
3080 	struct hclge_phy_link_ksetting_1_cmd *req1;
3081 	u32 supported, advertising, lp_advertising;
3082 	struct hclge_dev *hdev = vport->back;
3083 	int ret;
3084 
3085 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3086 				   true);
3087 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3088 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3089 				   true);
3090 
3091 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3092 	if (ret) {
3093 		dev_err(&hdev->pdev->dev,
3094 			"failed to get phy link ksetting, ret = %d.\n", ret);
3095 		return ret;
3096 	}
3097 
3098 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3099 	cmd->base.autoneg = req0->autoneg;
3100 	cmd->base.speed = le32_to_cpu(req0->speed);
3101 	cmd->base.duplex = req0->duplex;
3102 	cmd->base.port = req0->port;
3103 	cmd->base.transceiver = req0->transceiver;
3104 	cmd->base.phy_address = req0->phy_address;
3105 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3106 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3107 	supported = le32_to_cpu(req0->supported);
3108 	advertising = le32_to_cpu(req0->advertising);
3109 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3110 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3111 						supported);
3112 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3113 						advertising);
3114 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3115 						lp_advertising);
3116 
3117 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3118 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3119 	cmd->base.master_slave_state = req1->master_slave_state;
3120 
3121 	return 0;
3122 }
3123 
3124 static int
3125 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3126 			     const struct ethtool_link_ksettings *cmd)
3127 {
3128 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3129 	struct hclge_vport *vport = hclge_get_vport(handle);
3130 	struct hclge_phy_link_ksetting_0_cmd *req0;
3131 	struct hclge_phy_link_ksetting_1_cmd *req1;
3132 	struct hclge_dev *hdev = vport->back;
3133 	u32 advertising;
3134 	int ret;
3135 
3136 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3137 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3138 	     (cmd->base.duplex != DUPLEX_HALF &&
3139 	      cmd->base.duplex != DUPLEX_FULL)))
3140 		return -EINVAL;
3141 
3142 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3143 				   false);
3144 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3145 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3146 				   false);
3147 
3148 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3149 	req0->autoneg = cmd->base.autoneg;
3150 	req0->speed = cpu_to_le32(cmd->base.speed);
3151 	req0->duplex = cmd->base.duplex;
3152 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3153 						cmd->link_modes.advertising);
3154 	req0->advertising = cpu_to_le32(advertising);
3155 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3156 
3157 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3158 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3159 
3160 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3161 	if (ret) {
3162 		dev_err(&hdev->pdev->dev,
3163 			"failed to set phy link ksettings, ret = %d.\n", ret);
3164 		return ret;
3165 	}
3166 
3167 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3168 	hdev->hw.mac.speed = cmd->base.speed;
3169 	hdev->hw.mac.duplex = cmd->base.duplex;
3170 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3171 
3172 	return 0;
3173 }
3174 
3175 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3176 {
3177 	struct ethtool_link_ksettings cmd;
3178 	int ret;
3179 
3180 	if (!hnae3_dev_phy_imp_supported(hdev))
3181 		return 0;
3182 
3183 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3184 	if (ret)
3185 		return ret;
3186 
3187 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3188 	hdev->hw.mac.speed = cmd.base.speed;
3189 	hdev->hw.mac.duplex = cmd.base.duplex;
3190 
3191 	return 0;
3192 }
3193 
3194 static int hclge_tp_port_init(struct hclge_dev *hdev)
3195 {
3196 	struct ethtool_link_ksettings cmd;
3197 
3198 	if (!hnae3_dev_phy_imp_supported(hdev))
3199 		return 0;
3200 
3201 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3202 	cmd.base.speed = hdev->hw.mac.speed;
3203 	cmd.base.duplex = hdev->hw.mac.duplex;
3204 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3205 
3206 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3207 }
3208 
3209 static int hclge_update_port_info(struct hclge_dev *hdev)
3210 {
3211 	struct hclge_mac *mac = &hdev->hw.mac;
3212 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3213 	int ret;
3214 
3215 	/* get the port info from SFP cmd if not copper port */
3216 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3217 		return hclge_update_tp_port_info(hdev);
3218 
	/* if IMP does not support getting SFP/qSFP info, return directly */
3220 	if (!hdev->support_sfp_query)
3221 		return 0;
3222 
3223 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3224 		ret = hclge_get_sfp_info(hdev, mac);
3225 	else
3226 		ret = hclge_get_sfp_speed(hdev, &speed);
3227 
3228 	if (ret == -EOPNOTSUPP) {
3229 		hdev->support_sfp_query = false;
3230 		return ret;
3231 	} else if (ret) {
3232 		return ret;
3233 	}
3234 
3235 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3236 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3237 			hclge_update_port_capability(hdev, mac);
3238 			return 0;
3239 		}
3240 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3241 					       HCLGE_MAC_FULL);
3242 	} else {
3243 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3244 			return 0; /* do nothing if no SFP */
3245 
3246 		/* must config full duplex for SFP */
3247 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3248 	}
3249 }
3250 
3251 static int hclge_get_status(struct hnae3_handle *handle)
3252 {
3253 	struct hclge_vport *vport = hclge_get_vport(handle);
3254 	struct hclge_dev *hdev = vport->back;
3255 
3256 	hclge_update_link_status(hdev);
3257 
3258 	return hdev->hw.mac.link;
3259 }
3260 
3261 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3262 {
3263 	if (!pci_num_vf(hdev->pdev)) {
3264 		dev_err(&hdev->pdev->dev,
3265 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3266 		return NULL;
3267 	}
3268 
3269 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3270 		dev_err(&hdev->pdev->dev,
3271 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3272 			vf, pci_num_vf(hdev->pdev));
3273 		return NULL;
3274 	}
3275 
	/* VFs start from 1 in the vport array */
3277 	vf += HCLGE_VF_VPORT_START_NUM;
3278 	return &hdev->vport[vf];
3279 }
3280 
3281 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3282 			       struct ifla_vf_info *ivf)
3283 {
3284 	struct hclge_vport *vport = hclge_get_vport(handle);
3285 	struct hclge_dev *hdev = vport->back;
3286 
3287 	vport = hclge_get_vf_vport(hdev, vf);
3288 	if (!vport)
3289 		return -EINVAL;
3290 
3291 	ivf->vf = vf;
3292 	ivf->linkstate = vport->vf_info.link_state;
3293 	ivf->spoofchk = vport->vf_info.spoofchk;
3294 	ivf->trusted = vport->vf_info.trusted;
3295 	ivf->min_tx_rate = 0;
3296 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3297 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3298 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3299 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3300 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3301 
3302 	return 0;
3303 }
3304 
3305 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3306 				   int link_state)
3307 {
3308 	struct hclge_vport *vport = hclge_get_vport(handle);
3309 	struct hclge_dev *hdev = vport->back;
3310 	int link_state_old;
3311 	int ret;
3312 
3313 	vport = hclge_get_vf_vport(hdev, vf);
3314 	if (!vport)
3315 		return -EINVAL;
3316 
3317 	link_state_old = vport->vf_info.link_state;
3318 	vport->vf_info.link_state = link_state;
3319 
3320 	ret = hclge_push_vf_link_status(vport);
3321 	if (ret) {
3322 		vport->vf_info.link_state = link_state_old;
3323 		dev_err(&hdev->pdev->dev,
3324 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3325 	}
3326 
3327 	return ret;
3328 }
3329 
3330 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3331 {
3332 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3333 
3334 	/* fetch the events from their corresponding regs */
3335 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3336 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3337 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3338 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3339 
	/* Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed in this pass and the mailbox
	 * event is deferred. Since the RX CMDQ event is not cleared here,
	 * the hardware will raise another interrupt just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3348 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3349 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3350 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3351 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3352 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3353 		hdev->rst_stats.imp_rst_cnt++;
3354 		return HCLGE_VECTOR0_EVENT_RST;
3355 	}
3356 
3357 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3358 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3359 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3360 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3361 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3362 		hdev->rst_stats.global_rst_cnt++;
3363 		return HCLGE_VECTOR0_EVENT_RST;
3364 	}
3365 
3366 	/* check for vector0 msix event and hardware error event source */
3367 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3368 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3369 		return HCLGE_VECTOR0_EVENT_ERR;
3370 
3371 	/* check for vector0 ptp event source */
3372 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3373 		*clearval = msix_src_reg;
3374 		return HCLGE_VECTOR0_EVENT_PTP;
3375 	}
3376 
3377 	/* check for vector0 mailbox(=CMDQ RX) event source */
3378 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3379 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3380 		*clearval = cmdq_src_reg;
3381 		return HCLGE_VECTOR0_EVENT_MBX;
3382 	}
3383 
3384 	/* print other vector0 event source */
3385 	dev_info(&hdev->pdev->dev,
3386 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3387 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3388 
3389 	return HCLGE_VECTOR0_EVENT_OTHER;
3390 }
3391 
3392 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3393 				    u32 regclr)
3394 {
3395 	switch (event_type) {
3396 	case HCLGE_VECTOR0_EVENT_PTP:
3397 	case HCLGE_VECTOR0_EVENT_RST:
3398 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3399 		break;
3400 	case HCLGE_VECTOR0_EVENT_MBX:
3401 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3402 		break;
3403 	default:
3404 		break;
3405 	}
3406 }
3407 
3408 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3409 {
3410 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3411 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3412 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3413 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3414 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3415 }
3416 
3417 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3418 {
3419 	writel(enable ? 1 : 0, vector->addr);
3420 }
3421 
3422 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3423 {
3424 	struct hclge_dev *hdev = data;
3425 	unsigned long flags;
3426 	u32 clearval = 0;
3427 	u32 event_cause;
3428 
3429 	hclge_enable_vector(&hdev->misc_vector, false);
3430 	event_cause = hclge_check_event_cause(hdev, &clearval);
3431 
3432 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3433 	switch (event_cause) {
3434 	case HCLGE_VECTOR0_EVENT_ERR:
3435 		hclge_errhand_task_schedule(hdev);
3436 		break;
3437 	case HCLGE_VECTOR0_EVENT_RST:
3438 		hclge_reset_task_schedule(hdev);
3439 		break;
3440 	case HCLGE_VECTOR0_EVENT_PTP:
3441 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3442 		hclge_ptp_clean_tx_hwts(hdev);
3443 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3444 		break;
3445 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. no mailbox task is being handled and none is scheduled, or
		 * 2. a mailbox task is being handled but nothing more is
		 *    scheduled.
		 * In both cases, schedule the mailbox task, since this
		 * interrupt reports more mailbox messages.
		 */
3455 		hclge_mbx_task_schedule(hdev);
3456 		break;
3457 	default:
3458 		dev_warn(&hdev->pdev->dev,
3459 			 "received unknown or unhandled event of vector0\n");
3460 		break;
3461 	}
3462 
3463 	hclge_clear_event_cause(hdev, event_cause, clearval);
3464 
	/* Enable the interrupt unless caused by a reset or error event */
3466 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3467 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3468 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3469 		hclge_enable_vector(&hdev->misc_vector, true);
3470 
3471 	return IRQ_HANDLED;
3472 }
3473 
3474 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3475 {
3476 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3477 		dev_warn(&hdev->pdev->dev,
3478 			 "vector(vector_id %d) has been freed.\n", vector_id);
3479 		return;
3480 	}
3481 
3482 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3483 	hdev->num_msi_left += 1;
3484 	hdev->num_msi_used -= 1;
3485 }
3486 
3487 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3488 {
3489 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3490 
3491 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3492 
3493 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3494 	hdev->vector_status[0] = 0;
3495 
3496 	hdev->num_msi_left -= 1;
3497 	hdev->num_msi_used += 1;
3498 }
3499 
3500 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3501 				      const cpumask_t *mask)
3502 {
3503 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3504 					      affinity_notify);
3505 
3506 	cpumask_copy(&hdev->affinity_mask, mask);
3507 }
3508 
3509 static void hclge_irq_affinity_release(struct kref *ref)
3510 {
3511 }
3512 
3513 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3514 {
3515 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3516 			      &hdev->affinity_mask);
3517 
3518 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3519 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3520 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3521 				  &hdev->affinity_notify);
3522 }
3523 
3524 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3525 {
3526 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3527 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3528 }
3529 
3530 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3531 {
3532 	int ret;
3533 
3534 	hclge_get_misc_vector(hdev);
3535 
	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3537 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3538 		 HCLGE_NAME, pci_name(hdev->pdev));
3539 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3540 			  0, hdev->misc_vector.name, hdev);
3541 	if (ret) {
3542 		hclge_free_vector(hdev, 0);
3543 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3544 			hdev->misc_vector.vector_irq);
3545 	}
3546 
3547 	return ret;
3548 }
3549 
3550 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3551 {
3552 	free_irq(hdev->misc_vector.vector_irq, hdev);
3553 	hclge_free_vector(hdev, 0);
3554 }
3555 
3556 int hclge_notify_client(struct hclge_dev *hdev,
3557 			enum hnae3_reset_notify_type type)
3558 {
3559 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3560 	struct hnae3_client *client = hdev->nic_client;
3561 	int ret;
3562 
3563 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3564 		return 0;
3565 
3566 	if (!client->ops->reset_notify)
3567 		return -EOPNOTSUPP;
3568 
3569 	ret = client->ops->reset_notify(handle, type);
3570 	if (ret)
3571 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3572 			type, ret);
3573 
3574 	return ret;
3575 }
3576 
3577 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3578 				    enum hnae3_reset_notify_type type)
3579 {
3580 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3581 	struct hnae3_client *client = hdev->roce_client;
3582 	int ret;
3583 
3584 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3585 		return 0;
3586 
3587 	if (!client->ops->reset_notify)
3588 		return -EOPNOTSUPP;
3589 
3590 	ret = client->ops->reset_notify(handle, type);
3591 	if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
3593 			type, ret);
3594 
3595 	return ret;
3596 }
3597 
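/* Poll the reset status register that matches hdev->reset_type until the
 * hardware clears the in-progress bit. Returns 0 on completion, -EBUSY on
 * timeout and -EINVAL for an unsupported reset type.
 */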
3598 static int hclge_reset_wait(struct hclge_dev *hdev)
3599 {
#define HCLGE_RESET_WAIT_MS	100
3601 #define HCLGE_RESET_WAIT_CNT	350
3602 
3603 	u32 val, reg, reg_bit;
3604 	u32 cnt = 0;
3605 
3606 	switch (hdev->reset_type) {
3607 	case HNAE3_IMP_RESET:
3608 		reg = HCLGE_GLOBAL_RESET_REG;
3609 		reg_bit = HCLGE_IMP_RESET_BIT;
3610 		break;
3611 	case HNAE3_GLOBAL_RESET:
3612 		reg = HCLGE_GLOBAL_RESET_REG;
3613 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3614 		break;
3615 	case HNAE3_FUNC_RESET:
3616 		reg = HCLGE_FUN_RST_ING;
3617 		reg_bit = HCLGE_FUN_RST_ING_B;
3618 		break;
3619 	default:
3620 		dev_err(&hdev->pdev->dev,
3621 			"Wait for unsupported reset type: %d\n",
3622 			hdev->reset_type);
3623 		return -EINVAL;
3624 	}
3625 
3626 	val = hclge_read_dev(&hdev->hw, reg);
3627 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3629 		val = hclge_read_dev(&hdev->hw, reg);
3630 		cnt++;
3631 	}
3632 
3633 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3634 		dev_warn(&hdev->pdev->dev,
3635 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3636 		return -EBUSY;
3637 	}
3638 
3639 	return 0;
3640 }
3641 
3642 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3643 {
3644 	struct hclge_vf_rst_cmd *req;
3645 	struct hclge_desc desc;
3646 
3647 	req = (struct hclge_vf_rst_cmd *)desc.data;
3648 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3649 	req->dest_vfid = func_id;
3650 
3651 	if (reset)
3652 		req->vf_rst = 0x1;
3653 
3654 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3655 }
3656 
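/* Set or clear FUNC_RST_ING for every VF vport. When asserting the reset,
 * alive VFs are also informed through the mailbox; a failure there is only
 * logged, since the VF driver may not be loaded.
 */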
3657 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3658 {
3659 	int i;
3660 
3661 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3662 		struct hclge_vport *vport = &hdev->vport[i];
3663 		int ret;
3664 
3665 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3666 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3667 		if (ret) {
3668 			dev_err(&hdev->pdev->dev,
3669 				"set vf(%u) rst failed %d!\n",
3670 				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3671 				ret);
3672 			return ret;
3673 		}
3674 
3675 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3676 			continue;
3677 
3678 		/* Inform VF to process the reset.
3679 		 * hclge_inform_reset_assert_to_vf may fail if VF
3680 		 * driver is not loaded.
3681 		 */
3682 		ret = hclge_inform_reset_assert_to_vf(vport);
3683 		if (ret)
3684 			dev_warn(&hdev->pdev->dev,
3685 				 "inform reset to vf(%u) failed %d!\n",
3686 				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3687 				 ret);
3688 	}
3689 
3690 	return 0;
3691 }
3692 
3693 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3694 {
3695 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3696 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3697 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3698 		return;
3699 
3700 	hclge_mbx_handler(hdev);
3701 
3702 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3703 }
3704 
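/* Wait until all VFs report that they are ready for reset (i.e. have stopped
 * IO), servicing mailbox requests in between so the VFs can bring their
 * netdevs down. Old firmware that does not support the query is handled with
 * a fixed HCLGE_RESET_SYNC_TIME delay.
 */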
3705 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3706 {
3707 	struct hclge_pf_rst_sync_cmd *req;
3708 	struct hclge_desc desc;
3709 	int cnt = 0;
3710 	int ret;
3711 
3712 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3713 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3714 
3715 	do {
		/* The VF needs to bring its netdev down via mailbox during
		 * a PF or FLR reset.
		 */
3717 		hclge_mailbox_service_task(hdev);
3718 
3719 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* to be compatible with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3723 		if (ret == -EOPNOTSUPP) {
3724 			msleep(HCLGE_RESET_SYNC_TIME);
3725 			return;
3726 		} else if (ret) {
3727 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3728 				 ret);
3729 			return;
3730 		} else if (req->all_vf_ready) {
3731 			return;
3732 		}
3733 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3734 		hclge_cmd_reuse_desc(&desc, true);
3735 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3736 
3737 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3738 }
3739 
3740 void hclge_report_hw_error(struct hclge_dev *hdev,
3741 			   enum hnae3_hw_error_type type)
3742 {
3743 	struct hnae3_client *client = hdev->nic_client;
3744 
3745 	if (!client || !client->ops->process_hw_error ||
3746 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3747 		return;
3748 
3749 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3750 }
3751 
3752 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3753 {
3754 	u32 reg_val;
3755 
3756 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3757 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3758 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3759 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3760 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3761 	}
3762 
3763 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3764 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3765 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3766 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3767 	}
3768 }
3769 
3770 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3771 {
3772 	struct hclge_desc desc;
3773 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3774 	int ret;
3775 
3776 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3777 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3778 	req->fun_reset_vfid = func_id;
3779 
3780 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3781 	if (ret)
3782 		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3784 
3785 	return ret;
3786 }
3787 
3788 static void hclge_do_reset(struct hclge_dev *hdev)
3789 {
3790 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3791 	struct pci_dev *pdev = hdev->pdev;
3792 	u32 val;
3793 
3794 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finished\n");
3796 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3797 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3798 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3799 		return;
3800 	}
3801 
3802 	switch (hdev->reset_type) {
3803 	case HNAE3_IMP_RESET:
3804 		dev_info(&pdev->dev, "IMP reset requested\n");
3805 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3806 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3807 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3808 		break;
3809 	case HNAE3_GLOBAL_RESET:
3810 		dev_info(&pdev->dev, "global reset requested\n");
3811 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3812 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3813 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3814 		break;
3815 	case HNAE3_FUNC_RESET:
3816 		dev_info(&pdev->dev, "PF reset requested\n");
3817 		/* schedule again to check later */
3818 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3819 		hclge_reset_task_schedule(hdev);
3820 		break;
3821 	default:
3822 		dev_warn(&pdev->dev,
3823 			 "unsupported reset type: %d\n", hdev->reset_type);
3824 		break;
3825 	}
3826 }
3827 
3828 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3829 						   unsigned long *addr)
3830 {
3831 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3832 	struct hclge_dev *hdev = ae_dev->priv;
3833 
3834 	/* return the highest priority reset level amongst all */
3835 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3836 		rst_level = HNAE3_IMP_RESET;
3837 		clear_bit(HNAE3_IMP_RESET, addr);
3838 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3839 		clear_bit(HNAE3_FUNC_RESET, addr);
3840 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3841 		rst_level = HNAE3_GLOBAL_RESET;
3842 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3843 		clear_bit(HNAE3_FUNC_RESET, addr);
3844 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3845 		rst_level = HNAE3_FUNC_RESET;
3846 		clear_bit(HNAE3_FUNC_RESET, addr);
3847 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3848 		rst_level = HNAE3_FLR_RESET;
3849 		clear_bit(HNAE3_FLR_RESET, addr);
3850 	}
3851 
3852 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3853 	    rst_level < hdev->reset_type)
3854 		return HNAE3_NONE_RESET;
3855 
3856 	return rst_level;
3857 }
3858 
3859 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3860 {
3861 	u32 clearval = 0;
3862 
3863 	switch (hdev->reset_type) {
3864 	case HNAE3_IMP_RESET:
3865 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3866 		break;
3867 	case HNAE3_GLOBAL_RESET:
3868 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3869 		break;
3870 	default:
3871 		break;
3872 	}
3873 
3874 	if (!clearval)
3875 		return;
3876 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3880 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3881 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3882 				clearval);
3883 
3884 	hclge_enable_vector(&hdev->misc_vector, true);
3885 }
3886 
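/* Set or clear the HCLGE_NIC_SW_RST_RDY bit in the CSQ depth register, which
 * is used to handshake the software reset state with the hardware.
 */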
3887 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3888 {
3889 	u32 reg_val;
3890 
3891 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3892 	if (enable)
3893 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3894 	else
3895 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3896 
3897 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3898 }
3899 
3900 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3901 {
3902 	int ret;
3903 
3904 	ret = hclge_set_all_vf_rst(hdev, true);
3905 	if (ret)
3906 		return ret;
3907 
3908 	hclge_func_reset_sync_vf(hdev);
3909 
3910 	return 0;
3911 }
3912 
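/* Per-reset-type preparation before waiting for the hardware: for a function
 * reset, notify and sync the VFs and send the reset trigger command; for FLR,
 * only notify the VFs; for an IMP reset, handle pending IMP errors and set
 * the IMP reset bit in HCLGE_PF_OTHER_INT_REG. Finally the handshake bit is
 * set to report that the preparatory work is done.
 */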
3913 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3914 {
3915 	u32 reg_val;
3916 	int ret = 0;
3917 
3918 	switch (hdev->reset_type) {
3919 	case HNAE3_FUNC_RESET:
3920 		ret = hclge_func_reset_notify_vf(hdev);
3921 		if (ret)
3922 			return ret;
3923 
3924 		ret = hclge_func_reset_cmd(hdev, 0);
3925 		if (ret) {
3926 			dev_err(&hdev->pdev->dev,
3927 				"asserting function reset fail %d!\n", ret);
3928 			return ret;
3929 		}
3930 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to the firmware,
		 * because any mailbox handling or command to the firmware
		 * is only valid after hclge_cmd_init is called.
		 */
3936 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3937 		hdev->rst_stats.pf_rst_cnt++;
3938 		break;
3939 	case HNAE3_FLR_RESET:
3940 		ret = hclge_func_reset_notify_vf(hdev);
3941 		if (ret)
3942 			return ret;
3943 		break;
3944 	case HNAE3_IMP_RESET:
3945 		hclge_handle_imp_error(hdev);
3946 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3947 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3948 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3949 		break;
3950 	default:
3951 		break;
3952 	}
3953 
3954 	/* inform hardware that preparatory work is done */
3955 	msleep(HCLGE_RESET_SYNC_TIME);
3956 	hclge_reset_handshake(hdev, true);
3957 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3958 
3959 	return ret;
3960 }
3961 
3962 static void hclge_show_rst_info(struct hclge_dev *hdev)
3963 {
3964 	char *buf;
3965 
3966 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3967 	if (!buf)
3968 		return;
3969 
3970 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3971 
3972 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3973 
3974 	kfree(buf);
3975 }
3976 
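/* Decide what to do after a failed reset: keep the reset task scheduled while
 * another reset is still pending or the failure count is below
 * MAX_RESET_FAIL_CNT, bail out if a new reset interrupt has arrived, otherwise
 * give up, recover the handshake status and mark the device as reset-failed.
 * Returns true if the reset task should be re-scheduled.
 */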
3977 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3978 {
3979 #define MAX_RESET_FAIL_CNT 5
3980 
3981 	if (hdev->reset_pending) {
3982 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3983 			 hdev->reset_pending);
3984 		return true;
3985 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3986 		   HCLGE_RESET_INT_M) {
3987 		dev_info(&hdev->pdev->dev,
3988 			 "reset failed because new reset interrupt\n");
3989 		hclge_clear_reset_cause(hdev);
3990 		return false;
3991 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3992 		hdev->rst_stats.reset_fail_cnt++;
3993 		set_bit(hdev->reset_type, &hdev->reset_pending);
3994 		dev_info(&hdev->pdev->dev,
3995 			 "re-schedule reset task(%u)\n",
3996 			 hdev->rst_stats.reset_fail_cnt);
3997 		return true;
3998 	}
3999 
4000 	hclge_clear_reset_cause(hdev);
4001 
4002 	/* recover the handshake status when reset fail */
4003 	hclge_reset_handshake(hdev, true);
4004 
4005 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
4006 
4007 	hclge_show_rst_info(hdev);
4008 
4009 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4010 
4011 	return false;
4012 }
4013 
4014 static void hclge_update_reset_level(struct hclge_dev *hdev)
4015 {
4016 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4017 	enum hnae3_reset_type reset_level;
4018 
4019 	/* reset request will not be set during reset, so clear
4020 	 * pending reset request to avoid unnecessary reset
4021 	 * caused by the same reason.
4022 	 */
4023 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4024 
	/* if default_reset_request holds a higher-level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
4029 	reset_level = hclge_get_reset_level(ae_dev,
4030 					    &hdev->default_reset_request);
4031 	if (reset_level != HNAE3_NONE_RESET)
4032 		set_bit(reset_level, &hdev->reset_request);
4033 }
4034 
4035 static int hclge_set_rst_done(struct hclge_dev *hdev)
4036 {
4037 	struct hclge_pf_rst_done_cmd *req;
4038 	struct hclge_desc desc;
4039 	int ret;
4040 
4041 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4042 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4043 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4044 
4045 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4046 	/* To be compatible with the old firmware, which does not support
4047 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4048 	 * return success
4049 	 */
4050 	if (ret == -EOPNOTSUPP) {
4051 		dev_warn(&hdev->pdev->dev,
4052 			 "current firmware does not support command(0x%x)!\n",
4053 			 HCLGE_OPC_PF_RST_DONE);
4054 		return 0;
4055 	} else if (ret) {
4056 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4057 			ret);
4058 	}
4059 
4060 	return ret;
4061 }
4062 
4063 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4064 {
4065 	int ret = 0;
4066 
4067 	switch (hdev->reset_type) {
4068 	case HNAE3_FUNC_RESET:
4069 	case HNAE3_FLR_RESET:
4070 		ret = hclge_set_all_vf_rst(hdev, false);
4071 		break;
4072 	case HNAE3_GLOBAL_RESET:
4073 	case HNAE3_IMP_RESET:
4074 		ret = hclge_set_rst_done(hdev);
4075 		break;
4076 	default:
4077 		break;
4078 	}
4079 
	/* clear the handshake status after re-initialization is done */
4081 	hclge_reset_handshake(hdev, false);
4082 
4083 	return ret;
4084 }
4085 
4086 static int hclge_reset_stack(struct hclge_dev *hdev)
4087 {
4088 	int ret;
4089 
4090 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4091 	if (ret)
4092 		return ret;
4093 
4094 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4095 	if (ret)
4096 		return ret;
4097 
4098 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4099 }
4100 
4101 static int hclge_reset_prepare(struct hclge_dev *hdev)
4102 {
4103 	int ret;
4104 
4105 	hdev->rst_stats.reset_cnt++;
4106 	/* perform reset of the stack & ae device for a client */
4107 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4108 	if (ret)
4109 		return ret;
4110 
4111 	rtnl_lock();
4112 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4113 	rtnl_unlock();
4114 	if (ret)
4115 		return ret;
4116 
4117 	return hclge_reset_prepare_wait(hdev);
4118 }
4119 
4120 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4121 {
4122 	int ret;
4123 
4124 	hdev->rst_stats.hw_reset_done_cnt++;
4125 
4126 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4127 	if (ret)
4128 		return ret;
4129 
4130 	rtnl_lock();
4131 	ret = hclge_reset_stack(hdev);
4132 	rtnl_unlock();
4133 	if (ret)
4134 		return ret;
4135 
4136 	hclge_clear_reset_cause(hdev);
4137 
4138 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* only ignore the RoCE notify error once the reset has already
	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
4142 	if (ret &&
4143 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4144 		return ret;
4145 
4146 	ret = hclge_reset_prepare_up(hdev);
4147 	if (ret)
4148 		return ret;
4149 
4150 	rtnl_lock();
4151 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4152 	rtnl_unlock();
4153 	if (ret)
4154 		return ret;
4155 
4156 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4157 	if (ret)
4158 		return ret;
4159 
4160 	hdev->last_reset_time = jiffies;
4161 	hdev->rst_stats.reset_fail_cnt = 0;
4162 	hdev->rst_stats.reset_done_cnt++;
4163 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4164 
4165 	hclge_update_reset_level(hdev);
4166 
4167 	return 0;
4168 }
4169 
4170 static void hclge_reset(struct hclge_dev *hdev)
4171 {
4172 	if (hclge_reset_prepare(hdev))
4173 		goto err_reset;
4174 
4175 	if (hclge_reset_wait(hdev))
4176 		goto err_reset;
4177 
4178 	if (hclge_reset_rebuild(hdev))
4179 		goto err_reset;
4180 
4181 	return;
4182 
4183 err_reset:
4184 	if (hclge_reset_err_handle(hdev))
4185 		hclge_reset_task_schedule(hdev);
4186 }
4187 
4188 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4189 {
4190 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4191 	struct hclge_dev *hdev = ae_dev->priv;
4192 
	/* We might end up getting called for two reasons:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to restore normal operation is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * Check whether this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog hit
	 * us again. We know it is new if the last reset request did not occur
	 * very recently (watchdog timer = 5*HZ, so check after a sufficiently
	 * long time, say 4*5*HZ). For a new request, reset the "reset level"
	 * to PF reset. If it is a repeat of the most recent request, throttle
	 * it and do not allow it again before HCLGE_RESET_INTERVAL has
	 * elapsed.
	 */
4207 
4208 	if (time_before(jiffies, (hdev->last_reset_time +
4209 				  HCLGE_RESET_INTERVAL))) {
4210 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4211 		return;
4212 	}
4213 
4214 	if (hdev->default_reset_request) {
4215 		hdev->reset_level =
4216 			hclge_get_reset_level(ae_dev,
4217 					      &hdev->default_reset_request);
4218 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4219 		hdev->reset_level = HNAE3_FUNC_RESET;
4220 	}
4221 
4222 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4223 		 hdev->reset_level);
4224 
4225 	/* request reset & schedule reset task */
4226 	set_bit(hdev->reset_level, &hdev->reset_request);
4227 	hclge_reset_task_schedule(hdev);
4228 
4229 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4230 		hdev->reset_level++;
4231 }
4232 
4233 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4234 					enum hnae3_reset_type rst_type)
4235 {
4236 	struct hclge_dev *hdev = ae_dev->priv;
4237 
4238 	set_bit(rst_type, &hdev->default_reset_request);
4239 }
4240 
4241 static void hclge_reset_timer(struct timer_list *t)
4242 {
4243 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4244 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4248 	if (!hdev->default_reset_request)
4249 		return;
4250 
4251 	dev_info(&hdev->pdev->dev,
4252 		 "triggering reset in reset timer\n");
4253 	hclge_reset_event(hdev->pdev, NULL);
4254 }
4255 
4256 static void hclge_reset_subtask(struct hclge_dev *hdev)
4257 {
4258 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4259 
	/* Check if there is any ongoing reset in the hardware; this status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset:
	 *    a. if we can determine within a reasonable time that the
	 *       hardware has fully reset, proceed with the driver and client
	 *       reset;
	 *    b. otherwise, come back later to check this status, so
	 *       re-schedule now.
	 */
4269 	hdev->last_reset_time = jiffies;
4270 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4271 	if (hdev->reset_type != HNAE3_NONE_RESET)
4272 		hclge_reset(hdev);
4273 
4274 	/* check if we got any *new* reset requests to be honored */
4275 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4276 	if (hdev->reset_type != HNAE3_NONE_RESET)
4277 		hclge_do_reset(hdev);
4278 
4279 	hdev->reset_type = HNAE3_NONE_RESET;
4280 }
4281 
4282 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4283 {
4284 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4285 	enum hnae3_reset_type reset_type;
4286 
4287 	if (ae_dev->hw_err_reset_req) {
4288 		reset_type = hclge_get_reset_level(ae_dev,
4289 						   &ae_dev->hw_err_reset_req);
4290 		hclge_set_def_reset_request(ae_dev, reset_type);
4291 	}
4292 
4293 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4294 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4295 
	/* enable the interrupt after error handling is complete */
4297 	hclge_enable_vector(&hdev->misc_vector, true);
4298 }
4299 
4300 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4301 {
4302 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4303 
4304 	ae_dev->hw_err_reset_req = 0;
4305 
4306 	if (hclge_find_error_source(hdev)) {
4307 		hclge_handle_error_info_log(ae_dev);
4308 		hclge_handle_mac_tnl(hdev);
4309 	}
4310 
4311 	hclge_handle_err_reset_request(hdev);
4312 }
4313 
4314 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4315 {
4316 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4317 	struct device *dev = &hdev->pdev->dev;
4318 	u32 msix_sts_reg;
4319 
4320 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4321 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4322 		if (hclge_handle_hw_msix_error
4323 				(hdev, &hdev->default_reset_request))
4324 			dev_info(dev, "received msix interrupt 0x%x\n",
4325 				 msix_sts_reg);
4326 	}
4327 
4328 	hclge_handle_hw_ras_error(ae_dev);
4329 
4330 	hclge_handle_err_reset_request(hdev);
4331 }
4332 
4333 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4334 {
4335 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4336 		return;
4337 
4338 	if (hnae3_dev_ras_imp_supported(hdev))
4339 		hclge_handle_err_recovery(hdev);
4340 	else
4341 		hclge_misc_err_recovery(hdev);
4342 }
4343 
4344 static void hclge_reset_service_task(struct hclge_dev *hdev)
4345 {
4346 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4347 		return;
4348 
4349 	down(&hdev->reset_sem);
4350 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4351 
4352 	hclge_reset_subtask(hdev);
4353 
4354 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4355 	up(&hdev->reset_sem);
4356 }
4357 
4358 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4359 {
4360 	int i;
4361 
	/* start from vport 1, since the PF (vport 0) is always alive */
4363 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4364 		struct hclge_vport *vport = &hdev->vport[i];
4365 
4366 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4367 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4368 
		/* if the VF is not alive, set its MPS to the default value */
4370 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4371 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4372 	}
4373 }
4374 
4375 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4376 {
4377 	unsigned long delta = round_jiffies_relative(HZ);
4378 
4379 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4380 		return;
4381 
	/* Always handle the link update to make sure the link state is
4383 	 * updated when it is triggered by mbx.
4384 	 */
4385 	hclge_update_link_status(hdev);
4386 	hclge_sync_mac_table(hdev);
4387 	hclge_sync_promisc_mode(hdev);
4388 	hclge_sync_fd_table(hdev);
4389 
4390 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4391 		delta = jiffies - hdev->last_serv_processed;
4392 
4393 		if (delta < round_jiffies_relative(HZ)) {
4394 			delta = round_jiffies_relative(HZ) - delta;
4395 			goto out;
4396 		}
4397 	}
4398 
4399 	hdev->serv_processed_cnt++;
4400 	hclge_update_vport_alive(hdev);
4401 
4402 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4403 		hdev->last_serv_processed = jiffies;
4404 		goto out;
4405 	}
4406 
4407 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4408 		hclge_update_stats_for_all(hdev);
4409 
4410 	hclge_update_port_info(hdev);
4411 	hclge_sync_vlan_filter(hdev);
4412 
4413 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4414 		hclge_rfs_filter_expire(hdev);
4415 
4416 	hdev->last_serv_processed = jiffies;
4417 
4418 out:
4419 	hclge_task_schedule(hdev, delta);
4420 }
4421 
4422 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4423 {
4424 	unsigned long flags;
4425 
4426 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4427 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4428 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4429 		return;
4430 
	/* to prevent racing with the irq handler */
4432 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4433 
4434 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4435 	 * handler may handle it just before spin_lock_irqsave().
4436 	 */
4437 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4438 		hclge_ptp_clean_tx_hwts(hdev);
4439 
4440 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4441 }
4442 
4443 static void hclge_service_task(struct work_struct *work)
4444 {
4445 	struct hclge_dev *hdev =
4446 		container_of(work, struct hclge_dev, service_task.work);
4447 
4448 	hclge_errhand_service_task(hdev);
4449 	hclge_reset_service_task(hdev);
4450 	hclge_ptp_service_task(hdev);
4451 	hclge_mailbox_service_task(hdev);
4452 	hclge_periodic_service_task(hdev);
4453 
	/* Handle error recovery, reset and mbx again in case the periodic
	 * task delays the handling by calling hclge_task_schedule() in
4456 	 * hclge_periodic_service_task().
4457 	 */
4458 	hclge_errhand_service_task(hdev);
4459 	hclge_reset_service_task(hdev);
4460 	hclge_mailbox_service_task(hdev);
4461 }
4462 
4463 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4464 {
4465 	/* VF handle has no client */
4466 	if (!handle->client)
4467 		return container_of(handle, struct hclge_vport, nic);
4468 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4469 		return container_of(handle, struct hclge_vport, roce);
4470 	else
4471 		return container_of(handle, struct hclge_vport, nic);
4472 }
4473 
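/* Fill in the IRQ number and IO address for MSI-X vector @idx and record the
 * vector as used by vport 0. Vectors beyond HCLGE_PF_MAX_VECTOR_NUM_DEV_V2
 * are addressed through the extended register space.
 */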
4474 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4475 				  struct hnae3_vector_info *vector_info)
4476 {
4477 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4478 
4479 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4480 
	/* an extended offset is needed to configure vectors >= 64 */
4482 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4483 		vector_info->io_addr = hdev->hw.io_base +
4484 				HCLGE_VECTOR_REG_BASE +
4485 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4486 	else
4487 		vector_info->io_addr = hdev->hw.io_base +
4488 				HCLGE_VECTOR_EXT_REG_BASE +
4489 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4490 				HCLGE_VECTOR_REG_OFFSET_H +
4491 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4492 				HCLGE_VECTOR_REG_OFFSET;
4493 
4494 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4495 	hdev->vector_irq[idx] = vector_info->vector;
4496 }
4497 
4498 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4499 			    struct hnae3_vector_info *vector_info)
4500 {
4501 	struct hclge_vport *vport = hclge_get_vport(handle);
4502 	struct hnae3_vector_info *vector = vector_info;
4503 	struct hclge_dev *hdev = vport->back;
4504 	int alloc = 0;
4505 	u16 i = 0;
4506 	u16 j;
4507 
4508 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4509 	vector_num = min(hdev->num_msi_left, vector_num);
4510 
4511 	for (j = 0; j < vector_num; j++) {
4512 		while (++i < hdev->num_nic_msi) {
4513 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4514 				hclge_get_vector_info(hdev, i, vector);
4515 				vector++;
4516 				alloc++;
4517 
4518 				break;
4519 			}
4520 		}
4521 	}
4522 	hdev->num_msi_left -= alloc;
4523 	hdev->num_msi_used += alloc;
4524 
4525 	return alloc;
4526 }
4527 
4528 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4529 {
4530 	int i;
4531 
4532 	for (i = 0; i < hdev->num_msi; i++)
4533 		if (vector == hdev->vector_irq[i])
4534 			return i;
4535 
4536 	return -EINVAL;
4537 }
4538 
4539 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4540 {
4541 	struct hclge_vport *vport = hclge_get_vport(handle);
4542 	struct hclge_dev *hdev = vport->back;
4543 	int vector_id;
4544 
4545 	vector_id = hclge_get_vector_index(hdev, vector);
4546 	if (vector_id < 0) {
4547 		dev_err(&hdev->pdev->dev,
4548 			"Get vector index fail. vector = %d\n", vector);
4549 		return vector_id;
4550 	}
4551 
4552 	hclge_free_vector(hdev, vector_id);
4553 
4554 	return 0;
4555 }
4556 
4557 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4558 {
4559 	return HCLGE_RSS_KEY_SIZE;
4560 }
4561 
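/* Program the RSS hash algorithm and hash key. The key is written in chunks
 * of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk, with the
 * chunk offset carried in the hash_config field.
 */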
4562 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4563 				  const u8 hfunc, const u8 *key)
4564 {
4565 	struct hclge_rss_config_cmd *req;
4566 	unsigned int key_offset = 0;
4567 	struct hclge_desc desc;
4568 	int key_counts;
4569 	int key_size;
4570 	int ret;
4571 
4572 	key_counts = HCLGE_RSS_KEY_SIZE;
4573 	req = (struct hclge_rss_config_cmd *)desc.data;
4574 
4575 	while (key_counts) {
4576 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4577 					   false);
4578 
4579 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4580 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4581 
4582 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4583 		memcpy(req->hash_key,
4584 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4585 
4586 		key_counts -= key_size;
4587 		key_offset++;
4588 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4589 		if (ret) {
4590 			dev_err(&hdev->pdev->dev,
4591 				"Configure RSS config fail, status = %d\n",
4592 				ret);
4593 			return ret;
4594 		}
4595 	}
4596 	return 0;
4597 }
4598 
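/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE entries
 * per command descriptor. Each queue id is split into a low byte in rss_qid_l
 * and a most-significant bit packed into rss_qid_h.
 */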
4599 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4600 {
4601 	struct hclge_rss_indirection_table_cmd *req;
4602 	struct hclge_desc desc;
4603 	int rss_cfg_tbl_num;
4604 	u8 rss_msb_oft;
4605 	u8 rss_msb_val;
4606 	int ret;
4607 	u16 qid;
4608 	int i;
4609 	u32 j;
4610 
4611 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4612 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4613 			  HCLGE_RSS_CFG_TBL_SIZE;
4614 
4615 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4616 		hclge_cmd_setup_basic_desc
4617 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4618 
4619 		req->start_table_index =
4620 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4621 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4622 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4623 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4624 			req->rss_qid_l[j] = qid & 0xff;
4625 			rss_msb_oft =
4626 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4627 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4628 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4629 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4630 		}
4631 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4632 		if (ret) {
4633 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4635 				ret);
4636 			return ret;
4637 		}
4638 	}
4639 	return 0;
4640 }
4641 
4642 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4643 				 u16 *tc_size, u16 *tc_offset)
4644 {
4645 	struct hclge_rss_tc_mode_cmd *req;
4646 	struct hclge_desc desc;
4647 	int ret;
4648 	int i;
4649 
4650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4651 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4652 
4653 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4654 		u16 mode = 0;
4655 
4656 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4657 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4658 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4659 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4660 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4661 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4662 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4663 
4664 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4665 	}
4666 
4667 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4668 	if (ret)
4669 		dev_err(&hdev->pdev->dev,
4670 			"Configure rss tc mode fail, status = %d\n", ret);
4671 
4672 	return ret;
4673 }
4674 
4675 static void hclge_get_rss_type(struct hclge_vport *vport)
4676 {
4677 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4678 	    vport->rss_tuple_sets.ipv4_udp_en ||
4679 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4680 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4681 	    vport->rss_tuple_sets.ipv6_udp_en ||
4682 	    vport->rss_tuple_sets.ipv6_sctp_en)
4683 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4684 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4685 		 vport->rss_tuple_sets.ipv6_fragment_en)
4686 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4687 	else
4688 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4689 }
4690 
4691 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4692 {
4693 	struct hclge_rss_input_tuple_cmd *req;
4694 	struct hclge_desc desc;
4695 	int ret;
4696 
4697 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4698 
4699 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4700 
4701 	/* Get the tuple cfg from pf */
4702 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4703 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4704 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4705 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4706 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4707 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4708 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4709 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4710 	hclge_get_rss_type(&hdev->vport[0]);
4711 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4712 	if (ret)
4713 		dev_err(&hdev->pdev->dev,
4714 			"Configure rss input fail, status = %d\n", ret);
4715 	return ret;
4716 }
4717 
4718 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4719 			 u8 *key, u8 *hfunc)
4720 {
4721 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4722 	struct hclge_vport *vport = hclge_get_vport(handle);
4723 	int i;
4724 
4725 	/* Get hash algorithm */
4726 	if (hfunc) {
4727 		switch (vport->rss_algo) {
4728 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4729 			*hfunc = ETH_RSS_HASH_TOP;
4730 			break;
4731 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4732 			*hfunc = ETH_RSS_HASH_XOR;
4733 			break;
4734 		default:
4735 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4736 			break;
4737 		}
4738 	}
4739 
	/* Get the RSS key requested by the user */
4741 	if (key)
4742 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4743 
4744 	/* Get indirect table */
4745 	if (indir)
4746 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4747 			indir[i] =  vport->rss_indirection_tbl[i];
4748 
4749 	return 0;
4750 }
4751 
4752 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4753 				 u8 *hash_algo)
4754 {
4755 	switch (hfunc) {
4756 	case ETH_RSS_HASH_TOP:
4757 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758 		return 0;
4759 	case ETH_RSS_HASH_XOR:
4760 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4761 		return 0;
4762 	case ETH_RSS_HASH_NO_CHANGE:
4763 		*hash_algo = vport->rss_algo;
4764 		return 0;
4765 	default:
4766 		return -EINVAL;
4767 	}
4768 }
4769 
4770 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4771 			 const  u8 *key, const  u8 hfunc)
4772 {
4773 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4774 	struct hclge_vport *vport = hclge_get_vport(handle);
4775 	struct hclge_dev *hdev = vport->back;
4776 	u8 hash_algo;
4777 	int ret, i;
4778 
4779 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4780 	if (ret) {
4781 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4782 		return ret;
4783 	}
4784 
	/* Set the RSS hash key if specified by the user */
4786 	if (key) {
4787 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4788 		if (ret)
4789 			return ret;
4790 
		/* Update the shadow RSS key with the user specified key */
4792 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4793 	} else {
4794 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4795 					     vport->rss_hash_key);
4796 		if (ret)
4797 			return ret;
4798 	}
4799 	vport->rss_algo = hash_algo;
4800 
4801 	/* Update the shadow RSS table with user specified qids */
4802 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4803 		vport->rss_indirection_tbl[i] = indir[i];
4804 
4805 	/* Update the hardware */
4806 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4807 }
4808 
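/* Translate the ethtool RXH_* flags in @nfc into the driver's tuple bits
 * (source/destination IP and L4 port). For SCTP flows the verification tag
 * bit is set as well.
 */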
4809 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4810 {
4811 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4812 
4813 	if (nfc->data & RXH_L4_B_2_3)
4814 		hash_sets |= HCLGE_D_PORT_BIT;
4815 	else
4816 		hash_sets &= ~HCLGE_D_PORT_BIT;
4817 
4818 	if (nfc->data & RXH_IP_SRC)
4819 		hash_sets |= HCLGE_S_IP_BIT;
4820 	else
4821 		hash_sets &= ~HCLGE_S_IP_BIT;
4822 
4823 	if (nfc->data & RXH_IP_DST)
4824 		hash_sets |= HCLGE_D_IP_BIT;
4825 	else
4826 		hash_sets &= ~HCLGE_D_IP_BIT;
4827 
4828 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4829 		hash_sets |= HCLGE_V_TAG_BIT;
4830 
4831 	return hash_sets;
4832 }
4833 
4834 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4835 				    struct ethtool_rxnfc *nfc,
4836 				    struct hclge_rss_input_tuple_cmd *req)
4837 {
4838 	struct hclge_dev *hdev = vport->back;
4839 	u8 tuple_sets;
4840 
4841 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4842 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4843 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4844 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4845 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4846 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4847 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4848 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4849 
4850 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4851 	switch (nfc->flow_type) {
4852 	case TCP_V4_FLOW:
4853 		req->ipv4_tcp_en = tuple_sets;
4854 		break;
4855 	case TCP_V6_FLOW:
4856 		req->ipv6_tcp_en = tuple_sets;
4857 		break;
4858 	case UDP_V4_FLOW:
4859 		req->ipv4_udp_en = tuple_sets;
4860 		break;
4861 	case UDP_V6_FLOW:
4862 		req->ipv6_udp_en = tuple_sets;
4863 		break;
4864 	case SCTP_V4_FLOW:
4865 		req->ipv4_sctp_en = tuple_sets;
4866 		break;
4867 	case SCTP_V6_FLOW:
4868 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4869 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4870 			return -EINVAL;
4871 
4872 		req->ipv6_sctp_en = tuple_sets;
4873 		break;
4874 	case IPV4_FLOW:
4875 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4876 		break;
4877 	case IPV6_FLOW:
4878 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4879 		break;
4880 	default:
4881 		return -EINVAL;
4882 	}
4883 
4884 	return 0;
4885 }
4886 
4887 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4888 			       struct ethtool_rxnfc *nfc)
4889 {
4890 	struct hclge_vport *vport = hclge_get_vport(handle);
4891 	struct hclge_dev *hdev = vport->back;
4892 	struct hclge_rss_input_tuple_cmd *req;
4893 	struct hclge_desc desc;
4894 	int ret;
4895 
4896 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4897 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4898 		return -EINVAL;
4899 
4900 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4901 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4902 
4903 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4904 	if (ret) {
4905 		dev_err(&hdev->pdev->dev,
4906 			"failed to init rss tuple cmd, ret = %d\n", ret);
4907 		return ret;
4908 	}
4909 
4910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4911 	if (ret) {
4912 		dev_err(&hdev->pdev->dev,
4913 			"Set rss tuple fail, status = %d\n", ret);
4914 		return ret;
4915 	}
4916 
4917 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4918 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4919 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4920 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4921 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4922 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4923 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4924 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4925 	hclge_get_rss_type(vport);
4926 	return 0;
4927 }
4928 
4929 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4930 				     u8 *tuple_sets)
4931 {
4932 	switch (flow_type) {
4933 	case TCP_V4_FLOW:
4934 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4935 		break;
4936 	case UDP_V4_FLOW:
4937 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4938 		break;
4939 	case TCP_V6_FLOW:
4940 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4941 		break;
4942 	case UDP_V6_FLOW:
4943 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4944 		break;
4945 	case SCTP_V4_FLOW:
4946 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4947 		break;
4948 	case SCTP_V6_FLOW:
4949 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4950 		break;
4951 	case IPV4_FLOW:
4952 	case IPV6_FLOW:
4953 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4954 		break;
4955 	default:
4956 		return -EINVAL;
4957 	}
4958 
4959 	return 0;
4960 }
4961 
4962 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4963 {
4964 	u64 tuple_data = 0;
4965 
4966 	if (tuple_sets & HCLGE_D_PORT_BIT)
4967 		tuple_data |= RXH_L4_B_2_3;
4968 	if (tuple_sets & HCLGE_S_PORT_BIT)
4969 		tuple_data |= RXH_L4_B_0_1;
4970 	if (tuple_sets & HCLGE_D_IP_BIT)
4971 		tuple_data |= RXH_IP_DST;
4972 	if (tuple_sets & HCLGE_S_IP_BIT)
4973 		tuple_data |= RXH_IP_SRC;
4974 
4975 	return tuple_data;
4976 }
4977 
4978 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4979 			       struct ethtool_rxnfc *nfc)
4980 {
4981 	struct hclge_vport *vport = hclge_get_vport(handle);
4982 	u8 tuple_sets;
4983 	int ret;
4984 
4985 	nfc->data = 0;
4986 
4987 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4988 	if (ret || !tuple_sets)
4989 		return ret;
4990 
4991 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4992 
4993 	return 0;
4994 }
4995 
4996 static int hclge_get_tc_size(struct hnae3_handle *handle)
4997 {
4998 	struct hclge_vport *vport = hclge_get_vport(handle);
4999 	struct hclge_dev *hdev = vport->back;
5000 
5001 	return hdev->pf_rss_size_max;
5002 }
5003 
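/* Configure the per-TC RSS mode: for every TC enabled in hw_tc_map the queue
 * count is rounded up to a power of two (its log2 is what gets programmed)
 * along with the TC's queue offset. A TC whose rss_size is zero or larger
 * than the indirection table size is rejected.
 */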
5004 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5005 {
5006 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5007 	struct hclge_vport *vport = hdev->vport;
5008 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5009 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5010 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5011 	struct hnae3_tc_info *tc_info;
5012 	u16 roundup_size;
5013 	u16 rss_size;
5014 	int i;
5015 
5016 	tc_info = &vport->nic.kinfo.tc_info;
5017 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5018 		rss_size = tc_info->tqp_count[i];
5019 		tc_valid[i] = 0;
5020 
5021 		if (!(hdev->hw_tc_map & BIT(i)))
5022 			continue;
5023 
		/* The tc_size set to hardware is the log2 of the roundup
		 * power of two of rss_size; the actual queue size is limited
		 * by the indirection table.
		 */
5028 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5029 		    rss_size == 0) {
5030 			dev_err(&hdev->pdev->dev,
5031 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
5032 				rss_size);
5033 			return -EINVAL;
5034 		}
5035 
5036 		roundup_size = roundup_pow_of_two(rss_size);
5037 		roundup_size = ilog2(roundup_size);
5038 
5039 		tc_valid[i] = 1;
5040 		tc_size[i] = roundup_size;
5041 		tc_offset[i] = tc_info->tqp_offset[i];
5042 	}
5043 
5044 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5045 }
5046 
5047 int hclge_rss_init_hw(struct hclge_dev *hdev)
5048 {
5049 	struct hclge_vport *vport = hdev->vport;
5050 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5051 	u8 *key = vport[0].rss_hash_key;
5052 	u8 hfunc = vport[0].rss_algo;
5053 	int ret;
5054 
5055 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5056 	if (ret)
5057 		return ret;
5058 
5059 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5060 	if (ret)
5061 		return ret;
5062 
5063 	ret = hclge_set_rss_input_tuple(hdev);
5064 	if (ret)
5065 		return ret;
5066 
5067 	return hclge_init_rss_tc_mode(hdev);
5068 }
5069 
5070 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5071 {
5072 	struct hclge_vport *vport = &hdev->vport[0];
5073 	int i;
5074 
5075 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5076 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5077 }
5078 
5079 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5080 {
5081 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5082 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5083 	struct hclge_vport *vport = &hdev->vport[0];
5084 	u16 *rss_ind_tbl;
5085 
5086 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5087 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5088 
5089 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5090 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5091 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5092 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5093 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5094 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5095 	vport->rss_tuple_sets.ipv6_sctp_en =
5096 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5097 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5098 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5099 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5100 
5101 	vport->rss_algo = rss_algo;
5102 
5103 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5104 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5105 	if (!rss_ind_tbl)
5106 		return -ENOMEM;
5107 
5108 	vport->rss_indirection_tbl = rss_ind_tbl;
5109 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5110 
5111 	hclge_rss_indir_init_cfg(hdev);
5112 
5113 	return 0;
5114 }
5115 
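/* Map (en == true) or unmap (en == false) the TQP rings in @ring_chain to the
 * given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are packed into one
 * command descriptor; longer chains are split across multiple commands.
 */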
5116 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5117 				int vector_id, bool en,
5118 				struct hnae3_ring_chain_node *ring_chain)
5119 {
5120 	struct hclge_dev *hdev = vport->back;
5121 	struct hnae3_ring_chain_node *node;
5122 	struct hclge_desc desc;
5123 	struct hclge_ctrl_vector_chain_cmd *req =
5124 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5125 	enum hclge_cmd_status status;
5126 	enum hclge_opcode_type op;
5127 	u16 tqp_type_and_id;
5128 	int i;
5129 
5130 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5131 	hclge_cmd_setup_basic_desc(&desc, op, false);
5132 	req->int_vector_id_l = hnae3_get_field(vector_id,
5133 					       HCLGE_VECTOR_ID_L_M,
5134 					       HCLGE_VECTOR_ID_L_S);
5135 	req->int_vector_id_h = hnae3_get_field(vector_id,
5136 					       HCLGE_VECTOR_ID_H_M,
5137 					       HCLGE_VECTOR_ID_H_S);
5138 
5139 	i = 0;
5140 	for (node = ring_chain; node; node = node->next) {
5141 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5142 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5143 				HCLGE_INT_TYPE_S,
5144 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5145 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5146 				HCLGE_TQP_ID_S, node->tqp_index);
5147 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5148 				HCLGE_INT_GL_IDX_S,
5149 				hnae3_get_field(node->int_gl_idx,
5150 						HNAE3_RING_GL_IDX_M,
5151 						HNAE3_RING_GL_IDX_S));
5152 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5153 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5154 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5155 			req->vfid = vport->vport_id;
5156 
5157 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5158 			if (status) {
5159 				dev_err(&hdev->pdev->dev,
5160 					"Map TQP fail, status is %d.\n",
5161 					status);
5162 				return -EIO;
5163 			}
5164 			i = 0;
5165 
5166 			hclge_cmd_setup_basic_desc(&desc,
5167 						   op,
5168 						   false);
5169 			req->int_vector_id_l =
5170 				hnae3_get_field(vector_id,
5171 						HCLGE_VECTOR_ID_L_M,
5172 						HCLGE_VECTOR_ID_L_S);
5173 			req->int_vector_id_h =
5174 				hnae3_get_field(vector_id,
5175 						HCLGE_VECTOR_ID_H_M,
5176 						HCLGE_VECTOR_ID_H_S);
5177 		}
5178 	}
5179 
5180 	if (i > 0) {
5181 		req->int_cause_num = i;
5182 		req->vfid = vport->vport_id;
5183 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5184 		if (status) {
5185 			dev_err(&hdev->pdev->dev,
5186 				"Map TQP fail, status is %d.\n", status);
5187 			return -EIO;
5188 		}
5189 	}
5190 
5191 	return 0;
5192 }
5193 
5194 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5195 				    struct hnae3_ring_chain_node *ring_chain)
5196 {
5197 	struct hclge_vport *vport = hclge_get_vport(handle);
5198 	struct hclge_dev *hdev = vport->back;
5199 	int vector_id;
5200 
5201 	vector_id = hclge_get_vector_index(hdev, vector);
5202 	if (vector_id < 0) {
5203 		dev_err(&hdev->pdev->dev,
5204 			"failed to get vector index. vector=%d\n", vector);
5205 		return vector_id;
5206 	}
5207 
5208 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5209 }
5210 
5211 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5212 				       struct hnae3_ring_chain_node *ring_chain)
5213 {
5214 	struct hclge_vport *vport = hclge_get_vport(handle);
5215 	struct hclge_dev *hdev = vport->back;
5216 	int vector_id, ret;
5217 
5218 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5219 		return 0;
5220 
5221 	vector_id = hclge_get_vector_index(hdev, vector);
5222 	if (vector_id < 0) {
5223 		dev_err(&handle->pdev->dev,
5224 			"Get vector index fail. ret = %d\n", vector_id);
5225 		return vector_id;
5226 	}
5227 
5228 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5229 	if (ret)
5230 		dev_err(&handle->pdev->dev,
5231 			"Unmap ring from vector fail. vector_id=%d, ret = %d\n",
5232 			vector_id, ret);
5233 
5234 	return ret;
5235 }
5236 
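/* Configure promiscuous mode for one vport (PF or VF). The extend_promisc
 * field carries the per-direction RX/TX enable bits used by newer firmware,
 * while the legacy promisc field is filled as well so the command stays
 * compatible with DEVICE_VERSION_V1/2. When the HNAE3_PFLAG_LIMIT_PROMISC
 * private flag is set, unicast TX promisc is left disabled.
 */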
5237 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5238 				      bool en_uc, bool en_mc, bool en_bc)
5239 {
5240 	struct hclge_vport *vport = &hdev->vport[vf_id];
5241 	struct hnae3_handle *handle = &vport->nic;
5242 	struct hclge_promisc_cfg_cmd *req;
5243 	struct hclge_desc desc;
5244 	bool uc_tx_en = en_uc;
5245 	u8 promisc_cfg = 0;
5246 	int ret;
5247 
5248 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5249 
5250 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5251 	req->vf_id = vf_id;
5252 
5253 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5254 		uc_tx_en = false;
5255 
5256 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5257 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5258 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5259 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5260 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5261 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5262 	req->extend_promisc = promisc_cfg;
5263 
5264 	/* to be compatible with DEVICE_VERSION_V1/2 */
5265 	promisc_cfg = 0;
5266 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5267 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5268 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5269 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5270 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5271 	req->promisc = promisc_cfg;
5272 
5273 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5274 	if (ret)
5275 		dev_err(&hdev->pdev->dev,
5276 			"failed to set vport %u promisc mode, ret = %d.\n",
5277 			vf_id, ret);
5278 
5279 	return ret;
5280 }
5281 
5282 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5283 				 bool en_mc_pmc, bool en_bc_pmc)
5284 {
5285 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5286 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5287 }
5288 
5289 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5290 				  bool en_mc_pmc)
5291 {
5292 	struct hclge_vport *vport = hclge_get_vport(handle);
5293 	struct hclge_dev *hdev = vport->back;
5294 	bool en_bc_pmc = true;
5295 
5296 	/* For devices whose version is below V2, the vlan filter is always
5297 	 * bypassed when broadcast promisc is enabled. So broadcast promisc
5298 	 * should stay disabled until the user enables promisc mode.
5299 	 */
5300 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5301 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5302 
5303 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5304 					    en_bc_pmc);
5305 }
5306 
5307 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5308 {
5309 	struct hclge_vport *vport = hclge_get_vport(handle);
5310 
5311 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5312 }
5313 
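/* Once the last rule is removed from fd_rule_list, fall back to
 * HCLGE_FD_RULE_NONE so that a different rule type (ethtool, aRFS or
 * tc flower) may become active later.
 */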
5314 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5315 {
5316 	if (hlist_empty(&hdev->fd_rule_list))
5317 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5318 }
5319 
5320 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5321 {
5322 	if (!test_bit(location, hdev->fd_bmap)) {
5323 		set_bit(location, hdev->fd_bmap);
5324 		hdev->hclge_fd_rule_num++;
5325 	}
5326 }
5327 
5328 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5329 {
5330 	if (test_bit(location, hdev->fd_bmap)) {
5331 		clear_bit(location, hdev->fd_bmap);
5332 		hdev->hclge_fd_rule_num--;
5333 	}
5334 }
5335 
5336 static void hclge_fd_free_node(struct hclge_dev *hdev,
5337 			       struct hclge_fd_rule *rule)
5338 {
5339 	hlist_del(&rule->rule_node);
5340 	kfree(rule);
5341 	hclge_sync_fd_state(hdev);
5342 }
5343 
5344 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5345 				      struct hclge_fd_rule *old_rule,
5346 				      struct hclge_fd_rule *new_rule,
5347 				      enum HCLGE_FD_NODE_STATE state)
5348 {
5349 	switch (state) {
5350 	case HCLGE_FD_TO_ADD:
5351 	case HCLGE_FD_ACTIVE:
5352 		/* 1) if the new state is TO_ADD, just replace the old rule
5353 		 * with the same location, no matter its state, because the
5354 		 * new rule will be configured to the hardware.
5355 		 * 2) if the new state is ACTIVE, it means the new rule
5356 		 * has been configured to the hardware, so just replace
5357 		 * the old rule node with the same location.
5358 		 * 3) neither case adds a new node to the list, so there is
5359 		 * no need to update the rule number or fd_bmap.
5360 		 */
5361 		new_rule->rule_node.next = old_rule->rule_node.next;
5362 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5363 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5364 		kfree(new_rule);
5365 		break;
5366 	case HCLGE_FD_DELETED:
5367 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5368 		hclge_fd_free_node(hdev, old_rule);
5369 		break;
5370 	case HCLGE_FD_TO_DEL:
5371 		/* if the new request is TO_DEL and the old rule exists:
5372 		 * 1) if the state of the old rule is TO_DEL, do nothing,
5373 		 * because the rule is deleted by location and the rest of
5374 		 * the rule content is irrelevant.
5375 		 * 2) if the state of the old rule is ACTIVE, change its
5376 		 * state to TO_DEL, so the rule will be deleted when the
5377 		 * periodic task is scheduled.
5378 		 * 3) if the state of the old rule is TO_ADD, the rule hasn't
5379 		 * been added to the hardware yet, so just delete the rule
5380 		 * node from fd_rule_list directly.
5381 		 */
5382 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5383 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5384 			hclge_fd_free_node(hdev, old_rule);
5385 			return;
5386 		}
5387 		old_rule->state = HCLGE_FD_TO_DEL;
5388 		break;
5389 	}
5390 }
5391 
5392 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5393 						u16 location,
5394 						struct hclge_fd_rule **parent)
5395 {
5396 	struct hclge_fd_rule *rule;
5397 	struct hlist_node *node;
5398 
5399 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5400 		if (rule->location == location)
5401 			return rule;
5402 		else if (rule->location > location)
5403 			return NULL;
5404 		/* record the parent node, used to keep the nodes in
5405 		 * fd_rule_list in ascending order.
5406 		 */
5407 		*parent = rule;
5408 	}
5409 
5410 	return NULL;
5411 }
5412 
5413 /* insert fd rule node in ascending order according to rule->location */
5414 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5415 				      struct hclge_fd_rule *rule,
5416 				      struct hclge_fd_rule *parent)
5417 {
5418 	INIT_HLIST_NODE(&rule->rule_node);
5419 
5420 	if (parent)
5421 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5422 	else
5423 		hlist_add_head(&rule->rule_node, hlist);
5424 }
5425 
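/* Write the user-def tuple configuration to the firmware: for each of the
 * L2/L3/L4 layers, an enable bit (set while ref_cnt > 0) and the byte
 * offset to match at. cfg[0..2] map to the ol2/ol3/ol4 fields of the
 * command in that order.
 */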
5426 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5427 				     struct hclge_fd_user_def_cfg *cfg)
5428 {
5429 	struct hclge_fd_user_def_cfg_cmd *req;
5430 	struct hclge_desc desc;
5431 	u16 data = 0;
5432 	int ret;
5433 
5434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5435 
5436 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5437 
5438 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5439 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5441 	req->ol2_cfg = cpu_to_le16(data);
5442 
5443 	data = 0;
5444 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5445 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5447 	req->ol3_cfg = cpu_to_le16(data);
5448 
5449 	data = 0;
5450 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5451 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5452 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5453 	req->ol4_cfg = cpu_to_le16(data);
5454 
5455 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5456 	if (ret)
5457 		dev_err(&hdev->pdev->dev,
5458 			"failed to set fd user def data, ret = %d\n", ret);
5459 	return ret;
5460 }
5461 
5462 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5463 {
5464 	int ret;
5465 
5466 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5467 		return;
5468 
5469 	if (!locked)
5470 		spin_lock_bh(&hdev->fd_rule_lock);
5471 
5472 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5473 	if (ret)
5474 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5475 
5476 	if (!locked)
5477 		spin_unlock_bh(&hdev->fd_rule_lock);
5478 }
5479 
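/* Each layer supports only one user-def offset at a time. Reject the new
 * rule if its offset differs from an offset that is still referenced by
 * other rules on the same layer.
 */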
5480 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5481 					  struct hclge_fd_rule *rule)
5482 {
5483 	struct hlist_head *hlist = &hdev->fd_rule_list;
5484 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5485 	struct hclge_fd_user_def_info *info, *old_info;
5486 	struct hclge_fd_user_def_cfg *cfg;
5487 
5488 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5489 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5490 		return 0;
5491 
5492 	/* valid layers start from 1, so subtract 1 to get the cfg index */
5493 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5494 	info = &rule->ep.user_def;
5495 
5496 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5497 		return 0;
5498 
5499 	if (cfg->ref_cnt > 1)
5500 		goto error;
5501 
5502 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5503 	if (fd_rule) {
5504 		old_info = &fd_rule->ep.user_def;
5505 		if (info->layer == old_info->layer)
5506 			return 0;
5507 	}
5508 
5509 error:
5510 	dev_err(&hdev->pdev->dev,
5511 		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5512 		info->layer + 1);
5513 	return -ENOSPC;
5514 }
5515 
5516 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5517 					 struct hclge_fd_rule *rule)
5518 {
5519 	struct hclge_fd_user_def_cfg *cfg;
5520 
5521 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5522 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5523 		return;
5524 
5525 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5526 	if (!cfg->ref_cnt) {
5527 		cfg->offset = rule->ep.user_def.offset;
5528 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5529 	}
5530 	cfg->ref_cnt++;
5531 }
5532 
5533 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5534 					 struct hclge_fd_rule *rule)
5535 {
5536 	struct hclge_fd_user_def_cfg *cfg;
5537 
5538 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5539 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5540 		return;
5541 
5542 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5543 	if (!cfg->ref_cnt)
5544 		return;
5545 
5546 	cfg->ref_cnt--;
5547 	if (!cfg->ref_cnt) {
5548 		cfg->offset = 0;
5549 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5550 	}
5551 }
5552 
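/* Update fd_rule_list for the given location: replace or remove an existing
 * node, or insert a new one in ascending order, keeping the user-def
 * refcounts, the rule counter and fd_bmap in sync. Called with fd_rule_lock
 * held.
 */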
5553 static void hclge_update_fd_list(struct hclge_dev *hdev,
5554 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5555 				 struct hclge_fd_rule *new_rule)
5556 {
5557 	struct hlist_head *hlist = &hdev->fd_rule_list;
5558 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5559 
5560 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5561 	if (fd_rule) {
5562 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5563 		if (state == HCLGE_FD_ACTIVE)
5564 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5565 		hclge_sync_fd_user_def_cfg(hdev, true);
5566 
5567 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5568 		return;
5569 	}
5570 
5571 	/* it's unlikely to fail here, because we have checked that the
5572 	 * rule exists before.
5573 	 */
5574 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5575 		dev_warn(&hdev->pdev->dev,
5576 			 "failed to delete fd rule %u, it does not exist\n",
5577 			 location);
5578 		return;
5579 	}
5580 
5581 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5582 	hclge_sync_fd_user_def_cfg(hdev, true);
5583 
5584 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5585 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5586 
5587 	if (state == HCLGE_FD_TO_ADD) {
5588 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5589 		hclge_task_schedule(hdev, 0);
5590 	}
5591 }
5592 
5593 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5594 {
5595 	struct hclge_get_fd_mode_cmd *req;
5596 	struct hclge_desc desc;
5597 	int ret;
5598 
5599 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5600 
5601 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5602 
5603 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5604 	if (ret) {
5605 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5606 		return ret;
5607 	}
5608 
5609 	*fd_mode = req->mode;
5610 
5611 	return ret;
5612 }
5613 
5614 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5615 				   u32 *stage1_entry_num,
5616 				   u32 *stage2_entry_num,
5617 				   u16 *stage1_counter_num,
5618 				   u16 *stage2_counter_num)
5619 {
5620 	struct hclge_get_fd_allocation_cmd *req;
5621 	struct hclge_desc desc;
5622 	int ret;
5623 
5624 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5625 
5626 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5627 
5628 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5629 	if (ret) {
5630 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5631 			ret);
5632 		return ret;
5633 	}
5634 
5635 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5636 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5637 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5638 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5639 
5640 	return ret;
5641 }
5642 
5643 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5644 				   enum HCLGE_FD_STAGE stage_num)
5645 {
5646 	struct hclge_set_fd_key_config_cmd *req;
5647 	struct hclge_fd_key_cfg *stage;
5648 	struct hclge_desc desc;
5649 	int ret;
5650 
5651 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5652 
5653 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5654 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5655 	req->stage = stage_num;
5656 	req->key_select = stage->key_sel;
5657 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5658 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5659 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5660 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5661 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5662 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5663 
5664 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5665 	if (ret)
5666 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5667 
5668 	return ret;
5669 }
5670 
5671 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5672 {
5673 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5674 
5675 	spin_lock_bh(&hdev->fd_rule_lock);
5676 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5677 	spin_unlock_bh(&hdev->fd_rule_lock);
5678 
5679 	hclge_fd_set_user_def_cmd(hdev, cfg);
5680 }
5681 
5682 static int hclge_init_fd_config(struct hclge_dev *hdev)
5683 {
5684 #define LOW_2_WORDS		0x03
5685 	struct hclge_fd_key_cfg *key_cfg;
5686 	int ret;
5687 
5688 	if (!hnae3_dev_fd_supported(hdev))
5689 		return 0;
5690 
5691 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5692 	if (ret)
5693 		return ret;
5694 
5695 	switch (hdev->fd_cfg.fd_mode) {
5696 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5697 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5698 		break;
5699 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5700 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5701 		break;
5702 	default:
5703 		dev_err(&hdev->pdev->dev,
5704 			"Unsupported flow director mode %u\n",
5705 			hdev->fd_cfg.fd_mode);
5706 		return -EOPNOTSUPP;
5707 	}
5708 
5709 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5710 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5711 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5712 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5713 	key_cfg->outer_sipv6_word_en = 0;
5714 	key_cfg->outer_dipv6_word_en = 0;
5715 
5716 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5717 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5718 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5719 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5720 
5721 	/* With the max 400-bit key, MAC address tuples are also supported */
5722 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5723 		key_cfg->tuple_active |=
5724 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5725 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5726 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5727 	}
5728 
5729 	/* roce_type is used to filter roce frames
5730 	 * dst_vport is used to specify the rule
5731 	 */
5732 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5733 
5734 	ret = hclge_get_fd_allocation(hdev,
5735 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5736 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5737 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5738 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5739 	if (ret)
5740 		return ret;
5741 
5742 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5743 }
5744 
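/* Write one flow director TCAM entry. The key is split across three
 * descriptors sent as one command; sel_x selects whether the x or y part
 * of the key is written, and the entry valid bit is only set when writing
 * the x part of an added rule. A NULL key is passed when an entry is being
 * invalidated.
 */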
5745 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5746 				int loc, u8 *key, bool is_add)
5747 {
5748 	struct hclge_fd_tcam_config_1_cmd *req1;
5749 	struct hclge_fd_tcam_config_2_cmd *req2;
5750 	struct hclge_fd_tcam_config_3_cmd *req3;
5751 	struct hclge_desc desc[3];
5752 	int ret;
5753 
5754 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5755 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5756 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5757 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5758 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5759 
5760 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5761 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5762 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5763 
5764 	req1->stage = stage;
5765 	req1->xy_sel = sel_x ? 1 : 0;
5766 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5767 	req1->index = cpu_to_le32(loc);
5768 	req1->entry_vld = sel_x ? is_add : 0;
5769 
5770 	if (key) {
5771 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5772 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5773 		       sizeof(req2->tcam_data));
5774 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5775 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5776 	}
5777 
5778 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5779 	if (ret)
5780 		dev_err(&hdev->pdev->dev,
5781 			"config tcam key fail, ret=%d\n",
5782 			ret);
5783 
5784 	return ret;
5785 }
5786 
5787 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5788 			      struct hclge_fd_ad_data *action)
5789 {
5790 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5791 	struct hclge_fd_ad_config_cmd *req;
5792 	struct hclge_desc desc;
5793 	u64 ad_data = 0;
5794 	int ret;
5795 
5796 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5797 
5798 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5799 	req->index = cpu_to_le32(loc);
5800 	req->stage = stage;
5801 
5802 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5803 		      action->write_rule_id_to_bd);
5804 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5805 			action->rule_id);
5806 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5807 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5808 			      action->override_tc);
5809 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5810 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5811 	}
5812 	ad_data <<= 32;
5813 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5814 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5815 		      action->forward_to_direct_queue);
5816 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5817 			action->queue_id);
5818 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5819 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5820 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5821 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5822 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5823 			action->counter_id);
5824 
5825 	req->ad_data = cpu_to_le64(ad_data);
5826 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5827 	if (ret)
5828 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5829 
5830 	return ret;
5831 }
5832 
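/* Convert one tuple of the rule into its TCAM x/y representation by
 * applying calc_x()/calc_y() to the tuple value and its mask. Returns
 * false if the tuple key option is unknown, true otherwise (including
 * when the tuple is unused, in which case the key bytes are left as is).
 */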
5833 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5834 				   struct hclge_fd_rule *rule)
5835 {
5836 	int offset, moffset, ip_offset;
5837 	enum HCLGE_FD_KEY_OPT key_opt;
5838 	u16 tmp_x_s, tmp_y_s;
5839 	u32 tmp_x_l, tmp_y_l;
5840 	u8 *p = (u8 *)rule;
5841 	int i;
5842 
5843 	if (rule->unused_tuple & BIT(tuple_bit))
5844 		return true;
5845 
5846 	key_opt = tuple_key_info[tuple_bit].key_opt;
5847 	offset = tuple_key_info[tuple_bit].offset;
5848 	moffset = tuple_key_info[tuple_bit].moffset;
5849 
5850 	switch (key_opt) {
5851 	case KEY_OPT_U8:
5852 		calc_x(*key_x, p[offset], p[moffset]);
5853 		calc_y(*key_y, p[offset], p[moffset]);
5854 
5855 		return true;
5856 	case KEY_OPT_LE16:
5857 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5858 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5859 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5860 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5861 
5862 		return true;
5863 	case KEY_OPT_LE32:
5864 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5865 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5866 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5867 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5868 
5869 		return true;
5870 	case KEY_OPT_MAC:
5871 		for (i = 0; i < ETH_ALEN; i++) {
5872 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5873 			       p[moffset + i]);
5874 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5875 			       p[moffset + i]);
5876 		}
5877 
5878 		return true;
5879 	case KEY_OPT_IP:
5880 		ip_offset = IPV4_INDEX * sizeof(u32);
5881 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5882 		       *(u32 *)(&p[moffset + ip_offset]));
5883 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5884 		       *(u32 *)(&p[moffset + ip_offset]));
5885 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5886 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5887 
5888 		return true;
5889 	default:
5890 		return false;
5891 	}
5892 }
5893 
5894 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5895 				 u8 vf_id, u8 network_port_id)
5896 {
5897 	u32 port_number = 0;
5898 
5899 	if (port_type == HOST_PORT) {
5900 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5901 				pf_id);
5902 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5903 				vf_id);
5904 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5905 	} else {
5906 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5907 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5908 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5909 	}
5910 
5911 	return port_number;
5912 }
5913 
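/* Build the meta data portion of the key. Only the active meta data tuples
 * (roce_type and dst_vport here) are packed, starting from bit 0, and the
 * result is shifted so the packed bits occupy the most significant bits of
 * the 32-bit meta data word.
 */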
5914 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5915 				       __le32 *key_x, __le32 *key_y,
5916 				       struct hclge_fd_rule *rule)
5917 {
5918 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5919 	u8 cur_pos = 0, tuple_size, shift_bits;
5920 	unsigned int i;
5921 
5922 	for (i = 0; i < MAX_META_DATA; i++) {
5923 		tuple_size = meta_data_key_info[i].key_length;
5924 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5925 
5926 		switch (tuple_bit) {
5927 		case BIT(ROCE_TYPE):
5928 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5929 			cur_pos += tuple_size;
5930 			break;
5931 		case BIT(DST_VPORT):
5932 			port_number = hclge_get_port_number(HOST_PORT, 0,
5933 							    rule->vf_id, 0);
5934 			hnae3_set_field(meta_data,
5935 					GENMASK(cur_pos + tuple_size, cur_pos),
5936 					cur_pos, port_number);
5937 			cur_pos += tuple_size;
5938 			break;
5939 		default:
5940 			break;
5941 		}
5942 	}
5943 
5944 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5945 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5946 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5947 
5948 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5949 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5950 }
5951 
5952 /* A complete key consists of a meta data key and a tuple key.
5953  * The meta data key is stored in the MSB region, the tuple key is stored
5954  * in the LSB region, and unused bits are filled with 0.
5955  */
5956 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5957 			    struct hclge_fd_rule *rule)
5958 {
5959 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5960 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5961 	u8 *cur_key_x, *cur_key_y;
5962 	u8 meta_data_region;
5963 	u8 tuple_size;
5964 	int ret;
5965 	u32 i;
5966 
5967 	memset(key_x, 0, sizeof(key_x));
5968 	memset(key_y, 0, sizeof(key_y));
5969 	cur_key_x = key_x;
5970 	cur_key_y = key_y;
5971 
5972 	for (i = 0; i < MAX_TUPLE; i++) {
5973 		bool tuple_valid;
5974 
5975 		tuple_size = tuple_key_info[i].key_length / 8;
5976 		if (!(key_cfg->tuple_active & BIT(i)))
5977 			continue;
5978 
5979 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5980 						     cur_key_y, rule);
5981 		if (tuple_valid) {
5982 			cur_key_x += tuple_size;
5983 			cur_key_y += tuple_size;
5984 		}
5985 	}
5986 
5987 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5988 			MAX_META_DATA_LENGTH / 8;
5989 
5990 	hclge_fd_convert_meta_data(key_cfg,
5991 				   (__le32 *)(key_x + meta_data_region),
5992 				   (__le32 *)(key_y + meta_data_region),
5993 				   rule);
5994 
5995 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5996 				   true);
5997 	if (ret) {
5998 		dev_err(&hdev->pdev->dev,
5999 			"fd key_y config fail, loc=%u, ret=%d\n",
6000 			rule->location, ret);
6001 		return ret;
6002 	}
6003 
6004 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6005 				   true);
6006 	if (ret)
6007 		dev_err(&hdev->pdev->dev,
6008 			"fd key_x config fail, loc=%u, ret=%d\n",
6009 			rule->location, ret);
6010 	return ret;
6011 }
6012 
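/* Fill in the action data for a rule: drop the packet, override the TC
 * (for tc flower rules) or forward to a direct queue, optionally bind a
 * counter, and request that the rule id be written back to the RX BD.
 */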
6013 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6014 			       struct hclge_fd_rule *rule)
6015 {
6016 	struct hclge_vport *vport = hdev->vport;
6017 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6018 	struct hclge_fd_ad_data ad_data;
6019 
6020 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6021 	ad_data.ad_id = rule->location;
6022 
6023 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6024 		ad_data.drop_packet = true;
6025 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6026 		ad_data.override_tc = true;
6027 		ad_data.queue_id =
6028 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6029 		ad_data.tc_size =
6030 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6031 	} else {
6032 		ad_data.forward_to_direct_queue = true;
6033 		ad_data.queue_id = rule->queue_id;
6034 	}
6035 
6036 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6037 		ad_data.use_counter = true;
6038 		ad_data.counter_id = rule->vf_id %
6039 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6040 	} else {
6041 		ad_data.use_counter = false;
6042 		ad_data.counter_id = 0;
6043 	}
6044 
6045 	ad_data.use_next_stage = false;
6046 	ad_data.next_input_key = 0;
6047 
6048 	ad_data.write_rule_id_to_bd = true;
6049 	ad_data.rule_id = rule->location;
6050 
6051 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6052 }
6053 
6054 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6055 				       u32 *unused_tuple)
6056 {
6057 	if (!spec || !unused_tuple)
6058 		return -EINVAL;
6059 
6060 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6061 
6062 	if (!spec->ip4src)
6063 		*unused_tuple |= BIT(INNER_SRC_IP);
6064 
6065 	if (!spec->ip4dst)
6066 		*unused_tuple |= BIT(INNER_DST_IP);
6067 
6068 	if (!spec->psrc)
6069 		*unused_tuple |= BIT(INNER_SRC_PORT);
6070 
6071 	if (!spec->pdst)
6072 		*unused_tuple |= BIT(INNER_DST_PORT);
6073 
6074 	if (!spec->tos)
6075 		*unused_tuple |= BIT(INNER_IP_TOS);
6076 
6077 	return 0;
6078 }
6079 
6080 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6081 				    u32 *unused_tuple)
6082 {
6083 	if (!spec || !unused_tuple)
6084 		return -EINVAL;
6085 
6086 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6087 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6088 
6089 	if (!spec->ip4src)
6090 		*unused_tuple |= BIT(INNER_SRC_IP);
6091 
6092 	if (!spec->ip4dst)
6093 		*unused_tuple |= BIT(INNER_DST_IP);
6094 
6095 	if (!spec->tos)
6096 		*unused_tuple |= BIT(INNER_IP_TOS);
6097 
6098 	if (!spec->proto)
6099 		*unused_tuple |= BIT(INNER_IP_PROTO);
6100 
6101 	if (spec->l4_4_bytes)
6102 		return -EOPNOTSUPP;
6103 
6104 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6105 		return -EOPNOTSUPP;
6106 
6107 	return 0;
6108 }
6109 
6110 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6111 				       u32 *unused_tuple)
6112 {
6113 	if (!spec || !unused_tuple)
6114 		return -EINVAL;
6115 
6116 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6117 
6118 	/* check whether src/dst ip address is used */
6119 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6120 		*unused_tuple |= BIT(INNER_SRC_IP);
6121 
6122 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6123 		*unused_tuple |= BIT(INNER_DST_IP);
6124 
6125 	if (!spec->psrc)
6126 		*unused_tuple |= BIT(INNER_SRC_PORT);
6127 
6128 	if (!spec->pdst)
6129 		*unused_tuple |= BIT(INNER_DST_PORT);
6130 
6131 	if (!spec->tclass)
6132 		*unused_tuple |= BIT(INNER_IP_TOS);
6133 
6134 	return 0;
6135 }
6136 
6137 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6138 				    u32 *unused_tuple)
6139 {
6140 	if (!spec || !unused_tuple)
6141 		return -EINVAL;
6142 
6143 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6144 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6145 
6146 	/* check whether src/dst ip address is used */
6147 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6148 		*unused_tuple |= BIT(INNER_SRC_IP);
6149 
6150 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6151 		*unused_tuple |= BIT(INNER_DST_IP);
6152 
6153 	if (!spec->l4_proto)
6154 		*unused_tuple |= BIT(INNER_IP_PROTO);
6155 
6156 	if (!spec->tclass)
6157 		*unused_tuple |= BIT(INNER_IP_TOS);
6158 
6159 	if (spec->l4_4_bytes)
6160 		return -EOPNOTSUPP;
6161 
6162 	return 0;
6163 }
6164 
6165 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6166 {
6167 	if (!spec || !unused_tuple)
6168 		return -EINVAL;
6169 
6170 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6171 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6172 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6173 
6174 	if (is_zero_ether_addr(spec->h_source))
6175 		*unused_tuple |= BIT(INNER_SRC_MAC);
6176 
6177 	if (is_zero_ether_addr(spec->h_dest))
6178 		*unused_tuple |= BIT(INNER_DST_MAC);
6179 
6180 	if (!spec->h_proto)
6181 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6182 
6183 	return 0;
6184 }
6185 
6186 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6187 				    struct ethtool_rx_flow_spec *fs,
6188 				    u32 *unused_tuple)
6189 {
6190 	if (fs->flow_type & FLOW_EXT) {
6191 		if (fs->h_ext.vlan_etype) {
6192 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6193 			return -EOPNOTSUPP;
6194 		}
6195 
6196 		if (!fs->h_ext.vlan_tci)
6197 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6198 
6199 		if (fs->m_ext.vlan_tci &&
6200 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6201 			dev_err(&hdev->pdev->dev,
6202 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6203 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6204 			return -EINVAL;
6205 		}
6206 	} else {
6207 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6208 	}
6209 
6210 	if (fs->flow_type & FLOW_MAC_EXT) {
6211 		if (hdev->fd_cfg.fd_mode !=
6212 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6213 			dev_err(&hdev->pdev->dev,
6214 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6215 			return -EOPNOTSUPP;
6216 		}
6217 
6218 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6219 			*unused_tuple |= BIT(INNER_DST_MAC);
6220 		else
6221 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6222 	}
6223 
6224 	return 0;
6225 }
6226 
6227 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6228 				       struct hclge_fd_user_def_info *info)
6229 {
6230 	switch (flow_type) {
6231 	case ETHER_FLOW:
6232 		info->layer = HCLGE_FD_USER_DEF_L2;
6233 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6234 		break;
6235 	case IP_USER_FLOW:
6236 	case IPV6_USER_FLOW:
6237 		info->layer = HCLGE_FD_USER_DEF_L3;
6238 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6239 		break;
6240 	case TCP_V4_FLOW:
6241 	case UDP_V4_FLOW:
6242 	case TCP_V6_FLOW:
6243 	case UDP_V6_FLOW:
6244 		info->layer = HCLGE_FD_USER_DEF_L4;
6245 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6246 		break;
6247 	default:
6248 		return -EOPNOTSUPP;
6249 	}
6250 
6251 	return 0;
6252 }
6253 
6254 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6255 {
6256 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6257 }
6258 
6259 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6260 					 struct ethtool_rx_flow_spec *fs,
6261 					 u32 *unused_tuple,
6262 					 struct hclge_fd_user_def_info *info)
6263 {
6264 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6265 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6266 	u16 data, offset, data_mask, offset_mask;
6267 	int ret;
6268 
6269 	info->layer = HCLGE_FD_USER_DEF_NONE;
6270 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6271 
6272 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6273 		return 0;
6274 
6275 	/* user-def data from ethtool is a 64 bit value, bits 0~15 are used
6276 	 * for the data, and bits 32~47 are used for the offset.
6277 	 */
6278 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6279 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6280 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6281 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6282 
6283 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6284 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6285 		return -EOPNOTSUPP;
6286 	}
6287 
6288 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6289 		dev_err(&hdev->pdev->dev,
6290 			"user-def offset[%u] should be no more than %u\n",
6291 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6292 		return -EINVAL;
6293 	}
6294 
6295 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6296 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6297 		return -EINVAL;
6298 	}
6299 
6300 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6301 	if (ret) {
6302 		dev_err(&hdev->pdev->dev,
6303 			"unsupported flow type for user-def bytes, ret = %d\n",
6304 			ret);
6305 		return ret;
6306 	}
6307 
6308 	info->data = data;
6309 	info->data_mask = data_mask;
6310 	info->offset = offset;
6311 
6312 	return 0;
6313 }
6314 
6315 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6316 			       struct ethtool_rx_flow_spec *fs,
6317 			       u32 *unused_tuple,
6318 			       struct hclge_fd_user_def_info *info)
6319 {
6320 	u32 flow_type;
6321 	int ret;
6322 
6323 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6324 		dev_err(&hdev->pdev->dev,
6325 			"failed to config fd rules, invalid rule location: %u, max is %u\n",
6326 			fs->location,
6327 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6328 		return -EINVAL;
6329 	}
6330 
6331 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6332 	if (ret)
6333 		return ret;
6334 
6335 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6336 	switch (flow_type) {
6337 	case SCTP_V4_FLOW:
6338 	case TCP_V4_FLOW:
6339 	case UDP_V4_FLOW:
6340 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6341 						  unused_tuple);
6342 		break;
6343 	case IP_USER_FLOW:
6344 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6345 					       unused_tuple);
6346 		break;
6347 	case SCTP_V6_FLOW:
6348 	case TCP_V6_FLOW:
6349 	case UDP_V6_FLOW:
6350 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6351 						  unused_tuple);
6352 		break;
6353 	case IPV6_USER_FLOW:
6354 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6355 					       unused_tuple);
6356 		break;
6357 	case ETHER_FLOW:
6358 		if (hdev->fd_cfg.fd_mode !=
6359 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6360 			dev_err(&hdev->pdev->dev,
6361 				"ETHER_FLOW is not supported in current fd mode!\n");
6362 			return -EOPNOTSUPP;
6363 		}
6364 
6365 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6366 						 unused_tuple);
6367 		break;
6368 	default:
6369 		dev_err(&hdev->pdev->dev,
6370 			"unsupported protocol type, protocol type = %#x\n",
6371 			flow_type);
6372 		return -EOPNOTSUPP;
6373 	}
6374 
6375 	if (ret) {
6376 		dev_err(&hdev->pdev->dev,
6377 			"failed to check flow union tuple, ret = %d\n",
6378 			ret);
6379 		return ret;
6380 	}
6381 
6382 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6383 }
6384 
6385 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6386 				      struct ethtool_rx_flow_spec *fs,
6387 				      struct hclge_fd_rule *rule, u8 ip_proto)
6388 {
6389 	rule->tuples.src_ip[IPV4_INDEX] =
6390 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6391 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6392 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6393 
6394 	rule->tuples.dst_ip[IPV4_INDEX] =
6395 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6396 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6397 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6398 
6399 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6400 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6401 
6402 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6403 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6404 
6405 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6406 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6407 
6408 	rule->tuples.ether_proto = ETH_P_IP;
6409 	rule->tuples_mask.ether_proto = 0xFFFF;
6410 
6411 	rule->tuples.ip_proto = ip_proto;
6412 	rule->tuples_mask.ip_proto = 0xFF;
6413 }
6414 
6415 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6416 				   struct ethtool_rx_flow_spec *fs,
6417 				   struct hclge_fd_rule *rule)
6418 {
6419 	rule->tuples.src_ip[IPV4_INDEX] =
6420 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6421 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6422 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6423 
6424 	rule->tuples.dst_ip[IPV4_INDEX] =
6425 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6426 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6427 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6428 
6429 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6430 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6431 
6432 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6433 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6434 
6435 	rule->tuples.ether_proto = ETH_P_IP;
6436 	rule->tuples_mask.ether_proto = 0xFFFF;
6437 }
6438 
6439 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6440 				      struct ethtool_rx_flow_spec *fs,
6441 				      struct hclge_fd_rule *rule, u8 ip_proto)
6442 {
6443 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6444 			  IPV6_SIZE);
6445 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6446 			  IPV6_SIZE);
6447 
6448 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6449 			  IPV6_SIZE);
6450 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6451 			  IPV6_SIZE);
6452 
6453 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6454 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6455 
6456 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6457 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6458 
6459 	rule->tuples.ether_proto = ETH_P_IPV6;
6460 	rule->tuples_mask.ether_proto = 0xFFFF;
6461 
6462 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6463 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6464 
6465 	rule->tuples.ip_proto = ip_proto;
6466 	rule->tuples_mask.ip_proto = 0xFF;
6467 }
6468 
6469 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6470 				   struct ethtool_rx_flow_spec *fs,
6471 				   struct hclge_fd_rule *rule)
6472 {
6473 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6474 			  IPV6_SIZE);
6475 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6476 			  IPV6_SIZE);
6477 
6478 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6479 			  IPV6_SIZE);
6480 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6481 			  IPV6_SIZE);
6482 
6483 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6484 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6485 
6486 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6487 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6488 
6489 	rule->tuples.ether_proto = ETH_P_IPV6;
6490 	rule->tuples_mask.ether_proto = 0xFFFF;
6491 }
6492 
6493 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6494 				     struct ethtool_rx_flow_spec *fs,
6495 				     struct hclge_fd_rule *rule)
6496 {
6497 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6498 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6499 
6500 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6501 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6502 
6503 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6504 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6505 }
6506 
6507 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6508 					struct hclge_fd_rule *rule)
6509 {
6510 	switch (info->layer) {
6511 	case HCLGE_FD_USER_DEF_L2:
6512 		rule->tuples.l2_user_def = info->data;
6513 		rule->tuples_mask.l2_user_def = info->data_mask;
6514 		break;
6515 	case HCLGE_FD_USER_DEF_L3:
6516 		rule->tuples.l3_user_def = info->data;
6517 		rule->tuples_mask.l3_user_def = info->data_mask;
6518 		break;
6519 	case HCLGE_FD_USER_DEF_L4:
6520 		rule->tuples.l4_user_def = (u32)info->data << 16;
6521 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6522 		break;
6523 	default:
6524 		break;
6525 	}
6526 
6527 	rule->ep.user_def = *info;
6528 }
6529 
6530 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6531 			      struct ethtool_rx_flow_spec *fs,
6532 			      struct hclge_fd_rule *rule,
6533 			      struct hclge_fd_user_def_info *info)
6534 {
6535 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6536 
6537 	switch (flow_type) {
6538 	case SCTP_V4_FLOW:
6539 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6540 		break;
6541 	case TCP_V4_FLOW:
6542 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6543 		break;
6544 	case UDP_V4_FLOW:
6545 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6546 		break;
6547 	case IP_USER_FLOW:
6548 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6549 		break;
6550 	case SCTP_V6_FLOW:
6551 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6552 		break;
6553 	case TCP_V6_FLOW:
6554 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6555 		break;
6556 	case UDP_V6_FLOW:
6557 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6558 		break;
6559 	case IPV6_USER_FLOW:
6560 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6561 		break;
6562 	case ETHER_FLOW:
6563 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6564 		break;
6565 	default:
6566 		return -EOPNOTSUPP;
6567 	}
6568 
6569 	if (fs->flow_type & FLOW_EXT) {
6570 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6571 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6572 		hclge_fd_get_user_def_tuple(info, rule);
6573 	}
6574 
6575 	if (fs->flow_type & FLOW_MAC_EXT) {
6576 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6577 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6578 	}
6579 
6580 	return 0;
6581 }
6582 
6583 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6584 				struct hclge_fd_rule *rule)
6585 {
6586 	int ret;
6587 
6588 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6589 	if (ret)
6590 		return ret;
6591 
6592 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6593 }
6594 
6595 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6596 				     struct hclge_fd_rule *rule)
6597 {
6598 	int ret;
6599 
6600 	spin_lock_bh(&hdev->fd_rule_lock);
6601 
6602 	if (hdev->fd_active_type != rule->rule_type &&
6603 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6604 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6605 		dev_err(&hdev->pdev->dev,
6606 			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6607 			rule->rule_type, hdev->fd_active_type);
6608 		spin_unlock_bh(&hdev->fd_rule_lock);
6609 		return -EINVAL;
6610 	}
6611 
6612 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6613 	if (ret)
6614 		goto out;
6615 
6616 	ret = hclge_clear_arfs_rules(hdev);
6617 	if (ret)
6618 		goto out;
6619 
6620 	ret = hclge_fd_config_rule(hdev, rule);
6621 	if (ret)
6622 		goto out;
6623 
6624 	rule->state = HCLGE_FD_ACTIVE;
6625 	hdev->fd_active_type = rule->rule_type;
6626 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6627 
6628 out:
6629 	spin_unlock_bh(&hdev->fd_rule_lock);
6630 	return ret;
6631 }
6632 
6633 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6634 {
6635 	struct hclge_vport *vport = hclge_get_vport(handle);
6636 	struct hclge_dev *hdev = vport->back;
6637 
6638 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6639 }
6640 
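/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet,
 * otherwise the upper bits select the VF (0 means the PF itself) and the
 * lower bits select the queue within that function.
 */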
6641 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6642 				      u16 *vport_id, u8 *action, u16 *queue_id)
6643 {
6644 	struct hclge_vport *vport = hdev->vport;
6645 
6646 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6647 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6648 	} else {
6649 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6650 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6651 		u16 tqps;
6652 
6653 		/* To keep consistent with the user's configuration, subtract 1
6654 		 * when printing 'vf', because the ethtool vf id is offset by 1.
6655 		 */
6656 		if (vf > hdev->num_req_vfs) {
6657 			dev_err(&hdev->pdev->dev,
6658 				"Error: vf id (%u) should be less than %u\n",
6659 				vf - 1, hdev->num_req_vfs);
6660 			return -EINVAL;
6661 		}
6662 
6663 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6664 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6665 
6666 		if (ring >= tqps) {
6667 			dev_err(&hdev->pdev->dev,
6668 				"Error: queue id (%u) > max tqp num (%u)\n",
6669 				ring, tqps - 1);
6670 			return -EINVAL;
6671 		}
6672 
6673 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6674 		*queue_id = ring;
6675 	}
6676 
6677 	return 0;
6678 }
6679 
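/* Add a flow director rule configured via ethtool -N/-U. For example, an
 * illustrative command (device name and values are placeholders) such as
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 \
 *           action 3 loc 5
 *
 * ends up here. The spec is validated, the ring_cookie is translated into
 * a vport and queue, and the rule is written to the TCAM and tracked in
 * fd_rule_list.
 */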
6680 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6681 			      struct ethtool_rxnfc *cmd)
6682 {
6683 	struct hclge_vport *vport = hclge_get_vport(handle);
6684 	struct hclge_dev *hdev = vport->back;
6685 	struct hclge_fd_user_def_info info;
6686 	u16 dst_vport_id = 0, q_index = 0;
6687 	struct ethtool_rx_flow_spec *fs;
6688 	struct hclge_fd_rule *rule;
6689 	u32 unused = 0;
6690 	u8 action;
6691 	int ret;
6692 
6693 	if (!hnae3_dev_fd_supported(hdev)) {
6694 		dev_err(&hdev->pdev->dev,
6695 			"flow director is not supported\n");
6696 		return -EOPNOTSUPP;
6697 	}
6698 
6699 	if (!hdev->fd_en) {
6700 		dev_err(&hdev->pdev->dev,
6701 			"please enable flow director first\n");
6702 		return -EOPNOTSUPP;
6703 	}
6704 
6705 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6706 
6707 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6708 	if (ret)
6709 		return ret;
6710 
6711 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6712 					 &action, &q_index);
6713 	if (ret)
6714 		return ret;
6715 
6716 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6717 	if (!rule)
6718 		return -ENOMEM;
6719 
6720 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6721 	if (ret) {
6722 		kfree(rule);
6723 		return ret;
6724 	}
6725 
6726 	rule->flow_type = fs->flow_type;
6727 	rule->location = fs->location;
6728 	rule->unused_tuple = unused;
6729 	rule->vf_id = dst_vport_id;
6730 	rule->queue_id = q_index;
6731 	rule->action = action;
6732 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6733 
6734 	ret = hclge_add_fd_entry_common(hdev, rule);
6735 	if (ret)
6736 		kfree(rule);
6737 
6738 	return ret;
6739 }
6740 
6741 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6742 			      struct ethtool_rxnfc *cmd)
6743 {
6744 	struct hclge_vport *vport = hclge_get_vport(handle);
6745 	struct hclge_dev *hdev = vport->back;
6746 	struct ethtool_rx_flow_spec *fs;
6747 	int ret;
6748 
6749 	if (!hnae3_dev_fd_supported(hdev))
6750 		return -EOPNOTSUPP;
6751 
6752 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6753 
6754 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6755 		return -EINVAL;
6756 
6757 	spin_lock_bh(&hdev->fd_rule_lock);
6758 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6759 	    !test_bit(fs->location, hdev->fd_bmap)) {
6760 		dev_err(&hdev->pdev->dev,
6761 			"Delete fail, rule %u does not exist\n", fs->location);
6762 		spin_unlock_bh(&hdev->fd_rule_lock);
6763 		return -ENOENT;
6764 	}
6765 
6766 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6767 				   NULL, false);
6768 	if (ret)
6769 		goto out;
6770 
6771 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6772 
6773 out:
6774 	spin_unlock_bh(&hdev->fd_rule_lock);
6775 	return ret;
6776 }
6777 
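/* Invalidate every TCAM entry tracked in fd_bmap and, if clear_list is
 * true, also free the rule nodes and reset the rule counters.
 */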
6778 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6779 					 bool clear_list)
6780 {
6781 	struct hclge_fd_rule *rule;
6782 	struct hlist_node *node;
6783 	u16 location;
6784 
6785 	if (!hnae3_dev_fd_supported(hdev))
6786 		return;
6787 
6788 	spin_lock_bh(&hdev->fd_rule_lock);
6789 
6790 	for_each_set_bit(location, hdev->fd_bmap,
6791 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6792 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6793 				     NULL, false);
6794 
6795 	if (clear_list) {
6796 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6797 					  rule_node) {
6798 			hlist_del(&rule->rule_node);
6799 			kfree(rule);
6800 		}
6801 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6802 		hdev->hclge_fd_rule_num = 0;
6803 		bitmap_zero(hdev->fd_bmap,
6804 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6805 	}
6806 
6807 	spin_unlock_bh(&hdev->fd_rule_lock);
6808 }
6809 
6810 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6811 {
6812 	hclge_clear_fd_rules_in_list(hdev, true);
6813 	hclge_fd_disable_user_def(hdev);
6814 }
6815 
6816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6817 {
6818 	struct hclge_vport *vport = hclge_get_vport(handle);
6819 	struct hclge_dev *hdev = vport->back;
6820 	struct hclge_fd_rule *rule;
6821 	struct hlist_node *node;
6822 
6823 	/* Return ok here, because reset error handling will check this
6824 	 * return value. If error is returned here, the reset process will
6825 	 * fail.
6826 	 */
6827 	if (!hnae3_dev_fd_supported(hdev))
6828 		return 0;
6829 
6830 	/* if fd is disabled, it should not be restored during reset */
6831 	if (!hdev->fd_en)
6832 		return 0;
6833 
6834 	spin_lock_bh(&hdev->fd_rule_lock);
6835 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6836 		if (rule->state == HCLGE_FD_ACTIVE)
6837 			rule->state = HCLGE_FD_TO_ADD;
6838 	}
6839 	spin_unlock_bh(&hdev->fd_rule_lock);
6840 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6841 
6842 	return 0;
6843 }
6844 
6845 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6846 				 struct ethtool_rxnfc *cmd)
6847 {
6848 	struct hclge_vport *vport = hclge_get_vport(handle);
6849 	struct hclge_dev *hdev = vport->back;
6850 
6851 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6852 		return -EOPNOTSUPP;
6853 
6854 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6855 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6856 
6857 	return 0;
6858 }
6859 
6860 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6861 				     struct ethtool_tcpip4_spec *spec,
6862 				     struct ethtool_tcpip4_spec *spec_mask)
6863 {
6864 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6865 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6866 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6867 
6868 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6869 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6870 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6871 
6872 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6873 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6874 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6875 
6876 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6877 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6878 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6879 
6880 	spec->tos = rule->tuples.ip_tos;
6881 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6882 			0 : rule->tuples_mask.ip_tos;
6883 }
6884 
6885 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6886 				  struct ethtool_usrip4_spec *spec,
6887 				  struct ethtool_usrip4_spec *spec_mask)
6888 {
6889 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6890 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6891 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6892 
6893 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6894 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6895 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6896 
6897 	spec->tos = rule->tuples.ip_tos;
6898 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6899 			0 : rule->tuples_mask.ip_tos;
6900 
6901 	spec->proto = rule->tuples.ip_proto;
6902 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6903 			0 : rule->tuples_mask.ip_proto;
6904 
6905 	spec->ip_ver = ETH_RX_NFC_IP4;
6906 }
6907 
6908 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6909 				     struct ethtool_tcpip6_spec *spec,
6910 				     struct ethtool_tcpip6_spec *spec_mask)
6911 {
6912 	cpu_to_be32_array(spec->ip6src,
6913 			  rule->tuples.src_ip, IPV6_SIZE);
6914 	cpu_to_be32_array(spec->ip6dst,
6915 			  rule->tuples.dst_ip, IPV6_SIZE);
6916 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6917 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6918 	else
6919 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6920 				  IPV6_SIZE);
6921 
6922 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6923 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6924 	else
6925 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6926 				  IPV6_SIZE);
6927 
6928 	spec->tclass = rule->tuples.ip_tos;
6929 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6930 			0 : rule->tuples_mask.ip_tos;
6931 
6932 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6933 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6934 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6935 
6936 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6937 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6938 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6939 }
6940 
6941 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6942 				  struct ethtool_usrip6_spec *spec,
6943 				  struct ethtool_usrip6_spec *spec_mask)
6944 {
6945 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6946 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6947 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6948 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6949 	else
6950 		cpu_to_be32_array(spec_mask->ip6src,
6951 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6952 
6953 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6954 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6955 	else
6956 		cpu_to_be32_array(spec_mask->ip6dst,
6957 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6958 
6959 	spec->tclass = rule->tuples.ip_tos;
6960 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6961 			0 : rule->tuples_mask.ip_tos;
6962 
6963 	spec->l4_proto = rule->tuples.ip_proto;
6964 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6965 			0 : rule->tuples_mask.ip_proto;
6966 }
6967 
6968 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6969 				    struct ethhdr *spec,
6970 				    struct ethhdr *spec_mask)
6971 {
6972 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6973 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6974 
6975 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6976 		eth_zero_addr(spec_mask->h_source);
6977 	else
6978 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6979 
6980 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6981 		eth_zero_addr(spec_mask->h_dest);
6982 	else
6983 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6984 
6985 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6986 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6987 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6988 }
6989 
6990 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6991 				       struct hclge_fd_rule *rule)
6992 {
6993 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6994 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6995 		fs->h_ext.data[0] = 0;
6996 		fs->h_ext.data[1] = 0;
6997 		fs->m_ext.data[0] = 0;
6998 		fs->m_ext.data[1] = 0;
6999 	} else {
7000 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7001 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7002 		fs->m_ext.data[0] =
7003 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7004 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7005 	}
7006 }
7007 
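/* Fill the FLOW_EXT (vlan tci and user-defined data) and FLOW_MAC_EXT
 * (destination mac) parts of the ethtool flow spec from the stored rule.
 */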
7008 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7009 				  struct hclge_fd_rule *rule)
7010 {
7011 	if (fs->flow_type & FLOW_EXT) {
7012 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7013 		fs->m_ext.vlan_tci =
7014 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7015 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7016 
7017 		hclge_fd_get_user_def_info(fs, rule);
7018 	}
7019 
7020 	if (fs->flow_type & FLOW_MAC_EXT) {
7021 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7022 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
7023 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
7024 		else
7025 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
7026 					rule->tuples_mask.dst_mac);
7027 	}
7028 }
7029 
7030 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7031 				  struct ethtool_rxnfc *cmd)
7032 {
7033 	struct hclge_vport *vport = hclge_get_vport(handle);
7034 	struct hclge_fd_rule *rule = NULL;
7035 	struct hclge_dev *hdev = vport->back;
7036 	struct ethtool_rx_flow_spec *fs;
7037 	struct hlist_node *node2;
7038 
7039 	if (!hnae3_dev_fd_supported(hdev))
7040 		return -EOPNOTSUPP;
7041 
7042 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7043 
7044 	spin_lock_bh(&hdev->fd_rule_lock);
7045 
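	/* the rule list is kept sorted by location, so stop at the first rule
	 * whose location is not less than the requested one and check for an
	 * exact match below
	 */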
7046 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7047 		if (rule->location >= fs->location)
7048 			break;
7049 	}
7050 
7051 	if (!rule || fs->location != rule->location) {
7052 		spin_unlock_bh(&hdev->fd_rule_lock);
7053 
7054 		return -ENOENT;
7055 	}
7056 
7057 	fs->flow_type = rule->flow_type;
7058 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7059 	case SCTP_V4_FLOW:
7060 	case TCP_V4_FLOW:
7061 	case UDP_V4_FLOW:
7062 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7063 					 &fs->m_u.tcp_ip4_spec);
7064 		break;
7065 	case IP_USER_FLOW:
7066 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7067 				      &fs->m_u.usr_ip4_spec);
7068 		break;
7069 	case SCTP_V6_FLOW:
7070 	case TCP_V6_FLOW:
7071 	case UDP_V6_FLOW:
7072 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7073 					 &fs->m_u.tcp_ip6_spec);
7074 		break;
7075 	case IPV6_USER_FLOW:
7076 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7077 				      &fs->m_u.usr_ip6_spec);
7078 		break;
7079 	/* The flow type of the fd rule has been checked before it is added
7080 	 * to the rule list. As all other flow types have been handled above,
7081 	 * the default case must be ETHER_FLOW
7082 	 */
7083 	default:
7084 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7085 					&fs->m_u.ether_spec);
7086 		break;
7087 	}
7088 
7089 	hclge_fd_get_ext_info(fs, rule);
7090 
7091 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7092 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7093 	} else {
7094 		u64 vf_id;
7095 
7096 		fs->ring_cookie = rule->queue_id;
7097 		vf_id = rule->vf_id;
7098 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7099 		fs->ring_cookie |= vf_id;
7100 	}
7101 
7102 	spin_unlock_bh(&hdev->fd_rule_lock);
7103 
7104 	return 0;
7105 }
7106 
7107 static int hclge_get_all_rules(struct hnae3_handle *handle,
7108 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7109 {
7110 	struct hclge_vport *vport = hclge_get_vport(handle);
7111 	struct hclge_dev *hdev = vport->back;
7112 	struct hclge_fd_rule *rule;
7113 	struct hlist_node *node2;
7114 	int cnt = 0;
7115 
7116 	if (!hnae3_dev_fd_supported(hdev))
7117 		return -EOPNOTSUPP;
7118 
7119 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7120 
7121 	spin_lock_bh(&hdev->fd_rule_lock);
7122 	hlist_for_each_entry_safe(rule, node2,
7123 				  &hdev->fd_rule_list, rule_node) {
7124 		if (cnt == cmd->rule_cnt) {
7125 			spin_unlock_bh(&hdev->fd_rule_lock);
7126 			return -EMSGSIZE;
7127 		}
7128 
7129 		if (rule->state == HCLGE_FD_TO_DEL)
7130 			continue;
7131 
7132 		rule_locs[cnt] = rule->location;
7133 		cnt++;
7134 	}
7135 
7136 	spin_unlock_bh(&hdev->fd_rule_lock);
7137 
7138 	cmd->rule_cnt = cnt;
7139 
7140 	return 0;
7141 }
7142 
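/* extract the tuples used by aRFS from the dissected flow keys; IPv4
 * addresses are stored in the last word of the IPv6-sized address arrays
 */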
7143 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7144 				     struct hclge_fd_rule_tuples *tuples)
7145 {
7146 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7147 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7148 
7149 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7150 	tuples->ip_proto = fkeys->basic.ip_proto;
7151 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7152 
7153 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7154 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7155 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7156 	} else {
7157 		int i;
7158 
7159 		for (i = 0; i < IPV6_SIZE; i++) {
7160 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7161 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7162 		}
7163 	}
7164 }
7165 
7166 /* traverse all rules, check whether an existing rule has the same tuples */
7167 static struct hclge_fd_rule *
7168 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7169 			  const struct hclge_fd_rule_tuples *tuples)
7170 {
7171 	struct hclge_fd_rule *rule = NULL;
7172 	struct hlist_node *node;
7173 
7174 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7175 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7176 			return rule;
7177 	}
7178 
7179 	return NULL;
7180 }
7181 
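/* build an aRFS rule from the extracted tuples: mac addresses, vlan tag,
 * ip tos and source port are marked unused, and the stored tuple mask is
 * set to all ones
 */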
7182 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7183 				     struct hclge_fd_rule *rule)
7184 {
7185 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7186 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7187 			     BIT(INNER_SRC_PORT);
7188 	rule->action = 0;
7189 	rule->vf_id = 0;
7190 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7191 	rule->state = HCLGE_FD_TO_ADD;
7192 	if (tuples->ether_proto == ETH_P_IP) {
7193 		if (tuples->ip_proto == IPPROTO_TCP)
7194 			rule->flow_type = TCP_V4_FLOW;
7195 		else
7196 			rule->flow_type = UDP_V4_FLOW;
7197 	} else {
7198 		if (tuples->ip_proto == IPPROTO_TCP)
7199 			rule->flow_type = TCP_V6_FLOW;
7200 		else
7201 			rule->flow_type = UDP_V6_FLOW;
7202 	}
7203 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7204 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7205 }
7206 
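/* create or update an aRFS flow director rule for the given flow and
 * return its location, which is later passed to rps_may_expire_flow() as
 * the filter id when expiring rules
 */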
7207 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7208 				      u16 flow_id, struct flow_keys *fkeys)
7209 {
7210 	struct hclge_vport *vport = hclge_get_vport(handle);
7211 	struct hclge_fd_rule_tuples new_tuples = {};
7212 	struct hclge_dev *hdev = vport->back;
7213 	struct hclge_fd_rule *rule;
7214 	u16 bit_id;
7215 
7216 	if (!hnae3_dev_fd_supported(hdev))
7217 		return -EOPNOTSUPP;
7218 
7219 	/* when there are already fd rules added by the user,
7220 	 * arfs should not work
7221 	 */
7222 	spin_lock_bh(&hdev->fd_rule_lock);
7223 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7224 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7225 		spin_unlock_bh(&hdev->fd_rule_lock);
7226 		return -EOPNOTSUPP;
7227 	}
7228 
7229 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7230 
7231 	/* check whether a flow director filter already exists for this flow:
7232 	 * if not, create a new filter for it;
7233 	 * if a filter exists with a different queue id, modify the filter;
7234 	 * if a filter exists with the same queue id, do nothing
7235 	 */
7236 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7237 	if (!rule) {
7238 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7239 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7240 			spin_unlock_bh(&hdev->fd_rule_lock);
7241 			return -ENOSPC;
7242 		}
7243 
7244 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7245 		if (!rule) {
7246 			spin_unlock_bh(&hdev->fd_rule_lock);
7247 			return -ENOMEM;
7248 		}
7249 
7250 		rule->location = bit_id;
7251 		rule->arfs.flow_id = flow_id;
7252 		rule->queue_id = queue_id;
7253 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7254 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7255 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7256 	} else if (rule->queue_id != queue_id) {
7257 		rule->queue_id = queue_id;
7258 		rule->state = HCLGE_FD_TO_ADD;
7259 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7260 		hclge_task_schedule(hdev, 0);
7261 	}
7262 	spin_unlock_bh(&hdev->fd_rule_lock);
7263 	return rule->location;
7264 }
7265 
7266 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7267 {
7268 #ifdef CONFIG_RFS_ACCEL
7269 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7270 	struct hclge_fd_rule *rule;
7271 	struct hlist_node *node;
7272 
7273 	spin_lock_bh(&hdev->fd_rule_lock);
7274 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7275 		spin_unlock_bh(&hdev->fd_rule_lock);
7276 		return;
7277 	}
7278 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7279 		if (rule->state != HCLGE_FD_ACTIVE)
7280 			continue;
7281 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7282 					rule->arfs.flow_id, rule->location)) {
7283 			rule->state = HCLGE_FD_TO_DEL;
7284 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7285 		}
7286 	}
7287 	spin_unlock_bh(&hdev->fd_rule_lock);
7288 #endif
7289 }
7290 
7291 /* the caller must hold fd_rule_lock when calling this function */
7292 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7293 {
7294 #ifdef CONFIG_RFS_ACCEL
7295 	struct hclge_fd_rule *rule;
7296 	struct hlist_node *node;
7297 	int ret;
7298 
7299 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7300 		return 0;
7301 
7302 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7303 		switch (rule->state) {
7304 		case HCLGE_FD_TO_DEL:
7305 		case HCLGE_FD_ACTIVE:
7306 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7307 						   rule->location, NULL, false);
7308 			if (ret)
7309 				return ret;
7310 			fallthrough;
7311 		case HCLGE_FD_TO_ADD:
7312 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7313 			hlist_del(&rule->rule_node);
7314 			kfree(rule);
7315 			break;
7316 		default:
7317 			break;
7318 		}
7319 	}
7320 	hclge_sync_fd_state(hdev);
7321 
7322 #endif
7323 	return 0;
7324 }
7325 
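/* the hclge_get_cls_key_* helpers below translate flow dissector matches
 * from a tc flower rule into fd rule tuples; keys that are not present in
 * the rule are marked in rule->unused_tuple
 */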
7326 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7327 				    struct hclge_fd_rule *rule)
7328 {
7329 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7330 		struct flow_match_basic match;
7331 		u16 ethtype_key, ethtype_mask;
7332 
7333 		flow_rule_match_basic(flow, &match);
7334 		ethtype_key = ntohs(match.key->n_proto);
7335 		ethtype_mask = ntohs(match.mask->n_proto);
7336 
7337 		if (ethtype_key == ETH_P_ALL) {
7338 			ethtype_key = 0;
7339 			ethtype_mask = 0;
7340 		}
7341 		rule->tuples.ether_proto = ethtype_key;
7342 		rule->tuples_mask.ether_proto = ethtype_mask;
7343 		rule->tuples.ip_proto = match.key->ip_proto;
7344 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7345 	} else {
7346 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7347 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7348 	}
7349 }
7350 
7351 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7352 				  struct hclge_fd_rule *rule)
7353 {
7354 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7355 		struct flow_match_eth_addrs match;
7356 
7357 		flow_rule_match_eth_addrs(flow, &match);
7358 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7359 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7360 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7361 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7362 	} else {
7363 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7364 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7365 	}
7366 }
7367 
7368 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7369 				   struct hclge_fd_rule *rule)
7370 {
7371 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7372 		struct flow_match_vlan match;
7373 
7374 		flow_rule_match_vlan(flow, &match);
7375 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7376 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7377 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7378 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7379 	} else {
7380 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7381 	}
7382 }
7383 
7384 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7385 				 struct hclge_fd_rule *rule)
7386 {
7387 	u16 addr_type = 0;
7388 
7389 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7390 		struct flow_match_control match;
7391 
7392 		flow_rule_match_control(flow, &match);
7393 		addr_type = match.key->addr_type;
7394 	}
7395 
7396 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7397 		struct flow_match_ipv4_addrs match;
7398 
7399 		flow_rule_match_ipv4_addrs(flow, &match);
7400 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7401 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7402 						be32_to_cpu(match.mask->src);
7403 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7404 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7405 						be32_to_cpu(match.mask->dst);
7406 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7407 		struct flow_match_ipv6_addrs match;
7408 
7409 		flow_rule_match_ipv6_addrs(flow, &match);
7410 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7411 				  IPV6_SIZE);
7412 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7413 				  match.mask->src.s6_addr32, IPV6_SIZE);
7414 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7415 				  IPV6_SIZE);
7416 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7417 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7418 	} else {
7419 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7420 		rule->unused_tuple |= BIT(INNER_DST_IP);
7421 	}
7422 }
7423 
7424 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7425 				   struct hclge_fd_rule *rule)
7426 {
7427 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7428 		struct flow_match_ports match;
7429 
7430 		flow_rule_match_ports(flow, &match);
7431 
7432 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7433 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7434 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7435 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7436 	} else {
7437 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7438 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7439 	}
7440 }
7441 
7442 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7443 				  struct flow_cls_offload *cls_flower,
7444 				  struct hclge_fd_rule *rule)
7445 {
7446 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7447 	struct flow_dissector *dissector = flow->match.dissector;
7448 
7449 	if (dissector->used_keys &
7450 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7451 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7452 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7453 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7454 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7455 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7456 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7457 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7458 			dissector->used_keys);
7459 		return -EOPNOTSUPP;
7460 	}
7461 
7462 	hclge_get_cls_key_basic(flow, rule);
7463 	hclge_get_cls_key_mac(flow, rule);
7464 	hclge_get_cls_key_vlan(flow, rule);
7465 	hclge_get_cls_key_ip(flow, rule);
7466 	hclge_get_cls_key_port(flow, rule);
7467 
7468 	return 0;
7469 }
7470 
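/* the tc flower prio is used as the rule location in the fd table
 * (location = prio - 1), so it must not exceed the stage 1 rule number and
 * must not already be in use
 */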
7471 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7472 				  struct flow_cls_offload *cls_flower, int tc)
7473 {
7474 	u32 prio = cls_flower->common.prio;
7475 
7476 	if (tc < 0 || tc > hdev->tc_max) {
7477 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7478 		return -EINVAL;
7479 	}
7480 
7481 	if (prio == 0 ||
7482 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7483 		dev_err(&hdev->pdev->dev,
7484 			"prio %u should be in range[1, %u]\n",
7485 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7486 		return -EINVAL;
7487 	}
7488 
7489 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7490 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7491 		return -EINVAL;
7492 	}
7493 	return 0;
7494 }
7495 
7496 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7497 				struct flow_cls_offload *cls_flower,
7498 				int tc)
7499 {
7500 	struct hclge_vport *vport = hclge_get_vport(handle);
7501 	struct hclge_dev *hdev = vport->back;
7502 	struct hclge_fd_rule *rule;
7503 	int ret;
7504 
7505 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7506 	if (ret) {
7507 		dev_err(&hdev->pdev->dev,
7508 			"failed to check cls flower params, ret = %d\n", ret);
7509 		return ret;
7510 	}
7511 
7512 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7513 	if (!rule)
7514 		return -ENOMEM;
7515 
7516 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7517 	if (ret) {
7518 		kfree(rule);
7519 		return ret;
7520 	}
7521 
7522 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7523 	rule->cls_flower.tc = tc;
7524 	rule->location = cls_flower->common.prio - 1;
7525 	rule->vf_id = 0;
7526 	rule->cls_flower.cookie = cls_flower->cookie;
7527 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7528 
7529 	ret = hclge_add_fd_entry_common(hdev, rule);
7530 	if (ret)
7531 		kfree(rule);
7532 
7533 	return ret;
7534 }
7535 
7536 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7537 						   unsigned long cookie)
7538 {
7539 	struct hclge_fd_rule *rule;
7540 	struct hlist_node *node;
7541 
7542 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7543 		if (rule->cls_flower.cookie == cookie)
7544 			return rule;
7545 	}
7546 
7547 	return NULL;
7548 }
7549 
7550 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7551 				struct flow_cls_offload *cls_flower)
7552 {
7553 	struct hclge_vport *vport = hclge_get_vport(handle);
7554 	struct hclge_dev *hdev = vport->back;
7555 	struct hclge_fd_rule *rule;
7556 	int ret;
7557 
7558 	spin_lock_bh(&hdev->fd_rule_lock);
7559 
7560 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7561 	if (!rule) {
7562 		spin_unlock_bh(&hdev->fd_rule_lock);
7563 		return -EINVAL;
7564 	}
7565 
7566 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7567 				   NULL, false);
7568 	if (ret) {
7569 		spin_unlock_bh(&hdev->fd_rule_lock);
7570 		return ret;
7571 	}
7572 
7573 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7574 	spin_unlock_bh(&hdev->fd_rule_lock);
7575 
7576 	return 0;
7577 }
7578 
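/* flush pending fd rule changes to hardware: program rules in TO_ADD state
 * and remove rules in TO_DEL state; on failure the changed flag is set
 * again so that the service task retries later
 */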
7579 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7580 {
7581 	struct hclge_fd_rule *rule;
7582 	struct hlist_node *node;
7583 	int ret = 0;
7584 
7585 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7586 		return;
7587 
7588 	spin_lock_bh(&hdev->fd_rule_lock);
7589 
7590 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7591 		switch (rule->state) {
7592 		case HCLGE_FD_TO_ADD:
7593 			ret = hclge_fd_config_rule(hdev, rule);
7594 			if (ret)
7595 				goto out;
7596 			rule->state = HCLGE_FD_ACTIVE;
7597 			break;
7598 		case HCLGE_FD_TO_DEL:
7599 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7600 						   rule->location, NULL, false);
7601 			if (ret)
7602 				goto out;
7603 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7604 			hclge_fd_free_node(hdev, rule);
7605 			break;
7606 		default:
7607 			break;
7608 		}
7609 	}
7610 
7611 out:
7612 	if (ret)
7613 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7614 
7615 	spin_unlock_bh(&hdev->fd_rule_lock);
7616 }
7617 
7618 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7619 {
7620 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7621 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7622 
7623 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7624 	}
7625 
7626 	hclge_sync_fd_user_def_cfg(hdev, false);
7627 
7628 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7629 }
7630 
7631 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7632 {
7633 	struct hclge_vport *vport = hclge_get_vport(handle);
7634 	struct hclge_dev *hdev = vport->back;
7635 
7636 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7637 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7638 }
7639 
7640 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7641 {
7642 	struct hclge_vport *vport = hclge_get_vport(handle);
7643 	struct hclge_dev *hdev = vport->back;
7644 
7645 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7646 }
7647 
7648 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7649 {
7650 	struct hclge_vport *vport = hclge_get_vport(handle);
7651 	struct hclge_dev *hdev = vport->back;
7652 
7653 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7654 }
7655 
7656 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7657 {
7658 	struct hclge_vport *vport = hclge_get_vport(handle);
7659 	struct hclge_dev *hdev = vport->back;
7660 
7661 	return hdev->rst_stats.hw_reset_done_cnt;
7662 }
7663 
7664 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7665 {
7666 	struct hclge_vport *vport = hclge_get_vport(handle);
7667 	struct hclge_dev *hdev = vport->back;
7668 
7669 	hdev->fd_en = enable;
7670 
7671 	if (!enable)
7672 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7673 	else
7674 		hclge_restore_fd_entries(handle);
7675 
7676 	hclge_task_schedule(hdev, 0);
7677 }
7678 
7679 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7680 {
7681 	struct hclge_desc desc;
7682 	struct hclge_config_mac_mode_cmd *req =
7683 		(struct hclge_config_mac_mode_cmd *)desc.data;
7684 	u32 loop_en = 0;
7685 	int ret;
7686 
7687 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7688 
7689 	if (enable) {
7690 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7691 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7692 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7693 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7694 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7695 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7696 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7697 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7698 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7699 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7700 	}
7701 
7702 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7703 
7704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7705 	if (ret)
7706 		dev_err(&hdev->pdev->dev,
7707 			"mac enable fail, ret =%d.\n", ret);
7708 }
7709 
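/* read the current mac vlan switch parameter of the given function, then
 * write it back with the requested switch_param bits and mask applied
 */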
7710 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7711 				     u8 switch_param, u8 param_mask)
7712 {
7713 	struct hclge_mac_vlan_switch_cmd *req;
7714 	struct hclge_desc desc;
7715 	u32 func_id;
7716 	int ret;
7717 
7718 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7719 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7720 
7721 	/* read current config parameter */
7722 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7723 				   true);
7724 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7725 	req->func_id = cpu_to_le32(func_id);
7726 
7727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7728 	if (ret) {
7729 		dev_err(&hdev->pdev->dev,
7730 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7731 		return ret;
7732 	}
7733 
7734 	/* modify and write new config parameter */
7735 	hclge_cmd_reuse_desc(&desc, false);
7736 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7737 	req->param_mask = param_mask;
7738 
7739 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7740 	if (ret)
7741 		dev_err(&hdev->pdev->dev,
7742 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7743 	return ret;
7744 }
7745 
7746 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7747 				       int link_ret)
7748 {
7749 #define HCLGE_PHY_LINK_STATUS_NUM  200
7750 
7751 	struct phy_device *phydev = hdev->hw.mac.phydev;
7752 	int i = 0;
7753 	int ret;
7754 
7755 	do {
7756 		ret = phy_read_status(phydev);
7757 		if (ret) {
7758 			dev_err(&hdev->pdev->dev,
7759 				"phy update link status fail, ret = %d\n", ret);
7760 			return;
7761 		}
7762 
7763 		if (phydev->link == link_ret)
7764 			break;
7765 
7766 		msleep(HCLGE_LINK_STATUS_MS);
7767 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7768 }
7769 
7770 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7771 {
7772 #define HCLGE_MAC_LINK_STATUS_NUM  100
7773 
7774 	int link_status;
7775 	int i = 0;
7776 	int ret;
7777 
7778 	do {
7779 		ret = hclge_get_mac_link_status(hdev, &link_status);
7780 		if (ret)
7781 			return ret;
7782 		if (link_status == link_ret)
7783 			return 0;
7784 
7785 		msleep(HCLGE_LINK_STATUS_MS);
7786 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7787 	return -EBUSY;
7788 }
7789 
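/* wait for the phy (if requested) and the mac to report the expected link
 * state after a loopback or mac mode change
 */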
7790 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7791 					  bool is_phy)
7792 {
7793 	int link_ret;
7794 
7795 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7796 
7797 	if (is_phy)
7798 		hclge_phy_link_status_wait(hdev, link_ret);
7799 
7800 	return hclge_mac_link_status_wait(hdev, link_ret);
7801 }
7802 
7803 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7804 {
7805 	struct hclge_config_mac_mode_cmd *req;
7806 	struct hclge_desc desc;
7807 	u32 loop_en;
7808 	int ret;
7809 
7810 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7811 	/* 1 Read out the MAC mode config first */
7812 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7813 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7814 	if (ret) {
7815 		dev_err(&hdev->pdev->dev,
7816 			"mac loopback get fail, ret =%d.\n", ret);
7817 		return ret;
7818 	}
7819 
7820 	/* 2 Then set up the loopback flag */
7821 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7822 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7823 
7824 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7825 
7826 	/* 3 Config mac work mode with the loopback flag
7827 	 * and its original configuration parameters
7828 	 */
7829 	hclge_cmd_reuse_desc(&desc, false);
7830 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7831 	if (ret)
7832 		dev_err(&hdev->pdev->dev,
7833 			"mac loopback set fail, ret =%d.\n", ret);
7834 	return ret;
7835 }
7836 
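/* configure serdes or phy loopback through the firmware command and poll
 * the result until the firmware reports completion or the retry limit is
 * reached
 */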
7837 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7838 				     enum hnae3_loop loop_mode)
7839 {
7840 #define HCLGE_COMMON_LB_RETRY_MS	10
7841 #define HCLGE_COMMON_LB_RETRY_NUM	100
7842 
7843 	struct hclge_common_lb_cmd *req;
7844 	struct hclge_desc desc;
7845 	int ret, i = 0;
7846 	u8 loop_mode_b;
7847 
7848 	req = (struct hclge_common_lb_cmd *)desc.data;
7849 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7850 
7851 	switch (loop_mode) {
7852 	case HNAE3_LOOP_SERIAL_SERDES:
7853 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7854 		break;
7855 	case HNAE3_LOOP_PARALLEL_SERDES:
7856 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7857 		break;
7858 	case HNAE3_LOOP_PHY:
7859 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7860 		break;
7861 	default:
7862 		dev_err(&hdev->pdev->dev,
7863 			"unsupported common loopback mode %d\n", loop_mode);
7864 		return -ENOTSUPP;
7865 	}
7866 
7867 	if (en) {
7868 		req->enable = loop_mode_b;
7869 		req->mask = loop_mode_b;
7870 	} else {
7871 		req->mask = loop_mode_b;
7872 	}
7873 
7874 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7875 	if (ret) {
7876 		dev_err(&hdev->pdev->dev,
7877 			"common loopback set fail, ret = %d\n", ret);
7878 		return ret;
7879 	}
7880 
7881 	do {
7882 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7883 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7884 					   true);
7885 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7886 		if (ret) {
7887 			dev_err(&hdev->pdev->dev,
7888 				"common loopback get fail, ret = %d\n", ret);
7889 			return ret;
7890 		}
7891 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7892 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7893 
7894 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7895 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7896 		return -EBUSY;
7897 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7898 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7899 		return -EIO;
7900 	}
7901 	return ret;
7902 }
7903 
7904 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7905 				     enum hnae3_loop loop_mode)
7906 {
7907 	int ret;
7908 
7909 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7910 	if (ret)
7911 		return ret;
7912 
7913 	hclge_cfg_mac_mode(hdev, en);
7914 
7915 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7916 	if (ret)
7917 		dev_err(&hdev->pdev->dev,
7918 			"serdes loopback config mac mode timeout\n");
7919 
7920 	return ret;
7921 }
7922 
7923 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7924 				     struct phy_device *phydev)
7925 {
7926 	int ret;
7927 
7928 	if (!phydev->suspended) {
7929 		ret = phy_suspend(phydev);
7930 		if (ret)
7931 			return ret;
7932 	}
7933 
7934 	ret = phy_resume(phydev);
7935 	if (ret)
7936 		return ret;
7937 
7938 	return phy_loopback(phydev, true);
7939 }
7940 
7941 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7942 				      struct phy_device *phydev)
7943 {
7944 	int ret;
7945 
7946 	ret = phy_loopback(phydev, false);
7947 	if (ret)
7948 		return ret;
7949 
7950 	return phy_suspend(phydev);
7951 }
7952 
7953 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7954 {
7955 	struct phy_device *phydev = hdev->hw.mac.phydev;
7956 	int ret;
7957 
7958 	if (!phydev) {
7959 		if (hnae3_dev_phy_imp_supported(hdev))
7960 			return hclge_set_common_loopback(hdev, en,
7961 							 HNAE3_LOOP_PHY);
7962 		return -ENOTSUPP;
7963 	}
7964 
7965 	if (en)
7966 		ret = hclge_enable_phy_loopback(hdev, phydev);
7967 	else
7968 		ret = hclge_disable_phy_loopback(hdev, phydev);
7969 	if (ret) {
7970 		dev_err(&hdev->pdev->dev,
7971 			"set phy loopback fail, ret = %d\n", ret);
7972 		return ret;
7973 	}
7974 
7975 	hclge_cfg_mac_mode(hdev, en);
7976 
7977 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7978 	if (ret)
7979 		dev_err(&hdev->pdev->dev,
7980 			"phy loopback config mac mode timeout\n");
7981 
7982 	return ret;
7983 }
7984 
7985 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7986 				     u16 stream_id, bool enable)
7987 {
7988 	struct hclge_desc desc;
7989 	struct hclge_cfg_com_tqp_queue_cmd *req =
7990 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7991 
7992 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7993 	req->tqp_id = cpu_to_le16(tqp_id);
7994 	req->stream_id = cpu_to_le16(stream_id);
7995 	if (enable)
7996 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7997 
7998 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7999 }
8000 
8001 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8002 {
8003 	struct hclge_vport *vport = hclge_get_vport(handle);
8004 	struct hclge_dev *hdev = vport->back;
8005 	int ret;
8006 	u16 i;
8007 
8008 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
8009 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8010 		if (ret)
8011 			return ret;
8012 	}
8013 	return 0;
8014 }
8015 
8016 static int hclge_set_loopback(struct hnae3_handle *handle,
8017 			      enum hnae3_loop loop_mode, bool en)
8018 {
8019 	struct hclge_vport *vport = hclge_get_vport(handle);
8020 	struct hclge_dev *hdev = vport->back;
8021 	int ret;
8022 
8023 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8024 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8025 	 * the same, the packets are looped back in the SSU. If SSU loopback
8026 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8027 	 */
8028 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8029 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8030 
8031 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8032 						HCLGE_SWITCH_ALW_LPBK_MASK);
8033 		if (ret)
8034 			return ret;
8035 	}
8036 
8037 	switch (loop_mode) {
8038 	case HNAE3_LOOP_APP:
8039 		ret = hclge_set_app_loopback(hdev, en);
8040 		break;
8041 	case HNAE3_LOOP_SERIAL_SERDES:
8042 	case HNAE3_LOOP_PARALLEL_SERDES:
8043 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8044 		break;
8045 	case HNAE3_LOOP_PHY:
8046 		ret = hclge_set_phy_loopback(hdev, en);
8047 		break;
8048 	default:
8049 		ret = -ENOTSUPP;
8050 		dev_err(&hdev->pdev->dev,
8051 			"loop_mode %d is not supported\n", loop_mode);
8052 		break;
8053 	}
8054 
8055 	if (ret)
8056 		return ret;
8057 
8058 	ret = hclge_tqp_enable(handle, en);
8059 	if (ret)
8060 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8061 			en ? "enable" : "disable", ret);
8062 
8063 	return ret;
8064 }
8065 
8066 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8067 {
8068 	int ret;
8069 
8070 	ret = hclge_set_app_loopback(hdev, false);
8071 	if (ret)
8072 		return ret;
8073 
8074 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8075 	if (ret)
8076 		return ret;
8077 
8078 	return hclge_cfg_common_loopback(hdev, false,
8079 					 HNAE3_LOOP_PARALLEL_SERDES);
8080 }
8081 
8082 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8083 {
8084 	struct hclge_vport *vport = hclge_get_vport(handle);
8085 	struct hnae3_knic_private_info *kinfo;
8086 	struct hnae3_queue *queue;
8087 	struct hclge_tqp *tqp;
8088 	int i;
8089 
8090 	kinfo = &vport->nic.kinfo;
8091 	for (i = 0; i < kinfo->num_tqps; i++) {
8092 		queue = handle->kinfo.tqp[i];
8093 		tqp = container_of(queue, struct hclge_tqp, q);
8094 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8095 	}
8096 }
8097 
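/* wait (bounded) for a link status update that is already running in the
 * service task to finish before returning
 */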
8098 static void hclge_flush_link_update(struct hclge_dev *hdev)
8099 {
8100 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8101 
8102 	unsigned long last = hdev->serv_processed_cnt;
8103 	int i = 0;
8104 
8105 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8106 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8107 	       last == hdev->serv_processed_cnt)
8108 		usleep_range(1, 1);
8109 }
8110 
8111 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8112 {
8113 	struct hclge_vport *vport = hclge_get_vport(handle);
8114 	struct hclge_dev *hdev = vport->back;
8115 
8116 	if (enable) {
8117 		hclge_task_schedule(hdev, 0);
8118 	} else {
8119 		/* Set the DOWN flag here to disable link updating */
8120 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8121 
8122 		/* flush memory to make sure DOWN is seen by service task */
8123 		smp_mb__before_atomic();
8124 		hclge_flush_link_update(hdev);
8125 	}
8126 }
8127 
8128 static int hclge_ae_start(struct hnae3_handle *handle)
8129 {
8130 	struct hclge_vport *vport = hclge_get_vport(handle);
8131 	struct hclge_dev *hdev = vport->back;
8132 
8133 	/* mac enable */
8134 	hclge_cfg_mac_mode(hdev, true);
8135 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8136 	hdev->hw.mac.link = 0;
8137 
8138 	/* reset tqp stats */
8139 	hclge_reset_tqp_stats(handle);
8140 
8141 	hclge_mac_start_phy(hdev);
8142 
8143 	return 0;
8144 }
8145 
8146 static void hclge_ae_stop(struct hnae3_handle *handle)
8147 {
8148 	struct hclge_vport *vport = hclge_get_vport(handle);
8149 	struct hclge_dev *hdev = vport->back;
8150 
8151 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8152 	spin_lock_bh(&hdev->fd_rule_lock);
8153 	hclge_clear_arfs_rules(hdev);
8154 	spin_unlock_bh(&hdev->fd_rule_lock);
8155 
8156 	/* If it is not PF reset or FLR, the firmware will disable the MAC,
8157 	 * so it only needs to stop the phy here.
8158 	 */
8159 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8160 	    hdev->reset_type != HNAE3_FUNC_RESET &&
8161 	    hdev->reset_type != HNAE3_FLR_RESET) {
8162 		hclge_mac_stop_phy(hdev);
8163 		hclge_update_link_status(hdev);
8164 		return;
8165 	}
8166 
8167 	hclge_reset_tqp(handle);
8168 
8169 	hclge_config_mac_tnl_int(hdev, false);
8170 
8171 	/* Mac disable */
8172 	hclge_cfg_mac_mode(hdev, false);
8173 
8174 	hclge_mac_stop_phy(hdev);
8175 
8176 	/* reset tqp stats */
8177 	hclge_reset_tqp_stats(handle);
8178 	hclge_update_link_status(hdev);
8179 }
8180 
8181 int hclge_vport_start(struct hclge_vport *vport)
8182 {
8183 	struct hclge_dev *hdev = vport->back;
8184 
8185 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8186 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8187 	vport->last_active_jiffies = jiffies;
8188 
8189 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8190 		if (vport->vport_id) {
8191 			hclge_restore_mac_table_common(vport);
8192 			hclge_restore_vport_vlan_table(vport);
8193 		} else {
8194 			hclge_restore_hw_table(hdev);
8195 		}
8196 	}
8197 
8198 	clear_bit(vport->vport_id, hdev->vport_config_block);
8199 
8200 	return 0;
8201 }
8202 
8203 void hclge_vport_stop(struct hclge_vport *vport)
8204 {
8205 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8206 }
8207 
8208 static int hclge_client_start(struct hnae3_handle *handle)
8209 {
8210 	struct hclge_vport *vport = hclge_get_vport(handle);
8211 
8212 	return hclge_vport_start(vport);
8213 }
8214 
8215 static void hclge_client_stop(struct hnae3_handle *handle)
8216 {
8217 	struct hclge_vport *vport = hclge_get_vport(handle);
8218 
8219 	hclge_vport_stop(vport);
8220 }
8221 
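/* translate the mac vlan table command response code into an errno based
 * on the operation type (add, remove or lookup)
 */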
8222 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8223 					 u16 cmdq_resp, u8  resp_code,
8224 					 enum hclge_mac_vlan_tbl_opcode op)
8225 {
8226 	struct hclge_dev *hdev = vport->back;
8227 
8228 	if (cmdq_resp) {
8229 		dev_err(&hdev->pdev->dev,
8230 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8231 			cmdq_resp);
8232 		return -EIO;
8233 	}
8234 
8235 	if (op == HCLGE_MAC_VLAN_ADD) {
8236 		if (!resp_code || resp_code == 1)
8237 			return 0;
8238 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8239 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8240 			return -ENOSPC;
8241 
8242 		dev_err(&hdev->pdev->dev,
8243 			"add mac addr failed for undefined, code=%u.\n",
8244 			resp_code);
8245 		return -EIO;
8246 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8247 		if (!resp_code) {
8248 			return 0;
8249 		} else if (resp_code == 1) {
8250 			dev_dbg(&hdev->pdev->dev,
8251 				"remove mac addr failed for miss.\n");
8252 			return -ENOENT;
8253 		}
8254 
8255 		dev_err(&hdev->pdev->dev,
8256 			"remove mac addr failed for undefined, code=%u.\n",
8257 			resp_code);
8258 		return -EIO;
8259 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8260 		if (!resp_code) {
8261 			return 0;
8262 		} else if (resp_code == 1) {
8263 			dev_dbg(&hdev->pdev->dev,
8264 				"lookup mac addr failed for miss.\n");
8265 			return -ENOENT;
8266 		}
8267 
8268 		dev_err(&hdev->pdev->dev,
8269 			"lookup mac addr failed for undefined, code=%u.\n",
8270 			resp_code);
8271 		return -EIO;
8272 	}
8273 
8274 	dev_err(&hdev->pdev->dev,
8275 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8276 
8277 	return -EINVAL;
8278 }
8279 
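/* set or clear the given function id in the multicast entry's function
 * bitmap, which is spread across desc[1] (first 192 ids) and desc[2]
 */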
8280 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8281 {
8282 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8283 
8284 	unsigned int word_num;
8285 	unsigned int bit_num;
8286 
8287 	if (vfid > 255 || vfid < 0)
8288 		return -EIO;
8289 
8290 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8291 		word_num = vfid / 32;
8292 		bit_num  = vfid % 32;
8293 		if (clr)
8294 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8295 		else
8296 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8297 	} else {
8298 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8299 		bit_num  = vfid % 32;
8300 		if (clr)
8301 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8302 		else
8303 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8304 	}
8305 
8306 	return 0;
8307 }
8308 
8309 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8310 {
8311 #define HCLGE_DESC_NUMBER 3
8312 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8313 	int i, j;
8314 
8315 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8316 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8317 			if (desc[i].data[j])
8318 				return false;
8319 
8320 	return true;
8321 }
8322 
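/* pack the 6-byte mac address into the mac vlan table entry (bytes 0-3 in
 * the 32-bit high field, bytes 4-5 in the 16-bit low field) and set the
 * entry/multicast flag bits as needed
 */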
8323 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8324 				   const u8 *addr, bool is_mc)
8325 {
8326 	const unsigned char *mac_addr = addr;
8327 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8328 		       (mac_addr[0]) | (mac_addr[1] << 8);
8329 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8330 
8331 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8332 	if (is_mc) {
8333 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8334 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8335 	}
8336 
8337 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8338 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8339 }
8340 
8341 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8342 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8343 {
8344 	struct hclge_dev *hdev = vport->back;
8345 	struct hclge_desc desc;
8346 	u8 resp_code;
8347 	u16 retval;
8348 	int ret;
8349 
8350 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8351 
8352 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8353 
8354 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8355 	if (ret) {
8356 		dev_err(&hdev->pdev->dev,
8357 			"del mac addr failed for cmd_send, ret =%d.\n",
8358 			ret);
8359 		return ret;
8360 	}
8361 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8362 	retval = le16_to_cpu(desc.retval);
8363 
8364 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8365 					     HCLGE_MAC_VLAN_REMOVE);
8366 }
8367 
8368 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8369 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8370 				     struct hclge_desc *desc,
8371 				     bool is_mc)
8372 {
8373 	struct hclge_dev *hdev = vport->back;
8374 	u8 resp_code;
8375 	u16 retval;
8376 	int ret;
8377 
8378 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8379 	if (is_mc) {
8380 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8381 		memcpy(desc[0].data,
8382 		       req,
8383 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8384 		hclge_cmd_setup_basic_desc(&desc[1],
8385 					   HCLGE_OPC_MAC_VLAN_ADD,
8386 					   true);
8387 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8388 		hclge_cmd_setup_basic_desc(&desc[2],
8389 					   HCLGE_OPC_MAC_VLAN_ADD,
8390 					   true);
8391 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8392 	} else {
8393 		memcpy(desc[0].data,
8394 		       req,
8395 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8396 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8397 	}
8398 	if (ret) {
8399 		dev_err(&hdev->pdev->dev,
8400 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8401 			ret);
8402 		return ret;
8403 	}
8404 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8405 	retval = le16_to_cpu(desc[0].retval);
8406 
8407 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8408 					     HCLGE_MAC_VLAN_LKUP);
8409 }
8410 
8411 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8412 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8413 				  struct hclge_desc *mc_desc)
8414 {
8415 	struct hclge_dev *hdev = vport->back;
8416 	int cfg_status;
8417 	u8 resp_code;
8418 	u16 retval;
8419 	int ret;
8420 
8421 	if (!mc_desc) {
8422 		struct hclge_desc desc;
8423 
8424 		hclge_cmd_setup_basic_desc(&desc,
8425 					   HCLGE_OPC_MAC_VLAN_ADD,
8426 					   false);
8427 		memcpy(desc.data, req,
8428 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8429 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8430 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8431 		retval = le16_to_cpu(desc.retval);
8432 
8433 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8434 							   resp_code,
8435 							   HCLGE_MAC_VLAN_ADD);
8436 	} else {
8437 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8438 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8439 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8440 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8441 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8442 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8443 		memcpy(mc_desc[0].data, req,
8444 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8445 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8446 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8447 		retval = le16_to_cpu(mc_desc[0].retval);
8448 
8449 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8450 							   resp_code,
8451 							   HCLGE_MAC_VLAN_ADD);
8452 	}
8453 
8454 	if (ret) {
8455 		dev_err(&hdev->pdev->dev,
8456 			"add mac addr failed for cmd_send, ret =%d.\n",
8457 			ret);
8458 		return ret;
8459 	}
8460 
8461 	return cfg_status;
8462 }
8463 
8464 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8465 			       u16 *allocated_size)
8466 {
8467 	struct hclge_umv_spc_alc_cmd *req;
8468 	struct hclge_desc desc;
8469 	int ret;
8470 
8471 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8472 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8473 
8474 	req->space_size = cpu_to_le32(space_size);
8475 
8476 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8477 	if (ret) {
8478 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8479 			ret);
8480 		return ret;
8481 	}
8482 
8483 	*allocated_size = le32_to_cpu(desc.data[1]);
8484 
8485 	return 0;
8486 }
8487 
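/* request the unicast mac vlan (UMV) space from firmware and split it into
 * a private quota per vport plus a shared pool made up of the remainder
 */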
8488 static int hclge_init_umv_space(struct hclge_dev *hdev)
8489 {
8490 	u16 allocated_size = 0;
8491 	int ret;
8492 
8493 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8494 	if (ret)
8495 		return ret;
8496 
8497 	if (allocated_size < hdev->wanted_umv_size)
8498 		dev_warn(&hdev->pdev->dev,
8499 			 "failed to alloc umv space, want %u, get %u\n",
8500 			 hdev->wanted_umv_size, allocated_size);
8501 
8502 	hdev->max_umv_size = allocated_size;
8503 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8504 	hdev->share_umv_size = hdev->priv_umv_size +
8505 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8506 
8507 	if (hdev->ae_dev->dev_specs.mc_mac_size)
8508 		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8509 
8510 	return 0;
8511 }
8512 
8513 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8514 {
8515 	struct hclge_vport *vport;
8516 	int i;
8517 
8518 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8519 		vport = &hdev->vport[i];
8520 		vport->used_umv_num = 0;
8521 	}
8522 
8523 	mutex_lock(&hdev->vport_lock);
8524 	hdev->share_umv_size = hdev->priv_umv_size +
8525 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8526 	mutex_unlock(&hdev->vport_lock);
8527 
8528 	hdev->used_mc_mac_num = 0;
8529 }
8530 
8531 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8532 {
8533 	struct hclge_dev *hdev = vport->back;
8534 	bool is_full;
8535 
8536 	if (need_lock)
8537 		mutex_lock(&hdev->vport_lock);
8538 
8539 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8540 		   hdev->share_umv_size == 0);
8541 
8542 	if (need_lock)
8543 		mutex_unlock(&hdev->vport_lock);
8544 
8545 	return is_full;
8546 }
8547 
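/* account a unicast mac entry against the vport's private UMV quota first,
 * falling back to the shared pool once the private quota is used up;
 * freeing an entry reverses the accounting
 */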
8548 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8549 {
8550 	struct hclge_dev *hdev = vport->back;
8551 
8552 	if (is_free) {
8553 		if (vport->used_umv_num > hdev->priv_umv_size)
8554 			hdev->share_umv_size++;
8555 
8556 		if (vport->used_umv_num > 0)
8557 			vport->used_umv_num--;
8558 	} else {
8559 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8560 		    hdev->share_umv_size > 0)
8561 			hdev->share_umv_size--;
8562 		vport->used_umv_num++;
8563 	}
8564 }
8565 
8566 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8567 						  const u8 *mac_addr)
8568 {
8569 	struct hclge_mac_node *mac_node, *tmp;
8570 
8571 	list_for_each_entry_safe(mac_node, tmp, list, node)
8572 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8573 			return mac_node;
8574 
8575 	return NULL;
8576 }
8577 
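/* merge the requested state into an existing mac node according to the
 * mac address state machine (TO_ADD / TO_DEL / ACTIVE)
 */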
8578 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8579 				  enum HCLGE_MAC_NODE_STATE state)
8580 {
8581 	switch (state) {
8582 	/* from set_rx_mode or tmp_add_list */
8583 	case HCLGE_MAC_TO_ADD:
8584 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8585 			mac_node->state = HCLGE_MAC_ACTIVE;
8586 		break;
8587 	/* only from set_rx_mode */
8588 	case HCLGE_MAC_TO_DEL:
8589 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8590 			list_del(&mac_node->node);
8591 			kfree(mac_node);
8592 		} else {
8593 			mac_node->state = HCLGE_MAC_TO_DEL;
8594 		}
8595 		break;
8596 	/* only from tmp_add_list, the mac_node->state won't be
8597 	 * ACTIVE.
8598 	 */
8599 	case HCLGE_MAC_ACTIVE:
8600 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8601 			mac_node->state = HCLGE_MAC_ACTIVE;
8602 
8603 		break;
8604 	}
8605 }
8606 
8607 int hclge_update_mac_list(struct hclge_vport *vport,
8608 			  enum HCLGE_MAC_NODE_STATE state,
8609 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8610 			  const unsigned char *addr)
8611 {
8612 	struct hclge_dev *hdev = vport->back;
8613 	struct hclge_mac_node *mac_node;
8614 	struct list_head *list;
8615 
8616 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8617 		&vport->uc_mac_list : &vport->mc_mac_list;
8618 
8619 	spin_lock_bh(&vport->mac_list_lock);
8620 
8621 	/* if the mac addr is already in the mac list, there is no need to add
8622 	 * a new node; just check the existing node's state and convert it to
8623 	 * a new state, remove it, or do nothing.
8624 	 */
8625 	mac_node = hclge_find_mac_node(list, addr);
8626 	if (mac_node) {
8627 		hclge_update_mac_node(mac_node, state);
8628 		spin_unlock_bh(&vport->mac_list_lock);
8629 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8630 		return 0;
8631 	}
8632 
8633 	/* if this address was never added, there is nothing to delete */
8634 	if (state == HCLGE_MAC_TO_DEL) {
8635 		spin_unlock_bh(&vport->mac_list_lock);
8636 		dev_err(&hdev->pdev->dev,
8637 			"failed to delete address %pM from mac list\n",
8638 			addr);
8639 		return -ENOENT;
8640 	}
8641 
8642 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8643 	if (!mac_node) {
8644 		spin_unlock_bh(&vport->mac_list_lock);
8645 		return -ENOMEM;
8646 	}
8647 
8648 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8649 
8650 	mac_node->state = state;
8651 	ether_addr_copy(mac_node->mac_addr, addr);
8652 	list_add_tail(&mac_node->node, list);
8653 
8654 	spin_unlock_bh(&vport->mac_list_lock);
8655 
8656 	return 0;
8657 }
8658 
8659 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8660 			     const unsigned char *addr)
8661 {
8662 	struct hclge_vport *vport = hclge_get_vport(handle);
8663 
8664 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8665 				     addr);
8666 }
8667 
8668 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8669 			     const unsigned char *addr)
8670 {
8671 	struct hclge_dev *hdev = vport->back;
8672 	struct hclge_mac_vlan_tbl_entry_cmd req;
8673 	struct hclge_desc desc;
8674 	u16 egress_port = 0;
8675 	int ret;
8676 
8677 	/* mac addr check */
8678 	if (is_zero_ether_addr(addr) ||
8679 	    is_broadcast_ether_addr(addr) ||
8680 	    is_multicast_ether_addr(addr)) {
8681 		dev_err(&hdev->pdev->dev,
8682 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8683 			 addr, is_zero_ether_addr(addr),
8684 			 is_broadcast_ether_addr(addr),
8685 			 is_multicast_ether_addr(addr));
8686 		return -EINVAL;
8687 	}
8688 
8689 	memset(&req, 0, sizeof(req));
8690 
8691 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8692 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8693 
8694 	req.egress_port = cpu_to_le16(egress_port);
8695 
8696 	hclge_prepare_mac_addr(&req, addr, false);
8697 
8698 	/* Look up the mac address in the mac_vlan table, and add
8699 	 * it if the entry does not exist. Duplicate unicast entries
8700 	 * are not allowed in the mac vlan table.
8701 	 */
8702 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8703 	if (ret == -ENOENT) {
8704 		mutex_lock(&hdev->vport_lock);
8705 		if (!hclge_is_umv_space_full(vport, false)) {
8706 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8707 			if (!ret)
8708 				hclge_update_umv_space(vport, false);
8709 			mutex_unlock(&hdev->vport_lock);
8710 			return ret;
8711 		}
8712 		mutex_unlock(&hdev->vport_lock);
8713 
8714 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8715 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8716 				hdev->priv_umv_size);
8717 
8718 		return -ENOSPC;
8719 	}
8720 
8721 	/* check if we just hit a duplicate entry */
8722 	if (!ret)
8723 		return -EEXIST;
8724 
8725 	return ret;
8726 }
8727 
8728 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8729 			    const unsigned char *addr)
8730 {
8731 	struct hclge_vport *vport = hclge_get_vport(handle);
8732 
8733 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8734 				     addr);
8735 }
8736 
8737 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8738 			    const unsigned char *addr)
8739 {
8740 	struct hclge_dev *hdev = vport->back;
8741 	struct hclge_mac_vlan_tbl_entry_cmd req;
8742 	int ret;
8743 
8744 	/* mac addr check */
8745 	if (is_zero_ether_addr(addr) ||
8746 	    is_broadcast_ether_addr(addr) ||
8747 	    is_multicast_ether_addr(addr)) {
8748 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8749 			addr);
8750 		return -EINVAL;
8751 	}
8752 
8753 	memset(&req, 0, sizeof(req));
8754 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8755 	hclge_prepare_mac_addr(&req, addr, false);
8756 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8757 	if (!ret) {
8758 		mutex_lock(&hdev->vport_lock);
8759 		hclge_update_umv_space(vport, true);
8760 		mutex_unlock(&hdev->vport_lock);
8761 	} else if (ret == -ENOENT) {
8762 		ret = 0;
8763 	}
8764 
8765 	return ret;
8766 }
8767 
8768 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8769 			     const unsigned char *addr)
8770 {
8771 	struct hclge_vport *vport = hclge_get_vport(handle);
8772 
8773 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8774 				     addr);
8775 }
8776 
8777 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8778 			     const unsigned char *addr)
8779 {
8780 	struct hclge_dev *hdev = vport->back;
8781 	struct hclge_mac_vlan_tbl_entry_cmd req;
8782 	struct hclge_desc desc[3];
8783 	bool is_new_addr = false;
8784 	int status;
8785 
8786 	/* mac addr check */
8787 	if (!is_multicast_ether_addr(addr)) {
8788 		dev_err(&hdev->pdev->dev,
8789 			"Add mc mac err! invalid mac:%pM.\n",
8790 			 addr);
8791 		return -EINVAL;
8792 	}
8793 	memset(&req, 0, sizeof(req));
8794 	hclge_prepare_mac_addr(&req, addr, true);
8795 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8796 	if (status) {
8797 		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8798 		    hdev->used_mc_mac_num >=
8799 		    hdev->ae_dev->dev_specs.mc_mac_size)
8800 			goto err_no_space;
8801 
8802 		is_new_addr = true;
8803 
8804 		/* This mac addr does not exist, add a new entry for it */
8805 		memset(desc[0].data, 0, sizeof(desc[0].data));
8806 		memset(desc[1].data, 0, sizeof(desc[1].data));
8807 		memset(desc[2].data, 0, sizeof(desc[2].data));
8808 	}
8809 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8810 	if (status)
8811 		return status;
8812 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8813 	if (status == -ENOSPC)
8814 		goto err_no_space;
8815 	else if (!status && is_new_addr)
8816 		hdev->used_mc_mac_num++;
8817 
8818 	return status;
8819 
8820 err_no_space:
8821 	/* if the table has already overflowed, do not print the message each time */
8822 	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8823 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8824 	return -ENOSPC;
8825 }
8826 
8827 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8828 			    const unsigned char *addr)
8829 {
8830 	struct hclge_vport *vport = hclge_get_vport(handle);
8831 
8832 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8833 				     addr);
8834 }
8835 
8836 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8837 			    const unsigned char *addr)
8838 {
8839 	struct hclge_dev *hdev = vport->back;
8840 	struct hclge_mac_vlan_tbl_entry_cmd req;
8841 	enum hclge_cmd_status status;
8842 	struct hclge_desc desc[3];
8843 
8844 	/* mac addr check */
8845 	if (!is_multicast_ether_addr(addr)) {
8846 		dev_dbg(&hdev->pdev->dev,
8847 			"Remove mc mac err! invalid mac:%pM.\n",
8848 			 addr);
8849 		return -EINVAL;
8850 	}
8851 
8852 	memset(&req, 0, sizeof(req));
8853 	hclge_prepare_mac_addr(&req, addr, true);
8854 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8855 	if (!status) {
8856 		/* This mac addr exists, remove this handle's VFID from the entry */
8857 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8858 		if (status)
8859 			return status;
8860 
8861 		if (hclge_is_all_function_id_zero(desc)) {
8862 			/* All the vfids are zero, so delete this entry */
8863 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8864 			if (!status)
8865 				hdev->used_mc_mac_num--;
8866 		} else {
8867 			/* Not all the vfids are zero, so just update the vfid bitmap */
8868 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8869 		}
8870 	} else if (status == -ENOENT) {
8871 		status = 0;
8872 	}
8873 
8874 	return status;
8875 }
8876 
8877 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8878 				      struct list_head *list,
8879 				      int (*sync)(struct hclge_vport *,
8880 						  const unsigned char *))
8881 {
8882 	struct hclge_mac_node *mac_node, *tmp;
8883 	int ret;
8884 
8885 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8886 		ret = sync(vport, mac_node->mac_addr);
8887 		if (!ret) {
8888 			mac_node->state = HCLGE_MAC_ACTIVE;
8889 		} else {
8890 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8891 				&vport->state);
8892 
8893 			/* If one unicast mac address already exists in
8894 			 * hardware, keep trying: other unicast mac addresses
8895 			 * may be new ones that can still be added.
8896 			 */
8897 			if (ret != -EEXIST)
8898 				break;
8899 		}
8900 	}
8901 }
8902 
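/* Remove the addresses on the given list from hardware via the unsync
 * callback, freeing nodes that were removed (or already absent), and stop
 * at the first real failure so the remaining nodes can be retried later.
 */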
8903 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8904 					struct list_head *list,
8905 					int (*unsync)(struct hclge_vport *,
8906 						      const unsigned char *))
8907 {
8908 	struct hclge_mac_node *mac_node, *tmp;
8909 	int ret;
8910 
8911 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8912 		ret = unsync(vport, mac_node->mac_addr);
8913 		if (!ret || ret == -ENOENT) {
8914 			list_del(&mac_node->node);
8915 			kfree(mac_node);
8916 		} else {
8917 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8918 				&vport->state);
8919 			break;
8920 		}
8921 	}
8922 }
8923 
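/* Merge the temporary add list back into the vport mac list after a
 * hardware sync. A node still present in mac_list takes over the synced
 * state; a node that has disappeared from mac_list was requested for
 * deletion in the meantime, so an ACTIVE node is re-queued as TO_DEL and a
 * TO_ADD node is simply freed. Returns true only if every address was
 * successfully added to hardware.
 */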
8924 static bool hclge_sync_from_add_list(struct list_head *add_list,
8925 				     struct list_head *mac_list)
8926 {
8927 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8928 	bool all_added = true;
8929 
8930 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8931 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8932 			all_added = false;
8933 
8934 		/* If the mac address from tmp_add_list is not in the
8935 		 * uc/mc_mac_list, a TO_DEL request was received during the
8936 		 * time window of adding the mac address into the mac table.
8937 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8938 		 * will be removed next time. Otherwise it must be TO_ADD,
8939 		 * meaning this address has not been added into the mac table
8940 		 * yet, so just remove the mac node.
8941 		 */
8942 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8943 		if (new_node) {
8944 			hclge_update_mac_node(new_node, mac_node->state);
8945 			list_del(&mac_node->node);
8946 			kfree(mac_node);
8947 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8948 			mac_node->state = HCLGE_MAC_TO_DEL;
8949 			list_move_tail(&mac_node->node, mac_list);
8950 		} else {
8951 			list_del(&mac_node->node);
8952 			kfree(mac_node);
8953 		}
8954 	}
8955 
8956 	return all_added;
8957 }
8958 
8959 static void hclge_sync_from_del_list(struct list_head *del_list,
8960 				     struct list_head *mac_list)
8961 {
8962 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8963 
8964 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8965 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8966 		if (new_node) {
8967 			/* If the mac addr exists in the mac list, a new TO_ADD
8968 			 * request was received during the time window of
8969 			 * configuring the mac address. Since the mac node
8970 			 * state is TO_ADD and the address is already in the
8971 			 * hardware (because the delete failed), we just need
8972 			 * to change the mac node state to ACTIVE.
8973 			 */
8974 			new_node->state = HCLGE_MAC_ACTIVE;
8975 			list_del(&mac_node->node);
8976 			kfree(mac_node);
8977 		} else {
8978 			list_move_tail(&mac_node->node, mac_list);
8979 		}
8980 	}
8981 }
8982 
8983 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8984 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8985 					bool is_all_added)
8986 {
8987 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8988 		if (is_all_added)
8989 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8990 		else
8991 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8992 	} else {
8993 		if (is_all_added)
8994 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8995 		else
8996 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8997 	}
8998 }
8999 
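/* Sync the software mac list of one address type to hardware: TO_DEL nodes
 * are moved and TO_ADD nodes are copied to temporary lists under
 * mac_list_lock, the hardware table is updated outside the lock, and the
 * results are then merged back into the vport mac list.
 */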
9000 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9001 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
9002 {
9003 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9004 	struct list_head tmp_add_list, tmp_del_list;
9005 	struct list_head *list;
9006 	bool all_added;
9007 
9008 	INIT_LIST_HEAD(&tmp_add_list);
9009 	INIT_LIST_HEAD(&tmp_del_list);
9010 
9011 	/* move the mac addresses to tmp_add_list and tmp_del_list, so
9012 	 * we can add/delete them outside the spin lock
9013 	 */
9014 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9015 		&vport->uc_mac_list : &vport->mc_mac_list;
9016 
9017 	spin_lock_bh(&vport->mac_list_lock);
9018 
9019 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9020 		switch (mac_node->state) {
9021 		case HCLGE_MAC_TO_DEL:
9022 			list_move_tail(&mac_node->node, &tmp_del_list);
9023 			break;
9024 		case HCLGE_MAC_TO_ADD:
9025 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9026 			if (!new_node)
9027 				goto stop_traverse;
9028 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9029 			new_node->state = mac_node->state;
9030 			list_add_tail(&new_node->node, &tmp_add_list);
9031 			break;
9032 		default:
9033 			break;
9034 		}
9035 	}
9036 
9037 stop_traverse:
9038 	spin_unlock_bh(&vport->mac_list_lock);
9039 
9040 	/* delete first, in order to get max mac table space for adding */
9041 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9042 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9043 					    hclge_rm_uc_addr_common);
9044 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9045 					  hclge_add_uc_addr_common);
9046 	} else {
9047 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9048 					    hclge_rm_mc_addr_common);
9049 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9050 					  hclge_add_mc_addr_common);
9051 	}
9052 
9053 	/* if adding/deleting some mac addresses failed, move them back to
9054 	 * the mac_list and retry next time.
9055 	 */
9056 	spin_lock_bh(&vport->mac_list_lock);
9057 
9058 	hclge_sync_from_del_list(&tmp_del_list, list);
9059 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9060 
9061 	spin_unlock_bh(&vport->mac_list_lock);
9062 
9063 	hclge_update_overflow_flags(vport, mac_type, all_added);
9064 }
9065 
9066 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9067 {
9068 	struct hclge_dev *hdev = vport->back;
9069 
9070 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9071 		return false;
9072 
9073 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9074 		return true;
9075 
9076 	return false;
9077 }
9078 
9079 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9080 {
9081 	int i;
9082 
9083 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9084 		struct hclge_vport *vport = &hdev->vport[i];
9085 
9086 		if (!hclge_need_sync_mac_table(vport))
9087 			continue;
9088 
9089 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9090 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9091 	}
9092 }
9093 
9094 static void hclge_build_del_list(struct list_head *list,
9095 				 bool is_del_list,
9096 				 struct list_head *tmp_del_list)
9097 {
9098 	struct hclge_mac_node *mac_cfg, *tmp;
9099 
9100 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9101 		switch (mac_cfg->state) {
9102 		case HCLGE_MAC_TO_DEL:
9103 		case HCLGE_MAC_ACTIVE:
9104 			list_move_tail(&mac_cfg->node, tmp_del_list);
9105 			break;
9106 		case HCLGE_MAC_TO_ADD:
9107 			if (is_del_list) {
9108 				list_del(&mac_cfg->node);
9109 				kfree(mac_cfg);
9110 			}
9111 			break;
9112 		}
9113 	}
9114 }
9115 
9116 static void hclge_unsync_del_list(struct hclge_vport *vport,
9117 				  int (*unsync)(struct hclge_vport *vport,
9118 						const unsigned char *addr),
9119 				  bool is_del_list,
9120 				  struct list_head *tmp_del_list)
9121 {
9122 	struct hclge_mac_node *mac_cfg, *tmp;
9123 	int ret;
9124 
9125 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9126 		ret = unsync(vport, mac_cfg->mac_addr);
9127 		if (!ret || ret == -ENOENT) {
9128 			/* clear all mac addresses from hardware, but keep them
9129 			 * in the mac list so they can be restored after the
9130 			 * vf reset has finished.
9131 			 */
9132 			if (!is_del_list &&
9133 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9134 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9135 			} else {
9136 				list_del(&mac_cfg->node);
9137 				kfree(mac_cfg);
9138 			}
9139 		} else if (is_del_list) {
9140 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9141 		}
9142 	}
9143 }
9144 
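/* Remove all unicast or multicast mac addresses of a vport from the
 * hardware table. When is_del_list is false, entries are kept in the
 * software list in TO_ADD state so they can be restored after a VF reset;
 * when it is true, the software entries are removed as well.
 */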
9145 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9146 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9147 {
9148 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9149 	struct hclge_dev *hdev = vport->back;
9150 	struct list_head tmp_del_list, *list;
9151 
9152 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9153 		list = &vport->uc_mac_list;
9154 		unsync = hclge_rm_uc_addr_common;
9155 	} else {
9156 		list = &vport->mc_mac_list;
9157 		unsync = hclge_rm_mc_addr_common;
9158 	}
9159 
9160 	INIT_LIST_HEAD(&tmp_del_list);
9161 
9162 	if (!is_del_list)
9163 		set_bit(vport->vport_id, hdev->vport_config_block);
9164 
9165 	spin_lock_bh(&vport->mac_list_lock);
9166 
9167 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9168 
9169 	spin_unlock_bh(&vport->mac_list_lock);
9170 
9171 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9172 
9173 	spin_lock_bh(&vport->mac_list_lock);
9174 
9175 	hclge_sync_from_del_list(&tmp_del_list, list);
9176 
9177 	spin_unlock_bh(&vport->mac_list_lock);
9178 }
9179 
9180 /* remove all mac addresses when uninitializing */
9181 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9182 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9183 {
9184 	struct hclge_mac_node *mac_node, *tmp;
9185 	struct hclge_dev *hdev = vport->back;
9186 	struct list_head tmp_del_list, *list;
9187 
9188 	INIT_LIST_HEAD(&tmp_del_list);
9189 
9190 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9191 		&vport->uc_mac_list : &vport->mc_mac_list;
9192 
9193 	spin_lock_bh(&vport->mac_list_lock);
9194 
9195 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9196 		switch (mac_node->state) {
9197 		case HCLGE_MAC_TO_DEL:
9198 		case HCLGE_MAC_ACTIVE:
9199 			list_move_tail(&mac_node->node, &tmp_del_list);
9200 			break;
9201 		case HCLGE_MAC_TO_ADD:
9202 			list_del(&mac_node->node);
9203 			kfree(mac_node);
9204 			break;
9205 		}
9206 	}
9207 
9208 	spin_unlock_bh(&vport->mac_list_lock);
9209 
9210 	if (mac_type == HCLGE_MAC_ADDR_UC)
9211 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9212 					    hclge_rm_uc_addr_common);
9213 	else
9214 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9215 					    hclge_rm_mc_addr_common);
9216 
9217 	if (!list_empty(&tmp_del_list))
9218 		dev_warn(&hdev->pdev->dev,
9219 			 "failed to completely uninit %s mac list for vport %u\n",
9220 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9221 			 vport->vport_id);
9222 
9223 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9224 		list_del(&mac_node->node);
9225 		kfree(mac_node);
9226 	}
9227 }
9228 
9229 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9230 {
9231 	struct hclge_vport *vport;
9232 	int i;
9233 
9234 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9235 		vport = &hdev->vport[i];
9236 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9237 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9238 	}
9239 }
9240 
9241 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9242 					      u16 cmdq_resp, u8 resp_code)
9243 {
9244 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9245 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9246 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9247 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9248 
9249 	int return_status;
9250 
9251 	if (cmdq_resp) {
9252 		dev_err(&hdev->pdev->dev,
9253 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9254 			cmdq_resp);
9255 		return -EIO;
9256 	}
9257 
9258 	switch (resp_code) {
9259 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9260 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9261 		return_status = 0;
9262 		break;
9263 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9264 		dev_err(&hdev->pdev->dev,
9265 			"add mac ethertype failed for manager table overflow.\n");
9266 		return_status = -EIO;
9267 		break;
9268 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9269 		dev_err(&hdev->pdev->dev,
9270 			"add mac ethertype failed for key conflict.\n");
9271 		return_status = -EIO;
9272 		break;
9273 	default:
9274 		dev_err(&hdev->pdev->dev,
9275 			"add mac ethertype failed for undefined, code=%u.\n",
9276 			resp_code);
9277 		return_status = -EIO;
9278 	}
9279 
9280 	return return_status;
9281 }
9282 
9283 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9284 				     u8 *mac_addr)
9285 {
9286 	struct hclge_mac_vlan_tbl_entry_cmd req;
9287 	struct hclge_dev *hdev = vport->back;
9288 	struct hclge_desc desc;
9289 	u16 egress_port = 0;
9290 	int i;
9291 
9292 	if (is_zero_ether_addr(mac_addr))
9293 		return false;
9294 
9295 	memset(&req, 0, sizeof(req));
9296 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9297 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9298 	req.egress_port = cpu_to_le16(egress_port);
9299 	hclge_prepare_mac_addr(&req, mac_addr, false);
9300 
9301 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9302 		return true;
9303 
9304 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9305 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9306 		if (i != vf_idx &&
9307 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9308 			return true;
9309 
9310 	return false;
9311 }
9312 
9313 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9314 			    u8 *mac_addr)
9315 {
9316 	struct hclge_vport *vport = hclge_get_vport(handle);
9317 	struct hclge_dev *hdev = vport->back;
9318 
9319 	vport = hclge_get_vf_vport(hdev, vf);
9320 	if (!vport)
9321 		return -EINVAL;
9322 
9323 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9324 		dev_info(&hdev->pdev->dev,
9325 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9326 			 mac_addr);
9327 		return 0;
9328 	}
9329 
9330 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9331 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9332 			mac_addr);
9333 		return -EEXIST;
9334 	}
9335 
9336 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9337 
9338 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9339 		dev_info(&hdev->pdev->dev,
9340 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9341 			 vf, mac_addr);
9342 		return hclge_inform_reset_assert_to_vf(vport);
9343 	}
9344 
9345 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9346 		 vf, mac_addr);
9347 	return 0;
9348 }
9349 
9350 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9351 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9352 {
9353 	struct hclge_desc desc;
9354 	u8 resp_code;
9355 	u16 retval;
9356 	int ret;
9357 
9358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9359 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9360 
9361 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9362 	if (ret) {
9363 		dev_err(&hdev->pdev->dev,
9364 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9365 			ret);
9366 		return ret;
9367 	}
9368 
9369 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9370 	retval = le16_to_cpu(desc.retval);
9371 
9372 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9373 }
9374 
9375 static int init_mgr_tbl(struct hclge_dev *hdev)
9376 {
9377 	int ret;
9378 	int i;
9379 
9380 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9381 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9382 		if (ret) {
9383 			dev_err(&hdev->pdev->dev,
9384 				"add mac ethertype failed, ret =%d.\n",
9385 				ret);
9386 			return ret;
9387 		}
9388 	}
9389 
9390 	return 0;
9391 }
9392 
9393 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9394 {
9395 	struct hclge_vport *vport = hclge_get_vport(handle);
9396 	struct hclge_dev *hdev = vport->back;
9397 
9398 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9399 }
9400 
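/* Update the unicast mac list when the device mac address changes: queue
 * the new address at the head of the list so it is programmed first, and
 * schedule the old address, if different, for deletion. Called with
 * vport->mac_list_lock held.
 */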
9401 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9402 				       const u8 *old_addr, const u8 *new_addr)
9403 {
9404 	struct list_head *list = &vport->uc_mac_list;
9405 	struct hclge_mac_node *old_node, *new_node;
9406 
9407 	new_node = hclge_find_mac_node(list, new_addr);
9408 	if (!new_node) {
9409 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9410 		if (!new_node)
9411 			return -ENOMEM;
9412 
9413 		new_node->state = HCLGE_MAC_TO_ADD;
9414 		ether_addr_copy(new_node->mac_addr, new_addr);
9415 		list_add(&new_node->node, list);
9416 	} else {
9417 		if (new_node->state == HCLGE_MAC_TO_DEL)
9418 			new_node->state = HCLGE_MAC_ACTIVE;
9419 
9420 		/* make sure the new addr is at the list head, to avoid the
9421 		 * dev addr failing to be re-added into the mac table due to
9422 		 * the umv space limitation after a global/imp reset, which
9423 		 * clears the hardware mac table.
9424 		 */
9425 		list_move(&new_node->node, list);
9426 	}
9427 
9428 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9429 		old_node = hclge_find_mac_node(list, old_addr);
9430 		if (old_node) {
9431 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9432 				list_del(&old_node->node);
9433 				kfree(old_node);
9434 			} else {
9435 				old_node->state = HCLGE_MAC_TO_DEL;
9436 			}
9437 		}
9438 	}
9439 
9440 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9441 
9442 	return 0;
9443 }
9444 
9445 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9446 			      bool is_first)
9447 {
9448 	const unsigned char *new_addr = (const unsigned char *)p;
9449 	struct hclge_vport *vport = hclge_get_vport(handle);
9450 	struct hclge_dev *hdev = vport->back;
9451 	unsigned char *old_addr = NULL;
9452 	int ret;
9453 
9454 	/* mac addr check */
9455 	if (is_zero_ether_addr(new_addr) ||
9456 	    is_broadcast_ether_addr(new_addr) ||
9457 	    is_multicast_ether_addr(new_addr)) {
9458 		dev_err(&hdev->pdev->dev,
9459 			"change uc mac err! invalid mac: %pM.\n",
9460 			 new_addr);
9461 		return -EINVAL;
9462 	}
9463 
9464 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9465 	if (ret) {
9466 		dev_err(&hdev->pdev->dev,
9467 			"failed to configure mac pause address, ret = %d\n",
9468 			ret);
9469 		return ret;
9470 	}
9471 
9472 	if (!is_first)
9473 		old_addr = hdev->hw.mac.mac_addr;
9474 
9475 	spin_lock_bh(&vport->mac_list_lock);
9476 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9477 	if (ret) {
9478 		dev_err(&hdev->pdev->dev,
9479 			"failed to change the mac addr:%pM, ret = %d\n",
9480 			new_addr, ret);
9481 		spin_unlock_bh(&vport->mac_list_lock);
9482 
9483 		if (!is_first)
9484 			hclge_pause_addr_cfg(hdev, old_addr);
9485 
9486 		return ret;
9487 	}
9488 	/* we must update the dev addr under the spin lock to prevent it from
9489 	 * being removed by the set_rx_mode path.
9490 	 */
9491 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9492 	spin_unlock_bh(&vport->mac_list_lock);
9493 
9494 	hclge_task_schedule(hdev, 0);
9495 
9496 	return 0;
9497 }
9498 
9499 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9500 {
9501 	struct mii_ioctl_data *data = if_mii(ifr);
9502 
9503 	if (!hnae3_dev_phy_imp_supported(hdev))
9504 		return -EOPNOTSUPP;
9505 
9506 	switch (cmd) {
9507 	case SIOCGMIIPHY:
9508 		data->phy_id = hdev->hw.mac.phy_addr;
9509 		/* this command reads phy id and register at the same time */
9510 		fallthrough;
9511 	case SIOCGMIIREG:
9512 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9513 		return 0;
9514 
9515 	case SIOCSMIIREG:
9516 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9517 	default:
9518 		return -EOPNOTSUPP;
9519 	}
9520 }
9521 
9522 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9523 			  int cmd)
9524 {
9525 	struct hclge_vport *vport = hclge_get_vport(handle);
9526 	struct hclge_dev *hdev = vport->back;
9527 
9528 	switch (cmd) {
9529 	case SIOCGHWTSTAMP:
9530 		return hclge_ptp_get_cfg(hdev, ifr);
9531 	case SIOCSHWTSTAMP:
9532 		return hclge_ptp_set_cfg(hdev, ifr);
9533 	default:
9534 		if (!hdev->hw.mac.phydev)
9535 			return hclge_mii_ioctl(hdev, ifr, cmd);
9536 	}
9537 
9538 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9539 }
9540 
9541 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9542 					     bool bypass_en)
9543 {
9544 	struct hclge_port_vlan_filter_bypass_cmd *req;
9545 	struct hclge_desc desc;
9546 	int ret;
9547 
9548 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9549 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9550 	req->vf_id = vf_id;
9551 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9552 		      bypass_en ? 1 : 0);
9553 
9554 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9555 	if (ret)
9556 		dev_err(&hdev->pdev->dev,
9557 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9558 			vf_id, ret);
9559 
9560 	return ret;
9561 }
9562 
9563 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9564 				      u8 fe_type, bool filter_en, u8 vf_id)
9565 {
9566 	struct hclge_vlan_filter_ctrl_cmd *req;
9567 	struct hclge_desc desc;
9568 	int ret;
9569 
9570 	/* read current vlan filter parameter */
9571 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9572 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9573 	req->vlan_type = vlan_type;
9574 	req->vf_id = vf_id;
9575 
9576 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9577 	if (ret) {
9578 		dev_err(&hdev->pdev->dev,
9579 			"failed to get vlan filter config, ret = %d.\n", ret);
9580 		return ret;
9581 	}
9582 
9583 	/* modify and write new config parameter */
9584 	hclge_cmd_reuse_desc(&desc, false);
9585 	req->vlan_fe = filter_en ?
9586 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9587 
9588 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9589 	if (ret)
9590 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9591 			ret);
9592 
9593 	return ret;
9594 }
9595 
9596 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9597 {
9598 	struct hclge_dev *hdev = vport->back;
9599 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9600 	int ret;
9601 
9602 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9603 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9604 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9605 						  enable, vport->vport_id);
9606 
9607 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9608 					 HCLGE_FILTER_FE_EGRESS, enable,
9609 					 vport->vport_id);
9610 	if (ret)
9611 		return ret;
9612 
9613 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9614 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9615 							!enable);
9616 	} else if (!vport->vport_id) {
9617 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9618 			enable = false;
9619 
9620 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9621 						 HCLGE_FILTER_FE_INGRESS,
9622 						 enable, 0);
9623 	}
9624 
9625 	return ret;
9626 }
9627 
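/* Decide whether the vlan filter should actually be enabled for a vport,
 * based on the port based vlan state, promiscuous/trusted settings, the
 * requested filter state and, on devices supporting vlan filter
 * modification, whether any non-zero vlan is configured.
 */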
9628 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9629 {
9630 	struct hnae3_handle *handle = &vport->nic;
9631 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9632 	struct hclge_dev *hdev = vport->back;
9633 
9634 	if (vport->vport_id) {
9635 		if (vport->port_base_vlan_cfg.state !=
9636 			HNAE3_PORT_BASE_VLAN_DISABLE)
9637 			return true;
9638 
9639 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9640 			return false;
9641 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9642 		return false;
9643 	}
9644 
9645 	if (!vport->req_vlan_fltr_en)
9646 		return false;
9647 
9648 	/* for compatibility with older devices, always enable the vlan filter */
9649 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9650 		return true;
9651 
9652 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9653 		if (vlan->vlan_id != 0)
9654 			return true;
9655 
9656 	return false;
9657 }
9658 
9659 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9660 {
9661 	struct hclge_dev *hdev = vport->back;
9662 	bool need_en;
9663 	int ret;
9664 
9665 	mutex_lock(&hdev->vport_lock);
9666 
9667 	vport->req_vlan_fltr_en = request_en;
9668 
9669 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9670 	if (need_en == vport->cur_vlan_fltr_en) {
9671 		mutex_unlock(&hdev->vport_lock);
9672 		return 0;
9673 	}
9674 
9675 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9676 	if (ret) {
9677 		mutex_unlock(&hdev->vport_lock);
9678 		return ret;
9679 	}
9680 
9681 	vport->cur_vlan_fltr_en = need_en;
9682 
9683 	mutex_unlock(&hdev->vport_lock);
9684 
9685 	return 0;
9686 }
9687 
9688 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9689 {
9690 	struct hclge_vport *vport = hclge_get_vport(handle);
9691 
9692 	return hclge_enable_vport_vlan_filter(vport, enable);
9693 }
9694 
9695 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9696 					bool is_kill, u16 vlan,
9697 					struct hclge_desc *desc)
9698 {
9699 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9700 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9701 	u8 vf_byte_val;
9702 	u8 vf_byte_off;
9703 	int ret;
9704 
9705 	hclge_cmd_setup_basic_desc(&desc[0],
9706 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9707 	hclge_cmd_setup_basic_desc(&desc[1],
9708 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9709 
9710 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9711 
9712 	vf_byte_off = vfid / 8;
9713 	vf_byte_val = 1 << (vfid % 8);
9714 
9715 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9716 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9717 
9718 	req0->vlan_id  = cpu_to_le16(vlan);
9719 	req0->vlan_cfg = is_kill;
9720 
9721 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9722 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9723 	else
9724 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9725 
9726 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9727 	if (ret) {
9728 		dev_err(&hdev->pdev->dev,
9729 			"Send vf vlan command fail, ret =%d.\n",
9730 			ret);
9731 		return ret;
9732 	}
9733 
9734 	return 0;
9735 }
9736 
9737 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9738 					  bool is_kill, struct hclge_desc *desc)
9739 {
9740 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9741 
9742 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9743 
9744 	if (!is_kill) {
9745 #define HCLGE_VF_VLAN_NO_ENTRY	2
9746 		if (!req->resp_code || req->resp_code == 1)
9747 			return 0;
9748 
9749 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9750 			set_bit(vfid, hdev->vf_vlan_full);
9751 			dev_warn(&hdev->pdev->dev,
9752 				 "vf vlan table is full, vf vlan filter is disabled\n");
9753 			return 0;
9754 		}
9755 
9756 		dev_err(&hdev->pdev->dev,
9757 			"Add vf vlan filter fail, ret =%u.\n",
9758 			req->resp_code);
9759 	} else {
9760 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9761 		if (!req->resp_code)
9762 			return 0;
9763 
9764 		/* The vf vlan filter is disabled when the vf vlan table is
9765 		 * full, so new vlan ids are not added into the vf vlan table.
9766 		 * Just return 0 without a warning, to avoid flooding the logs
9767 		 * when unloading.
9768 		 */
9769 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9770 			return 0;
9771 
9772 		dev_err(&hdev->pdev->dev,
9773 			"Kill vf vlan filter fail, ret =%u.\n",
9774 			req->resp_code);
9775 	}
9776 
9777 	return -EIO;
9778 }
9779 
9780 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9781 				    bool is_kill, u16 vlan)
9782 {
9783 	struct hclge_vport *vport = &hdev->vport[vfid];
9784 	struct hclge_desc desc[2];
9785 	int ret;
9786 
9787 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
9788 	 * and it is neither possible nor necessary to add a new vlan id then.
9789 	 * If spoof check is enabled and the vf vlan table is full, reject the
9790 	 * new vlan, because tx packets with this vlan id would be dropped.
9791 	 */
9792 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9793 		if (vport->vf_info.spoofchk && vlan) {
9794 			dev_err(&hdev->pdev->dev,
9795 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9796 			return -EPERM;
9797 		}
9798 		return 0;
9799 	}
9800 
9801 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9802 	if (ret)
9803 		return ret;
9804 
9805 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9806 }
9807 
9808 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9809 				      u16 vlan_id, bool is_kill)
9810 {
9811 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9812 	struct hclge_desc desc;
9813 	u8 vlan_offset_byte_val;
9814 	u8 vlan_offset_byte;
9815 	u8 vlan_offset_160;
9816 	int ret;
9817 
9818 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9819 
9820 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9821 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9822 			   HCLGE_VLAN_BYTE_SIZE;
9823 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9824 
9825 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9826 	req->vlan_offset = vlan_offset_160;
9827 	req->vlan_cfg = is_kill;
9828 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9829 
9830 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9831 	if (ret)
9832 		dev_err(&hdev->pdev->dev,
9833 			"port vlan command, send fail, ret =%d.\n", ret);
9834 	return ret;
9835 }
9836 
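/* Program a vlan id for one vport into the hardware vlan filter. The
 * per-VF vlan table is updated first; the per-port table is only touched
 * when the first vport joins or the last vport leaves the vlan, as tracked
 * by hdev->vlan_table.
 */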
9837 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9838 				    u16 vport_id, u16 vlan_id,
9839 				    bool is_kill)
9840 {
9841 	u16 vport_idx, vport_num = 0;
9842 	int ret;
9843 
9844 	if (is_kill && !vlan_id)
9845 		return 0;
9846 
9847 	if (vlan_id >= VLAN_N_VID)
9848 		return -EINVAL;
9849 
9850 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9851 	if (ret) {
9852 		dev_err(&hdev->pdev->dev,
9853 			"Set %u vport vlan filter config fail, ret =%d.\n",
9854 			vport_id, ret);
9855 		return ret;
9856 	}
9857 
9858 	/* vlan 0 may be added twice when 8021q module is enabled */
9859 	if (!is_kill && !vlan_id &&
9860 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9861 		return 0;
9862 
9863 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9864 		dev_err(&hdev->pdev->dev,
9865 			"Add port vlan failed, vport %u is already in vlan %u\n",
9866 			vport_id, vlan_id);
9867 		return -EINVAL;
9868 	}
9869 
9870 	if (is_kill &&
9871 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9872 		dev_err(&hdev->pdev->dev,
9873 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9874 			vport_id, vlan_id);
9875 		return -EINVAL;
9876 	}
9877 
9878 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9879 		vport_num++;
9880 
9881 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9882 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9883 						 is_kill);
9884 
9885 	return ret;
9886 }
9887 
9888 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9889 {
9890 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9891 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9892 	struct hclge_dev *hdev = vport->back;
9893 	struct hclge_desc desc;
9894 	u16 bmap_index;
9895 	int status;
9896 
9897 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9898 
9899 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9900 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9901 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9902 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9903 		      vcfg->accept_tag1 ? 1 : 0);
9904 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9905 		      vcfg->accept_untag1 ? 1 : 0);
9906 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9907 		      vcfg->accept_tag2 ? 1 : 0);
9908 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9909 		      vcfg->accept_untag2 ? 1 : 0);
9910 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9911 		      vcfg->insert_tag1_en ? 1 : 0);
9912 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9913 		      vcfg->insert_tag2_en ? 1 : 0);
9914 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9915 		      vcfg->tag_shift_mode_en ? 1 : 0);
9916 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9917 
9918 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9919 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9920 			HCLGE_VF_NUM_PER_BYTE;
9921 	req->vf_bitmap[bmap_index] =
9922 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9923 
9924 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9925 	if (status)
9926 		dev_err(&hdev->pdev->dev,
9927 			"Send port txvlan cfg command fail, ret =%d\n",
9928 			status);
9929 
9930 	return status;
9931 }
9932 
9933 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9934 {
9935 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9936 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9937 	struct hclge_dev *hdev = vport->back;
9938 	struct hclge_desc desc;
9939 	u16 bmap_index;
9940 	int status;
9941 
9942 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9943 
9944 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9945 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9946 		      vcfg->strip_tag1_en ? 1 : 0);
9947 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9948 		      vcfg->strip_tag2_en ? 1 : 0);
9949 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9950 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9951 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9952 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9953 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9954 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9955 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9956 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9957 
9958 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9959 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9960 			HCLGE_VF_NUM_PER_BYTE;
9961 	req->vf_bitmap[bmap_index] =
9962 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9963 
9964 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9965 	if (status)
9966 		dev_err(&hdev->pdev->dev,
9967 			"Send port rxvlan cfg command fail, ret =%d\n",
9968 			status);
9969 
9970 	return status;
9971 }
9972 
9973 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9974 				  u16 port_base_vlan_state,
9975 				  u16 vlan_tag, u8 qos)
9976 {
9977 	int ret;
9978 
9979 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9980 		vport->txvlan_cfg.accept_tag1 = true;
9981 		vport->txvlan_cfg.insert_tag1_en = false;
9982 		vport->txvlan_cfg.default_tag1 = 0;
9983 	} else {
9984 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9985 
9986 		vport->txvlan_cfg.accept_tag1 =
9987 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9988 		vport->txvlan_cfg.insert_tag1_en = true;
9989 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9990 						 vlan_tag;
9991 	}
9992 
9993 	vport->txvlan_cfg.accept_untag1 = true;
9994 
9995 	/* accept_tag2 and accept_untag2 are not supported on
9996 	 * pdev revision 0x20; newer revisions support them, but
9997 	 * these two fields cannot be configured by the user.
9998 	 */
9999 	vport->txvlan_cfg.accept_tag2 = true;
10000 	vport->txvlan_cfg.accept_untag2 = true;
10001 	vport->txvlan_cfg.insert_tag2_en = false;
10002 	vport->txvlan_cfg.default_tag2 = 0;
10003 	vport->txvlan_cfg.tag_shift_mode_en = true;
10004 
10005 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10006 		vport->rxvlan_cfg.strip_tag1_en = false;
10007 		vport->rxvlan_cfg.strip_tag2_en =
10008 				vport->rxvlan_cfg.rx_vlan_offload_en;
10009 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10010 	} else {
10011 		vport->rxvlan_cfg.strip_tag1_en =
10012 				vport->rxvlan_cfg.rx_vlan_offload_en;
10013 		vport->rxvlan_cfg.strip_tag2_en = true;
10014 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10015 	}
10016 
10017 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10018 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10019 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10020 
10021 	ret = hclge_set_vlan_tx_offload_cfg(vport);
10022 	if (ret)
10023 		return ret;
10024 
10025 	return hclge_set_vlan_rx_offload_cfg(vport);
10026 }
10027 
10028 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10029 {
10030 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10031 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10032 	struct hclge_desc desc;
10033 	int status;
10034 
10035 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10036 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10037 	rx_req->ot_fst_vlan_type =
10038 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10039 	rx_req->ot_sec_vlan_type =
10040 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10041 	rx_req->in_fst_vlan_type =
10042 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10043 	rx_req->in_sec_vlan_type =
10044 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10045 
10046 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10047 	if (status) {
10048 		dev_err(&hdev->pdev->dev,
10049 			"Send rxvlan protocol type command fail, ret =%d\n",
10050 			status);
10051 		return status;
10052 	}
10053 
10054 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10055 
10056 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10057 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10058 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10059 
10060 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10061 	if (status)
10062 		dev_err(&hdev->pdev->dev,
10063 			"Send txvlan protocol type command fail, ret =%d\n",
10064 			status);
10065 
10066 	return status;
10067 }
10068 
10069 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10070 {
10071 #define HCLGE_DEF_VLAN_TYPE		0x8100
10072 
10073 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10074 	struct hclge_vport *vport;
10075 	int ret;
10076 	int i;
10077 
10078 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10079 		/* for revision 0x21, vf vlan filter is per function */
10080 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10081 			vport = &hdev->vport[i];
10082 			ret = hclge_set_vlan_filter_ctrl(hdev,
10083 							 HCLGE_FILTER_TYPE_VF,
10084 							 HCLGE_FILTER_FE_EGRESS,
10085 							 true,
10086 							 vport->vport_id);
10087 			if (ret)
10088 				return ret;
10089 			vport->cur_vlan_fltr_en = true;
10090 		}
10091 
10092 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10093 						 HCLGE_FILTER_FE_INGRESS, true,
10094 						 0);
10095 		if (ret)
10096 			return ret;
10097 	} else {
10098 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10099 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10100 						 true, 0);
10101 		if (ret)
10102 			return ret;
10103 	}
10104 
10105 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10106 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10107 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10108 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10109 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10110 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10111 
10112 	ret = hclge_set_vlan_protocol_type(hdev);
10113 	if (ret)
10114 		return ret;
10115 
10116 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10117 		u16 vlan_tag;
10118 		u8 qos;
10119 
10120 		vport = &hdev->vport[i];
10121 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10122 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10123 
10124 		ret = hclge_vlan_offload_cfg(vport,
10125 					     vport->port_base_vlan_cfg.state,
10126 					     vlan_tag, qos);
10127 		if (ret)
10128 			return ret;
10129 	}
10130 
10131 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10132 }
10133 
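/* Track a vlan id in the vport's software vlan list; hd_tbl_status records
 * whether the vlan has also been written to the hardware filter table.
 */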
10134 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10135 				       bool writen_to_tbl)
10136 {
10137 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10138 
10139 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10140 		if (vlan->vlan_id == vlan_id)
10141 			return;
10142 
10143 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10144 	if (!vlan)
10145 		return;
10146 
10147 	vlan->hd_tbl_status = writen_to_tbl;
10148 	vlan->vlan_id = vlan_id;
10149 
10150 	list_add_tail(&vlan->node, &vport->vlan_list);
10151 }
10152 
10153 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10154 {
10155 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10156 	struct hclge_dev *hdev = vport->back;
10157 	int ret;
10158 
10159 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10160 		if (!vlan->hd_tbl_status) {
10161 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10162 						       vport->vport_id,
10163 						       vlan->vlan_id, false);
10164 			if (ret) {
10165 				dev_err(&hdev->pdev->dev,
10166 					"restore vport vlan list failed, ret=%d\n",
10167 					ret);
10168 				return ret;
10169 			}
10170 		}
10171 		vlan->hd_tbl_status = true;
10172 	}
10173 
10174 	return 0;
10175 }
10176 
10177 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10178 				      bool is_write_tbl)
10179 {
10180 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10181 	struct hclge_dev *hdev = vport->back;
10182 
10183 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10184 		if (vlan->vlan_id == vlan_id) {
10185 			if (is_write_tbl && vlan->hd_tbl_status)
10186 				hclge_set_vlan_filter_hw(hdev,
10187 							 htons(ETH_P_8021Q),
10188 							 vport->vport_id,
10189 							 vlan_id,
10190 							 true);
10191 
10192 			list_del(&vlan->node);
10193 			kfree(vlan);
10194 			break;
10195 		}
10196 	}
10197 }
10198 
10199 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10200 {
10201 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10202 	struct hclge_dev *hdev = vport->back;
10203 
10204 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10205 		if (vlan->hd_tbl_status)
10206 			hclge_set_vlan_filter_hw(hdev,
10207 						 htons(ETH_P_8021Q),
10208 						 vport->vport_id,
10209 						 vlan->vlan_id,
10210 						 true);
10211 
10212 		vlan->hd_tbl_status = false;
10213 		if (is_del_list) {
10214 			list_del(&vlan->node);
10215 			kfree(vlan);
10216 		}
10217 	}
10218 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10219 }
10220 
10221 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10222 {
10223 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10224 	struct hclge_vport *vport;
10225 	int i;
10226 
10227 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10228 		vport = &hdev->vport[i];
10229 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10230 			list_del(&vlan->node);
10231 			kfree(vlan);
10232 		}
10233 	}
10234 }
10235 
10236 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10237 {
10238 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10239 	struct hclge_dev *hdev = vport->back;
10240 	u16 vlan_proto;
10241 	u16 vlan_id;
10242 	u16 state;
10243 	int ret;
10244 
10245 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10246 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10247 	state = vport->port_base_vlan_cfg.state;
10248 
10249 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10250 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10251 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10252 					 vport->vport_id, vlan_id,
10253 					 false);
10254 		return;
10255 	}
10256 
10257 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10258 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10259 					       vport->vport_id,
10260 					       vlan->vlan_id, false);
10261 		if (ret)
10262 			break;
10263 		vlan->hd_tbl_status = true;
10264 	}
10265 }
10266 
10267 /* For a global reset or imp reset, hardware will clear the mac table, so
10268  * we change the mac address state from ACTIVE to TO_ADD so that the
10269  * addresses can be restored in the service task after the reset completes.
10270  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need to
10271  * be restored after reset, so just remove these mac nodes from the mac_list.
10272  */
10273 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10274 {
10275 	struct hclge_mac_node *mac_node, *tmp;
10276 
10277 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10278 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10279 			mac_node->state = HCLGE_MAC_TO_ADD;
10280 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10281 			list_del(&mac_node->node);
10282 			kfree(mac_node);
10283 		}
10284 	}
10285 }
10286 
10287 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10288 {
10289 	spin_lock_bh(&vport->mac_list_lock);
10290 
10291 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10292 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10293 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10294 
10295 	spin_unlock_bh(&vport->mac_list_lock);
10296 }
10297 
10298 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10299 {
10300 	struct hclge_vport *vport = &hdev->vport[0];
10301 	struct hnae3_handle *handle = &vport->nic;
10302 
10303 	hclge_restore_mac_table_common(vport);
10304 	hclge_restore_vport_vlan_table(vport);
10305 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10306 	hclge_restore_fd_entries(handle);
10307 }
10308 
10309 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10310 {
10311 	struct hclge_vport *vport = hclge_get_vport(handle);
10312 
10313 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10314 		vport->rxvlan_cfg.strip_tag1_en = false;
10315 		vport->rxvlan_cfg.strip_tag2_en = enable;
10316 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10317 	} else {
10318 		vport->rxvlan_cfg.strip_tag1_en = enable;
10319 		vport->rxvlan_cfg.strip_tag2_en = true;
10320 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10321 	}
10322 
10323 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10324 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10325 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10326 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10327 
10328 	return hclge_set_vlan_rx_offload_cfg(vport);
10329 }
10330 
10331 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10332 {
10333 	struct hclge_dev *hdev = vport->back;
10334 
10335 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10336 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10337 }
10338 
10339 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10340 					    u16 port_base_vlan_state,
10341 					    struct hclge_vlan_info *new_info,
10342 					    struct hclge_vlan_info *old_info)
10343 {
10344 	struct hclge_dev *hdev = vport->back;
10345 	int ret;
10346 
10347 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10348 		hclge_rm_vport_all_vlan_table(vport, false);
10349 		/* force clear VLAN 0 */
10350 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10351 		if (ret)
10352 			return ret;
10353 		return hclge_set_vlan_filter_hw(hdev,
10354 						 htons(new_info->vlan_proto),
10355 						 vport->vport_id,
10356 						 new_info->vlan_tag,
10357 						 false);
10358 	}
10359 
10360 	/* force add VLAN 0 */
10361 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10362 	if (ret)
10363 		return ret;
10364 
10365 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10366 				       vport->vport_id, old_info->vlan_tag,
10367 				       true);
10368 	if (ret)
10369 		return ret;
10370 
10371 	return hclge_add_vport_all_vlan_table(vport);
10372 }
10373 
10374 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10375 					  const struct hclge_vlan_info *old_cfg)
10376 {
10377 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10378 		return true;
10379 
10380 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10381 		return true;
10382 
10383 	return false;
10384 }
10385 
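/* Apply a new port based vlan configuration to a vport: refresh the tx/rx
 * vlan tag offload settings, update the hardware vlan filter entries when
 * needed, and record the new state in the vport.
 */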
10386 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10387 				    struct hclge_vlan_info *vlan_info)
10388 {
10389 	struct hnae3_handle *nic = &vport->nic;
10390 	struct hclge_vlan_info *old_vlan_info;
10391 	struct hclge_dev *hdev = vport->back;
10392 	int ret;
10393 
10394 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10395 
10396 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10397 				     vlan_info->qos);
10398 	if (ret)
10399 		return ret;
10400 
10401 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10402 		goto out;
10403 
10404 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10405 		/* add new VLAN tag */
10406 		ret = hclge_set_vlan_filter_hw(hdev,
10407 					       htons(vlan_info->vlan_proto),
10408 					       vport->vport_id,
10409 					       vlan_info->vlan_tag,
10410 					       false);
10411 		if (ret)
10412 			return ret;
10413 
10414 		/* remove old VLAN tag */
10415 		if (old_vlan_info->vlan_tag == 0)
10416 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10417 						       true, 0);
10418 		else
10419 			ret = hclge_set_vlan_filter_hw(hdev,
10420 						       htons(ETH_P_8021Q),
10421 						       vport->vport_id,
10422 						       old_vlan_info->vlan_tag,
10423 						       true);
10424 		if (ret) {
10425 			dev_err(&hdev->pdev->dev,
10426 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10427 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10428 			return ret;
10429 		}
10430 
10431 		goto out;
10432 	}
10433 
10434 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10435 					       old_vlan_info);
10436 	if (ret)
10437 		return ret;
10438 
10439 out:
10440 	vport->port_base_vlan_cfg.state = state;
10441 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10442 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10443 	else
10444 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10445 
10446 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10447 	hclge_set_vport_vlan_fltr_change(vport);
10448 
10449 	return 0;
10450 }
10451 
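/* Decide how the port based vlan state should change for the requested
 * vlan/qos: enable it when a non-zero setting is applied, disable it when
 * both are cleared, modify it when the setting differs from the current
 * one, or leave it unchanged.
 */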
10452 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10453 					  enum hnae3_port_base_vlan_state state,
10454 					  u16 vlan, u8 qos)
10455 {
10456 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10457 		if (!vlan && !qos)
10458 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10459 
10460 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10461 	}
10462 
10463 	if (!vlan && !qos)
10464 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10465 
10466 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10467 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10468 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10469 
10470 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10471 }
10472 
10473 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10474 				    u16 vlan, u8 qos, __be16 proto)
10475 {
10476 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10477 	struct hclge_vport *vport = hclge_get_vport(handle);
10478 	struct hclge_dev *hdev = vport->back;
10479 	struct hclge_vlan_info vlan_info;
10480 	u16 state;
10481 	int ret;
10482 
10483 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10484 		return -EOPNOTSUPP;
10485 
10486 	vport = hclge_get_vf_vport(hdev, vfid);
10487 	if (!vport)
10488 		return -EINVAL;
10489 
10490 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10491 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10492 		return -EINVAL;
10493 	if (proto != htons(ETH_P_8021Q))
10494 		return -EPROTONOSUPPORT;
10495 
10496 	state = hclge_get_port_base_vlan_state(vport,
10497 					       vport->port_base_vlan_cfg.state,
10498 					       vlan, qos);
10499 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10500 		return 0;
10501 
10502 	vlan_info.vlan_tag = vlan;
10503 	vlan_info.qos = qos;
10504 	vlan_info.vlan_proto = ntohs(proto);
10505 
10506 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10507 	if (ret) {
10508 		dev_err(&hdev->pdev->dev,
10509 			"failed to update port base vlan for vf %d, ret = %d\n",
10510 			vfid, ret);
10511 		return ret;
10512 	}
10513 
10514 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10515 	 * VLAN state.
10516 	 */
10517 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10518 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10519 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10520 						  vport->vport_id, state,
10521 						  &vlan_info);
10522 
10523 	return 0;
10524 }
10525 
10526 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10527 {
10528 	struct hclge_vlan_info *vlan_info;
10529 	struct hclge_vport *vport;
10530 	int ret;
10531 	int vf;
10532 
	/* clear the port base vlan for all VFs */
10534 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10535 		vport = &hdev->vport[vf];
10536 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10537 
10538 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10539 					       vport->vport_id,
10540 					       vlan_info->vlan_tag, true);
10541 		if (ret)
10542 			dev_err(&hdev->pdev->dev,
10543 				"failed to clear vf vlan for vf%d, ret = %d\n",
10544 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10545 	}
10546 }
10547 
10548 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10549 			  u16 vlan_id, bool is_kill)
10550 {
10551 	struct hclge_vport *vport = hclge_get_vport(handle);
10552 	struct hclge_dev *hdev = vport->back;
10553 	bool writen_to_tbl = false;
10554 	int ret = 0;
10555 
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the VLAN id, and
	 * remove it after the reset finishes.
	 */
10560 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10561 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10562 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10563 		return -EBUSY;
10564 	}
10565 
	/* When port base vlan is enabled, we use the port base vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one;
	 * we only update the vport vlan list. The vlan ids in the vlan list
	 * are not written to the vlan filter table until port base vlan is
	 * disabled.
	 */
10572 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10573 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10574 					       vlan_id, is_kill);
10575 		writen_to_tbl = true;
10576 	}
10577 
10578 	if (!ret) {
10579 		if (is_kill)
10580 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10581 		else
10582 			hclge_add_vport_vlan_table(vport, vlan_id,
10583 						   writen_to_tbl);
10584 	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan id,
		 * and try to remove it from hw later, to stay consistent with
		 * the stack
		 */
10589 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10590 	}
10591 
10592 	hclge_set_vport_vlan_fltr_change(vport);
10593 
10594 	return ret;
10595 }
10596 
10597 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10598 {
10599 	struct hclge_vport *vport;
10600 	int ret;
10601 	u16 i;
10602 
10603 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10604 		vport = &hdev->vport[i];
10605 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10606 					&vport->state))
10607 			continue;
10608 
10609 		ret = hclge_enable_vport_vlan_filter(vport,
10610 						     vport->req_vlan_fltr_en);
10611 		if (ret) {
10612 			dev_err(&hdev->pdev->dev,
10613 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10614 				vport->vport_id, ret);
10615 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10616 				&vport->state);
10617 			return;
10618 		}
10619 	}
10620 }
10621 
10622 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10623 {
10624 #define HCLGE_MAX_SYNC_COUNT	60
10625 
10626 	int i, ret, sync_cnt = 0;
10627 	u16 vlan_id;
10628 
	/* retry the failed VLAN deletions for every vport, PF included */
10630 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10631 		struct hclge_vport *vport = &hdev->vport[i];
10632 
10633 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10634 					 VLAN_N_VID);
10635 		while (vlan_id != VLAN_N_VID) {
10636 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10637 						       vport->vport_id, vlan_id,
10638 						       true);
10639 			if (ret && ret != -EINVAL)
10640 				return;
10641 
10642 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10643 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10644 			hclge_set_vport_vlan_fltr_change(vport);
10645 
10646 			sync_cnt++;
10647 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10648 				return;
10649 
10650 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10651 						 VLAN_N_VID);
10652 		}
10653 	}
10654 
10655 	hclge_sync_vlan_fltr_state(hdev);
10656 }
10657 
10658 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10659 {
10660 	struct hclge_config_max_frm_size_cmd *req;
10661 	struct hclge_desc desc;
10662 
10663 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10664 
10665 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10666 	req->max_frm_size = cpu_to_le16(new_mps);
10667 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10668 
10669 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10670 }
10671 
10672 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10673 {
10674 	struct hclge_vport *vport = hclge_get_vport(handle);
10675 
10676 	return hclge_set_vport_mtu(vport, new_mtu);
10677 }
10678 
10679 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10680 {
10681 	struct hclge_dev *hdev = vport->back;
10682 	int i, max_frm_size, ret;
10683 
	/* HW supports 2 layers of VLAN tags */
10685 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10686 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10687 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10688 		return -EINVAL;
10689 
10690 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10691 	mutex_lock(&hdev->vport_lock);
10692 	/* VF's mps must fit within hdev->mps */
10693 	if (vport->vport_id && max_frm_size > hdev->mps) {
10694 		mutex_unlock(&hdev->vport_lock);
10695 		return -EINVAL;
10696 	} else if (vport->vport_id) {
10697 		vport->mps = max_frm_size;
10698 		mutex_unlock(&hdev->vport_lock);
10699 		return 0;
10700 	}
10701 
	/* PF's mps must be no smaller than any VF's mps */
10703 	for (i = 1; i < hdev->num_alloc_vport; i++)
10704 		if (max_frm_size < hdev->vport[i].mps) {
10705 			mutex_unlock(&hdev->vport_lock);
10706 			return -EINVAL;
10707 		}
10708 
10709 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10710 
10711 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10712 	if (ret) {
10713 		dev_err(&hdev->pdev->dev,
10714 			"Change mtu fail, ret =%d\n", ret);
10715 		goto out;
10716 	}
10717 
10718 	hdev->mps = max_frm_size;
10719 	vport->mps = max_frm_size;
10720 
10721 	ret = hclge_buffer_alloc(hdev);
10722 	if (ret)
10723 		dev_err(&hdev->pdev->dev,
10724 			"Allocate buffer fail, ret =%d\n", ret);
10725 
10726 out:
10727 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10728 	mutex_unlock(&hdev->vport_lock);
10729 	return ret;
10730 }
10731 
10732 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10733 				    bool enable)
10734 {
10735 	struct hclge_reset_tqp_queue_cmd *req;
10736 	struct hclge_desc desc;
10737 	int ret;
10738 
10739 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10740 
10741 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10742 	req->tqp_id = cpu_to_le16(queue_id);
10743 	if (enable)
10744 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10745 
10746 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10747 	if (ret) {
10748 		dev_err(&hdev->pdev->dev,
10749 			"Send tqp reset cmd error, status =%d\n", ret);
10750 		return ret;
10751 	}
10752 
10753 	return 0;
10754 }
10755 
10756 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10757 				  u8 *reset_status)
10758 {
10759 	struct hclge_reset_tqp_queue_cmd *req;
10760 	struct hclge_desc desc;
10761 	int ret;
10762 
10763 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10764 
10765 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10766 	req->tqp_id = cpu_to_le16(queue_id);
10767 
10768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10769 	if (ret) {
10770 		dev_err(&hdev->pdev->dev,
10771 			"Get reset status error, status =%d\n", ret);
10772 		return ret;
10773 	}
10774 
10775 	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10776 
10777 	return 0;
10778 }
10779 
10780 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10781 {
10782 	struct hnae3_queue *queue;
10783 	struct hclge_tqp *tqp;
10784 
10785 	queue = handle->kinfo.tqp[queue_id];
10786 	tqp = container_of(queue, struct hclge_tqp, q);
10787 
10788 	return tqp->index;
10789 }
10790 
10791 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10792 {
10793 	struct hclge_vport *vport = hclge_get_vport(handle);
10794 	struct hclge_dev *hdev = vport->back;
10795 	u16 reset_try_times = 0;
10796 	u8 reset_status;
10797 	u16 queue_gid;
10798 	int ret;
10799 	u16 i;
10800 
10801 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10802 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10803 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10804 		if (ret) {
10805 			dev_err(&hdev->pdev->dev,
10806 				"failed to send reset tqp cmd, ret = %d\n",
10807 				ret);
10808 			return ret;
10809 		}
10810 
10811 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10812 			ret = hclge_get_reset_status(hdev, queue_gid,
10813 						     &reset_status);
10814 			if (ret)
10815 				return ret;
10816 
10817 			if (reset_status)
10818 				break;
10819 
10820 			/* Wait for tqp hw reset */
10821 			usleep_range(1000, 1200);
10822 		}
10823 
10824 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10825 			dev_err(&hdev->pdev->dev,
10826 				"wait for tqp hw reset timeout\n");
10827 			return -ETIME;
10828 		}
10829 
10830 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10831 		if (ret) {
10832 			dev_err(&hdev->pdev->dev,
10833 				"failed to deassert soft reset, ret = %d\n",
10834 				ret);
10835 			return ret;
10836 		}
10837 		reset_try_times = 0;
10838 	}
10839 	return 0;
10840 }
10841 
10842 static int hclge_reset_rcb(struct hnae3_handle *handle)
10843 {
10844 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10845 #define HCLGE_RESET_RCB_SUCCESS		1U
10846 
10847 	struct hclge_vport *vport = hclge_get_vport(handle);
10848 	struct hclge_dev *hdev = vport->back;
10849 	struct hclge_reset_cmd *req;
10850 	struct hclge_desc desc;
10851 	u8 return_status;
10852 	u16 queue_gid;
10853 	int ret;
10854 
10855 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10856 
10857 	req = (struct hclge_reset_cmd *)desc.data;
10858 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10859 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10860 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10861 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10862 
10863 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10864 	if (ret) {
10865 		dev_err(&hdev->pdev->dev,
10866 			"failed to send rcb reset cmd, ret = %d\n", ret);
10867 		return ret;
10868 	}
10869 
10870 	return_status = req->fun_reset_rcb_return_status;
10871 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10872 		return 0;
10873 
10874 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10875 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10876 			return_status);
10877 		return -EIO;
10878 	}
10879 
	/* if the reset rcb cmd is unsupported, fall back to sending the
	 * reset tqp cmd to reset all tqps
	 */
10883 	return hclge_reset_tqp_cmd(handle);
10884 }
10885 
10886 int hclge_reset_tqp(struct hnae3_handle *handle)
10887 {
10888 	struct hclge_vport *vport = hclge_get_vport(handle);
10889 	struct hclge_dev *hdev = vport->back;
10890 	int ret;
10891 
10892 	/* only need to disable PF's tqp */
10893 	if (!vport->vport_id) {
10894 		ret = hclge_tqp_enable(handle, false);
10895 		if (ret) {
10896 			dev_err(&hdev->pdev->dev,
10897 				"failed to disable tqp, ret = %d\n", ret);
10898 			return ret;
10899 		}
10900 	}
10901 
10902 	return hclge_reset_rcb(handle);
10903 }
10904 
10905 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10906 {
10907 	struct hclge_vport *vport = hclge_get_vport(handle);
10908 	struct hclge_dev *hdev = vport->back;
10909 
10910 	return hdev->fw_version;
10911 }
10912 
10913 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10914 {
10915 	struct phy_device *phydev = hdev->hw.mac.phydev;
10916 
10917 	if (!phydev)
10918 		return;
10919 
10920 	phy_set_asym_pause(phydev, rx_en, tx_en);
10921 }
10922 
10923 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10924 {
10925 	int ret;
10926 
10927 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10928 		return 0;
10929 
10930 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10931 	if (ret)
10932 		dev_err(&hdev->pdev->dev,
10933 			"configure pauseparam error, ret = %d.\n", ret);
10934 
10935 	return ret;
10936 }
10937 
10938 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10939 {
10940 	struct phy_device *phydev = hdev->hw.mac.phydev;
10941 	u16 remote_advertising = 0;
10942 	u16 local_advertising;
10943 	u32 rx_pause, tx_pause;
10944 	u8 flowctl;
10945 
10946 	if (!phydev->link || !phydev->autoneg)
10947 		return 0;
10948 
10949 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10950 
10951 	if (phydev->pause)
10952 		remote_advertising = LPA_PAUSE_CAP;
10953 
10954 	if (phydev->asym_pause)
10955 		remote_advertising |= LPA_PAUSE_ASYM;
10956 
10957 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10958 					   remote_advertising);
10959 	tx_pause = flowctl & FLOW_CTRL_TX;
10960 	rx_pause = flowctl & FLOW_CTRL_RX;
10961 
10962 	if (phydev->duplex == HCLGE_MAC_HALF) {
10963 		tx_pause = 0;
10964 		rx_pause = 0;
10965 	}
10966 
10967 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10968 }
10969 
10970 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10971 				 u32 *rx_en, u32 *tx_en)
10972 {
10973 	struct hclge_vport *vport = hclge_get_vport(handle);
10974 	struct hclge_dev *hdev = vport->back;
10975 	u8 media_type = hdev->hw.mac.media_type;
10976 
10977 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10978 		    hclge_get_autoneg(handle) : 0;
10979 
10980 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10981 		*rx_en = 0;
10982 		*tx_en = 0;
10983 		return;
10984 	}
10985 
10986 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10987 		*rx_en = 1;
10988 		*tx_en = 0;
10989 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10990 		*tx_en = 1;
10991 		*rx_en = 0;
10992 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10993 		*rx_en = 1;
10994 		*tx_en = 1;
10995 	} else {
10996 		*rx_en = 0;
10997 		*tx_en = 0;
10998 	}
10999 }
11000 
11001 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11002 					 u32 rx_en, u32 tx_en)
11003 {
11004 	if (rx_en && tx_en)
11005 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
11006 	else if (rx_en && !tx_en)
11007 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11008 	else if (!rx_en && tx_en)
11009 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11010 	else
11011 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
11012 
11013 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11014 }
11015 
11016 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11017 				u32 rx_en, u32 tx_en)
11018 {
11019 	struct hclge_vport *vport = hclge_get_vport(handle);
11020 	struct hclge_dev *hdev = vport->back;
11021 	struct phy_device *phydev = hdev->hw.mac.phydev;
11022 	u32 fc_autoneg;
11023 
11024 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11025 		fc_autoneg = hclge_get_autoneg(handle);
11026 		if (auto_neg != fc_autoneg) {
11027 			dev_info(&hdev->pdev->dev,
11028 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11029 			return -EOPNOTSUPP;
11030 		}
11031 	}
11032 
11033 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11034 		dev_info(&hdev->pdev->dev,
11035 			 "Priority flow control enabled. Cannot set link flow control.\n");
11036 		return -EOPNOTSUPP;
11037 	}
11038 
11039 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11040 
11041 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11042 
11043 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11044 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11045 
11046 	if (phydev)
11047 		return phy_start_aneg(phydev);
11048 
11049 	return -EOPNOTSUPP;
11050 }
11051 
11052 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11053 					  u8 *auto_neg, u32 *speed, u8 *duplex)
11054 {
11055 	struct hclge_vport *vport = hclge_get_vport(handle);
11056 	struct hclge_dev *hdev = vport->back;
11057 
11058 	if (speed)
11059 		*speed = hdev->hw.mac.speed;
11060 	if (duplex)
11061 		*duplex = hdev->hw.mac.duplex;
11062 	if (auto_neg)
11063 		*auto_neg = hdev->hw.mac.autoneg;
11064 }
11065 
11066 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11067 				 u8 *module_type)
11068 {
11069 	struct hclge_vport *vport = hclge_get_vport(handle);
11070 	struct hclge_dev *hdev = vport->back;
11071 
	/* When the nic is down, the service task is not running and does not
	 * refresh the port information periodically. Query the port
	 * information before returning the media type, to ensure the media
	 * information reported is correct.
	 */
11076 	hclge_update_port_info(hdev);
11077 
11078 	if (media_type)
11079 		*media_type = hdev->hw.mac.media_type;
11080 
11081 	if (module_type)
11082 		*module_type = hdev->hw.mac.module_type;
11083 }
11084 
11085 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11086 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11087 {
11088 	struct hclge_vport *vport = hclge_get_vport(handle);
11089 	struct hclge_dev *hdev = vport->back;
11090 	struct phy_device *phydev = hdev->hw.mac.phydev;
11091 	int mdix_ctrl, mdix, is_resolved;
11092 	unsigned int retval;
11093 
11094 	if (!phydev) {
11095 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11096 		*tp_mdix = ETH_TP_MDI_INVALID;
11097 		return;
11098 	}
11099 
11100 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11101 
11102 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11103 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11104 				    HCLGE_PHY_MDIX_CTRL_S);
11105 
11106 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11107 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11108 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11109 
11110 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11111 
11112 	switch (mdix_ctrl) {
11113 	case 0x0:
11114 		*tp_mdix_ctrl = ETH_TP_MDI;
11115 		break;
11116 	case 0x1:
11117 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11118 		break;
11119 	case 0x3:
11120 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11121 		break;
11122 	default:
11123 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11124 		break;
11125 	}
11126 
11127 	if (!is_resolved)
11128 		*tp_mdix = ETH_TP_MDI_INVALID;
11129 	else if (mdix)
11130 		*tp_mdix = ETH_TP_MDI_X;
11131 	else
11132 		*tp_mdix = ETH_TP_MDI;
11133 }
11134 
11135 static void hclge_info_show(struct hclge_dev *hdev)
11136 {
11137 	struct device *dev = &hdev->pdev->dev;
11138 
11139 	dev_info(dev, "PF info begin:\n");
11140 
11141 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11142 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11143 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11144 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11145 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11146 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11147 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11148 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11149 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11150 	dev_info(dev, "This is %s PF\n",
11151 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11152 	dev_info(dev, "DCB %s\n",
11153 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11154 	dev_info(dev, "MQPRIO %s\n",
11155 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11156 	dev_info(dev, "Default tx spare buffer size: %u\n",
11157 		 hdev->tx_spare_buf_size);
11158 
11159 	dev_info(dev, "PF info end.\n");
11160 }
11161 
11162 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11163 					  struct hclge_vport *vport)
11164 {
11165 	struct hnae3_client *client = vport->nic.client;
11166 	struct hclge_dev *hdev = ae_dev->priv;
11167 	int rst_cnt = hdev->rst_stats.reset_cnt;
11168 	int ret;
11169 
11170 	ret = client->ops->init_instance(&vport->nic);
11171 	if (ret)
11172 		return ret;
11173 
11174 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11175 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11176 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11177 		ret = -EBUSY;
11178 		goto init_nic_err;
11179 	}
11180 
11181 	/* Enable nic hw error interrupts */
11182 	ret = hclge_config_nic_hw_error(hdev, true);
11183 	if (ret) {
11184 		dev_err(&ae_dev->pdev->dev,
11185 			"fail(%d) to enable hw error interrupts\n", ret);
11186 		goto init_nic_err;
11187 	}
11188 
11189 	hnae3_set_client_init_flag(client, ae_dev, 1);
11190 
11191 	if (netif_msg_drv(&hdev->vport->nic))
11192 		hclge_info_show(hdev);
11193 
11194 	return ret;
11195 
11196 init_nic_err:
11197 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11198 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11199 		msleep(HCLGE_WAIT_RESET_DONE);
11200 
11201 	client->ops->uninit_instance(&vport->nic, 0);
11202 
11203 	return ret;
11204 }
11205 
11206 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11207 					   struct hclge_vport *vport)
11208 {
11209 	struct hclge_dev *hdev = ae_dev->priv;
11210 	struct hnae3_client *client;
11211 	int rst_cnt;
11212 	int ret;
11213 
11214 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11215 	    !hdev->nic_client)
11216 		return 0;
11217 
11218 	client = hdev->roce_client;
11219 	ret = hclge_init_roce_base_info(vport);
11220 	if (ret)
11221 		return ret;
11222 
11223 	rst_cnt = hdev->rst_stats.reset_cnt;
11224 	ret = client->ops->init_instance(&vport->roce);
11225 	if (ret)
11226 		return ret;
11227 
11228 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11229 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11230 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11231 		ret = -EBUSY;
11232 		goto init_roce_err;
11233 	}
11234 
11235 	/* Enable roce ras interrupts */
11236 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11237 	if (ret) {
11238 		dev_err(&ae_dev->pdev->dev,
11239 			"fail(%d) to enable roce ras interrupts\n", ret);
11240 		goto init_roce_err;
11241 	}
11242 
11243 	hnae3_set_client_init_flag(client, ae_dev, 1);
11244 
11245 	return 0;
11246 
11247 init_roce_err:
11248 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11249 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11250 		msleep(HCLGE_WAIT_RESET_DONE);
11251 
11252 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11253 
11254 	return ret;
11255 }
11256 
11257 static int hclge_init_client_instance(struct hnae3_client *client,
11258 				      struct hnae3_ae_dev *ae_dev)
11259 {
11260 	struct hclge_dev *hdev = ae_dev->priv;
11261 	struct hclge_vport *vport = &hdev->vport[0];
11262 	int ret;
11263 
11264 	switch (client->type) {
11265 	case HNAE3_CLIENT_KNIC:
11266 		hdev->nic_client = client;
11267 		vport->nic.client = client;
11268 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11269 		if (ret)
11270 			goto clear_nic;
11271 
11272 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11273 		if (ret)
11274 			goto clear_roce;
11275 
11276 		break;
11277 	case HNAE3_CLIENT_ROCE:
11278 		if (hnae3_dev_roce_supported(hdev)) {
11279 			hdev->roce_client = client;
11280 			vport->roce.client = client;
11281 		}
11282 
11283 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11284 		if (ret)
11285 			goto clear_roce;
11286 
11287 		break;
11288 	default:
11289 		return -EINVAL;
11290 	}
11291 
11292 	return 0;
11293 
11294 clear_nic:
11295 	hdev->nic_client = NULL;
11296 	vport->nic.client = NULL;
11297 	return ret;
11298 clear_roce:
11299 	hdev->roce_client = NULL;
11300 	vport->roce.client = NULL;
11301 	return ret;
11302 }
11303 
11304 static void hclge_uninit_client_instance(struct hnae3_client *client,
11305 					 struct hnae3_ae_dev *ae_dev)
11306 {
11307 	struct hclge_dev *hdev = ae_dev->priv;
11308 	struct hclge_vport *vport = &hdev->vport[0];
11309 
11310 	if (hdev->roce_client) {
11311 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11312 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11313 			msleep(HCLGE_WAIT_RESET_DONE);
11314 
11315 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11316 		hdev->roce_client = NULL;
11317 		vport->roce.client = NULL;
11318 	}
11319 	if (client->type == HNAE3_CLIENT_ROCE)
11320 		return;
11321 	if (hdev->nic_client && client->ops->uninit_instance) {
11322 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11323 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11324 			msleep(HCLGE_WAIT_RESET_DONE);
11325 
11326 		client->ops->uninit_instance(&vport->nic, 0);
11327 		hdev->nic_client = NULL;
11328 		vport->nic.client = NULL;
11329 	}
11330 }
11331 
11332 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11333 {
11334 #define HCLGE_MEM_BAR		4
11335 
11336 	struct pci_dev *pdev = hdev->pdev;
11337 	struct hclge_hw *hw = &hdev->hw;
11338 
	/* if the device does not have device memory, return directly */
11340 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11341 		return 0;
11342 
11343 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11344 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11345 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11346 	if (!hw->mem_base) {
11347 		dev_err(&pdev->dev, "failed to map device memory\n");
11348 		return -EFAULT;
11349 	}
11350 
11351 	return 0;
11352 }
11353 
11354 static int hclge_pci_init(struct hclge_dev *hdev)
11355 {
11356 	struct pci_dev *pdev = hdev->pdev;
11357 	struct hclge_hw *hw;
11358 	int ret;
11359 
11360 	ret = pci_enable_device(pdev);
11361 	if (ret) {
11362 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11363 		return ret;
11364 	}
11365 
11366 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11367 	if (ret) {
11368 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11369 		if (ret) {
11370 			dev_err(&pdev->dev,
11371 				"can't set consistent PCI DMA");
11372 			goto err_disable_device;
11373 		}
11374 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11375 	}
11376 
11377 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11378 	if (ret) {
11379 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11380 		goto err_disable_device;
11381 	}
11382 
11383 	pci_set_master(pdev);
11384 	hw = &hdev->hw;
11385 	hw->io_base = pcim_iomap(pdev, 2, 0);
11386 	if (!hw->io_base) {
11387 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11388 		ret = -ENOMEM;
11389 		goto err_clr_master;
11390 	}
11391 
11392 	ret = hclge_dev_mem_map(hdev);
11393 	if (ret)
11394 		goto err_unmap_io_base;
11395 
11396 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11397 
11398 	return 0;
11399 
11400 err_unmap_io_base:
11401 	pcim_iounmap(pdev, hdev->hw.io_base);
11402 err_clr_master:
11403 	pci_clear_master(pdev);
11404 	pci_release_regions(pdev);
11405 err_disable_device:
11406 	pci_disable_device(pdev);
11407 
11408 	return ret;
11409 }
11410 
11411 static void hclge_pci_uninit(struct hclge_dev *hdev)
11412 {
11413 	struct pci_dev *pdev = hdev->pdev;
11414 
11415 	if (hdev->hw.mem_base)
11416 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11417 
11418 	pcim_iounmap(pdev, hdev->hw.io_base);
11419 	pci_free_irq_vectors(pdev);
11420 	pci_clear_master(pdev);
11421 	pci_release_mem_regions(pdev);
11422 	pci_disable_device(pdev);
11423 }
11424 
11425 static void hclge_state_init(struct hclge_dev *hdev)
11426 {
11427 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11428 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11429 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11430 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11431 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11432 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11433 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11434 }
11435 
11436 static void hclge_state_uninit(struct hclge_dev *hdev)
11437 {
11438 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11439 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11440 
11441 	if (hdev->reset_timer.function)
11442 		del_timer_sync(&hdev->reset_timer);
11443 	if (hdev->service_task.work.func)
11444 		cancel_delayed_work_sync(&hdev->service_task);
11445 }
11446 
11447 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11448 					enum hnae3_reset_type rst_type)
11449 {
11450 #define HCLGE_RESET_RETRY_WAIT_MS	500
11451 #define HCLGE_RESET_RETRY_CNT	5
11452 
11453 	struct hclge_dev *hdev = ae_dev->priv;
11454 	int retry_cnt = 0;
11455 	int ret;
11456 
11457 retry:
11458 	down(&hdev->reset_sem);
11459 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11460 	hdev->reset_type = rst_type;
11461 	ret = hclge_reset_prepare(hdev);
11462 	if (ret || hdev->reset_pending) {
11463 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11464 			ret);
11465 		if (hdev->reset_pending ||
11466 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11467 			dev_err(&hdev->pdev->dev,
11468 				"reset_pending:0x%lx, retry_cnt:%d\n",
11469 				hdev->reset_pending, retry_cnt);
11470 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11471 			up(&hdev->reset_sem);
11472 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11473 			goto retry;
11474 		}
11475 	}
11476 
11477 	/* disable misc vector before reset done */
11478 	hclge_enable_vector(&hdev->misc_vector, false);
11479 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11480 
11481 	if (hdev->reset_type == HNAE3_FLR_RESET)
11482 		hdev->rst_stats.flr_rst_cnt++;
11483 }
11484 
11485 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11486 {
11487 	struct hclge_dev *hdev = ae_dev->priv;
11488 	int ret;
11489 
11490 	hclge_enable_vector(&hdev->misc_vector, true);
11491 
11492 	ret = hclge_reset_rebuild(hdev);
11493 	if (ret)
11494 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11495 
11496 	hdev->reset_type = HNAE3_NONE_RESET;
11497 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11498 	up(&hdev->reset_sem);
11499 }
11500 
11501 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11502 {
11503 	u16 i;
11504 
11505 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11506 		struct hclge_vport *vport = &hdev->vport[i];
11507 		int ret;
11508 
		/* Send cmd to clear vport's FUNC_RST_ING */
11510 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11511 		if (ret)
11512 			dev_warn(&hdev->pdev->dev,
11513 				 "clear vport(%u) rst failed %d!\n",
11514 				 vport->vport_id, ret);
11515 	}
11516 }
11517 
11518 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11519 {
11520 	struct hclge_desc desc;
11521 	int ret;
11522 
11523 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11524 
11525 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. Since -EOPNOTSUPP can only be returned by
	 * older firmware running this command, override that value and
	 * return success to keep the code backward compatible.
	 */
11532 	if (ret && ret != -EOPNOTSUPP) {
11533 		dev_err(&hdev->pdev->dev,
11534 			"failed to clear hw resource, ret = %d\n", ret);
11535 		return ret;
11536 	}
11537 	return 0;
11538 }
11539 
11540 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11541 {
11542 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11543 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11544 }
11545 
11546 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11547 {
11548 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11549 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11550 }
11551 
11552 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11553 {
11554 	struct pci_dev *pdev = ae_dev->pdev;
11555 	struct hclge_dev *hdev;
11556 	int ret;
11557 
11558 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11559 	if (!hdev)
11560 		return -ENOMEM;
11561 
11562 	hdev->pdev = pdev;
11563 	hdev->ae_dev = ae_dev;
11564 	hdev->reset_type = HNAE3_NONE_RESET;
11565 	hdev->reset_level = HNAE3_FUNC_RESET;
11566 	ae_dev->priv = hdev;
11567 
	/* HW supports 2 layers of VLAN tags */
11569 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11570 
11571 	mutex_init(&hdev->vport_lock);
11572 	spin_lock_init(&hdev->fd_rule_lock);
11573 	sema_init(&hdev->reset_sem, 1);
11574 
11575 	ret = hclge_pci_init(hdev);
11576 	if (ret)
11577 		goto out;
11578 
11579 	ret = hclge_devlink_init(hdev);
11580 	if (ret)
11581 		goto err_pci_uninit;
11582 
	/* Initialize the firmware command queue */
11584 	ret = hclge_cmd_queue_init(hdev);
11585 	if (ret)
11586 		goto err_devlink_uninit;
11587 
	/* Initialize the firmware command */
11589 	ret = hclge_cmd_init(hdev);
11590 	if (ret)
11591 		goto err_cmd_uninit;
11592 
	ret = hclge_clear_hw_resource(hdev);
11594 	if (ret)
11595 		goto err_cmd_uninit;
11596 
11597 	ret = hclge_get_cap(hdev);
11598 	if (ret)
11599 		goto err_cmd_uninit;
11600 
11601 	ret = hclge_query_dev_specs(hdev);
11602 	if (ret) {
11603 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11604 			ret);
11605 		goto err_cmd_uninit;
11606 	}
11607 
11608 	ret = hclge_configure(hdev);
11609 	if (ret) {
11610 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11611 		goto err_cmd_uninit;
11612 	}
11613 
11614 	ret = hclge_init_msi(hdev);
11615 	if (ret) {
11616 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11617 		goto err_cmd_uninit;
11618 	}
11619 
11620 	ret = hclge_misc_irq_init(hdev);
11621 	if (ret)
11622 		goto err_msi_uninit;
11623 
11624 	ret = hclge_alloc_tqps(hdev);
11625 	if (ret) {
11626 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11627 		goto err_msi_irq_uninit;
11628 	}
11629 
11630 	ret = hclge_alloc_vport(hdev);
11631 	if (ret)
11632 		goto err_msi_irq_uninit;
11633 
11634 	ret = hclge_map_tqp(hdev);
11635 	if (ret)
11636 		goto err_msi_irq_uninit;
11637 
11638 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11639 	    !hnae3_dev_phy_imp_supported(hdev)) {
11640 		ret = hclge_mac_mdio_config(hdev);
11641 		if (ret)
11642 			goto err_msi_irq_uninit;
11643 	}
11644 
11645 	ret = hclge_init_umv_space(hdev);
11646 	if (ret)
11647 		goto err_mdiobus_unreg;
11648 
11649 	ret = hclge_mac_init(hdev);
11650 	if (ret) {
11651 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11652 		goto err_mdiobus_unreg;
11653 	}
11654 
11655 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11656 	if (ret) {
11657 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11658 		goto err_mdiobus_unreg;
11659 	}
11660 
11661 	ret = hclge_config_gro(hdev);
11662 	if (ret)
11663 		goto err_mdiobus_unreg;
11664 
11665 	ret = hclge_init_vlan_config(hdev);
11666 	if (ret) {
11667 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11668 		goto err_mdiobus_unreg;
11669 	}
11670 
11671 	ret = hclge_tm_schd_init(hdev);
11672 	if (ret) {
11673 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11674 		goto err_mdiobus_unreg;
11675 	}
11676 
11677 	ret = hclge_rss_init_cfg(hdev);
11678 	if (ret) {
11679 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11680 		goto err_mdiobus_unreg;
11681 	}
11682 
11683 	ret = hclge_rss_init_hw(hdev);
11684 	if (ret) {
11685 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11686 		goto err_mdiobus_unreg;
11687 	}
11688 
11689 	ret = init_mgr_tbl(hdev);
11690 	if (ret) {
11691 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11692 		goto err_mdiobus_unreg;
11693 	}
11694 
11695 	ret = hclge_init_fd_config(hdev);
11696 	if (ret) {
11697 		dev_err(&pdev->dev,
11698 			"fd table init fail, ret=%d\n", ret);
11699 		goto err_mdiobus_unreg;
11700 	}
11701 
11702 	ret = hclge_ptp_init(hdev);
11703 	if (ret)
11704 		goto err_mdiobus_unreg;
11705 
11706 	INIT_KFIFO(hdev->mac_tnl_log);
11707 
11708 	hclge_dcb_ops_set(hdev);
11709 
11710 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11711 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11712 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
11716 	hclge_misc_affinity_setup(hdev);
11717 
11718 	hclge_clear_all_event_cause(hdev);
11719 	hclge_clear_resetting_state(hdev);
11720 
	/* Log and clear the hw errors that have already occurred */
11722 	if (hnae3_dev_ras_imp_supported(hdev))
11723 		hclge_handle_occurred_error(hdev);
11724 	else
11725 		hclge_handle_all_hns_hw_errors(ae_dev);
11726 
	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on a PF may affect the pending
	 * initialization of other PFs
	 */
11730 	if (ae_dev->hw_err_reset_req) {
11731 		enum hnae3_reset_type reset_level;
11732 
11733 		reset_level = hclge_get_reset_level(ae_dev,
11734 						    &ae_dev->hw_err_reset_req);
11735 		hclge_set_def_reset_request(ae_dev, reset_level);
11736 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11737 	}
11738 
11739 	hclge_init_rxd_adv_layout(hdev);
11740 
11741 	/* Enable MISC vector(vector0) */
11742 	hclge_enable_vector(&hdev->misc_vector, true);
11743 
11744 	hclge_state_init(hdev);
11745 	hdev->last_reset_time = jiffies;
11746 
11747 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11748 		 HCLGE_DRIVER_NAME);
11749 
11750 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11751 
11752 	return 0;
11753 
11754 err_mdiobus_unreg:
11755 	if (hdev->hw.mac.phydev)
11756 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11757 err_msi_irq_uninit:
11758 	hclge_misc_irq_uninit(hdev);
11759 err_msi_uninit:
11760 	pci_free_irq_vectors(pdev);
11761 err_cmd_uninit:
11762 	hclge_cmd_uninit(hdev);
11763 err_devlink_uninit:
11764 	hclge_devlink_uninit(hdev);
11765 err_pci_uninit:
11766 	pcim_iounmap(pdev, hdev->hw.io_base);
11767 	pci_clear_master(pdev);
11768 	pci_release_regions(pdev);
11769 	pci_disable_device(pdev);
11770 out:
11771 	mutex_destroy(&hdev->vport_lock);
11772 	return ret;
11773 }
11774 
11775 static void hclge_stats_clear(struct hclge_dev *hdev)
11776 {
11777 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11778 }
11779 
11780 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11781 {
11782 	return hclge_config_switch_param(hdev, vf, enable,
11783 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11784 }
11785 
11786 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11787 {
11788 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11789 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11790 					  enable, vf);
11791 }
11792 
11793 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11794 {
11795 	int ret;
11796 
11797 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11798 	if (ret) {
11799 		dev_err(&hdev->pdev->dev,
11800 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11801 			vf, enable ? "on" : "off", ret);
11802 		return ret;
11803 	}
11804 
11805 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11806 	if (ret)
11807 		dev_err(&hdev->pdev->dev,
11808 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11809 			vf, enable ? "on" : "off", ret);
11810 
11811 	return ret;
11812 }
11813 
11814 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11815 				 bool enable)
11816 {
11817 	struct hclge_vport *vport = hclge_get_vport(handle);
11818 	struct hclge_dev *hdev = vport->back;
11819 	u32 new_spoofchk = enable ? 1 : 0;
11820 	int ret;
11821 
11822 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11823 		return -EOPNOTSUPP;
11824 
11825 	vport = hclge_get_vf_vport(hdev, vf);
11826 	if (!vport)
11827 		return -EINVAL;
11828 
11829 	if (vport->vf_info.spoofchk == new_spoofchk)
11830 		return 0;
11831 
11832 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11833 		dev_warn(&hdev->pdev->dev,
11834 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11835 			 vf);
11836 	else if (enable && hclge_is_umv_space_full(vport, true))
11837 		dev_warn(&hdev->pdev->dev,
11838 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11839 			 vf);
11840 
11841 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11842 	if (ret)
11843 		return ret;
11844 
11845 	vport->vf_info.spoofchk = new_spoofchk;
11846 	return 0;
11847 }
11848 
11849 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11850 {
11851 	struct hclge_vport *vport = hdev->vport;
11852 	int ret;
11853 	int i;
11854 
11855 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11856 		return 0;
11857 
11858 	/* resume the vf spoof check state after reset */
11859 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11860 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11861 					       vport->vf_info.spoofchk);
11862 		if (ret)
11863 			return ret;
11864 
11865 		vport++;
11866 	}
11867 
11868 	return 0;
11869 }
11870 
11871 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11872 {
11873 	struct hclge_vport *vport = hclge_get_vport(handle);
11874 	struct hclge_dev *hdev = vport->back;
11875 	u32 new_trusted = enable ? 1 : 0;
11876 
11877 	vport = hclge_get_vf_vport(hdev, vf);
11878 	if (!vport)
11879 		return -EINVAL;
11880 
11881 	if (vport->vf_info.trusted == new_trusted)
11882 		return 0;
11883 
11884 	vport->vf_info.trusted = new_trusted;
11885 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11886 	hclge_task_schedule(hdev, 0);
11887 
11888 	return 0;
11889 }
11890 
11891 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11892 {
11893 	int ret;
11894 	int vf;
11895 
11896 	/* reset vf rate to default value */
11897 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11898 		struct hclge_vport *vport = &hdev->vport[vf];
11899 
11900 		vport->vf_info.max_tx_rate = 0;
11901 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11902 		if (ret)
11903 			dev_err(&hdev->pdev->dev,
11904 				"vf%d failed to reset to default, ret=%d\n",
11905 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11906 	}
11907 }
11908 
11909 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11910 				     int min_tx_rate, int max_tx_rate)
11911 {
11912 	if (min_tx_rate != 0 ||
11913 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11914 		dev_err(&hdev->pdev->dev,
11915 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11916 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11917 		return -EINVAL;
11918 	}
11919 
11920 	return 0;
11921 }
11922 
11923 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11924 			     int min_tx_rate, int max_tx_rate, bool force)
11925 {
11926 	struct hclge_vport *vport = hclge_get_vport(handle);
11927 	struct hclge_dev *hdev = vport->back;
11928 	int ret;
11929 
11930 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11931 	if (ret)
11932 		return ret;
11933 
11934 	vport = hclge_get_vf_vport(hdev, vf);
11935 	if (!vport)
11936 		return -EINVAL;
11937 
11938 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11939 		return 0;
11940 
11941 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11942 	if (ret)
11943 		return ret;
11944 
11945 	vport->vf_info.max_tx_rate = max_tx_rate;
11946 
11947 	return 0;
11948 }
11949 
11950 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11951 {
11952 	struct hnae3_handle *handle = &hdev->vport->nic;
11953 	struct hclge_vport *vport;
11954 	int ret;
11955 	int vf;
11956 
11957 	/* resume the vf max_tx_rate after reset */
11958 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11959 		vport = hclge_get_vf_vport(hdev, vf);
11960 		if (!vport)
11961 			return -EINVAL;
11962 
		/* zero means max rate; after reset, the firmware has already
		 * set it to max rate, so just continue.
		 */
11966 		if (!vport->vf_info.max_tx_rate)
11967 			continue;
11968 
11969 		ret = hclge_set_vf_rate(handle, vf, 0,
11970 					vport->vf_info.max_tx_rate, true);
11971 		if (ret) {
11972 			dev_err(&hdev->pdev->dev,
11973 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11974 				vf, vport->vf_info.max_tx_rate, ret);
11975 			return ret;
11976 		}
11977 	}
11978 
11979 	return 0;
11980 }
11981 
11982 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11983 {
11984 	struct hclge_vport *vport = hdev->vport;
11985 	int i;
11986 
11987 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11988 		hclge_vport_stop(vport);
11989 		vport++;
11990 	}
11991 }
11992 
11993 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11994 {
11995 	struct hclge_dev *hdev = ae_dev->priv;
11996 	struct pci_dev *pdev = ae_dev->pdev;
11997 	int ret;
11998 
11999 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
12000 
12001 	hclge_stats_clear(hdev);
	/* NOTE: a PF reset doesn't need to clear or restore the PF and VF
	 * table entries, so the tables in memory should not be cleaned here.
	 */
12005 	if (hdev->reset_type == HNAE3_IMP_RESET ||
12006 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
12007 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12008 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12009 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12010 		hclge_reset_umv_space(hdev);
12011 	}
12012 
12013 	ret = hclge_cmd_init(hdev);
12014 	if (ret) {
12015 		dev_err(&pdev->dev, "Cmd queue init failed\n");
12016 		return ret;
12017 	}
12018 
12019 	ret = hclge_map_tqp(hdev);
12020 	if (ret) {
12021 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12022 		return ret;
12023 	}
12024 
12025 	ret = hclge_mac_init(hdev);
12026 	if (ret) {
12027 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12028 		return ret;
12029 	}
12030 
12031 	ret = hclge_tp_port_init(hdev);
12032 	if (ret) {
12033 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12034 			ret);
12035 		return ret;
12036 	}
12037 
12038 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12039 	if (ret) {
12040 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12041 		return ret;
12042 	}
12043 
12044 	ret = hclge_config_gro(hdev);
12045 	if (ret)
12046 		return ret;
12047 
12048 	ret = hclge_init_vlan_config(hdev);
12049 	if (ret) {
12050 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12051 		return ret;
12052 	}
12053 
12054 	ret = hclge_tm_init_hw(hdev, true);
12055 	if (ret) {
12056 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12057 		return ret;
12058 	}
12059 
12060 	ret = hclge_rss_init_hw(hdev);
12061 	if (ret) {
12062 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12063 		return ret;
12064 	}
12065 
12066 	ret = init_mgr_tbl(hdev);
12067 	if (ret) {
12068 		dev_err(&pdev->dev,
12069 			"failed to reinit manager table, ret = %d\n", ret);
12070 		return ret;
12071 	}
12072 
12073 	ret = hclge_init_fd_config(hdev);
12074 	if (ret) {
12075 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12076 		return ret;
12077 	}
12078 
12079 	ret = hclge_ptp_init(hdev);
12080 	if (ret)
12081 		return ret;
12082 
	/* Log and clear the hw errors that have already occurred */
12084 	if (hnae3_dev_ras_imp_supported(hdev))
12085 		hclge_handle_occurred_error(hdev);
12086 	else
12087 		hclge_handle_all_hns_hw_errors(ae_dev);
12088 
12089 	/* Re-enable the hw error interrupts because
12090 	 * the interrupts get disabled on global reset.
12091 	 */
12092 	ret = hclge_config_nic_hw_error(hdev, true);
12093 	if (ret) {
12094 		dev_err(&pdev->dev,
12095 			"fail(%d) to re-enable NIC hw error interrupts\n",
12096 			ret);
12097 		return ret;
12098 	}
12099 
12100 	if (hdev->roce_client) {
12101 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12102 		if (ret) {
12103 			dev_err(&pdev->dev,
12104 				"fail(%d) to re-enable roce ras interrupts\n",
12105 				ret);
12106 			return ret;
12107 		}
12108 	}
12109 
12110 	hclge_reset_vport_state(hdev);
12111 	ret = hclge_reset_vport_spoofchk(hdev);
12112 	if (ret)
12113 		return ret;
12114 
12115 	ret = hclge_resume_vf_rate(hdev);
12116 	if (ret)
12117 		return ret;
12118 
12119 	hclge_init_rxd_adv_layout(hdev);
12120 
12121 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12122 		 HCLGE_DRIVER_NAME);
12123 
12124 	return 0;
12125 }
12126 
12127 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12128 {
12129 	struct hclge_dev *hdev = ae_dev->priv;
12130 	struct hclge_mac *mac = &hdev->hw.mac;
12131 
12132 	hclge_reset_vf_rate(hdev);
12133 	hclge_clear_vf_vlan(hdev);
12134 	hclge_misc_affinity_teardown(hdev);
12135 	hclge_state_uninit(hdev);
12136 	hclge_ptp_uninit(hdev);
12137 	hclge_uninit_rxd_adv_layout(hdev);
12138 	hclge_uninit_mac_table(hdev);
12139 	hclge_del_all_fd_entries(hdev);
12140 
12141 	if (mac->phydev)
12142 		mdiobus_unregister(mac->mdio_bus);
12143 
12144 	/* Disable MISC vector(vector0) */
12145 	hclge_enable_vector(&hdev->misc_vector, false);
12146 	synchronize_irq(hdev->misc_vector.vector_irq);
12147 
12148 	/* Disable all hw interrupts */
12149 	hclge_config_mac_tnl_int(hdev, false);
12150 	hclge_config_nic_hw_error(hdev, false);
12151 	hclge_config_rocee_ras_interrupt(hdev, false);
12152 
12153 	hclge_cmd_uninit(hdev);
12154 	hclge_misc_irq_uninit(hdev);
12155 	hclge_devlink_uninit(hdev);
12156 	hclge_pci_uninit(hdev);
12157 	mutex_destroy(&hdev->vport_lock);
12158 	hclge_uninit_vport_vlan_table(hdev);
12159 	ae_dev->priv = NULL;
12160 }
12161 
12162 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12163 {
12164 	struct hclge_vport *vport = hclge_get_vport(handle);
12165 	struct hclge_dev *hdev = vport->back;
12166 
12167 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12168 }
12169 
12170 static void hclge_get_channels(struct hnae3_handle *handle,
12171 			       struct ethtool_channels *ch)
12172 {
12173 	ch->max_combined = hclge_get_max_channels(handle);
12174 	ch->other_count = 1;
12175 	ch->max_other = 1;
12176 	ch->combined_count = handle->kinfo.rss_size;
12177 }
12178 
12179 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12180 					u16 *alloc_tqps, u16 *max_rss_size)
12181 {
12182 	struct hclge_vport *vport = hclge_get_vport(handle);
12183 	struct hclge_dev *hdev = vport->back;
12184 
12185 	*alloc_tqps = vport->alloc_tqps;
12186 	*max_rss_size = hdev->pf_rss_size_max;
12187 }
12188 
12189 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12190 			      bool rxfh_configured)
12191 {
12192 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12193 	struct hclge_vport *vport = hclge_get_vport(handle);
12194 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12195 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12196 	struct hclge_dev *hdev = vport->back;
12197 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12198 	u16 cur_rss_size = kinfo->rss_size;
12199 	u16 cur_tqps = kinfo->num_tqps;
12200 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12201 	u16 roundup_size;
12202 	u32 *rss_indir;
12203 	unsigned int i;
12204 	int ret;
12205 
12206 	kinfo->req_rss_size = new_tqps_num;
12207 
12208 	ret = hclge_tm_vport_map_update(hdev);
12209 	if (ret) {
12210 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12211 		return ret;
12212 	}
12213 
12214 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12215 	roundup_size = ilog2(roundup_size);
12216 	/* Set the RSS TC mode according to the new RSS size */
12217 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12218 		tc_valid[i] = 0;
12219 
12220 		if (!(hdev->hw_tc_map & BIT(i)))
12221 			continue;
12222 
12223 		tc_valid[i] = 1;
12224 		tc_size[i] = roundup_size;
12225 		tc_offset[i] = kinfo->rss_size * i;
12226 	}
12227 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12228 	if (ret)
12229 		return ret;
12230 
12231 	/* RSS indirection table has been configured by user */
12232 	if (rxfh_configured)
12233 		goto out;
12234 
	/* Reinitialize the RSS indirection table according to the new RSS size */
12236 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12237 			    GFP_KERNEL);
12238 	if (!rss_indir)
12239 		return -ENOMEM;
12240 
12241 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12242 		rss_indir[i] = i % kinfo->rss_size;
12243 
12244 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12245 	if (ret)
12246 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12247 			ret);
12248 
12249 	kfree(rss_indir);
12250 
12251 out:
12252 	if (!ret)
12253 		dev_info(&hdev->pdev->dev,
12254 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12255 			 cur_rss_size, kinfo->rss_size,
12256 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12257 
12258 	return ret;
12259 }
12260 
12261 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12262 			      u32 *regs_num_64_bit)
12263 {
12264 	struct hclge_desc desc;
12265 	u32 total_num;
12266 	int ret;
12267 
12268 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12269 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12270 	if (ret) {
12271 		dev_err(&hdev->pdev->dev,
12272 			"Query register number cmd failed, ret = %d.\n", ret);
12273 		return ret;
12274 	}
12275 
12276 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12277 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12278 
12279 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12280 	if (!total_num)
12281 		return -EINVAL;
12282 
12283 	return 0;
12284 }
12285 
12286 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12287 				 void *data)
12288 {
12289 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12290 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12291 
12292 	struct hclge_desc *desc;
12293 	u32 *reg_val = data;
12294 	__le32 *desc_data;
12295 	int nodata_num;
12296 	int cmd_num;
12297 	int i, k, n;
12298 	int ret;
12299 
12300 	if (regs_num == 0)
12301 		return 0;
12302 
12303 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12304 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12305 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12306 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12307 	if (!desc)
12308 		return -ENOMEM;
12309 
12310 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12311 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12312 	if (ret) {
12313 		dev_err(&hdev->pdev->dev,
12314 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12315 		kfree(desc);
12316 		return ret;
12317 	}
12318 
12319 	for (i = 0; i < cmd_num; i++) {
12320 		if (i == 0) {
12321 			desc_data = (__le32 *)(&desc[i].data[0]);
12322 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12323 		} else {
12324 			desc_data = (__le32 *)(&desc[i]);
12325 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12326 		}
12327 		for (k = 0; k < n; k++) {
12328 			*reg_val++ = le32_to_cpu(*desc_data++);
12329 
12330 			regs_num--;
12331 			if (!regs_num)
12332 				break;
12333 		}
12334 	}
12335 
12336 	kfree(desc);
12337 	return 0;
12338 }
12339 
12340 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12341 				 void *data)
12342 {
12343 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12344 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12345 
12346 	struct hclge_desc *desc;
12347 	u64 *reg_val = data;
12348 	__le64 *desc_data;
12349 	int nodata_len;
12350 	int cmd_num;
12351 	int i, k, n;
12352 	int ret;
12353 
12354 	if (regs_num == 0)
12355 		return 0;
12356 
12357 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12358 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12359 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12360 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12361 	if (!desc)
12362 		return -ENOMEM;
12363 
12364 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12365 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12366 	if (ret) {
12367 		dev_err(&hdev->pdev->dev,
12368 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12369 		kfree(desc);
12370 		return ret;
12371 	}
12372 
12373 	for (i = 0; i < cmd_num; i++) {
12374 		if (i == 0) {
12375 			desc_data = (__le64 *)(&desc[i].data[0]);
12376 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12377 		} else {
12378 			desc_data = (__le64 *)(&desc[i]);
12379 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12380 		}
12381 		for (k = 0; k < n; k++) {
12382 			*reg_val++ = le64_to_cpu(*desc_data++);
12383 
12384 			regs_num--;
12385 			if (!regs_num)
12386 				break;
12387 		}
12388 	}
12389 
12390 	kfree(desc);
12391 	return 0;
12392 }
12393 
12394 #define MAX_SEPARATE_NUM	4
12395 #define SEPARATOR_VALUE		0xFDFCFBFA
12396 #define REG_NUM_PER_LINE	4
12397 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12398 #define REG_SEPARATOR_LINE	1
12399 #define REG_NUM_REMAIN_MASK	3
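/* Register dump layout: values are grouped in lines of REG_NUM_PER_LINE
 * u32 words, and each register group is followed by one to four
 * SEPARATOR_VALUE words so that it ends on a line boundary.
 */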
12400 
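/* query the BD number of every DFX register type in one linked command */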
12401 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12402 {
12403 	int i;
12404 
12405 	/* initialize all command BDs except the last one */
12406 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12407 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12408 					   true);
12409 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12410 	}
12411 
12412 	/* initialize the last command BD */
12413 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12414 
12415 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12416 }
12417 
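/* parse the BD number of each DFX register type into bd_num_list */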
12418 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12419 				    int *bd_num_list,
12420 				    u32 type_num)
12421 {
12422 	u32 entries_per_desc, desc_index, index, offset, i;
12423 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12424 	int ret;
12425 
12426 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12427 	if (ret) {
12428 		dev_err(&hdev->pdev->dev,
12429 			"Get dfx bd num fail, status is %d.\n", ret);
12430 		return ret;
12431 	}
12432 
12433 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12434 	for (i = 0; i < type_num; i++) {
12435 		offset = hclge_dfx_bd_offset_list[i];
12436 		index = offset % entries_per_desc;
12437 		desc_index = offset / entries_per_desc;
12438 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12439 	}
12440 
12441 	return ret;
12442 }
12443 
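/* send a command of bd_num linked descriptors to read one DFX register type */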
12444 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12445 				  struct hclge_desc *desc_src, int bd_num,
12446 				  enum hclge_opcode_type cmd)
12447 {
12448 	struct hclge_desc *desc = desc_src;
12449 	int i, ret;
12450 
12451 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12452 	for (i = 0; i < bd_num - 1; i++) {
12453 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12454 		desc++;
12455 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12456 	}
12457 
12458 	desc = desc_src;
12459 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12460 	if (ret)
12461 		dev_err(&hdev->pdev->dev,
12462 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12463 			cmd, ret);
12464 
12465 	return ret;
12466 }
12467 
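/* copy register values from the descriptors into data, append separator
 * padding and return the number of u32 words written
 */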
12468 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12469 				    void *data)
12470 {
12471 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12472 	struct hclge_desc *desc = desc_src;
12473 	u32 *reg = data;
12474 
12475 	entries_per_desc = ARRAY_SIZE(desc->data);
12476 	reg_num = entries_per_desc * bd_num;
12477 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12478 	for (i = 0; i < reg_num; i++) {
12479 		index = i % entries_per_desc;
12480 		desc_index = i / entries_per_desc;
12481 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12482 	}
12483 	for (i = 0; i < separator_num; i++)
12484 		*reg++ = SEPARATOR_VALUE;
12485 
12486 	return reg_num + separator_num;
12487 }
12488 
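/* calculate the buffer length in bytes needed to dump all DFX registers */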
12489 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12490 {
12491 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12492 	int data_len_per_desc, bd_num, i;
12493 	int *bd_num_list;
12494 	u32 data_len;
12495 	int ret;
12496 
12497 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12498 	if (!bd_num_list)
12499 		return -ENOMEM;
12500 
12501 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12502 	if (ret) {
12503 		dev_err(&hdev->pdev->dev,
12504 			"Get dfx reg bd num fail, status is %d.\n", ret);
12505 		goto out;
12506 	}
12507 
12508 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12509 	*len = 0;
12510 	for (i = 0; i < dfx_reg_type_num; i++) {
12511 		bd_num = bd_num_list[i];
12512 		data_len = data_len_per_desc * bd_num;
12513 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12514 	}
12515 
12516 out:
12517 	kfree(bd_num_list);
12518 	return ret;
12519 }
12520 
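/* dump every DFX register type into data, reusing one descriptor buffer
 * sized for the largest type
 */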
12521 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12522 {
12523 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12524 	int bd_num, bd_num_max, buf_len, i;
12525 	struct hclge_desc *desc_src;
12526 	int *bd_num_list;
12527 	u32 *reg = data;
12528 	int ret;
12529 
12530 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12531 	if (!bd_num_list)
12532 		return -ENOMEM;
12533 
12534 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12535 	if (ret) {
12536 		dev_err(&hdev->pdev->dev,
12537 			"Get dfx reg bd num fail, status is %d.\n", ret);
12538 		goto out;
12539 	}
12540 
12541 	bd_num_max = bd_num_list[0];
12542 	for (i = 1; i < dfx_reg_type_num; i++)
12543 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12544 
12545 	buf_len = sizeof(*desc_src) * bd_num_max;
12546 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12547 	if (!desc_src) {
12548 		ret = -ENOMEM;
12549 		goto out;
12550 	}
12551 
12552 	for (i = 0; i < dfx_reg_type_num; i++) {
12553 		bd_num = bd_num_list[i];
12554 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12555 					     hclge_dfx_reg_opcode_list[i]);
12556 		if (ret) {
12557 			dev_err(&hdev->pdev->dev,
12558 				"Get dfx reg fail, status is %d.\n", ret);
12559 			break;
12560 		}
12561 
12562 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12563 	}
12564 
12565 	kfree(desc_src);
12566 out:
12567 	kfree(bd_num_list);
12568 	return ret;
12569 }
12570 
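/* read the cmdq, common, per-ring and per-vector interrupt registers
 * directly from the PF register space and return the number of u32 words
 * written, separators included
 */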
12571 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12572 			      struct hnae3_knic_private_info *kinfo)
12573 {
12574 #define HCLGE_RING_REG_OFFSET		0x200
12575 #define HCLGE_RING_INT_REG_OFFSET	0x4
12576 
12577 	int i, j, reg_num, separator_num;
12578 	int data_num_sum;
12579 	u32 *reg = data;
12580 
12581 	/* fetch per-PF register values from the PF PCIe register space */
12582 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12583 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12584 	for (i = 0; i < reg_num; i++)
12585 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12586 	for (i = 0; i < separator_num; i++)
12587 		*reg++ = SEPARATOR_VALUE;
12588 	data_num_sum = reg_num + separator_num;
12589 
12590 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12591 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12592 	for (i = 0; i < reg_num; i++)
12593 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12594 	for (i = 0; i < separator_num; i++)
12595 		*reg++ = SEPARATOR_VALUE;
12596 	data_num_sum += reg_num + separator_num;
12597 
12598 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12599 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12600 	for (j = 0; j < kinfo->num_tqps; j++) {
12601 		for (i = 0; i < reg_num; i++)
12602 			*reg++ = hclge_read_dev(&hdev->hw,
12603 						ring_reg_addr_list[i] +
12604 						HCLGE_RING_REG_OFFSET * j);
12605 		for (i = 0; i < separator_num; i++)
12606 			*reg++ = SEPARATOR_VALUE;
12607 	}
12608 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12609 
12610 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12611 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12612 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12613 		for (i = 0; i < reg_num; i++)
12614 			*reg++ = hclge_read_dev(&hdev->hw,
12615 						tqp_intr_reg_addr_list[i] +
12616 						HCLGE_RING_INT_REG_OFFSET * j);
12617 		for (i = 0; i < separator_num; i++)
12618 			*reg++ = SEPARATOR_VALUE;
12619 	}
12620 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12621 
12622 	return data_num_sum;
12623 }
12624 
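/* return the total length in bytes of the register dump: directly read PF
 * registers plus the 32-bit, 64-bit and DFX registers queried from firmware
 */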
12625 static int hclge_get_regs_len(struct hnae3_handle *handle)
12626 {
12627 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12628 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12629 	struct hclge_vport *vport = hclge_get_vport(handle);
12630 	struct hclge_dev *hdev = vport->back;
12631 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12632 	int regs_lines_32_bit, regs_lines_64_bit;
12633 	int ret;
12634 
12635 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12636 	if (ret) {
12637 		dev_err(&hdev->pdev->dev,
12638 			"Get register number failed, ret = %d.\n", ret);
12639 		return ret;
12640 	}
12641 
12642 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12643 	if (ret) {
12644 		dev_err(&hdev->pdev->dev,
12645 			"Get dfx reg len failed, ret = %d.\n", ret);
12646 		return ret;
12647 	}
12648 
12649 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12650 		REG_SEPARATOR_LINE;
12651 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12652 		REG_SEPARATOR_LINE;
12653 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12654 		REG_SEPARATOR_LINE;
12655 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12656 		REG_SEPARATOR_LINE;
12657 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12658 		REG_SEPARATOR_LINE;
12659 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12660 		REG_SEPARATOR_LINE;
12661 
12662 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12663 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12664 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12665 }
12666 
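/* fill data with the complete register dump and report the firmware version */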
12667 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12668 			   void *data)
12669 {
12670 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12671 	struct hclge_vport *vport = hclge_get_vport(handle);
12672 	struct hclge_dev *hdev = vport->back;
12673 	u32 regs_num_32_bit, regs_num_64_bit;
12674 	int i, reg_num, separator_num, ret;
12675 	u32 *reg = data;
12676 
12677 	*version = hdev->fw_version;
12678 
12679 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12680 	if (ret) {
12681 		dev_err(&hdev->pdev->dev,
12682 			"Get register number failed, ret = %d.\n", ret);
12683 		return;
12684 	}
12685 
12686 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12687 
12688 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12689 	if (ret) {
12690 		dev_err(&hdev->pdev->dev,
12691 			"Get 32 bit register failed, ret = %d.\n", ret);
12692 		return;
12693 	}
12694 	reg_num = regs_num_32_bit;
12695 	reg += reg_num;
12696 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12697 	for (i = 0; i < separator_num; i++)
12698 		*reg++ = SEPARATOR_VALUE;
12699 
12700 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12701 	if (ret) {
12702 		dev_err(&hdev->pdev->dev,
12703 			"Get 64 bit register failed, ret = %d.\n", ret);
12704 		return;
12705 	}
12706 	reg_num = regs_num_64_bit * 2;
12707 	reg += reg_num;
12708 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12709 	for (i = 0; i < separator_num; i++)
12710 		*reg++ = SEPARATOR_VALUE;
12711 
12712 	ret = hclge_get_dfx_reg(hdev, reg);
12713 	if (ret)
12714 		dev_err(&hdev->pdev->dev,
12715 			"Get dfx register failed, ret = %d.\n", ret);
12716 }
12717 
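/* set the locate LED state through the LED status config command */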
12718 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12719 {
12720 	struct hclge_set_led_state_cmd *req;
12721 	struct hclge_desc desc;
12722 	int ret;
12723 
12724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12725 
12726 	req = (struct hclge_set_led_state_cmd *)desc.data;
12727 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12728 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12729 
12730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12731 	if (ret)
12732 		dev_err(&hdev->pdev->dev,
12733 			"Send set led state cmd error, ret = %d\n", ret);
12734 
12735 	return ret;
12736 }
12737 
12738 enum hclge_led_status {
12739 	HCLGE_LED_OFF,
12740 	HCLGE_LED_ON,
12741 	HCLGE_LED_NO_CHANGE = 0xFF,
12742 };
12743 
12744 static int hclge_set_led_id(struct hnae3_handle *handle,
12745 			    enum ethtool_phys_id_state status)
12746 {
12747 	struct hclge_vport *vport = hclge_get_vport(handle);
12748 	struct hclge_dev *hdev = vport->back;
12749 
12750 	switch (status) {
12751 	case ETHTOOL_ID_ACTIVE:
12752 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12753 	case ETHTOOL_ID_INACTIVE:
12754 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12755 	default:
12756 		return -EINVAL;
12757 	}
12758 }
12759 
12760 static void hclge_get_link_mode(struct hnae3_handle *handle,
12761 				unsigned long *supported,
12762 				unsigned long *advertising)
12763 {
12764 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12765 	struct hclge_vport *vport = hclge_get_vport(handle);
12766 	struct hclge_dev *hdev = vport->back;
12767 	unsigned int idx = 0;
12768 
12769 	for (; idx < size; idx++) {
12770 		supported[idx] = hdev->hw.mac.supported[idx];
12771 		advertising[idx] = hdev->hw.mac.advertising[idx];
12772 	}
12773 }
12774 
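/* enable or disable hardware GRO, restoring the previous setting if the
 * firmware command fails
 */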
12775 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12776 {
12777 	struct hclge_vport *vport = hclge_get_vport(handle);
12778 	struct hclge_dev *hdev = vport->back;
12779 	bool gro_en_old = hdev->gro_en;
12780 	int ret;
12781 
12782 	hdev->gro_en = enable;
12783 	ret = hclge_config_gro(hdev);
12784 	if (ret)
12785 		hdev->gro_en = gro_en_old;
12786 
12787 	return ret;
12788 }
12789 
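/* sync the promiscuous mode configuration of the PF and all VF vports to
 * hardware when it has been marked as changed
 */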
12790 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12791 {
12792 	struct hclge_vport *vport = &hdev->vport[0];
12793 	struct hnae3_handle *handle = &vport->nic;
12794 	u8 tmp_flags;
12795 	int ret;
12796 	u16 i;
12797 
12798 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12799 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12800 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12801 	}
12802 
12803 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12804 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12805 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12806 					     tmp_flags & HNAE3_MPE);
12807 		if (!ret) {
12808 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12809 				  &vport->state);
12810 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12811 				&vport->state);
12812 		}
12813 	}
12814 
12815 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12816 		bool uc_en = false;
12817 		bool mc_en = false;
12818 		bool bc_en;
12819 
12820 		vport = &hdev->vport[i];
12821 
12822 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12823 					&vport->state))
12824 			continue;
12825 
12826 		if (vport->vf_info.trusted) {
12827 			uc_en = vport->vf_info.request_uc_en > 0 ||
12828 				vport->overflow_promisc_flags &
12829 				HNAE3_OVERFLOW_UPE;
12830 			mc_en = vport->vf_info.request_mc_en > 0 ||
12831 				vport->overflow_promisc_flags &
12832 				HNAE3_OVERFLOW_MPE;
12833 		}
12834 		bc_en = vport->vf_info.request_bc_en > 0;
12835 
12836 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12837 						 mc_en, bc_en);
12838 		if (ret) {
12839 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12840 				&vport->state);
12841 			return;
12842 		}
12843 		hclge_set_vport_vlan_fltr_change(vport);
12844 	}
12845 }
12846 
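/* check whether an SFP module is plugged in */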
12847 static bool hclge_module_existed(struct hclge_dev *hdev)
12848 {
12849 	struct hclge_desc desc;
12850 	u32 existed;
12851 	int ret;
12852 
12853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12854 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12855 	if (ret) {
12856 		dev_err(&hdev->pdev->dev,
12857 			"failed to get SFP exist state, ret = %d\n", ret);
12858 		return false;
12859 	}
12860 
12861 	existed = le32_to_cpu(desc.data[0]);
12862 
12863 	return existed != 0;
12864 }
12865 
12866 /* One read needs 6 BDs (140 bytes in total).
12867  * Return the number of bytes actually read; 0 means the read failed.
12868  */
12869 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12870 				     u32 len, u8 *data)
12871 {
12872 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12873 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12874 	u16 read_len;
12875 	u16 copy_len;
12876 	int ret;
12877 	int i;
12878 
12879 	/* set up all 6 BDs to read module eeprom info. */
12880 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12881 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12882 					   true);
12883 
12884 		/* bd0~bd4 need next flag */
12885 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12886 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12887 	}
12888 
12889 	/* set up bd0, this bd contains offset and read length. */
12890 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12891 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12892 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12893 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12894 
12895 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12896 	if (ret) {
12897 		dev_err(&hdev->pdev->dev,
12898 			"failed to get SFP eeprom info, ret = %d\n", ret);
12899 		return 0;
12900 	}
12901 
12902 	/* copy sfp info from bd0 to out buffer. */
12903 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12904 	memcpy(data, sfp_info_bd0->data, copy_len);
12905 	read_len = copy_len;
12906 
12907 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12908 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12909 		if (read_len >= len)
12910 			return read_len;
12911 
12912 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12913 		memcpy(data + read_len, desc[i].data, copy_len);
12914 		read_len += copy_len;
12915 	}
12916 
12917 	return read_len;
12918 }
12919 
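/* read len bytes of module eeprom starting at offset, looping over
 * firmware reads until the request is satisfied
 */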
12920 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12921 				   u32 len, u8 *data)
12922 {
12923 	struct hclge_vport *vport = hclge_get_vport(handle);
12924 	struct hclge_dev *hdev = vport->back;
12925 	u32 read_len = 0;
12926 	u16 data_len;
12927 
12928 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12929 		return -EOPNOTSUPP;
12930 
12931 	if (!hclge_module_existed(hdev))
12932 		return -ENXIO;
12933 
12934 	while (read_len < len) {
12935 		data_len = hclge_get_sfp_eeprom_info(hdev,
12936 						     offset + read_len,
12937 						     len - read_len,
12938 						     data + read_len);
12939 		if (!data_len)
12940 			return -EIO;
12941 
12942 		read_len += data_len;
12943 	}
12944 
12945 	return 0;
12946 }
12947 
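/* query the link diagnosis status code, only supported on device versions
 * above V2
 */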
12948 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12949 					 u32 *status_code)
12950 {
12951 	struct hclge_vport *vport = hclge_get_vport(handle);
12952 	struct hclge_dev *hdev = vport->back;
12953 	struct hclge_desc desc;
12954 	int ret;
12955 
12956 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12957 		return -EOPNOTSUPP;
12958 
12959 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12960 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12961 	if (ret) {
12962 		dev_err(&hdev->pdev->dev,
12963 			"failed to query link diagnosis info, ret = %d\n", ret);
12964 		return ret;
12965 	}
12966 
12967 	*status_code = le32_to_cpu(desc.data[0]);
12968 	return 0;
12969 }
12970 
12971 static const struct hnae3_ae_ops hclge_ops = {
12972 	.init_ae_dev = hclge_init_ae_dev,
12973 	.uninit_ae_dev = hclge_uninit_ae_dev,
12974 	.reset_prepare = hclge_reset_prepare_general,
12975 	.reset_done = hclge_reset_done,
12976 	.init_client_instance = hclge_init_client_instance,
12977 	.uninit_client_instance = hclge_uninit_client_instance,
12978 	.map_ring_to_vector = hclge_map_ring_to_vector,
12979 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12980 	.get_vector = hclge_get_vector,
12981 	.put_vector = hclge_put_vector,
12982 	.set_promisc_mode = hclge_set_promisc_mode,
12983 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12984 	.set_loopback = hclge_set_loopback,
12985 	.start = hclge_ae_start,
12986 	.stop = hclge_ae_stop,
12987 	.client_start = hclge_client_start,
12988 	.client_stop = hclge_client_stop,
12989 	.get_status = hclge_get_status,
12990 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12991 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12992 	.get_media_type = hclge_get_media_type,
12993 	.check_port_speed = hclge_check_port_speed,
12994 	.get_fec = hclge_get_fec,
12995 	.set_fec = hclge_set_fec,
12996 	.get_rss_key_size = hclge_get_rss_key_size,
12997 	.get_rss = hclge_get_rss,
12998 	.set_rss = hclge_set_rss,
12999 	.set_rss_tuple = hclge_set_rss_tuple,
13000 	.get_rss_tuple = hclge_get_rss_tuple,
13001 	.get_tc_size = hclge_get_tc_size,
13002 	.get_mac_addr = hclge_get_mac_addr,
13003 	.set_mac_addr = hclge_set_mac_addr,
13004 	.do_ioctl = hclge_do_ioctl,
13005 	.add_uc_addr = hclge_add_uc_addr,
13006 	.rm_uc_addr = hclge_rm_uc_addr,
13007 	.add_mc_addr = hclge_add_mc_addr,
13008 	.rm_mc_addr = hclge_rm_mc_addr,
13009 	.set_autoneg = hclge_set_autoneg,
13010 	.get_autoneg = hclge_get_autoneg,
13011 	.restart_autoneg = hclge_restart_autoneg,
13012 	.halt_autoneg = hclge_halt_autoneg,
13013 	.get_pauseparam = hclge_get_pauseparam,
13014 	.set_pauseparam = hclge_set_pauseparam,
13015 	.set_mtu = hclge_set_mtu,
13016 	.reset_queue = hclge_reset_tqp,
13017 	.get_stats = hclge_get_stats,
13018 	.get_mac_stats = hclge_get_mac_stat,
13019 	.update_stats = hclge_update_stats,
13020 	.get_strings = hclge_get_strings,
13021 	.get_sset_count = hclge_get_sset_count,
13022 	.get_fw_version = hclge_get_fw_version,
13023 	.get_mdix_mode = hclge_get_mdix_mode,
13024 	.enable_vlan_filter = hclge_enable_vlan_filter,
13025 	.set_vlan_filter = hclge_set_vlan_filter,
13026 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13027 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13028 	.reset_event = hclge_reset_event,
13029 	.get_reset_level = hclge_get_reset_level,
13030 	.set_default_reset_request = hclge_set_def_reset_request,
13031 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13032 	.set_channels = hclge_set_channels,
13033 	.get_channels = hclge_get_channels,
13034 	.get_regs_len = hclge_get_regs_len,
13035 	.get_regs = hclge_get_regs,
13036 	.set_led_id = hclge_set_led_id,
13037 	.get_link_mode = hclge_get_link_mode,
13038 	.add_fd_entry = hclge_add_fd_entry,
13039 	.del_fd_entry = hclge_del_fd_entry,
13040 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13041 	.get_fd_rule_info = hclge_get_fd_rule_info,
13042 	.get_fd_all_rules = hclge_get_all_rules,
13043 	.enable_fd = hclge_enable_fd,
13044 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
13045 	.dbg_read_cmd = hclge_dbg_read_cmd,
13046 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
13047 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
13048 	.ae_dev_resetting = hclge_ae_dev_resetting,
13049 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13050 	.set_gro_en = hclge_gro_en,
13051 	.get_global_queue_id = hclge_covert_handle_qid_global,
13052 	.set_timer_task = hclge_set_timer_task,
13053 	.mac_connect_phy = hclge_mac_connect_phy,
13054 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
13055 	.get_vf_config = hclge_get_vf_config,
13056 	.set_vf_link_state = hclge_set_vf_link_state,
13057 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
13058 	.set_vf_trust = hclge_set_vf_trust,
13059 	.set_vf_rate = hclge_set_vf_rate,
13060 	.set_vf_mac = hclge_set_vf_mac,
13061 	.get_module_eeprom = hclge_get_module_eeprom,
13062 	.get_cmdq_stat = hclge_get_cmdq_stat,
13063 	.add_cls_flower = hclge_add_cls_flower,
13064 	.del_cls_flower = hclge_del_cls_flower,
13065 	.cls_flower_active = hclge_is_cls_flower_active,
13066 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13067 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13068 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
13069 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
13070 	.get_ts_info = hclge_ptp_get_ts_info,
13071 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13072 };
13073 
13074 static struct hnae3_ae_algo ae_algo = {
13075 	.ops = &hclge_ops,
13076 	.pdev_id_table = ae_algo_pci_tbl,
13077 };
13078 
13079 static int hclge_init(void)
13080 {
13081 	pr_info("%s is initializing\n", HCLGE_NAME);
13082 
13083 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13084 	if (!hclge_wq) {
13085 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13086 		return -ENOMEM;
13087 	}
13088 
13089 	hnae3_register_ae_algo(&ae_algo);
13090 
13091 	return 0;
13092 }
13093 
13094 static void hclge_exit(void)
13095 {
13096 	hnae3_unregister_ae_algo(&ae_algo);
13097 	destroy_workqueue(hclge_wq);
13098 }
13099 module_init(hclge_init);
13100 module_exit(hclge_exit);
13101 
13102 MODULE_LICENSE("GPL");
13103 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13104 MODULE_DESCRIPTION("HCLGE Driver");
13105 MODULE_VERSION(HCLGE_MOD_VERSION);
13106