1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27 
28 #define HCLGE_NAME			"hclge"
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
64 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
65 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
66 						   unsigned long *addr);
67 static int hclge_set_default_loopback(struct hclge_dev *hdev);
68 
69 static void hclge_sync_mac_table(struct hclge_dev *hdev);
70 static void hclge_restore_hw_table(struct hclge_dev *hdev);
71 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
72 static void hclge_sync_fd_table(struct hclge_dev *hdev);
73 
74 static struct hnae3_ae_algo ae_algo;
75 
76 static struct workqueue_struct *hclge_wq;
77 
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92 
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
94 					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
95 					 HCLGE_NIC_CSQ_DEPTH_REG,
96 					 HCLGE_NIC_CSQ_TAIL_REG,
97 					 HCLGE_NIC_CSQ_HEAD_REG,
98 					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
99 					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
100 					 HCLGE_NIC_CRQ_DEPTH_REG,
101 					 HCLGE_NIC_CRQ_TAIL_REG,
102 					 HCLGE_NIC_CRQ_HEAD_REG,
103 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 					 HCLGE_CMDQ_INTR_STS_REG,
105 					 HCLGE_CMDQ_INTR_EN_REG,
106 					 HCLGE_CMDQ_INTR_GEN_REG};
107 
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 					   HCLGE_PF_OTHER_INT_REG,
110 					   HCLGE_MISC_RESET_STS_REG,
111 					   HCLGE_MISC_VECTOR_INT_STS,
112 					   HCLGE_GLOBAL_RESET_REG,
113 					   HCLGE_FUN_RST_ING,
114 					   HCLGE_GRO_EN_REG};
115 
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 					 HCLGE_RING_RX_ADDR_H_REG,
118 					 HCLGE_RING_RX_BD_NUM_REG,
119 					 HCLGE_RING_RX_BD_LENGTH_REG,
120 					 HCLGE_RING_RX_MERGE_EN_REG,
121 					 HCLGE_RING_RX_TAIL_REG,
122 					 HCLGE_RING_RX_HEAD_REG,
123 					 HCLGE_RING_RX_FBD_NUM_REG,
124 					 HCLGE_RING_RX_OFFSET_REG,
125 					 HCLGE_RING_RX_FBD_OFFSET_REG,
126 					 HCLGE_RING_RX_STASH_REG,
127 					 HCLGE_RING_RX_BD_ERR_REG,
128 					 HCLGE_RING_TX_ADDR_L_REG,
129 					 HCLGE_RING_TX_ADDR_H_REG,
130 					 HCLGE_RING_TX_BD_NUM_REG,
131 					 HCLGE_RING_TX_PRIORITY_REG,
132 					 HCLGE_RING_TX_TC_REG,
133 					 HCLGE_RING_TX_MERGE_EN_REG,
134 					 HCLGE_RING_TX_TAIL_REG,
135 					 HCLGE_RING_TX_HEAD_REG,
136 					 HCLGE_RING_TX_FBD_NUM_REG,
137 					 HCLGE_RING_TX_OFFSET_REG,
138 					 HCLGE_RING_TX_EBD_NUM_REG,
139 					 HCLGE_RING_TX_EBD_OFFSET_REG,
140 					 HCLGE_RING_TX_BD_ERR_REG,
141 					 HCLGE_RING_EN_REG};
142 
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 					     HCLGE_TQP_INTR_GL0_REG,
145 					     HCLGE_TQP_INTR_GL1_REG,
146 					     HCLGE_TQP_INTR_GL2_REG,
147 					     HCLGE_TQP_INTR_RL_REG};
148 
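/* Self-test name strings. The order is expected to match the HNAE3_LOOP_*
 * selftest indices used by hclge_get_strings() below (APP, serial serdes,
 * parallel serdes, PHY).
 */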
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150 	"App    Loopback test",
151 	"Serdes serial Loopback test",
152 	"Serdes parallel Loopback test",
153 	"Phy    Loopback test"
154 };
155 
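/* MAC statistics descriptors: each entry gives the ethtool string, the
 * minimum number of MAC statistics registers the firmware must report for
 * the counter to be exposed (V1 vs. V2 register sets), and the offset of
 * the counter inside hdev->mac_stats. The stats_num field is compared
 * against dev_specs.mac_stats_num in hclge_comm_get_count(),
 * hclge_comm_get_stats() and hclge_comm_get_strings() below.
 */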
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161 	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
163 	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
165 	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
167 	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
169 	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
171 	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
173 	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
175 	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
177 	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
179 	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
181 	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
183 	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
185 	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
187 	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
189 	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
191 	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
193 	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
195 	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
197 	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
199 	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
201 	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
203 	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
205 	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
207 	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
209 	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
211 	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
213 	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
215 	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
217 	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
219 	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
221 	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
223 	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
225 	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
227 	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
229 	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
231 	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
233 	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
235 	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
237 	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
239 	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
241 	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
243 	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
245 	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
247 	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
249 	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
251 	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
253 	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
255 	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
257 	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
259 	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
261 	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
263 	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
265 	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
267 	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
269 	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
271 	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
273 	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
275 	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
277 	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
279 	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
281 	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
283 	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
285 	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
287 	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
289 	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
291 	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
293 	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
295 	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
297 	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
299 	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
301 	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
303 	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
305 	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
307 	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
309 	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
311 	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
313 	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
315 	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
317 	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
319 	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
321 	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
323 	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
325 	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
327 	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
328 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
329 	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
330 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
331 	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
332 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
333 	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
334 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
335 	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
336 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
337 
338 	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
339 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
340 	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
341 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
342 	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
343 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
344 	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
345 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
346 	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
347 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
348 	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
349 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
350 	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
351 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
352 	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
353 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
354 	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
355 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
356 	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
357 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
358 	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
359 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
360 	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
361 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
362 };
363 
364 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
365 	{
366 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
367 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
368 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
369 		.i_port_bitmap = 0x1,
370 	},
371 };
372 
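/* Default 40-byte RSS hash key. This appears to be the same well-known
 * default key used by several other NIC drivers for the Toeplitz hash;
 * user space may override it via ethtool -X.
 */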
373 static const u8 hclge_hash_key[] = {
374 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
375 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
376 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
377 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
378 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
379 };
380 
381 static const u32 hclge_dfx_bd_offset_list[] = {
382 	HCLGE_DFX_BIOS_BD_OFFSET,
383 	HCLGE_DFX_SSU_0_BD_OFFSET,
384 	HCLGE_DFX_SSU_1_BD_OFFSET,
385 	HCLGE_DFX_IGU_BD_OFFSET,
386 	HCLGE_DFX_RPU_0_BD_OFFSET,
387 	HCLGE_DFX_RPU_1_BD_OFFSET,
388 	HCLGE_DFX_NCSI_BD_OFFSET,
389 	HCLGE_DFX_RTC_BD_OFFSET,
390 	HCLGE_DFX_PPP_BD_OFFSET,
391 	HCLGE_DFX_RCB_BD_OFFSET,
392 	HCLGE_DFX_TQP_BD_OFFSET,
393 	HCLGE_DFX_SSU_2_BD_OFFSET
394 };
395 
396 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
397 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
398 	HCLGE_OPC_DFX_SSU_REG_0,
399 	HCLGE_OPC_DFX_SSU_REG_1,
400 	HCLGE_OPC_DFX_IGU_EGU_REG,
401 	HCLGE_OPC_DFX_RPU_REG_0,
402 	HCLGE_OPC_DFX_RPU_REG_1,
403 	HCLGE_OPC_DFX_NCSI_REG,
404 	HCLGE_OPC_DFX_RTC_REG,
405 	HCLGE_OPC_DFX_PPP_REG,
406 	HCLGE_OPC_DFX_RCB_REG,
407 	HCLGE_OPC_DFX_TQP_REG,
408 	HCLGE_OPC_DFX_SSU_REG_2
409 };
410 
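/* Meta data fields carried in the flow director key, with their widths in
 * bits (40 bits in total for the entries below). Presumably consumed when
 * the meta data portion of an FD key is assembled.
 */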
411 static const struct key_info meta_data_key_info[] = {
412 	{ PACKET_TYPE_ID, 6 },
413 	{ IP_FRAGEMENT, 1 },
414 	{ ROCE_TYPE, 1 },
415 	{ NEXT_KEY, 5 },
416 	{ VLAN_NUMBER, 2 },
417 	{ SRC_VPORT, 12 },
418 	{ DST_VPORT, 12 },
419 	{ TUNNEL_PACKET, 1 },
420 };
421 
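/* Tuple key layout: each entry lists the tuple, its width in bits, how the
 * key bytes are built (KEY_OPT_*), and the offsets of the corresponding
 * value and mask inside struct hclge_fd_rule. An offset of -1 indicates a
 * tuple that is not taken from the rule (e.g. unsupported outer fields).
 */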
422 static const struct key_info tuple_key_info[] = {
423 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
424 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
425 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
426 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
427 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
428 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
429 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
430 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
431 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
432 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
433 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
434 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
435 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
436 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
437 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
438 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
439 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
440 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
441 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
442 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
443 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
444 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
445 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
446 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
447 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
448 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
449 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
450 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
451 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
452 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
453 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
454 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
455 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
456 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
457 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
458 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
459 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
460 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
461 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
462 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
463 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
464 	{ INNER_DST_IP, 32, KEY_OPT_IP,
465 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
466 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
467 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
468 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
469 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
470 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
471 	  offsetof(struct hclge_fd_rule, tuples.src_port),
472 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
473 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
474 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
475 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
476 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
477 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
478 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
479 };
480 
481 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
482 {
483 #define HCLGE_MAC_CMD_NUM 21
484 
485 	u64 *data = (u64 *)(&hdev->mac_stats);
486 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
487 	__le64 *desc_data;
488 	u32 data_size;
489 	int ret;
490 	u32 i;
491 
492 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
493 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
494 	if (ret) {
495 		dev_err(&hdev->pdev->dev,
496 			"Get MAC pkt stats fail, status = %d.\n", ret);
497 
498 		return ret;
499 	}
500 
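	/* Layout note (assuming the usual 32-byte hclge_desc): the 21
	 * descriptors form one contiguous 672-byte buffer, i.e. 84 u64
	 * slots. Only the first slot of desc[0] is a command header; the
	 * remaining 83 slots, including the header area of the follow-up
	 * descriptors, carry counters and are accumulated below.
	 */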
501 	/* The first desc has a 64-bit header, so data size needs to be reduced by 1 */
502 	data_size = sizeof(desc) / (sizeof(u64)) - 1;
503 
504 	desc_data = (__le64 *)(&desc[0].data[0]);
505 	for (i = 0; i < data_size; i++) {
506 		/* data memory is contiguous because only the first desc has a
507 		 * header in this command
508 		 */
509 		*data += le64_to_cpu(*desc_data);
510 		data++;
511 		desc_data++;
512 	}
513 
514 	return 0;
515 }
516 
517 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
518 {
519 #define HCLGE_REG_NUM_PER_DESC		4
520 
521 	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
522 	u64 *data = (u64 *)(&hdev->mac_stats);
523 	struct hclge_desc *desc;
524 	__le64 *desc_data;
525 	u32 data_size;
526 	u32 desc_num;
527 	int ret;
528 	u32 i;
529 
530 	/* The first desc has a 64-bit header, so it needs to be taken into account */
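	/* Purely illustrative: with 4 u64 registers per 32-byte descriptor,
	 * a firmware reporting e.g. 105 statistics registers would need
	 * 105 / 4 + 1 = 27 descriptors once the extra header slot is
	 * accounted for.
	 */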
531 	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
532 
533 	/* This may be called inside atomic sections,
534 	 * so GFP_ATOMIC is more suitable here
535 	 */
536 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
537 	if (!desc)
538 		return -ENOMEM;
539 
540 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
541 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
542 	if (ret) {
543 		kfree(desc);
544 		return ret;
545 	}
546 
547 	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
548 
549 	desc_data = (__le64 *)(&desc[0].data[0]);
550 	for (i = 0; i < data_size; i++) {
551 		/* data memory is contiguous because only the first desc has a
552 		 * header in this command
553 		 */
554 		*data += le64_to_cpu(*desc_data);
555 		data++;
556 		desc_data++;
557 	}
558 
559 	kfree(desc);
560 
561 	return 0;
562 }
563 
564 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
565 {
566 	struct hclge_desc desc;
567 	int ret;
568 
569 	/* The driver needs the total number of registers, covering both
570 	 * valid and reserved registers, but old firmware on device V2 only
571 	 * returns the number of valid registers. To be compatible with such
572 	 * devices, the driver uses a fixed value.
573 	 */
574 	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
575 		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
576 		return 0;
577 	}
578 
579 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
580 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
581 	if (ret) {
582 		dev_err(&hdev->pdev->dev,
583 			"failed to query mac statistic reg number, ret = %d\n",
584 			ret);
585 		return ret;
586 	}
587 
588 	*reg_num = le32_to_cpu(desc.data[0]);
589 	if (*reg_num == 0) {
590 		dev_err(&hdev->pdev->dev,
591 			"mac statistic reg number is invalid!\n");
592 		return -ENODATA;
593 	}
594 
595 	return 0;
596 }
597 
598 int hclge_mac_update_stats(struct hclge_dev *hdev)
599 {
600 	/* The firmware supports the new statistics acquisition method */
601 	if (hdev->ae_dev->dev_specs.mac_stats_num)
602 		return hclge_mac_update_stats_complete(hdev);
603 	else
604 		return hclge_mac_update_stats_defective(hdev);
605 }
606 
607 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
608 {
609 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
610 	struct hclge_vport *vport = hclge_get_vport(handle);
611 	struct hclge_dev *hdev = vport->back;
612 	struct hnae3_queue *queue;
613 	struct hclge_desc desc[1];
614 	struct hclge_tqp *tqp;
615 	int ret, i;
616 
617 	for (i = 0; i < kinfo->num_tqps; i++) {
618 		queue = handle->kinfo.tqp[i];
619 		tqp = container_of(queue, struct hclge_tqp, q);
620 		/* command : HCLGE_OPC_QUERY_RX_STATS */
621 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
622 					   true);
623 
624 		desc[0].data[0] = cpu_to_le32(tqp->index);
625 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
626 		if (ret) {
627 			dev_err(&hdev->pdev->dev,
628 				"Query tqp stat fail, status = %d, queue = %d\n",
629 				ret, i);
630 			return ret;
631 		}
632 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
633 			le32_to_cpu(desc[0].data[1]);
634 	}
635 
636 	for (i = 0; i < kinfo->num_tqps; i++) {
637 		queue = handle->kinfo.tqp[i];
638 		tqp = container_of(queue, struct hclge_tqp, q);
639 		/* command : HCLGE_OPC_QUERY_TX_STATS */
640 		hclge_cmd_setup_basic_desc(&desc[0],
641 					   HCLGE_OPC_QUERY_TX_STATS,
642 					   true);
643 
644 		desc[0].data[0] = cpu_to_le32(tqp->index);
645 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
646 		if (ret) {
647 			dev_err(&hdev->pdev->dev,
648 				"Query tqp stat fail, status = %d, queue = %d\n",
649 				ret, i);
650 			return ret;
651 		}
652 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
653 			le32_to_cpu(desc[0].data[1]);
654 	}
655 
656 	return 0;
657 }
658 
659 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
660 {
661 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
662 	struct hclge_tqp *tqp;
663 	u64 *buff = data;
664 	int i;
665 
666 	for (i = 0; i < kinfo->num_tqps; i++) {
667 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
668 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
669 	}
670 
671 	for (i = 0; i < kinfo->num_tqps; i++) {
672 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
673 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
674 	}
675 
676 	return buff;
677 }
678 
679 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
680 {
681 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
682 
683 	/* each tqp provides one TX and one RX statistic */
684 	return kinfo->num_tqps * 2;
685 }
686 
687 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
688 {
689 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
690 	u8 *buff = data;
691 	int i;
692 
693 	for (i = 0; i < kinfo->num_tqps; i++) {
694 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
695 			struct hclge_tqp, q);
696 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
697 			 tqp->index);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	for (i = 0; i < kinfo->num_tqps; i++) {
702 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
703 			struct hclge_tqp, q);
704 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
705 			 tqp->index);
706 		buff = buff + ETH_GSTRING_LEN;
707 	}
708 
709 	return buff;
710 }
711 
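/* Only MAC statistics whose required register count (stats_num) does not
 * exceed the number of statistics registers reported by the firmware
 * (dev_specs.mac_stats_num) are counted, copied and named by the helpers
 * below, so V2-only counters such as the xoff_time statistics are hidden
 * on firmware that only provides the V1 register set.
 */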
712 static int hclge_comm_get_count(struct hclge_dev *hdev,
713 				const struct hclge_comm_stats_str strs[],
714 				u32 size)
715 {
716 	int count = 0;
717 	u32 i;
718 
719 	for (i = 0; i < size; i++)
720 		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
721 			count++;
722 
723 	return count;
724 }
725 
726 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
727 				 const struct hclge_comm_stats_str strs[],
728 				 int size, u64 *data)
729 {
730 	u64 *buf = data;
731 	u32 i;
732 
733 	for (i = 0; i < size; i++) {
734 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
735 			continue;
736 
737 		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
738 		buf++;
739 	}
740 
741 	return buf;
742 }
743 
744 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
745 				  const struct hclge_comm_stats_str strs[],
746 				  int size, u8 *data)
747 {
748 	char *buff = (char *)data;
749 	u32 i;
750 
751 	if (stringset != ETH_SS_STATS)
752 		return buff;
753 
754 	for (i = 0; i < size; i++) {
755 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
756 			continue;
757 
758 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
759 		buff = buff + ETH_GSTRING_LEN;
760 	}
761 
762 	return (u8 *)buff;
763 }
764 
765 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
766 {
767 	struct hnae3_handle *handle;
768 	int status;
769 
770 	handle = &hdev->vport[0].nic;
771 	if (handle->client) {
772 		status = hclge_tqps_update_stats(handle);
773 		if (status) {
774 			dev_err(&hdev->pdev->dev,
775 				"Update TQPS stats fail, status = %d.\n",
776 				status);
777 		}
778 	}
779 
780 	status = hclge_mac_update_stats(hdev);
781 	if (status)
782 		dev_err(&hdev->pdev->dev,
783 			"Update MAC stats fail, status = %d.\n", status);
784 }
785 
786 static void hclge_update_stats(struct hnae3_handle *handle,
787 			       struct net_device_stats *net_stats)
788 {
789 	struct hclge_vport *vport = hclge_get_vport(handle);
790 	struct hclge_dev *hdev = vport->back;
791 	int status;
792 
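	/* Serialize statistics updates: if another context is already
	 * refreshing the counters, skip this round instead of racing it.
	 */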
793 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
794 		return;
795 
796 	status = hclge_mac_update_stats(hdev);
797 	if (status)
798 		dev_err(&hdev->pdev->dev,
799 			"Update MAC stats fail, status = %d.\n",
800 			status);
801 
802 	status = hclge_tqps_update_stats(handle);
803 	if (status)
804 		dev_err(&hdev->pdev->dev,
805 			"Update TQPS stats fail, status = %d.\n",
806 			status);
807 
808 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
809 }
810 
811 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
812 {
813 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
814 		HNAE3_SUPPORT_PHY_LOOPBACK | \
815 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
816 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
817 
818 	struct hclge_vport *vport = hclge_get_vport(handle);
819 	struct hclge_dev *hdev = vport->back;
820 	int count = 0;
821 
822 	/* Loopback test support rules:
823 	 * mac: supported only in GE mode
824 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
825 	 * phy: supported only when a phy device exists on the board
826 	 */
827 	if (stringset == ETH_SS_TEST) {
828 		/* clear the loopback bit flags first */
829 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
830 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
831 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
832 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
833 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
834 			count += 1;
835 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
836 		}
837 
838 		count += 2;
839 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
840 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
841 
842 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
843 		     hdev->hw.mac.phydev->drv->set_loopback) ||
844 		    hnae3_dev_phy_imp_supported(hdev)) {
845 			count += 1;
846 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
847 		}
848 	} else if (stringset == ETH_SS_STATS) {
849 		count = hclge_comm_get_count(hdev, g_mac_stats_string,
850 					     ARRAY_SIZE(g_mac_stats_string)) +
851 			hclge_tqps_get_sset_count(handle, stringset);
852 	}
853 
854 	return count;
855 }
856 
857 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
858 			      u8 *data)
859 {
860 	struct hclge_vport *vport = hclge_get_vport(handle);
861 	struct hclge_dev *hdev = vport->back;
862 	u8 *p = (char *)data;
863 	int size;
864 
865 	if (stringset == ETH_SS_STATS) {
866 		size = ARRAY_SIZE(g_mac_stats_string);
867 		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
868 					   size, p);
869 		p = hclge_tqps_get_strings(handle, p);
870 	} else if (stringset == ETH_SS_TEST) {
871 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
872 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
873 			       ETH_GSTRING_LEN);
874 			p += ETH_GSTRING_LEN;
875 		}
876 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
877 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
878 			       ETH_GSTRING_LEN);
879 			p += ETH_GSTRING_LEN;
880 		}
881 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
882 			memcpy(p,
883 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
884 			       ETH_GSTRING_LEN);
885 			p += ETH_GSTRING_LEN;
886 		}
887 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
888 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
889 			       ETH_GSTRING_LEN);
890 			p += ETH_GSTRING_LEN;
891 		}
892 	}
893 }
894 
895 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
896 {
897 	struct hclge_vport *vport = hclge_get_vport(handle);
898 	struct hclge_dev *hdev = vport->back;
899 	u64 *p;
900 
901 	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
902 				 ARRAY_SIZE(g_mac_stats_string), data);
903 	p = hclge_tqps_get_stats(handle, p);
904 }
905 
906 static void hclge_get_mac_stat(struct hnae3_handle *handle,
907 			       struct hns3_mac_stats *mac_stats)
908 {
909 	struct hclge_vport *vport = hclge_get_vport(handle);
910 	struct hclge_dev *hdev = vport->back;
911 
912 	hclge_update_stats(handle, NULL);
913 
914 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
915 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
916 }
917 
918 static int hclge_parse_func_status(struct hclge_dev *hdev,
919 				   struct hclge_func_status_cmd *status)
920 {
921 #define HCLGE_MAC_ID_MASK	0xF
922 
923 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
924 		return -EINVAL;
925 
926 	/* Record whether this pf is the main pf */
927 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
928 		hdev->flag |= HCLGE_FLAG_MAIN;
929 	else
930 		hdev->flag &= ~HCLGE_FLAG_MAIN;
931 
932 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
933 	return 0;
934 }
935 
936 static int hclge_query_function_status(struct hclge_dev *hdev)
937 {
938 #define HCLGE_QUERY_MAX_CNT	5
939 
940 	struct hclge_func_status_cmd *req;
941 	struct hclge_desc desc;
942 	int timeout = 0;
943 	int ret;
944 
945 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
946 	req = (struct hclge_func_status_cmd *)desc.data;
947 
948 	do {
949 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
950 		if (ret) {
951 			dev_err(&hdev->pdev->dev,
952 				"query function status failed %d.\n", ret);
953 			return ret;
954 		}
955 
956 		/* Check whether pf reset is done */
957 		if (req->pf_state)
958 			break;
959 		usleep_range(1000, 2000);
960 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
961 
962 	return hclge_parse_func_status(hdev, req);
963 }
964 
965 static int hclge_query_pf_resource(struct hclge_dev *hdev)
966 {
967 	struct hclge_pf_res_cmd *req;
968 	struct hclge_desc desc;
969 	int ret;
970 
971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
972 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
973 	if (ret) {
974 		dev_err(&hdev->pdev->dev,
975 			"query pf resource failed %d.\n", ret);
976 		return ret;
977 	}
978 
979 	req = (struct hclge_pf_res_cmd *)desc.data;
980 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
981 			 le16_to_cpu(req->ext_tqp_num);
982 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
983 
984 	if (req->tx_buf_size)
985 		hdev->tx_buf_size =
986 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
987 	else
988 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
989 
990 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
991 
992 	if (req->dv_buf_size)
993 		hdev->dv_buf_size =
994 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
995 	else
996 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
997 
998 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
999 
1000 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
1001 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
1002 		dev_err(&hdev->pdev->dev,
1003 			"only %u msi resources available, not enough for pf(min:2).\n",
1004 			hdev->num_nic_msi);
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (hnae3_dev_roce_supported(hdev)) {
1009 		hdev->num_roce_msi =
1010 			le16_to_cpu(req->pf_intr_vector_number_roce);
1011 
1012 		/* PF should have both NIC vectors and Roce vectors;
1013 		 * NIC vectors are queued before Roce vectors.
1014 		 */
1015 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
1016 	} else {
1017 		hdev->num_msi = hdev->num_nic_msi;
1018 	}
1019 
1020 	return 0;
1021 }
1022 
1023 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
1024 {
1025 	switch (speed_cmd) {
1026 	case HCLGE_FW_MAC_SPEED_10M:
1027 		*speed = HCLGE_MAC_SPEED_10M;
1028 		break;
1029 	case HCLGE_FW_MAC_SPEED_100M:
1030 		*speed = HCLGE_MAC_SPEED_100M;
1031 		break;
1032 	case HCLGE_FW_MAC_SPEED_1G:
1033 		*speed = HCLGE_MAC_SPEED_1G;
1034 		break;
1035 	case HCLGE_FW_MAC_SPEED_10G:
1036 		*speed = HCLGE_MAC_SPEED_10G;
1037 		break;
1038 	case HCLGE_FW_MAC_SPEED_25G:
1039 		*speed = HCLGE_MAC_SPEED_25G;
1040 		break;
1041 	case HCLGE_FW_MAC_SPEED_40G:
1042 		*speed = HCLGE_MAC_SPEED_40G;
1043 		break;
1044 	case HCLGE_FW_MAC_SPEED_50G:
1045 		*speed = HCLGE_MAC_SPEED_50G;
1046 		break;
1047 	case HCLGE_FW_MAC_SPEED_100G:
1048 		*speed = HCLGE_MAC_SPEED_100G;
1049 		break;
1050 	case HCLGE_FW_MAC_SPEED_200G:
1051 		*speed = HCLGE_MAC_SPEED_200G;
1052 		break;
1053 	default:
1054 		return -EINVAL;
1055 	}
1056 
1057 	return 0;
1058 }
1059 
1060 static const struct hclge_speed_bit_map speed_bit_map[] = {
1061 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
1062 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
1063 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1064 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1065 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1066 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1067 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1068 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1069 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1070 };
1071 
1072 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1073 {
1074 	u16 i;
1075 
1076 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1077 		if (speed == speed_bit_map[i].speed) {
1078 			*speed_bit = speed_bit_map[i].speed_bit;
1079 			return 0;
1080 		}
1081 	}
1082 
1083 	return -EINVAL;
1084 }
1085 
1086 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1087 {
1088 	struct hclge_vport *vport = hclge_get_vport(handle);
1089 	struct hclge_dev *hdev = vport->back;
1090 	u32 speed_ability = hdev->hw.mac.speed_ability;
1091 	u32 speed_bit = 0;
1092 	int ret;
1093 
1094 	ret = hclge_get_speed_bit(speed, &speed_bit);
1095 	if (ret)
1096 		return ret;
1097 
1098 	if (speed_bit & speed_ability)
1099 		return 0;
1100 
1101 	return -EINVAL;
1102 }
1103 
1104 static void hclge_convert_setting_sr(u16 speed_ability,
1105 				     unsigned long *link_mode)
1106 {
1107 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1108 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1109 				 link_mode);
1110 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1111 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1112 				 link_mode);
1113 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1114 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1115 				 link_mode);
1116 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1117 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1118 				 link_mode);
1119 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1120 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1121 				 link_mode);
1122 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1123 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1124 				 link_mode);
1125 }
1126 
1127 static void hclge_convert_setting_lr(u16 speed_ability,
1128 				     unsigned long *link_mode)
1129 {
1130 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1131 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1132 				 link_mode);
1133 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1134 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1135 				 link_mode);
1136 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1137 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1138 				 link_mode);
1139 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1141 				 link_mode);
1142 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1143 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1144 				 link_mode);
1145 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1146 		linkmode_set_bit(
1147 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1148 			link_mode);
1149 }
1150 
1151 static void hclge_convert_setting_cr(u16 speed_ability,
1152 				     unsigned long *link_mode)
1153 {
1154 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1156 				 link_mode);
1157 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1158 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1159 				 link_mode);
1160 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1161 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1162 				 link_mode);
1163 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1164 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1165 				 link_mode);
1166 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1167 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1168 				 link_mode);
1169 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1171 				 link_mode);
1172 }
1173 
1174 static void hclge_convert_setting_kr(u16 speed_ability,
1175 				     unsigned long *link_mode)
1176 {
1177 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1178 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1179 				 link_mode);
1180 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1182 				 link_mode);
1183 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1184 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1185 				 link_mode);
1186 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1187 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1188 				 link_mode);
1189 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1190 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1191 				 link_mode);
1192 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1193 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1194 				 link_mode);
1195 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1197 				 link_mode);
1198 }
1199 
1200 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1201 {
1202 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1203 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1204 
1205 	switch (mac->speed) {
1206 	case HCLGE_MAC_SPEED_10G:
1207 	case HCLGE_MAC_SPEED_40G:
1208 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1209 				 mac->supported);
1210 		mac->fec_ability =
1211 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1212 		break;
1213 	case HCLGE_MAC_SPEED_25G:
1214 	case HCLGE_MAC_SPEED_50G:
1215 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1216 				 mac->supported);
1217 		mac->fec_ability =
1218 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1219 			BIT(HNAE3_FEC_AUTO);
1220 		break;
1221 	case HCLGE_MAC_SPEED_100G:
1222 	case HCLGE_MAC_SPEED_200G:
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1224 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1225 		break;
1226 	default:
1227 		mac->fec_ability = 0;
1228 		break;
1229 	}
1230 }
1231 
1232 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1233 					u16 speed_ability)
1234 {
1235 	struct hclge_mac *mac = &hdev->hw.mac;
1236 
1237 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1238 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1239 				 mac->supported);
1240 
1241 	hclge_convert_setting_sr(speed_ability, mac->supported);
1242 	hclge_convert_setting_lr(speed_ability, mac->supported);
1243 	hclge_convert_setting_cr(speed_ability, mac->supported);
1244 	if (hnae3_dev_fec_supported(hdev))
1245 		hclge_convert_setting_fec(mac);
1246 
1247 	if (hnae3_dev_pause_supported(hdev))
1248 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1249 
1250 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1251 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1252 }
1253 
1254 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1255 					    u16 speed_ability)
1256 {
1257 	struct hclge_mac *mac = &hdev->hw.mac;
1258 
1259 	hclge_convert_setting_kr(speed_ability, mac->supported);
1260 	if (hnae3_dev_fec_supported(hdev))
1261 		hclge_convert_setting_fec(mac);
1262 
1263 	if (hnae3_dev_pause_supported(hdev))
1264 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1265 
1266 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1267 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1268 }
1269 
1270 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1271 					 u16 speed_ability)
1272 {
1273 	unsigned long *supported = hdev->hw.mac.supported;
1274 
1275 	/* default to supporting all speeds for a GE port */
1276 	if (!speed_ability)
1277 		speed_ability = HCLGE_SUPPORT_GE;
1278 
1279 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1280 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1281 				 supported);
1282 
1283 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1284 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1285 				 supported);
1286 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1287 				 supported);
1288 	}
1289 
1290 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1291 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1292 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1293 	}
1294 
1295 	if (hnae3_dev_pause_supported(hdev)) {
1296 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1297 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1298 	}
1299 
1300 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1301 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1302 }
1303 
1304 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1305 {
1306 	u8 media_type = hdev->hw.mac.media_type;
1307 
1308 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1309 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1310 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1311 		hclge_parse_copper_link_mode(hdev, speed_ability);
1312 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1313 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1314 }
1315 
1316 static u32 hclge_get_max_speed(u16 speed_ability)
1317 {
1318 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1319 		return HCLGE_MAC_SPEED_200G;
1320 
1321 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1322 		return HCLGE_MAC_SPEED_100G;
1323 
1324 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1325 		return HCLGE_MAC_SPEED_50G;
1326 
1327 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1328 		return HCLGE_MAC_SPEED_40G;
1329 
1330 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1331 		return HCLGE_MAC_SPEED_25G;
1332 
1333 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1334 		return HCLGE_MAC_SPEED_10G;
1335 
1336 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1337 		return HCLGE_MAC_SPEED_1G;
1338 
1339 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1340 		return HCLGE_MAC_SPEED_100M;
1341 
1342 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1343 		return HCLGE_MAC_SPEED_10M;
1344 
1345 	return HCLGE_MAC_SPEED_1G;
1346 }
1347 
1348 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1349 {
1350 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1351 #define SPEED_ABILITY_EXT_SHIFT			8
1352 
1353 	struct hclge_cfg_param_cmd *req;
1354 	u64 mac_addr_tmp_high;
1355 	u16 speed_ability_ext;
1356 	u64 mac_addr_tmp;
1357 	unsigned int i;
1358 
1359 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1360 
1361 	/* get the configuration */
1362 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1363 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1364 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1365 					    HCLGE_CFG_TQP_DESC_N_M,
1366 					    HCLGE_CFG_TQP_DESC_N_S);
1367 
1368 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1369 					HCLGE_CFG_PHY_ADDR_M,
1370 					HCLGE_CFG_PHY_ADDR_S);
1371 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1372 					  HCLGE_CFG_MEDIA_TP_M,
1373 					  HCLGE_CFG_MEDIA_TP_S);
1374 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1375 					  HCLGE_CFG_RX_BUF_LEN_M,
1376 					  HCLGE_CFG_RX_BUF_LEN_S);
1377 	/* get mac_address */
1378 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1379 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1380 					    HCLGE_CFG_MAC_ADDR_H_M,
1381 					    HCLGE_CFG_MAC_ADDR_H_S);
1382 
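	/* The two shifts below are equivalent to shifting the high 16-bit
	 * field up by 32, written this way presumably to avoid an
	 * out-of-range shift on 32-bit quantities. Illustrative example:
	 * a low word of 0x44332211 and a high field of 0x6655 reassemble
	 * to 0x665544332211, which the loop below unpacks LSB-first into
	 * the MAC address 11:22:33:44:55:66.
	 */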
1383 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1384 
1385 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1386 					     HCLGE_CFG_DEFAULT_SPEED_M,
1387 					     HCLGE_CFG_DEFAULT_SPEED_S);
1388 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1389 					       HCLGE_CFG_RSS_SIZE_M,
1390 					       HCLGE_CFG_RSS_SIZE_S);
1391 
1392 	for (i = 0; i < ETH_ALEN; i++)
1393 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1394 
1395 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1396 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1397 
1398 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1399 					     HCLGE_CFG_SPEED_ABILITY_M,
1400 					     HCLGE_CFG_SPEED_ABILITY_S);
1401 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1402 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1403 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1404 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1405 
1406 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1407 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1408 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1409 
1410 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1411 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1412 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1413 
1414 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1415 					       HCLGE_CFG_PF_RSS_SIZE_M,
1416 					       HCLGE_CFG_PF_RSS_SIZE_S);
1417 
1418 	/* The HCLGE_CFG_PF_RSS_SIZE_M field holds the PF max rss size as a
1419 	 * power-of-2 exponent instead of the size itself, which is more
1420 	 * flexible for future changes and expansions.
1421 	 * A PF field of 0 is not meaningful when the VF max rss size field
1422 	 * (HCLGE_CFG_RSS_SIZE_S) is read out directly; in that case the PF
1423 	 * uses the same max rss size as the VF.
1424 	 */
1425 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1426 			       1U << cfg->pf_rss_size_max :
1427 			       cfg->vf_rss_size_max;
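	/* For example, a pf_rss_size_max field of 7 would yield
	 * 1U << 7 = 128, while a field of 0 falls back to vf_rss_size_max.
	 */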
1428 
1429 	/* The unit of the tx spare buffer size queried from the configuration
1430 	 * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
1431 	 * needed here.
1432 	 */
1433 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1434 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1435 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1436 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1437 }
1438 
1439 /* hclge_get_cfg: query the static parameters from flash
1440  * @hdev: pointer to struct hclge_dev
1441  * @hcfg: the config structure to be filled
1442  */
1443 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1444 {
1445 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1446 	struct hclge_cfg_param_cmd *req;
1447 	unsigned int i;
1448 	int ret;
1449 
1450 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1451 		u32 offset = 0;
1452 
1453 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1454 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1455 					   true);
1456 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1457 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1458 		/* Len is expressed in units of 4 bytes when sent to hardware */
1459 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1460 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1461 		req->offset = cpu_to_le32(offset);
1462 	}
1463 
1464 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1465 	if (ret) {
1466 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1467 		return ret;
1468 	}
1469 
1470 	hclge_parse_cfg(hcfg, desc);
1471 
1472 	return 0;
1473 }
1474 
1475 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1476 {
1477 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1478 
1479 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1480 
1481 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1482 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1483 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1484 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1485 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1486 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1487 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1488 	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1489 }
1490 
1491 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1492 				  struct hclge_desc *desc)
1493 {
1494 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1495 	struct hclge_dev_specs_0_cmd *req0;
1496 	struct hclge_dev_specs_1_cmd *req1;
1497 
1498 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1499 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1500 
1501 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1502 	ae_dev->dev_specs.rss_ind_tbl_size =
1503 		le16_to_cpu(req0->rss_ind_tbl_size);
1504 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1505 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1506 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1507 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1508 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1509 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1510 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1511 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1512 }
1513 
1514 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1515 {
1516 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1517 
1518 	if (!dev_specs->max_non_tso_bd_num)
1519 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1520 	if (!dev_specs->rss_ind_tbl_size)
1521 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1522 	if (!dev_specs->rss_key_size)
1523 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1524 	if (!dev_specs->max_tm_rate)
1525 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1526 	if (!dev_specs->max_qset_num)
1527 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1528 	if (!dev_specs->max_int_gl)
1529 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1530 	if (!dev_specs->max_frm_size)
1531 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1532 	if (!dev_specs->umv_size)
1533 		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1534 }
1535 
1536 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1537 {
1538 	u32 reg_num = 0;
1539 	int ret;
1540 
1541 	ret = hclge_mac_query_reg_num(hdev, &reg_num);
1542 	if (ret && ret != -EOPNOTSUPP)
1543 		return ret;
1544 
1545 	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1546 	return 0;
1547 }
1548 
1549 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1550 {
1551 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1552 	int ret;
1553 	int i;
1554 
1555 	ret = hclge_query_mac_stats_num(hdev);
1556 	if (ret)
1557 		return ret;
1558 
1559 	/* set default specifications as devices lower than version V3 do not
1560 	 * support querying specifications from firmware.
1561 	 */
1562 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1563 		hclge_set_default_dev_specs(hdev);
1564 		return 0;
1565 	}
1566 
1567 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1568 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1569 					   true);
1570 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1571 	}
1572 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1573 
1574 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1575 	if (ret)
1576 		return ret;
1577 
1578 	hclge_parse_dev_specs(hdev, desc);
1579 	hclge_check_dev_specs(hdev);
1580 
1581 	return 0;
1582 }
1583 
1584 static int hclge_get_cap(struct hclge_dev *hdev)
1585 {
1586 	int ret;
1587 
1588 	ret = hclge_query_function_status(hdev);
1589 	if (ret) {
1590 		dev_err(&hdev->pdev->dev,
1591 			"query function status error %d.\n", ret);
1592 		return ret;
1593 	}
1594 
1595 	/* get pf resource */
1596 	return hclge_query_pf_resource(hdev);
1597 }
1598 
1599 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1600 {
1601 #define HCLGE_MIN_TX_DESC	64
1602 #define HCLGE_MIN_RX_DESC	64
1603 
1604 	if (!is_kdump_kernel())
1605 		return;
1606 
1607 	dev_info(&hdev->pdev->dev,
1608 		 "Running kdump kernel. Using minimal resources\n");
1609 
	/* the minimal number of queue pairs equals the number of vports */
1611 	hdev->num_tqps = hdev->num_req_vfs + 1;
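	/* e.g. a PF serving 3 requested VFs keeps only 4 queue pairs under
	 * kdump, with 64-entry tx/rx rings (illustrative VF count).
	 */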
1612 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1613 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1614 }
1615 
1616 static int hclge_configure(struct hclge_dev *hdev)
1617 {
1618 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1619 	const struct cpumask *cpumask = cpu_online_mask;
1620 	struct hclge_cfg cfg;
1621 	unsigned int i;
1622 	int node, ret;
1623 
1624 	ret = hclge_get_cfg(hdev, &cfg);
1625 	if (ret)
1626 		return ret;
1627 
1628 	hdev->base_tqp_pid = 0;
1629 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1630 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1631 	hdev->rx_buf_len = cfg.rx_buf_len;
1632 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1633 	hdev->hw.mac.media_type = cfg.media_type;
1634 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1635 	hdev->num_tx_desc = cfg.tqp_desc_num;
1636 	hdev->num_rx_desc = cfg.tqp_desc_num;
1637 	hdev->tm_info.num_pg = 1;
1638 	hdev->tc_max = cfg.tc_num;
1639 	hdev->tm_info.hw_pfc_map = 0;
1640 	if (cfg.umv_space)
1641 		hdev->wanted_umv_size = cfg.umv_space;
1642 	else
1643 		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1644 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1645 	hdev->gro_en = true;
1646 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1647 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1648 
1649 	if (hnae3_dev_fd_supported(hdev)) {
1650 		hdev->fd_en = true;
1651 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1652 	}
1653 
1654 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1655 	if (ret) {
1656 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1657 			cfg.default_speed, ret);
1658 		return ret;
1659 	}
1660 
1661 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1662 
1663 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1664 
1665 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1666 	    (hdev->tc_max < 1)) {
1667 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1668 			 hdev->tc_max);
1669 		hdev->tc_max = 1;
1670 	}
1671 
1672 	/* Dev does not support DCB */
1673 	if (!hnae3_dev_dcb_supported(hdev)) {
1674 		hdev->tc_max = 1;
1675 		hdev->pfc_max = 0;
1676 	} else {
1677 		hdev->pfc_max = hdev->tc_max;
1678 	}
1679 
1680 	hdev->tm_info.num_tc = 1;
1681 
	/* Discontiguous TCs are currently not supported */
1683 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1684 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1685 
1686 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1687 
1688 	hclge_init_kdump_kernel_config(hdev);
1689 
1690 	/* Set the affinity based on numa node */
1691 	node = dev_to_node(&hdev->pdev->dev);
1692 	if (node != NUMA_NO_NODE)
1693 		cpumask = cpumask_of_node(node);
1694 
1695 	cpumask_copy(&hdev->affinity_mask, cpumask);
1696 
1697 	return ret;
1698 }
1699 
1700 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1701 			    u16 tso_mss_max)
1702 {
1703 	struct hclge_cfg_tso_status_cmd *req;
1704 	struct hclge_desc desc;
1705 
1706 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1707 
1708 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1709 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1710 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1711 
1712 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1713 }
1714 
1715 static int hclge_config_gro(struct hclge_dev *hdev)
1716 {
1717 	struct hclge_cfg_gro_status_cmd *req;
1718 	struct hclge_desc desc;
1719 	int ret;
1720 
1721 	if (!hnae3_dev_gro_supported(hdev))
1722 		return 0;
1723 
1724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1725 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1726 
1727 	req->gro_en = hdev->gro_en ? 1 : 0;
1728 
1729 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1730 	if (ret)
1731 		dev_err(&hdev->pdev->dev,
1732 			"GRO hardware config cmd failed, ret = %d\n", ret);
1733 
1734 	return ret;
1735 }
1736 
1737 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1738 {
1739 	struct hclge_tqp *tqp;
1740 	int i;
1741 
1742 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1743 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1744 	if (!hdev->htqp)
1745 		return -ENOMEM;
1746 
1747 	tqp = hdev->htqp;
1748 
1749 	for (i = 0; i < hdev->num_tqps; i++) {
1750 		tqp->dev = &hdev->pdev->dev;
1751 		tqp->index = i;
1752 
1753 		tqp->q.ae_algo = &ae_algo;
1754 		tqp->q.buf_size = hdev->rx_buf_len;
1755 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1756 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1757 
1758 		/* need an extended offset to configure queues >=
1759 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1760 		 */
1761 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1762 			tqp->q.io_base = hdev->hw.io_base +
1763 					 HCLGE_TQP_REG_OFFSET +
1764 					 i * HCLGE_TQP_REG_SIZE;
1765 		else
1766 			tqp->q.io_base = hdev->hw.io_base +
1767 					 HCLGE_TQP_REG_OFFSET +
1768 					 HCLGE_TQP_EXT_REG_OFFSET +
1769 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1770 					 HCLGE_TQP_REG_SIZE;
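		/* e.g. assuming HCLGE_TQP_MAX_SIZE_DEV_V2 is 1024, queue
		 * 1024 is the first one addressed through the extended
		 * register region (illustrative value).
		 */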
1771 
1772 		tqp++;
1773 	}
1774 
1775 	return 0;
1776 }
1777 
1778 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1779 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1780 {
1781 	struct hclge_tqp_map_cmd *req;
1782 	struct hclge_desc desc;
1783 	int ret;
1784 
1785 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1786 
1787 	req = (struct hclge_tqp_map_cmd *)desc.data;
1788 	req->tqp_id = cpu_to_le16(tqp_pid);
1789 	req->tqp_vf = func_id;
1790 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1791 	if (!is_pf)
1792 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1793 	req->tqp_vid = cpu_to_le16(tqp_vid);
1794 
1795 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1796 	if (ret)
1797 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1798 
1799 	return ret;
1800 }
1801 
1802 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1803 {
1804 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1805 	struct hclge_dev *hdev = vport->back;
1806 	int i, alloced;
1807 
1808 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1809 	     alloced < num_tqps; i++) {
1810 		if (!hdev->htqp[i].alloced) {
1811 			hdev->htqp[i].q.handle = &vport->nic;
1812 			hdev->htqp[i].q.tqp_index = alloced;
1813 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1814 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1815 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1816 			hdev->htqp[i].alloced = true;
1817 			alloced++;
1818 		}
1819 	}
1820 	vport->alloc_tqps = alloced;
1821 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1822 				vport->alloc_tqps / hdev->tm_info.num_tc);
1823 
	/* ensure a one-to-one mapping between irq and queue by default */
1825 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1826 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
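	/* For example, with 8 allocated TQPs, 4 TCs, a PF max rss size of
	 * 64 and 9 NIC MSI vectors, rss_size = min(64, 8 / 4, (9 - 1) / 4)
	 * = 2 (illustrative numbers).
	 */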
1827 
1828 	return 0;
1829 }
1830 
1831 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1832 			    u16 num_tx_desc, u16 num_rx_desc)
1833 
1834 {
1835 	struct hnae3_handle *nic = &vport->nic;
1836 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1837 	struct hclge_dev *hdev = vport->back;
1838 	int ret;
1839 
1840 	kinfo->num_tx_desc = num_tx_desc;
1841 	kinfo->num_rx_desc = num_rx_desc;
1842 
1843 	kinfo->rx_buf_len = hdev->rx_buf_len;
1844 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1845 
1846 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1847 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1848 	if (!kinfo->tqp)
1849 		return -ENOMEM;
1850 
1851 	ret = hclge_assign_tqp(vport, num_tqps);
1852 	if (ret)
1853 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1854 
1855 	return ret;
1856 }
1857 
1858 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1859 				  struct hclge_vport *vport)
1860 {
1861 	struct hnae3_handle *nic = &vport->nic;
1862 	struct hnae3_knic_private_info *kinfo;
1863 	u16 i;
1864 
1865 	kinfo = &nic->kinfo;
1866 	for (i = 0; i < vport->alloc_tqps; i++) {
1867 		struct hclge_tqp *q =
1868 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1869 		bool is_pf;
1870 		int ret;
1871 
1872 		is_pf = !(vport->vport_id);
1873 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1874 					     i, is_pf);
1875 		if (ret)
1876 			return ret;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 static int hclge_map_tqp(struct hclge_dev *hdev)
1883 {
1884 	struct hclge_vport *vport = hdev->vport;
1885 	u16 i, num_vport;
1886 
1887 	num_vport = hdev->num_req_vfs + 1;
1888 	for (i = 0; i < num_vport; i++)	{
1889 		int ret;
1890 
1891 		ret = hclge_map_tqp_to_vport(hdev, vport);
1892 		if (ret)
1893 			return ret;
1894 
1895 		vport++;
1896 	}
1897 
1898 	return 0;
1899 }
1900 
1901 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1902 {
1903 	struct hnae3_handle *nic = &vport->nic;
1904 	struct hclge_dev *hdev = vport->back;
1905 	int ret;
1906 
1907 	nic->pdev = hdev->pdev;
1908 	nic->ae_algo = &ae_algo;
1909 	nic->numa_node_mask = hdev->numa_node_mask;
1910 	nic->kinfo.io_base = hdev->hw.io_base;
1911 
1912 	ret = hclge_knic_setup(vport, num_tqps,
1913 			       hdev->num_tx_desc, hdev->num_rx_desc);
1914 	if (ret)
1915 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1916 
1917 	return ret;
1918 }
1919 
1920 static int hclge_alloc_vport(struct hclge_dev *hdev)
1921 {
1922 	struct pci_dev *pdev = hdev->pdev;
1923 	struct hclge_vport *vport;
1924 	u32 tqp_main_vport;
1925 	u32 tqp_per_vport;
1926 	int num_vport, i;
1927 	int ret;
1928 
	/* We need to alloc a vport for the main NIC of the PF */
1930 	num_vport = hdev->num_req_vfs + 1;
1931 
1932 	if (hdev->num_tqps < num_vport) {
1933 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1934 			hdev->num_tqps, num_vport);
1935 		return -EINVAL;
1936 	}
1937 
1938 	/* Alloc the same number of TQPs for every vport */
1939 	tqp_per_vport = hdev->num_tqps / num_vport;
1940 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
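	/* For example, 100 TQPs shared by 8 VFs plus the PF (9 vports)
	 * gives each vport 100 / 9 = 11 TQPs, with the remainder of 1
	 * added to the main vport, i.e. 12 TQPs (illustrative numbers).
	 */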
1941 
1942 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1943 			     GFP_KERNEL);
1944 	if (!vport)
1945 		return -ENOMEM;
1946 
1947 	hdev->vport = vport;
1948 	hdev->num_alloc_vport = num_vport;
1949 
1950 	if (IS_ENABLED(CONFIG_PCI_IOV))
1951 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1952 
1953 	for (i = 0; i < num_vport; i++) {
1954 		vport->back = hdev;
1955 		vport->vport_id = i;
1956 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1957 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1958 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1959 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1960 		vport->req_vlan_fltr_en = true;
1961 		INIT_LIST_HEAD(&vport->vlan_list);
1962 		INIT_LIST_HEAD(&vport->uc_mac_list);
1963 		INIT_LIST_HEAD(&vport->mc_mac_list);
1964 		spin_lock_init(&vport->mac_list_lock);
1965 
1966 		if (i == 0)
1967 			ret = hclge_vport_setup(vport, tqp_main_vport);
1968 		else
1969 			ret = hclge_vport_setup(vport, tqp_per_vport);
1970 		if (ret) {
1971 			dev_err(&pdev->dev,
1972 				"vport setup failed for vport %d, %d\n",
1973 				i, ret);
1974 			return ret;
1975 		}
1976 
1977 		vport++;
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1984 				    struct hclge_pkt_buf_alloc *buf_alloc)
1985 {
/* TX buffer size is in units of 128 bytes */
1987 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1988 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1989 	struct hclge_tx_buff_alloc_cmd *req;
1990 	struct hclge_desc desc;
1991 	int ret;
1992 	u8 i;
1993 
1994 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1995 
1996 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1997 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1998 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1999 
2000 		req->tx_pkt_buff[i] =
2001 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
2002 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
2003 	}
2004 
2005 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2006 	if (ret)
2007 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
2008 			ret);
2009 
2010 	return ret;
2011 }
2012 
2013 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
2014 				 struct hclge_pkt_buf_alloc *buf_alloc)
2015 {
2016 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
2017 
2018 	if (ret)
2019 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
2020 
2021 	return ret;
2022 }
2023 
2024 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
2025 {
2026 	unsigned int i;
2027 	u32 cnt = 0;
2028 
2029 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2030 		if (hdev->hw_tc_map & BIT(i))
2031 			cnt++;
2032 	return cnt;
2033 }
2034 
/* Get the number of PFC-enabled TCs which have a private buffer */
2036 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
2037 				  struct hclge_pkt_buf_alloc *buf_alloc)
2038 {
2039 	struct hclge_priv_buf *priv;
2040 	unsigned int i;
2041 	int cnt = 0;
2042 
2043 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2044 		priv = &buf_alloc->priv_buf[i];
2045 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
2046 		    priv->enable)
2047 			cnt++;
2048 	}
2049 
2050 	return cnt;
2051 }
2052 
/* Get the number of PFC-disabled TCs which have a private buffer */
2054 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
2055 				     struct hclge_pkt_buf_alloc *buf_alloc)
2056 {
2057 	struct hclge_priv_buf *priv;
2058 	unsigned int i;
2059 	int cnt = 0;
2060 
2061 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2062 		priv = &buf_alloc->priv_buf[i];
2063 		if (hdev->hw_tc_map & BIT(i) &&
2064 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2065 		    priv->enable)
2066 			cnt++;
2067 	}
2068 
2069 	return cnt;
2070 }
2071 
2072 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 	struct hclge_priv_buf *priv;
2075 	u32 rx_priv = 0;
2076 	int i;
2077 
2078 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2079 		priv = &buf_alloc->priv_buf[i];
2080 		if (priv->enable)
2081 			rx_priv += priv->buf_size;
2082 	}
2083 	return rx_priv;
2084 }
2085 
2086 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2087 {
2088 	u32 i, total_tx_size = 0;
2089 
2090 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2091 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2092 
2093 	return total_tx_size;
2094 }
2095 
2096 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2097 				struct hclge_pkt_buf_alloc *buf_alloc,
2098 				u32 rx_all)
2099 {
2100 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2101 	u32 tc_num = hclge_get_tc_num(hdev);
2102 	u32 shared_buf, aligned_mps;
2103 	u32 rx_priv;
2104 	int i;
2105 
2106 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2107 
2108 	if (hnae3_dev_dcb_supported(hdev))
2109 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2110 					hdev->dv_buf_size;
2111 	else
2112 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2113 					+ hdev->dv_buf_size;
2114 
2115 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2116 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2117 			     HCLGE_BUF_SIZE_UNIT);
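	/* For example, for a DCB-capable device with mps = 1500,
	 * aligned_mps rounds up to 1536 (assuming a 256-byte buffer unit),
	 * so shared_buf_min = 2 * 1536 + dv_buf_size and shared_buf_tc =
	 * (tc_num + 1) * 1536; shared_std is the larger of the two rounded
	 * up to the buffer unit (illustrative numbers).
	 */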
2118 
2119 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2120 	if (rx_all < rx_priv + shared_std)
2121 		return false;
2122 
2123 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2124 	buf_alloc->s_buf.buf_size = shared_buf;
2125 	if (hnae3_dev_dcb_supported(hdev)) {
2126 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2127 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2128 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2129 				  HCLGE_BUF_SIZE_UNIT);
2130 	} else {
2131 		buf_alloc->s_buf.self.high = aligned_mps +
2132 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2133 		buf_alloc->s_buf.self.low = aligned_mps;
2134 	}
2135 
2136 	if (hnae3_dev_dcb_supported(hdev)) {
2137 		hi_thrd = shared_buf - hdev->dv_buf_size;
2138 
2139 		if (tc_num <= NEED_RESERVE_TC_NUM)
2140 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2141 					/ BUF_MAX_PERCENT;
2142 
2143 		if (tc_num)
2144 			hi_thrd = hi_thrd / tc_num;
2145 
2146 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2147 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2148 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2149 	} else {
2150 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2151 		lo_thrd = aligned_mps;
2152 	}
2153 
2154 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2155 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2156 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2157 	}
2158 
2159 	return true;
2160 }
2161 
2162 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2163 				struct hclge_pkt_buf_alloc *buf_alloc)
2164 {
2165 	u32 i, total_size;
2166 
2167 	total_size = hdev->pkt_buf_size;
2168 
	/* alloc tx buffer for all enabled TCs */
2170 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2171 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2172 
2173 		if (hdev->hw_tc_map & BIT(i)) {
2174 			if (total_size < hdev->tx_buf_size)
2175 				return -ENOMEM;
2176 
2177 			priv->tx_buf_size = hdev->tx_buf_size;
2178 		} else {
2179 			priv->tx_buf_size = 0;
2180 		}
2181 
2182 		total_size -= priv->tx_buf_size;
2183 	}
2184 
2185 	return 0;
2186 }
2187 
2188 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2189 				  struct hclge_pkt_buf_alloc *buf_alloc)
2190 {
2191 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2192 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2193 	unsigned int i;
2194 
2195 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2197 
2198 		priv->enable = 0;
2199 		priv->wl.low = 0;
2200 		priv->wl.high = 0;
2201 		priv->buf_size = 0;
2202 
2203 		if (!(hdev->hw_tc_map & BIT(i)))
2204 			continue;
2205 
2206 		priv->enable = 1;
2207 
2208 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2209 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2210 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2211 						HCLGE_BUF_SIZE_UNIT);
2212 		} else {
2213 			priv->wl.low = 0;
2214 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2215 					aligned_mps;
2216 		}
2217 
2218 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2219 	}
2220 
2221 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2222 }
2223 
2224 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2225 					  struct hclge_pkt_buf_alloc *buf_alloc)
2226 {
2227 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2228 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2229 	int i;
2230 
	/* let the last TC be cleared first */
2232 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2233 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2234 		unsigned int mask = BIT((unsigned int)i);
2235 
2236 		if (hdev->hw_tc_map & mask &&
2237 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of the non-PFC TC */
2239 			priv->wl.low = 0;
2240 			priv->wl.high = 0;
2241 			priv->buf_size = 0;
2242 			priv->enable = 0;
2243 			no_pfc_priv_num--;
2244 		}
2245 
2246 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2247 		    no_pfc_priv_num == 0)
2248 			break;
2249 	}
2250 
2251 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2252 }
2253 
2254 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2255 					struct hclge_pkt_buf_alloc *buf_alloc)
2256 {
2257 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2258 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2259 	int i;
2260 
	/* let the last TC be cleared first */
2262 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2263 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2264 		unsigned int mask = BIT((unsigned int)i);
2265 
2266 		if (hdev->hw_tc_map & mask &&
2267 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
2269 			priv->wl.low = 0;
2270 			priv->enable = 0;
2271 			priv->wl.high = 0;
2272 			priv->buf_size = 0;
2273 			pfc_priv_num--;
2274 		}
2275 
2276 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2277 		    pfc_priv_num == 0)
2278 			break;
2279 	}
2280 
2281 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2282 }
2283 
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2286 {
2287 #define COMPENSATE_BUFFER	0x3C00
2288 #define COMPENSATE_HALF_MPS_NUM	5
2289 #define PRIV_WL_GAP		0x1800
2290 
2291 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2292 	u32 tc_num = hclge_get_tc_num(hdev);
2293 	u32 half_mps = hdev->mps >> 1;
2294 	u32 min_rx_priv;
2295 	unsigned int i;
2296 
2297 	if (tc_num)
2298 		rx_priv = rx_priv / tc_num;
2299 
2300 	if (tc_num <= NEED_RESERVE_TC_NUM)
2301 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2302 
2303 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2304 			COMPENSATE_HALF_MPS_NUM * half_mps;
2305 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2306 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
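	/* For example, with mps = 1500 (half_mps = 750), the per-TC floor
	 * is dv_buf_size + 0x3C00 + 5 * 750 = dv_buf_size + 19110 bytes,
	 * rounded up to the buffer unit (illustrative numbers).
	 */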
2307 	if (rx_priv < min_rx_priv)
2308 		return false;
2309 
2310 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2311 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2312 
2313 		priv->enable = 0;
2314 		priv->wl.low = 0;
2315 		priv->wl.high = 0;
2316 		priv->buf_size = 0;
2317 
2318 		if (!(hdev->hw_tc_map & BIT(i)))
2319 			continue;
2320 
2321 		priv->enable = 1;
2322 		priv->buf_size = rx_priv;
2323 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2324 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2325 	}
2326 
2327 	buf_alloc->s_buf.buf_size = 0;
2328 
2329 	return true;
2330 }
2331 
2332 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2333  * @hdev: pointer to struct hclge_dev
2334  * @buf_alloc: pointer to buffer calculation data
 * @return: 0 if the calculation succeeds, a negative errno on failure
2336  */
2337 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2338 				struct hclge_pkt_buf_alloc *buf_alloc)
2339 {
2340 	/* When DCB is not supported, rx private buffer is not allocated. */
2341 	if (!hnae3_dev_dcb_supported(hdev)) {
2342 		u32 rx_all = hdev->pkt_buf_size;
2343 
2344 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2345 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2346 			return -ENOMEM;
2347 
2348 		return 0;
2349 	}
2350 
2351 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2352 		return 0;
2353 
2354 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2355 		return 0;
2356 
2357 	/* try to decrease the buffer size */
2358 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2359 		return 0;
2360 
2361 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2362 		return 0;
2363 
2364 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2365 		return 0;
2366 
2367 	return -ENOMEM;
2368 }
2369 
2370 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2371 				   struct hclge_pkt_buf_alloc *buf_alloc)
2372 {
2373 	struct hclge_rx_priv_buff_cmd *req;
2374 	struct hclge_desc desc;
2375 	int ret;
2376 	int i;
2377 
2378 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2379 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2380 
	/* Alloc the private buffer for each TC */
2382 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2383 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2384 
2385 		req->buf_num[i] =
2386 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2387 		req->buf_num[i] |=
2388 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2389 	}
2390 
2391 	req->shared_buf =
2392 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2393 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
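	/* Buffer sizes are programmed in units of (1 << HCLGE_BUF_UNIT_S)
	 * bytes; e.g. assuming 256-byte units, a 4096-byte buffer is
	 * written as 16 with the per-TC enable bit set (illustrative
	 * values).
	 */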
2394 
2395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2396 	if (ret)
2397 		dev_err(&hdev->pdev->dev,
2398 			"rx private buffer alloc cmd failed %d\n", ret);
2399 
2400 	return ret;
2401 }
2402 
2403 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2404 				   struct hclge_pkt_buf_alloc *buf_alloc)
2405 {
2406 	struct hclge_rx_priv_wl_buf *req;
2407 	struct hclge_priv_buf *priv;
2408 	struct hclge_desc desc[2];
2409 	int i, j;
2410 	int ret;
2411 
2412 	for (i = 0; i < 2; i++) {
2413 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2414 					   false);
2415 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2416 
		/* The first descriptor sets the NEXT bit to 1 */
2418 		if (i == 0)
2419 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2420 		else
2421 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2422 
2423 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2424 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2425 
2426 			priv = &buf_alloc->priv_buf[idx];
2427 			req->tc_wl[j].high =
2428 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2429 			req->tc_wl[j].high |=
2430 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2431 			req->tc_wl[j].low =
2432 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2433 			req->tc_wl[j].low |=
2434 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2435 		}
2436 	}
2437 
	/* Send 2 descriptors at one time */
2439 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2440 	if (ret)
2441 		dev_err(&hdev->pdev->dev,
2442 			"rx private waterline config cmd failed %d\n",
2443 			ret);
2444 	return ret;
2445 }
2446 
2447 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2448 				    struct hclge_pkt_buf_alloc *buf_alloc)
2449 {
2450 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2451 	struct hclge_rx_com_thrd *req;
2452 	struct hclge_desc desc[2];
2453 	struct hclge_tc_thrd *tc;
2454 	int i, j;
2455 	int ret;
2456 
2457 	for (i = 0; i < 2; i++) {
2458 		hclge_cmd_setup_basic_desc(&desc[i],
2459 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2460 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2461 
		/* The first descriptor sets the NEXT bit to 1 */
2463 		if (i == 0)
2464 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2465 		else
2466 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2467 
2468 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2469 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2470 
2471 			req->com_thrd[j].high =
2472 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2473 			req->com_thrd[j].high |=
2474 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2475 			req->com_thrd[j].low =
2476 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2477 			req->com_thrd[j].low |=
2478 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2479 		}
2480 	}
2481 
2482 	/* Send 2 descriptors at one time */
2483 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2484 	if (ret)
2485 		dev_err(&hdev->pdev->dev,
2486 			"common threshold config cmd failed %d\n", ret);
2487 	return ret;
2488 }
2489 
2490 static int hclge_common_wl_config(struct hclge_dev *hdev,
2491 				  struct hclge_pkt_buf_alloc *buf_alloc)
2492 {
2493 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2494 	struct hclge_rx_com_wl *req;
2495 	struct hclge_desc desc;
2496 	int ret;
2497 
2498 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2499 
2500 	req = (struct hclge_rx_com_wl *)desc.data;
2501 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2502 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2503 
2504 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2505 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2506 
2507 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2508 	if (ret)
2509 		dev_err(&hdev->pdev->dev,
2510 			"common waterline config cmd failed %d\n", ret);
2511 
2512 	return ret;
2513 }
2514 
2515 int hclge_buffer_alloc(struct hclge_dev *hdev)
2516 {
2517 	struct hclge_pkt_buf_alloc *pkt_buf;
2518 	int ret;
2519 
2520 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2521 	if (!pkt_buf)
2522 		return -ENOMEM;
2523 
2524 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2525 	if (ret) {
2526 		dev_err(&hdev->pdev->dev,
2527 			"could not calc tx buffer size for all TCs %d\n", ret);
2528 		goto out;
2529 	}
2530 
2531 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2532 	if (ret) {
2533 		dev_err(&hdev->pdev->dev,
2534 			"could not alloc tx buffers %d\n", ret);
2535 		goto out;
2536 	}
2537 
2538 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2539 	if (ret) {
2540 		dev_err(&hdev->pdev->dev,
2541 			"could not calc rx priv buffer size for all TCs %d\n",
2542 			ret);
2543 		goto out;
2544 	}
2545 
2546 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2547 	if (ret) {
2548 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2549 			ret);
2550 		goto out;
2551 	}
2552 
2553 	if (hnae3_dev_dcb_supported(hdev)) {
2554 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2555 		if (ret) {
2556 			dev_err(&hdev->pdev->dev,
2557 				"could not configure rx private waterline %d\n",
2558 				ret);
2559 			goto out;
2560 		}
2561 
2562 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2563 		if (ret) {
2564 			dev_err(&hdev->pdev->dev,
2565 				"could not configure common threshold %d\n",
2566 				ret);
2567 			goto out;
2568 		}
2569 	}
2570 
2571 	ret = hclge_common_wl_config(hdev, pkt_buf);
2572 	if (ret)
2573 		dev_err(&hdev->pdev->dev,
2574 			"could not configure common waterline %d\n", ret);
2575 
2576 out:
2577 	kfree(pkt_buf);
2578 	return ret;
2579 }
2580 
2581 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2582 {
2583 	struct hnae3_handle *roce = &vport->roce;
2584 	struct hnae3_handle *nic = &vport->nic;
2585 	struct hclge_dev *hdev = vport->back;
2586 
2587 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2588 
2589 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2590 		return -EINVAL;
2591 
2592 	roce->rinfo.base_vector = hdev->num_nic_msi;
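	/* The RoCE vectors follow the NIC block; e.g. with 64 NIC vectors
	 * and 32 RoCE vectors, RoCE uses MSI indexes 64..95 (illustrative
	 * numbers).
	 */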
2593 
2594 	roce->rinfo.netdev = nic->kinfo.netdev;
2595 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2596 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2597 
2598 	roce->pdev = nic->pdev;
2599 	roce->ae_algo = nic->ae_algo;
2600 	roce->numa_node_mask = nic->numa_node_mask;
2601 
2602 	return 0;
2603 }
2604 
2605 static int hclge_init_msi(struct hclge_dev *hdev)
2606 {
2607 	struct pci_dev *pdev = hdev->pdev;
2608 	int vectors;
2609 	int i;
2610 
2611 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2612 					hdev->num_msi,
2613 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2614 	if (vectors < 0) {
2615 		dev_err(&pdev->dev,
2616 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2617 			vectors);
2618 		return vectors;
2619 	}
2620 	if (vectors < hdev->num_msi)
2621 		dev_warn(&hdev->pdev->dev,
2622 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2623 			 hdev->num_msi, vectors);
2624 
2625 	hdev->num_msi = vectors;
2626 	hdev->num_msi_left = vectors;
2627 
2628 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2629 					   sizeof(u16), GFP_KERNEL);
2630 	if (!hdev->vector_status) {
2631 		pci_free_irq_vectors(pdev);
2632 		return -ENOMEM;
2633 	}
2634 
2635 	for (i = 0; i < hdev->num_msi; i++)
2636 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2637 
2638 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2639 					sizeof(int), GFP_KERNEL);
2640 	if (!hdev->vector_irq) {
2641 		pci_free_irq_vectors(pdev);
2642 		return -ENOMEM;
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2649 {
2650 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2651 		duplex = HCLGE_MAC_FULL;
2652 
2653 	return duplex;
2654 }
2655 
2656 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2657 				      u8 duplex)
2658 {
2659 	struct hclge_config_mac_speed_dup_cmd *req;
2660 	struct hclge_desc desc;
2661 	int ret;
2662 
2663 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2664 
2665 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2666 
2667 	if (duplex)
2668 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2669 
2670 	switch (speed) {
2671 	case HCLGE_MAC_SPEED_10M:
2672 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2673 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2674 		break;
2675 	case HCLGE_MAC_SPEED_100M:
2676 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2677 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2678 		break;
2679 	case HCLGE_MAC_SPEED_1G:
2680 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2681 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2682 		break;
2683 	case HCLGE_MAC_SPEED_10G:
2684 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2685 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2686 		break;
2687 	case HCLGE_MAC_SPEED_25G:
2688 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2689 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2690 		break;
2691 	case HCLGE_MAC_SPEED_40G:
2692 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2693 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2694 		break;
2695 	case HCLGE_MAC_SPEED_50G:
2696 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2697 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2698 		break;
2699 	case HCLGE_MAC_SPEED_100G:
2700 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2701 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2702 		break;
2703 	case HCLGE_MAC_SPEED_200G:
2704 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2705 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2706 		break;
2707 	default:
2708 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2709 		return -EINVAL;
2710 	}
2711 
2712 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2713 		      1);
2714 
2715 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2716 	if (ret) {
2717 		dev_err(&hdev->pdev->dev,
2718 			"mac speed/duplex config cmd failed %d.\n", ret);
2719 		return ret;
2720 	}
2721 
2722 	return 0;
2723 }
2724 
2725 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2726 {
2727 	struct hclge_mac *mac = &hdev->hw.mac;
2728 	int ret;
2729 
2730 	duplex = hclge_check_speed_dup(duplex, speed);
2731 	if (!mac->support_autoneg && mac->speed == speed &&
2732 	    mac->duplex == duplex)
2733 		return 0;
2734 
2735 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2736 	if (ret)
2737 		return ret;
2738 
2739 	hdev->hw.mac.speed = speed;
2740 	hdev->hw.mac.duplex = duplex;
2741 
2742 	return 0;
2743 }
2744 
2745 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2746 				     u8 duplex)
2747 {
2748 	struct hclge_vport *vport = hclge_get_vport(handle);
2749 	struct hclge_dev *hdev = vport->back;
2750 
2751 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2752 }
2753 
2754 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2755 {
2756 	struct hclge_config_auto_neg_cmd *req;
2757 	struct hclge_desc desc;
2758 	u32 flag = 0;
2759 	int ret;
2760 
2761 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2762 
2763 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2764 	if (enable)
2765 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2766 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2767 
2768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2769 	if (ret)
2770 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2771 			ret);
2772 
2773 	return ret;
2774 }
2775 
2776 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2777 {
2778 	struct hclge_vport *vport = hclge_get_vport(handle);
2779 	struct hclge_dev *hdev = vport->back;
2780 
2781 	if (!hdev->hw.mac.support_autoneg) {
2782 		if (enable) {
2783 			dev_err(&hdev->pdev->dev,
2784 				"autoneg is not supported by current port\n");
2785 			return -EOPNOTSUPP;
2786 		} else {
2787 			return 0;
2788 		}
2789 	}
2790 
2791 	return hclge_set_autoneg_en(hdev, enable);
2792 }
2793 
2794 static int hclge_get_autoneg(struct hnae3_handle *handle)
2795 {
2796 	struct hclge_vport *vport = hclge_get_vport(handle);
2797 	struct hclge_dev *hdev = vport->back;
2798 	struct phy_device *phydev = hdev->hw.mac.phydev;
2799 
2800 	if (phydev)
2801 		return phydev->autoneg;
2802 
2803 	return hdev->hw.mac.autoneg;
2804 }
2805 
2806 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2807 {
2808 	struct hclge_vport *vport = hclge_get_vport(handle);
2809 	struct hclge_dev *hdev = vport->back;
2810 	int ret;
2811 
2812 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2813 
2814 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2815 	if (ret)
2816 		return ret;
2817 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2818 }
2819 
2820 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2821 {
2822 	struct hclge_vport *vport = hclge_get_vport(handle);
2823 	struct hclge_dev *hdev = vport->back;
2824 
2825 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2826 		return hclge_set_autoneg_en(hdev, !halt);
2827 
2828 	return 0;
2829 }
2830 
2831 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2832 {
2833 	struct hclge_config_fec_cmd *req;
2834 	struct hclge_desc desc;
2835 	int ret;
2836 
2837 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2838 
2839 	req = (struct hclge_config_fec_cmd *)desc.data;
2840 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2841 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2842 	if (fec_mode & BIT(HNAE3_FEC_RS))
2843 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2844 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2845 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2846 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2847 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2848 
2849 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2850 	if (ret)
2851 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2852 
2853 	return ret;
2854 }
2855 
2856 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2857 {
2858 	struct hclge_vport *vport = hclge_get_vport(handle);
2859 	struct hclge_dev *hdev = vport->back;
2860 	struct hclge_mac *mac = &hdev->hw.mac;
2861 	int ret;
2862 
2863 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2864 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2865 		return -EINVAL;
2866 	}
2867 
2868 	ret = hclge_set_fec_hw(hdev, fec_mode);
2869 	if (ret)
2870 		return ret;
2871 
2872 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2873 	return 0;
2874 }
2875 
2876 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2877 			  u8 *fec_mode)
2878 {
2879 	struct hclge_vport *vport = hclge_get_vport(handle);
2880 	struct hclge_dev *hdev = vport->back;
2881 	struct hclge_mac *mac = &hdev->hw.mac;
2882 
2883 	if (fec_ability)
2884 		*fec_ability = mac->fec_ability;
2885 	if (fec_mode)
2886 		*fec_mode = mac->fec_mode;
2887 }
2888 
2889 static int hclge_mac_init(struct hclge_dev *hdev)
2890 {
2891 	struct hclge_mac *mac = &hdev->hw.mac;
2892 	int ret;
2893 
2894 	hdev->support_sfp_query = true;
2895 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2896 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2897 					 hdev->hw.mac.duplex);
2898 	if (ret)
2899 		return ret;
2900 
2901 	if (hdev->hw.mac.support_autoneg) {
2902 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2903 		if (ret)
2904 			return ret;
2905 	}
2906 
2907 	mac->link = 0;
2908 
2909 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2910 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2911 		if (ret)
2912 			return ret;
2913 	}
2914 
2915 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2916 	if (ret) {
2917 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2918 		return ret;
2919 	}
2920 
2921 	ret = hclge_set_default_loopback(hdev);
2922 	if (ret)
2923 		return ret;
2924 
2925 	ret = hclge_buffer_alloc(hdev);
2926 	if (ret)
2927 		dev_err(&hdev->pdev->dev,
2928 			"allocate buffer fail, ret=%d\n", ret);
2929 
2930 	return ret;
2931 }
2932 
2933 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2934 {
2935 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2936 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2937 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2938 }
2939 
2940 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2941 {
2942 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2943 	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2944 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2945 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2946 }
2947 
2948 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2949 {
2950 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2951 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2952 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2953 }
2954 
2955 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2956 {
2957 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2958 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2959 		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2960 }
2961 
2962 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2963 {
2964 	struct hclge_link_status_cmd *req;
2965 	struct hclge_desc desc;
2966 	int ret;
2967 
2968 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2969 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2970 	if (ret) {
2971 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2972 			ret);
2973 		return ret;
2974 	}
2975 
2976 	req = (struct hclge_link_status_cmd *)desc.data;
2977 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2978 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2979 
2980 	return 0;
2981 }
2982 
2983 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2984 {
2985 	struct phy_device *phydev = hdev->hw.mac.phydev;
2986 
2987 	*link_status = HCLGE_LINK_STATUS_DOWN;
2988 
2989 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2990 		return 0;
2991 
2992 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2993 		return 0;
2994 
2995 	return hclge_get_mac_link_status(hdev, link_status);
2996 }
2997 
2998 static void hclge_push_link_status(struct hclge_dev *hdev)
2999 {
3000 	struct hclge_vport *vport;
3001 	int ret;
3002 	u16 i;
3003 
3004 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3005 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3006 
3007 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3008 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3009 			continue;
3010 
3011 		ret = hclge_push_vf_link_status(vport);
3012 		if (ret) {
3013 			dev_err(&hdev->pdev->dev,
3014 				"failed to push link status to vf%u, ret = %d\n",
3015 				i, ret);
3016 		}
3017 	}
3018 }
3019 
3020 static void hclge_update_link_status(struct hclge_dev *hdev)
3021 {
3022 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3023 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3024 	struct hnae3_client *rclient = hdev->roce_client;
3025 	struct hnae3_client *client = hdev->nic_client;
3026 	int state;
3027 	int ret;
3028 
3029 	if (!client)
3030 		return;
3031 
3032 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3033 		return;
3034 
3035 	ret = hclge_get_mac_phy_link(hdev, &state);
3036 	if (ret) {
3037 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3038 		return;
3039 	}
3040 
3041 	if (state != hdev->hw.mac.link) {
3042 		hdev->hw.mac.link = state;
3043 		client->ops->link_status_change(handle, state);
3044 		hclge_config_mac_tnl_int(hdev, state);
3045 		if (rclient && rclient->ops->link_status_change)
3046 			rclient->ops->link_status_change(rhandle, state);
3047 
3048 		hclge_push_link_status(hdev);
3049 	}
3050 
3051 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3052 }
3053 
3054 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3055 {
3056 	u32 speed_ability;
3057 
3058 	if (hclge_get_speed_bit(mac->speed, &speed_ability))
3059 		return;
3060 
3061 	switch (mac->module_type) {
3062 	case HNAE3_MODULE_TYPE_FIBRE_LR:
3063 		hclge_convert_setting_lr(speed_ability, mac->advertising);
3064 		break;
3065 	case HNAE3_MODULE_TYPE_FIBRE_SR:
3066 	case HNAE3_MODULE_TYPE_AOC:
3067 		hclge_convert_setting_sr(speed_ability, mac->advertising);
3068 		break;
3069 	case HNAE3_MODULE_TYPE_CR:
3070 		hclge_convert_setting_cr(speed_ability, mac->advertising);
3071 		break;
3072 	case HNAE3_MODULE_TYPE_KR:
3073 		hclge_convert_setting_kr(speed_ability, mac->advertising);
3074 		break;
3075 	default:
3076 		break;
3077 	}
3078 }
3079 
3080 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3081 {
3082 	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3083 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3084 				 mac->advertising);
3085 	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3086 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3087 				 mac->advertising);
3088 	else
3089 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3090 				 mac->advertising);
3091 }
3092 
3093 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3094 {
3095 	struct hclge_mac *mac = &hdev->hw.mac;
3096 	bool rx_en, tx_en;
3097 
3098 	switch (hdev->fc_mode_last_time) {
3099 	case HCLGE_FC_RX_PAUSE:
3100 		rx_en = true;
3101 		tx_en = false;
3102 		break;
3103 	case HCLGE_FC_TX_PAUSE:
3104 		rx_en = false;
3105 		tx_en = true;
3106 		break;
3107 	case HCLGE_FC_FULL:
3108 		rx_en = true;
3109 		tx_en = true;
3110 		break;
3111 	default:
3112 		rx_en = false;
3113 		tx_en = false;
3114 		break;
3115 	}
3116 
3117 	linkmode_set_pause(mac->advertising, tx_en, rx_en);
3118 }
3119 
3120 static void hclge_update_advertising(struct hclge_dev *hdev)
3121 {
3122 	struct hclge_mac *mac = &hdev->hw.mac;
3123 
3124 	linkmode_zero(mac->advertising);
3125 	hclge_update_speed_advertising(mac);
3126 	hclge_update_fec_advertising(mac);
3127 	hclge_update_pause_advertising(hdev);
3128 }
3129 
3130 static void hclge_update_port_capability(struct hclge_dev *hdev,
3131 					 struct hclge_mac *mac)
3132 {
3133 	if (hnae3_dev_fec_supported(hdev))
3134 		/* update fec ability by speed */
3135 		hclge_convert_setting_fec(mac);
3136 
	/* The firmware cannot identify the backplane type; the media type
	 * read from the configuration helps to deal with it.
3139 	 */
3140 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3141 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3142 		mac->module_type = HNAE3_MODULE_TYPE_KR;
3143 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3144 		mac->module_type = HNAE3_MODULE_TYPE_TP;
3145 
3146 	if (mac->support_autoneg) {
3147 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3148 		linkmode_copy(mac->advertising, mac->supported);
3149 	} else {
3150 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3151 				   mac->supported);
3152 		hclge_update_advertising(hdev);
3153 	}
3154 }
3155 
3156 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3157 {
3158 	struct hclge_sfp_info_cmd *resp;
3159 	struct hclge_desc desc;
3160 	int ret;
3161 
3162 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3163 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3164 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3165 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
3168 		return ret;
3169 	} else if (ret) {
3170 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3171 		return ret;
3172 	}
3173 
3174 	*speed = le32_to_cpu(resp->speed);
3175 
3176 	return 0;
3177 }
3178 
3179 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3180 {
3181 	struct hclge_sfp_info_cmd *resp;
3182 	struct hclge_desc desc;
3183 	int ret;
3184 
3185 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3186 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3187 
3188 	resp->query_type = QUERY_ACTIVE_SPEED;
3189 
3190 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3191 	if (ret == -EOPNOTSUPP) {
3192 		dev_warn(&hdev->pdev->dev,
3193 			 "IMP does not support get SFP info %d\n", ret);
3194 		return ret;
3195 	} else if (ret) {
3196 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3197 		return ret;
3198 	}
3199 
	/* In some cases, the mac speed queried from the IMP may be 0; it
	 * should not be written to mac->speed.
3202 	 */
3203 	if (!le32_to_cpu(resp->speed))
3204 		return 0;
3205 
3206 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version,
	 * so do not update these params
3209 	 */
3210 	if (resp->speed_ability) {
3211 		mac->module_type = le32_to_cpu(resp->module_type);
3212 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3213 		mac->autoneg = resp->autoneg;
3214 		mac->support_autoneg = resp->autoneg_ability;
3215 		mac->speed_type = QUERY_ACTIVE_SPEED;
3216 		if (!resp->active_fec)
3217 			mac->fec_mode = 0;
3218 		else
3219 			mac->fec_mode = BIT(resp->active_fec);
3220 	} else {
3221 		mac->speed_type = QUERY_SFP_SPEED;
3222 	}
3223 
3224 	return 0;
3225 }
3226 
3227 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3228 					struct ethtool_link_ksettings *cmd)
3229 {
3230 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3231 	struct hclge_vport *vport = hclge_get_vport(handle);
3232 	struct hclge_phy_link_ksetting_0_cmd *req0;
3233 	struct hclge_phy_link_ksetting_1_cmd *req1;
3234 	u32 supported, advertising, lp_advertising;
3235 	struct hclge_dev *hdev = vport->back;
3236 	int ret;
3237 
3238 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3239 				   true);
3240 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3241 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3242 				   true);
3243 
3244 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3245 	if (ret) {
3246 		dev_err(&hdev->pdev->dev,
3247 			"failed to get phy link ksetting, ret = %d.\n", ret);
3248 		return ret;
3249 	}
3250 
3251 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3252 	cmd->base.autoneg = req0->autoneg;
3253 	cmd->base.speed = le32_to_cpu(req0->speed);
3254 	cmd->base.duplex = req0->duplex;
3255 	cmd->base.port = req0->port;
3256 	cmd->base.transceiver = req0->transceiver;
3257 	cmd->base.phy_address = req0->phy_address;
3258 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3259 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3260 	supported = le32_to_cpu(req0->supported);
3261 	advertising = le32_to_cpu(req0->advertising);
3262 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3263 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3264 						supported);
3265 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3266 						advertising);
3267 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3268 						lp_advertising);
3269 
3270 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3271 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3272 	cmd->base.master_slave_state = req1->master_slave_state;
3273 
3274 	return 0;
3275 }
3276 
3277 static int
3278 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3279 			     const struct ethtool_link_ksettings *cmd)
3280 {
3281 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3282 	struct hclge_vport *vport = hclge_get_vport(handle);
3283 	struct hclge_phy_link_ksetting_0_cmd *req0;
3284 	struct hclge_phy_link_ksetting_1_cmd *req1;
3285 	struct hclge_dev *hdev = vport->back;
3286 	u32 advertising;
3287 	int ret;
3288 
3289 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3290 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3291 	     (cmd->base.duplex != DUPLEX_HALF &&
3292 	      cmd->base.duplex != DUPLEX_FULL)))
3293 		return -EINVAL;
3294 
3295 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3296 				   false);
3297 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3298 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3299 				   false);
3300 
3301 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3302 	req0->autoneg = cmd->base.autoneg;
3303 	req0->speed = cpu_to_le32(cmd->base.speed);
3304 	req0->duplex = cmd->base.duplex;
3305 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3306 						cmd->link_modes.advertising);
3307 	req0->advertising = cpu_to_le32(advertising);
3308 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3309 
3310 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3311 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3312 
3313 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3314 	if (ret) {
3315 		dev_err(&hdev->pdev->dev,
3316 			"failed to set phy link ksettings, ret = %d.\n", ret);
3317 		return ret;
3318 	}
3319 
3320 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3321 	hdev->hw.mac.speed = cmd->base.speed;
3322 	hdev->hw.mac.duplex = cmd->base.duplex;
3323 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3324 
3325 	return 0;
3326 }
3327 
3328 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3329 {
3330 	struct ethtool_link_ksettings cmd;
3331 	int ret;
3332 
3333 	if (!hnae3_dev_phy_imp_supported(hdev))
3334 		return 0;
3335 
3336 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3337 	if (ret)
3338 		return ret;
3339 
3340 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3341 	hdev->hw.mac.speed = cmd.base.speed;
3342 	hdev->hw.mac.duplex = cmd.base.duplex;
3343 
3344 	return 0;
3345 }
3346 
3347 static int hclge_tp_port_init(struct hclge_dev *hdev)
3348 {
3349 	struct ethtool_link_ksettings cmd;
3350 
3351 	if (!hnae3_dev_phy_imp_supported(hdev))
3352 		return 0;
3353 
3354 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3355 	cmd.base.speed = hdev->hw.mac.speed;
3356 	cmd.base.duplex = hdev->hw.mac.duplex;
3357 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3358 
3359 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3360 }
3361 
3362 static int hclge_update_port_info(struct hclge_dev *hdev)
3363 {
3364 	struct hclge_mac *mac = &hdev->hw.mac;
3365 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3366 	int ret;
3367 
3368 	/* get the port info from SFP cmd if not copper port */
3369 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3370 		return hclge_update_tp_port_info(hdev);
3371 
3372 	/* if IMP does not support getting SFP/qSFP info, return directly */
3373 	if (!hdev->support_sfp_query)
3374 		return 0;
3375 
3376 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3377 		ret = hclge_get_sfp_info(hdev, mac);
3378 	else
3379 		ret = hclge_get_sfp_speed(hdev, &speed);
3380 
3381 	if (ret == -EOPNOTSUPP) {
3382 		hdev->support_sfp_query = false;
3383 		return ret;
3384 	} else if (ret) {
3385 		return ret;
3386 	}
3387 
3388 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3389 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3390 			hclge_update_port_capability(hdev, mac);
3391 			return 0;
3392 		}
3393 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3394 					       HCLGE_MAC_FULL);
3395 	} else {
3396 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3397 			return 0; /* do nothing if no SFP */
3398 
3399 		/* must config full duplex for SFP */
3400 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3401 	}
3402 }
3403 
3404 static int hclge_get_status(struct hnae3_handle *handle)
3405 {
3406 	struct hclge_vport *vport = hclge_get_vport(handle);
3407 	struct hclge_dev *hdev = vport->back;
3408 
3409 	hclge_update_link_status(hdev);
3410 
3411 	return hdev->hw.mac.link;
3412 }
3413 
3414 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3415 {
3416 	if (!pci_num_vf(hdev->pdev)) {
3417 		dev_err(&hdev->pdev->dev,
3418 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3419 		return NULL;
3420 	}
3421 
3422 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3423 		dev_err(&hdev->pdev->dev,
3424 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3425 			vf, pci_num_vf(hdev->pdev));
3426 		return NULL;
3427 	}
3428 
3429 	/* VFs start from 1 in the vport array */
3430 	vf += HCLGE_VF_VPORT_START_NUM;
3431 	return &hdev->vport[vf];
3432 }
3433 
3434 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3435 			       struct ifla_vf_info *ivf)
3436 {
3437 	struct hclge_vport *vport = hclge_get_vport(handle);
3438 	struct hclge_dev *hdev = vport->back;
3439 
3440 	vport = hclge_get_vf_vport(hdev, vf);
3441 	if (!vport)
3442 		return -EINVAL;
3443 
3444 	ivf->vf = vf;
3445 	ivf->linkstate = vport->vf_info.link_state;
3446 	ivf->spoofchk = vport->vf_info.spoofchk;
3447 	ivf->trusted = vport->vf_info.trusted;
3448 	ivf->min_tx_rate = 0;
3449 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3450 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3451 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3452 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3453 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3454 
3455 	return 0;
3456 }
3457 
3458 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3459 				   int link_state)
3460 {
3461 	struct hclge_vport *vport = hclge_get_vport(handle);
3462 	struct hclge_dev *hdev = vport->back;
3463 	int link_state_old;
3464 	int ret;
3465 
3466 	vport = hclge_get_vf_vport(hdev, vf);
3467 	if (!vport)
3468 		return -EINVAL;
3469 
3470 	link_state_old = vport->vf_info.link_state;
3471 	vport->vf_info.link_state = link_state;
3472 
3473 	ret = hclge_push_vf_link_status(vport);
3474 	if (ret) {
3475 		vport->vf_info.link_state = link_state_old;
3476 		dev_err(&hdev->pdev->dev,
3477 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3478 	}
3479 
3480 	return ret;
3481 }
3482 
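/* Decode the vector0 interrupt into a single event type. Sources are checked
 * in priority order: IMP reset, global reset, hardware/MSI-X errors, PTP,
 * then mailbox; anything else is reported as "other".
 */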
3483 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3484 {
3485 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3486 
3487 	/* fetch the events from their corresponding regs */
3488 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3489 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3490 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3491 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3492 
3493 	/* Assumption: if reset and mailbox events happen to be reported
3494 	 * together, only the reset event is processed in this pass and the
3495 	 * mailbox events are deferred. Since the RX CMDQ event has not been
3496 	 * cleared this time, the hardware will raise another interrupt just
3497 	 * for the mailbox.
3498 	 *
3499 	 * check for vector0 reset event sources
3500 	 */
3501 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3502 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3503 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3504 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3505 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3506 		hdev->rst_stats.imp_rst_cnt++;
3507 		return HCLGE_VECTOR0_EVENT_RST;
3508 	}
3509 
3510 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3511 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3512 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3513 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3514 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3515 		hdev->rst_stats.global_rst_cnt++;
3516 		return HCLGE_VECTOR0_EVENT_RST;
3517 	}
3518 
3519 	/* check for vector0 msix event and hardware error event source */
3520 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3521 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3522 		return HCLGE_VECTOR0_EVENT_ERR;
3523 
3524 	/* check for vector0 ptp event source */
3525 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3526 		*clearval = msix_src_reg;
3527 		return HCLGE_VECTOR0_EVENT_PTP;
3528 	}
3529 
3530 	/* check for vector0 mailbox(=CMDQ RX) event source */
3531 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3532 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3533 		*clearval = cmdq_src_reg;
3534 		return HCLGE_VECTOR0_EVENT_MBX;
3535 	}
3536 
3537 	/* print other vector0 event source */
3538 	dev_info(&hdev->pdev->dev,
3539 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3540 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3541 
3542 	return HCLGE_VECTOR0_EVENT_OTHER;
3543 }
3544 
3545 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3546 				    u32 regclr)
3547 {
3548 	switch (event_type) {
3549 	case HCLGE_VECTOR0_EVENT_PTP:
3550 	case HCLGE_VECTOR0_EVENT_RST:
3551 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3552 		break;
3553 	case HCLGE_VECTOR0_EVENT_MBX:
3554 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3555 		break;
3556 	default:
3557 		break;
3558 	}
3559 }
3560 
3561 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3562 {
3563 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3564 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3565 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3566 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3567 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3568 }
3569 
3570 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3571 {
3572 	writel(enable ? 1 : 0, vector->addr);
3573 }
3574 
3575 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3576 {
3577 	struct hclge_dev *hdev = data;
3578 	unsigned long flags;
3579 	u32 clearval = 0;
3580 	u32 event_cause;
3581 
3582 	hclge_enable_vector(&hdev->misc_vector, false);
3583 	event_cause = hclge_check_event_cause(hdev, &clearval);
3584 
3585 	/* vector 0 interrupt is shared by reset, mailbox, error and ptp source events. */
3586 	switch (event_cause) {
3587 	case HCLGE_VECTOR0_EVENT_ERR:
3588 		hclge_errhand_task_schedule(hdev);
3589 		break;
3590 	case HCLGE_VECTOR0_EVENT_RST:
3591 		hclge_reset_task_schedule(hdev);
3592 		break;
3593 	case HCLGE_VECTOR0_EVENT_PTP:
3594 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3595 		hclge_ptp_clean_tx_hwts(hdev);
3596 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3597 		break;
3598 	case HCLGE_VECTOR0_EVENT_MBX:
3599 		/* If we are here then either:
3600 		 * 1. no mbx task is being handled and none is scheduled, or
3601 		 * 2. an mbx task is being handled but nothing more is
3602 		 *    scheduled.
3603 		 * In both cases we should schedule the mbx task, as this
3604 		 * interrupt reports more mbx messages.
3605 		 */
3608 		hclge_mbx_task_schedule(hdev);
3609 		break;
3610 	default:
3611 		dev_warn(&hdev->pdev->dev,
3612 			 "received unknown or unhandled event of vector0\n");
3613 		break;
3614 	}
3615 
3616 	hclge_clear_event_cause(hdev, event_cause, clearval);
3617 
3618 	/* Enable interrupt if it is not caused by reset event or error event */
3619 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3620 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3621 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3622 		hclge_enable_vector(&hdev->misc_vector, true);
3623 
3624 	return IRQ_HANDLED;
3625 }
3626 
3627 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3628 {
3629 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3630 		dev_warn(&hdev->pdev->dev,
3631 			 "vector(vector_id %d) has been freed.\n", vector_id);
3632 		return;
3633 	}
3634 
3635 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3636 	hdev->num_msi_left += 1;
3637 	hdev->num_msi_used -= 1;
3638 }
3639 
3640 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3641 {
3642 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3643 
3644 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3645 
3646 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3647 	hdev->vector_status[0] = 0;
3648 
3649 	hdev->num_msi_left -= 1;
3650 	hdev->num_msi_used += 1;
3651 }
3652 
3653 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3654 {
3655 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3656 			      &hdev->affinity_mask);
3657 }
3658 
3659 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3660 {
3661 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3662 }
3663 
3664 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3665 {
3666 	int ret;
3667 
3668 	hclge_get_misc_vector(hdev);
3669 
3670 	/* this irq is freed explicitly in hclge_misc_irq_uninit() */
3671 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3672 		 HCLGE_NAME, pci_name(hdev->pdev));
3673 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3674 			  0, hdev->misc_vector.name, hdev);
3675 	if (ret) {
3676 		hclge_free_vector(hdev, 0);
3677 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3678 			hdev->misc_vector.vector_irq);
3679 	}
3680 
3681 	return ret;
3682 }
3683 
3684 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3685 {
3686 	free_irq(hdev->misc_vector.vector_irq, hdev);
3687 	hclge_free_vector(hdev, 0);
3688 }
3689 
3690 int hclge_notify_client(struct hclge_dev *hdev,
3691 			enum hnae3_reset_notify_type type)
3692 {
3693 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3694 	struct hnae3_client *client = hdev->nic_client;
3695 	int ret;
3696 
3697 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3698 		return 0;
3699 
3700 	if (!client->ops->reset_notify)
3701 		return -EOPNOTSUPP;
3702 
3703 	ret = client->ops->reset_notify(handle, type);
3704 	if (ret)
3705 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3706 			type, ret);
3707 
3708 	return ret;
3709 }
3710 
3711 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3712 				    enum hnae3_reset_notify_type type)
3713 {
3714 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3715 	struct hnae3_client *client = hdev->roce_client;
3716 	int ret;
3717 
3718 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3719 		return 0;
3720 
3721 	if (!client->ops->reset_notify)
3722 		return -EOPNOTSUPP;
3723 
3724 	ret = client->ops->reset_notify(handle, type);
3725 	if (ret)
3726 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3727 			type, ret);
3728 
3729 	return ret;
3730 }
3731 
3732 static int hclge_reset_wait(struct hclge_dev *hdev)
3733 {
3734 #define HCLGE_RESET_WAIT_MS	100
3735 #define HCLGE_RESET_WAIT_CNT	350
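	/* poll every 100 ms, up to 350 times: worst case is a 35 second wait
	 * before reporting -EBUSY
	 */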
3736 
3737 	u32 val, reg, reg_bit;
3738 	u32 cnt = 0;
3739 
3740 	switch (hdev->reset_type) {
3741 	case HNAE3_IMP_RESET:
3742 		reg = HCLGE_GLOBAL_RESET_REG;
3743 		reg_bit = HCLGE_IMP_RESET_BIT;
3744 		break;
3745 	case HNAE3_GLOBAL_RESET:
3746 		reg = HCLGE_GLOBAL_RESET_REG;
3747 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3748 		break;
3749 	case HNAE3_FUNC_RESET:
3750 		reg = HCLGE_FUN_RST_ING;
3751 		reg_bit = HCLGE_FUN_RST_ING_B;
3752 		break;
3753 	default:
3754 		dev_err(&hdev->pdev->dev,
3755 			"Wait for unsupported reset type: %d\n",
3756 			hdev->reset_type);
3757 		return -EINVAL;
3758 	}
3759 
3760 	val = hclge_read_dev(&hdev->hw, reg);
3761 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3762 		msleep(HCLGE_RESET_WAIT_MS);
3763 		val = hclge_read_dev(&hdev->hw, reg);
3764 		cnt++;
3765 	}
3766 
3767 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3768 		dev_warn(&hdev->pdev->dev,
3769 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3770 		return -EBUSY;
3771 	}
3772 
3773 	return 0;
3774 }
3775 
3776 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3777 {
3778 	struct hclge_vf_rst_cmd *req;
3779 	struct hclge_desc desc;
3780 
3781 	req = (struct hclge_vf_rst_cmd *)desc.data;
3782 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3783 	req->dest_vfid = func_id;
3784 
3785 	if (reset)
3786 		req->vf_rst = 0x1;
3787 
3788 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3789 }
3790 
3791 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3792 {
3793 	int i;
3794 
3795 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3796 		struct hclge_vport *vport = &hdev->vport[i];
3797 		int ret;
3798 
3799 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3800 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3801 		if (ret) {
3802 			dev_err(&hdev->pdev->dev,
3803 				"set vf(%u) rst failed %d!\n",
3804 				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3805 				ret);
3806 			return ret;
3807 		}
3808 
3809 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3810 			continue;
3811 
3812 		/* Inform VF to process the reset.
3813 		 * hclge_inform_reset_assert_to_vf may fail if VF
3814 		 * driver is not loaded.
3815 		 */
3816 		ret = hclge_inform_reset_assert_to_vf(vport);
3817 		if (ret)
3818 			dev_warn(&hdev->pdev->dev,
3819 				 "inform reset to vf(%u) failed %d!\n",
3820 				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3821 				 ret);
3822 	}
3823 
3824 	return 0;
3825 }
3826 
3827 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3828 {
3829 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3830 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3831 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3832 		return;
3833 
3834 	hclge_mbx_handler(hdev);
3835 
3836 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3837 }
3838 
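/* Wait until all VFs report that they are ready for the function reset. The
 * PF polls HCLGE_OPC_QUERY_VF_RST_RDY, servicing the mailbox between attempts
 * so VFs can bring their netdevs down, and warns if the VFs never become
 * ready within HCLGE_PF_RESET_SYNC_CNT attempts.
 */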
3839 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3840 {
3841 	struct hclge_pf_rst_sync_cmd *req;
3842 	struct hclge_desc desc;
3843 	int cnt = 0;
3844 	int ret;
3845 
3846 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3847 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3848 
3849 	do {
3850 		/* VFs need to bring their netdev down via mbx during PF or FLR reset */
3851 		hclge_mailbox_service_task(hdev);
3852 
3853 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3854 		/* to be compatible with old firmware, wait
3855 		 * 100 ms for the VFs to stop IO
3856 		 */
3857 		if (ret == -EOPNOTSUPP) {
3858 			msleep(HCLGE_RESET_SYNC_TIME);
3859 			return;
3860 		} else if (ret) {
3861 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3862 				 ret);
3863 			return;
3864 		} else if (req->all_vf_ready) {
3865 			return;
3866 		}
3867 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3868 		hclge_cmd_reuse_desc(&desc, true);
3869 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3870 
3871 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3872 }
3873 
3874 void hclge_report_hw_error(struct hclge_dev *hdev,
3875 			   enum hnae3_hw_error_type type)
3876 {
3877 	struct hnae3_client *client = hdev->nic_client;
3878 
3879 	if (!client || !client->ops->process_hw_error ||
3880 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3881 		return;
3882 
3883 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3884 }
3885 
3886 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3887 {
3888 	u32 reg_val;
3889 
3890 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3892 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3893 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3894 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3895 	}
3896 
3897 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3898 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3899 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3900 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3901 	}
3902 }
3903 
3904 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3905 {
3906 	struct hclge_desc desc;
3907 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3908 	int ret;
3909 
3910 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3911 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3912 	req->fun_reset_vfid = func_id;
3913 
3914 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3915 	if (ret)
3916 		dev_err(&hdev->pdev->dev,
3917 			"send function reset cmd fail, status = %d\n", ret);
3918 
3919 	return ret;
3920 }
3921 
3922 static void hclge_do_reset(struct hclge_dev *hdev)
3923 {
3924 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3925 	struct pci_dev *pdev = hdev->pdev;
3926 	u32 val;
3927 
3928 	if (hclge_get_hw_reset_stat(handle)) {
3929 		dev_info(&pdev->dev, "hardware reset not finished\n");
3930 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3931 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3932 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3933 		return;
3934 	}
3935 
3936 	switch (hdev->reset_type) {
3937 	case HNAE3_IMP_RESET:
3938 		dev_info(&pdev->dev, "IMP reset requested\n");
3939 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3940 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3941 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3942 		break;
3943 	case HNAE3_GLOBAL_RESET:
3944 		dev_info(&pdev->dev, "global reset requested\n");
3945 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3946 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3947 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3948 		break;
3949 	case HNAE3_FUNC_RESET:
3950 		dev_info(&pdev->dev, "PF reset requested\n");
3951 		/* schedule again to check later */
3952 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3953 		hclge_reset_task_schedule(hdev);
3954 		break;
3955 	default:
3956 		dev_warn(&pdev->dev,
3957 			 "unsupported reset type: %d\n", hdev->reset_type);
3958 		break;
3959 	}
3960 }
3961 
3962 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3963 						   unsigned long *addr)
3964 {
3965 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3966 	struct hclge_dev *hdev = ae_dev->priv;
3967 
3968 	/* return the highest priority reset level amongst all */
3969 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3970 		rst_level = HNAE3_IMP_RESET;
3971 		clear_bit(HNAE3_IMP_RESET, addr);
3972 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3973 		clear_bit(HNAE3_FUNC_RESET, addr);
3974 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3975 		rst_level = HNAE3_GLOBAL_RESET;
3976 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3977 		clear_bit(HNAE3_FUNC_RESET, addr);
3978 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3979 		rst_level = HNAE3_FUNC_RESET;
3980 		clear_bit(HNAE3_FUNC_RESET, addr);
3981 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3982 		rst_level = HNAE3_FLR_RESET;
3983 		clear_bit(HNAE3_FLR_RESET, addr);
3984 	}
3985 
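	/* do not report a lower-level reset while a higher-level reset is
	 * already being handled
	 */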
3986 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3987 	    rst_level < hdev->reset_type)
3988 		return HNAE3_NONE_RESET;
3989 
3990 	return rst_level;
3991 }
3992 
3993 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3994 {
3995 	u32 clearval = 0;
3996 
3997 	switch (hdev->reset_type) {
3998 	case HNAE3_IMP_RESET:
3999 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
4000 		break;
4001 	case HNAE3_GLOBAL_RESET:
4002 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
4003 		break;
4004 	default:
4005 		break;
4006 	}
4007 
4008 	if (!clearval)
4009 		return;
4010 
4011 	/* For revision 0x20, the reset interrupt source
4012 	 * can only be cleared after the hardware reset is done
4013 	 */
4014 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4015 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4016 				clearval);
4017 
4018 	hclge_enable_vector(&hdev->misc_vector, true);
4019 }
4020 
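/* The reset handshake is signalled to the hardware by toggling the
 * HCLGE_NIC_SW_RST_RDY bit in the CSQ depth register: it is set once the
 * driver's preparatory work is done and cleared again after re-initialization.
 */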
4021 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4022 {
4023 	u32 reg_val;
4024 
4025 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
4026 	if (enable)
4027 		reg_val |= HCLGE_NIC_SW_RST_RDY;
4028 	else
4029 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
4030 
4031 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
4032 }
4033 
4034 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4035 {
4036 	int ret;
4037 
4038 	ret = hclge_set_all_vf_rst(hdev, true);
4039 	if (ret)
4040 		return ret;
4041 
4042 	hclge_func_reset_sync_vf(hdev);
4043 
4044 	return 0;
4045 }
4046 
4047 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4048 {
4049 	u32 reg_val;
4050 	int ret = 0;
4051 
4052 	switch (hdev->reset_type) {
4053 	case HNAE3_FUNC_RESET:
4054 		ret = hclge_func_reset_notify_vf(hdev);
4055 		if (ret)
4056 			return ret;
4057 
4058 		ret = hclge_func_reset_cmd(hdev, 0);
4059 		if (ret) {
4060 			dev_err(&hdev->pdev->dev,
4061 				"asserting function reset fail %d!\n", ret);
4062 			return ret;
4063 		}
4064 
4065 		/* After performing PF reset, it is not necessary to do the
4066 		 * mailbox handling or send any command to firmware, because
4067 		 * any mailbox handling or command to firmware is only valid
4068 		 * after hclge_cmd_init is called.
4069 		 */
4070 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
4071 		hdev->rst_stats.pf_rst_cnt++;
4072 		break;
4073 	case HNAE3_FLR_RESET:
4074 		ret = hclge_func_reset_notify_vf(hdev);
4075 		if (ret)
4076 			return ret;
4077 		break;
4078 	case HNAE3_IMP_RESET:
4079 		hclge_handle_imp_error(hdev);
4080 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4081 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4082 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4083 		break;
4084 	default:
4085 		break;
4086 	}
4087 
4088 	/* inform hardware that preparatory work is done */
4089 	msleep(HCLGE_RESET_SYNC_TIME);
4090 	hclge_reset_handshake(hdev, true);
4091 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4092 
4093 	return ret;
4094 }
4095 
4096 static void hclge_show_rst_info(struct hclge_dev *hdev)
4097 {
4098 	char *buf;
4099 
4100 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4101 	if (!buf)
4102 		return;
4103 
4104 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4105 
4106 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4107 
4108 	kfree(buf);
4109 }
4110 
4111 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4112 {
4113 #define MAX_RESET_FAIL_CNT 5
4114 
4115 	if (hdev->reset_pending) {
4116 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4117 			 hdev->reset_pending);
4118 		return true;
4119 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4120 		   HCLGE_RESET_INT_M) {
4121 		dev_info(&hdev->pdev->dev,
4122 			 "reset failed because of a new reset interrupt\n");
4123 		hclge_clear_reset_cause(hdev);
4124 		return false;
4125 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4126 		hdev->rst_stats.reset_fail_cnt++;
4127 		set_bit(hdev->reset_type, &hdev->reset_pending);
4128 		dev_info(&hdev->pdev->dev,
4129 			 "re-schedule reset task(%u)\n",
4130 			 hdev->rst_stats.reset_fail_cnt);
4131 		return true;
4132 	}
4133 
4134 	hclge_clear_reset_cause(hdev);
4135 
4136 	/* recover the handshake status when reset fails */
4137 	hclge_reset_handshake(hdev, true);
4138 
4139 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
4140 
4141 	hclge_show_rst_info(hdev);
4142 
4143 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4144 
4145 	return false;
4146 }
4147 
4148 static void hclge_update_reset_level(struct hclge_dev *hdev)
4149 {
4150 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4151 	enum hnae3_reset_type reset_level;
4152 
4153 	/* reset request will not be set during reset, so clear
4154 	 * pending reset request to avoid unnecessary reset
4155 	 * caused by the same reason.
4156 	 */
4157 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4158 
4159 	/* if default_reset_request has a higher level reset request,
4160 	 * it should be handled as soon as possible, since some errors
4161 	 * need this kind of reset to be fixed.
4162 	 */
4163 	reset_level = hclge_get_reset_level(ae_dev,
4164 					    &hdev->default_reset_request);
4165 	if (reset_level != HNAE3_NONE_RESET)
4166 		set_bit(reset_level, &hdev->reset_request);
4167 }
4168 
4169 static int hclge_set_rst_done(struct hclge_dev *hdev)
4170 {
4171 	struct hclge_pf_rst_done_cmd *req;
4172 	struct hclge_desc desc;
4173 	int ret;
4174 
4175 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4176 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4177 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4178 
4179 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4180 	/* To be compatible with the old firmware, which does not support
4181 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4182 	 * return success
4183 	 */
4184 	if (ret == -EOPNOTSUPP) {
4185 		dev_warn(&hdev->pdev->dev,
4186 			 "current firmware does not support command(0x%x)!\n",
4187 			 HCLGE_OPC_PF_RST_DONE);
4188 		return 0;
4189 	} else if (ret) {
4190 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4191 			ret);
4192 	}
4193 
4194 	return ret;
4195 }
4196 
4197 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4198 {
4199 	int ret = 0;
4200 
4201 	switch (hdev->reset_type) {
4202 	case HNAE3_FUNC_RESET:
4203 	case HNAE3_FLR_RESET:
4204 		ret = hclge_set_all_vf_rst(hdev, false);
4205 		break;
4206 	case HNAE3_GLOBAL_RESET:
4207 	case HNAE3_IMP_RESET:
4208 		ret = hclge_set_rst_done(hdev);
4209 		break;
4210 	default:
4211 		break;
4212 	}
4213 
4214 	/* clear the handshake status after re-initialization is done */
4215 	hclge_reset_handshake(hdev, false);
4216 
4217 	return ret;
4218 }
4219 
4220 static int hclge_reset_stack(struct hclge_dev *hdev)
4221 {
4222 	int ret;
4223 
4224 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4225 	if (ret)
4226 		return ret;
4227 
4228 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4229 	if (ret)
4230 		return ret;
4231 
4232 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4233 }
4234 
4235 static int hclge_reset_prepare(struct hclge_dev *hdev)
4236 {
4237 	int ret;
4238 
4239 	hdev->rst_stats.reset_cnt++;
4240 	/* perform reset of the stack & ae device for a client */
4241 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4242 	if (ret)
4243 		return ret;
4244 
4245 	rtnl_lock();
4246 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4247 	rtnl_unlock();
4248 	if (ret)
4249 		return ret;
4250 
4251 	return hclge_reset_prepare_wait(hdev);
4252 }
4253 
4254 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4255 {
4256 	int ret;
4257 
4258 	hdev->rst_stats.hw_reset_done_cnt++;
4259 
4260 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4261 	if (ret)
4262 		return ret;
4263 
4264 	rtnl_lock();
4265 	ret = hclge_reset_stack(hdev);
4266 	rtnl_unlock();
4267 	if (ret)
4268 		return ret;
4269 
4270 	hclge_clear_reset_cause(hdev);
4271 
4272 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4273 	/* ignore the RoCE notify error if the reset has already failed
4274 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4275 	 */
4276 	if (ret &&
4277 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4278 		return ret;
4279 
4280 	ret = hclge_reset_prepare_up(hdev);
4281 	if (ret)
4282 		return ret;
4283 
4284 	rtnl_lock();
4285 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4286 	rtnl_unlock();
4287 	if (ret)
4288 		return ret;
4289 
4290 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4291 	if (ret)
4292 		return ret;
4293 
4294 	hdev->last_reset_time = jiffies;
4295 	hdev->rst_stats.reset_fail_cnt = 0;
4296 	hdev->rst_stats.reset_done_cnt++;
4297 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4298 
4299 	hclge_update_reset_level(hdev);
4300 
4301 	return 0;
4302 }
4303 
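/* Top-level reset flow: prepare (notify clients and assert the reset), wait
 * for the hardware to finish, then rebuild. Any failure is passed to the
 * error handler, which may re-schedule the reset task.
 */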
4304 static void hclge_reset(struct hclge_dev *hdev)
4305 {
4306 	if (hclge_reset_prepare(hdev))
4307 		goto err_reset;
4308 
4309 	if (hclge_reset_wait(hdev))
4310 		goto err_reset;
4311 
4312 	if (hclge_reset_rebuild(hdev))
4313 		goto err_reset;
4314 
4315 	return;
4316 
4317 err_reset:
4318 	if (hclge_reset_err_handle(hdev))
4319 		hclge_reset_task_schedule(hdev);
4320 }
4321 
4322 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4323 {
4324 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4325 	struct hclge_dev *hdev = ae_dev->priv;
4326 
4327 	/* We might end up getting called broadly because of the 2 cases below:
4328 	 * 1. A recoverable error was conveyed through APEI and the only way
4329 	 *    to bring back normalcy is to reset.
4330 	 * 2. A new reset request from the stack due to timeout.
4331 	 *
4332 	 * Check whether this is a new reset request, rather than the watchdog
4333 	 * hitting us again because the last reset attempt did not succeed.
4334 	 * We treat the request as new if the last one did not occur very
4335 	 * recently (watchdog timer = 5*HZ, so check after a sufficiently
4336 	 * large time, say 4*5*HZ). In case of a new request we reset the
4337 	 * "reset level" to PF reset.
4338 	 * A repeat of the most recent request is throttled: it is not
4339 	 * allowed again within HCLGE_RESET_INTERVAL.
4340 	 */
4341 
4342 	if (time_before(jiffies, (hdev->last_reset_time +
4343 				  HCLGE_RESET_INTERVAL))) {
4344 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4345 		return;
4346 	}
4347 
4348 	if (hdev->default_reset_request) {
4349 		hdev->reset_level =
4350 			hclge_get_reset_level(ae_dev,
4351 					      &hdev->default_reset_request);
4352 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4353 		hdev->reset_level = HNAE3_FUNC_RESET;
4354 	}
4355 
4356 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4357 		 hdev->reset_level);
4358 
4359 	/* request reset & schedule reset task */
4360 	set_bit(hdev->reset_level, &hdev->reset_request);
4361 	hclge_reset_task_schedule(hdev);
4362 
4363 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4364 		hdev->reset_level++;
4365 }
4366 
4367 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4368 					enum hnae3_reset_type rst_type)
4369 {
4370 	struct hclge_dev *hdev = ae_dev->priv;
4371 
4372 	set_bit(rst_type, &hdev->default_reset_request);
4373 }
4374 
4375 static void hclge_reset_timer(struct timer_list *t)
4376 {
4377 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4378 
4379 	/* if default_reset_request has no value, it means that this reset
4380 	 * request has already been handled, so just return here
4381 	 */
4382 	if (!hdev->default_reset_request)
4383 		return;
4384 
4385 	dev_info(&hdev->pdev->dev,
4386 		 "triggering reset in reset timer\n");
4387 	hclge_reset_event(hdev->pdev, NULL);
4388 }
4389 
4390 static void hclge_reset_subtask(struct hclge_dev *hdev)
4391 {
4392 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4393 
4394 	/* check if there is any ongoing reset in the hardware. This status
4395 	 * can be checked from reset_pending. If there is, we need to wait
4396 	 * for the hardware to complete the reset.
4397 	 *    a. If we are able to figure out in reasonable time that the
4398 	 *       hardware has fully reset, we can proceed with the driver and
4399 	 *       client reset.
4400 	 *    b. else, we can come back later to check this status, so
4401 	 *       re-schedule now.
4402 	 */
4403 	hdev->last_reset_time = jiffies;
4404 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4405 	if (hdev->reset_type != HNAE3_NONE_RESET)
4406 		hclge_reset(hdev);
4407 
4408 	/* check if we got any *new* reset requests to be honored */
4409 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4410 	if (hdev->reset_type != HNAE3_NONE_RESET)
4411 		hclge_do_reset(hdev);
4412 
4413 	hdev->reset_type = HNAE3_NONE_RESET;
4414 }
4415 
4416 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4417 {
4418 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4419 	enum hnae3_reset_type reset_type;
4420 
4421 	if (ae_dev->hw_err_reset_req) {
4422 		reset_type = hclge_get_reset_level(ae_dev,
4423 						   &ae_dev->hw_err_reset_req);
4424 		hclge_set_def_reset_request(ae_dev, reset_type);
4425 	}
4426 
4427 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4428 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4429 
4430 	/* enable interrupt after error handling is complete */
4431 	hclge_enable_vector(&hdev->misc_vector, true);
4432 }
4433 
4434 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4435 {
4436 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4437 
4438 	ae_dev->hw_err_reset_req = 0;
4439 
4440 	if (hclge_find_error_source(hdev)) {
4441 		hclge_handle_error_info_log(ae_dev);
4442 		hclge_handle_mac_tnl(hdev);
4443 	}
4444 
4445 	hclge_handle_err_reset_request(hdev);
4446 }
4447 
4448 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4449 {
4450 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4451 	struct device *dev = &hdev->pdev->dev;
4452 	u32 msix_sts_reg;
4453 
4454 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4455 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4456 		if (hclge_handle_hw_msix_error
4457 				(hdev, &hdev->default_reset_request))
4458 			dev_info(dev, "received msix interrupt 0x%x\n",
4459 				 msix_sts_reg);
4460 	}
4461 
4462 	hclge_handle_hw_ras_error(ae_dev);
4463 
4464 	hclge_handle_err_reset_request(hdev);
4465 }
4466 
4467 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4468 {
4469 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4470 		return;
4471 
4472 	if (hnae3_dev_ras_imp_supported(hdev))
4473 		hclge_handle_err_recovery(hdev);
4474 	else
4475 		hclge_misc_err_recovery(hdev);
4476 }
4477 
4478 static void hclge_reset_service_task(struct hclge_dev *hdev)
4479 {
4480 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4481 		return;
4482 
4483 	down(&hdev->reset_sem);
4484 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4485 
4486 	hclge_reset_subtask(hdev);
4487 
4488 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4489 	up(&hdev->reset_sem);
4490 }
4491 
4492 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4493 {
4494 	int i;
4495 
4496 	/* start from vport 1, since vport 0 (the PF) is always alive */
4497 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4498 		struct hclge_vport *vport = &hdev->vport[i];
4499 
4500 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4501 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4502 
4503 		/* If the VF is not alive, reset its mps to the default value */
4504 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4505 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4506 	}
4507 }
4508 
4509 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4510 {
4511 	unsigned long delta = round_jiffies_relative(HZ);
4512 
4513 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4514 		return;
4515 
4516 	/* Always handle the link updating to make sure link state is
4517 	 * updated when it is triggered by mbx.
4518 	 */
4519 	hclge_update_link_status(hdev);
4520 	hclge_sync_mac_table(hdev);
4521 	hclge_sync_promisc_mode(hdev);
4522 	hclge_sync_fd_table(hdev);
4523 
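	/* if the previous service pass ran less than a second ago, skip the
	 * remaining periodic work and just reschedule for the time left
	 */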
4524 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4525 		delta = jiffies - hdev->last_serv_processed;
4526 
4527 		if (delta < round_jiffies_relative(HZ)) {
4528 			delta = round_jiffies_relative(HZ) - delta;
4529 			goto out;
4530 		}
4531 	}
4532 
4533 	hdev->serv_processed_cnt++;
4534 	hclge_update_vport_alive(hdev);
4535 
4536 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4537 		hdev->last_serv_processed = jiffies;
4538 		goto out;
4539 	}
4540 
4541 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4542 		hclge_update_stats_for_all(hdev);
4543 
4544 	hclge_update_port_info(hdev);
4545 	hclge_sync_vlan_filter(hdev);
4546 
4547 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4548 		hclge_rfs_filter_expire(hdev);
4549 
4550 	hdev->last_serv_processed = jiffies;
4551 
4552 out:
4553 	hclge_task_schedule(hdev, delta);
4554 }
4555 
4556 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4557 {
4558 	unsigned long flags;
4559 
4560 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4561 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4562 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4563 		return;
4564 
4565 	/* to prevent racing with the irq handler */
4566 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4567 
4568 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4569 	 * handler may handle it just before spin_lock_irqsave().
4570 	 */
4571 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4572 		hclge_ptp_clean_tx_hwts(hdev);
4573 
4574 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4575 }
4576 
4577 static void hclge_service_task(struct work_struct *work)
4578 {
4579 	struct hclge_dev *hdev =
4580 		container_of(work, struct hclge_dev, service_task.work);
4581 
4582 	hclge_errhand_service_task(hdev);
4583 	hclge_reset_service_task(hdev);
4584 	hclge_ptp_service_task(hdev);
4585 	hclge_mailbox_service_task(hdev);
4586 	hclge_periodic_service_task(hdev);
4587 
4588 	/* Handle error recovery, reset and mbx again in case periodical task
4589 	 * delays the handling by calling hclge_task_schedule() in
4590 	 * hclge_periodic_service_task().
4591 	 */
4592 	hclge_errhand_service_task(hdev);
4593 	hclge_reset_service_task(hdev);
4594 	hclge_mailbox_service_task(hdev);
4595 }
4596 
4597 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4598 {
4599 	/* VF handle has no client */
4600 	if (!handle->client)
4601 		return container_of(handle, struct hclge_vport, nic);
4602 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4603 		return container_of(handle, struct hclge_vport, roce);
4604 	else
4605 		return container_of(handle, struct hclge_vport, nic);
4606 }
4607 
4608 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4609 				  struct hnae3_vector_info *vector_info)
4610 {
4611 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4612 
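	/* vectors whose (idx - 1) is below the limit above live in the normal
	 * register region, higher ones in the extended region. For example,
	 * idx = 66 gives (idx - 1) = 65, which maps to io_base +
	 * HCLGE_VECTOR_EXT_REG_BASE + 1 * HCLGE_VECTOR_REG_OFFSET_H +
	 * 1 * HCLGE_VECTOR_REG_OFFSET.
	 */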
4613 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4614 
4615 	/* need an extended offset to configure vectors >= 64 */
4616 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4617 		vector_info->io_addr = hdev->hw.io_base +
4618 				HCLGE_VECTOR_REG_BASE +
4619 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4620 	else
4621 		vector_info->io_addr = hdev->hw.io_base +
4622 				HCLGE_VECTOR_EXT_REG_BASE +
4623 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4624 				HCLGE_VECTOR_REG_OFFSET_H +
4625 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4626 				HCLGE_VECTOR_REG_OFFSET;
4627 
4628 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4629 	hdev->vector_irq[idx] = vector_info->vector;
4630 }
4631 
4632 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4633 			    struct hnae3_vector_info *vector_info)
4634 {
4635 	struct hclge_vport *vport = hclge_get_vport(handle);
4636 	struct hnae3_vector_info *vector = vector_info;
4637 	struct hclge_dev *hdev = vport->back;
4638 	int alloc = 0;
4639 	u16 i = 0;
4640 	u16 j;
4641 
4642 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4643 	vector_num = min(hdev->num_msi_left, vector_num);
4644 
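	/* the search starts at index 1: vector 0 is used by the misc
	 * (vector0) interrupt
	 */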
4645 	for (j = 0; j < vector_num; j++) {
4646 		while (++i < hdev->num_nic_msi) {
4647 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4648 				hclge_get_vector_info(hdev, i, vector);
4649 				vector++;
4650 				alloc++;
4651 
4652 				break;
4653 			}
4654 		}
4655 	}
4656 	hdev->num_msi_left -= alloc;
4657 	hdev->num_msi_used += alloc;
4658 
4659 	return alloc;
4660 }
4661 
4662 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4663 {
4664 	int i;
4665 
4666 	for (i = 0; i < hdev->num_msi; i++)
4667 		if (vector == hdev->vector_irq[i])
4668 			return i;
4669 
4670 	return -EINVAL;
4671 }
4672 
4673 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4674 {
4675 	struct hclge_vport *vport = hclge_get_vport(handle);
4676 	struct hclge_dev *hdev = vport->back;
4677 	int vector_id;
4678 
4679 	vector_id = hclge_get_vector_index(hdev, vector);
4680 	if (vector_id < 0) {
4681 		dev_err(&hdev->pdev->dev,
4682 			"Get vector index fail. vector = %d\n", vector);
4683 		return vector_id;
4684 	}
4685 
4686 	hclge_free_vector(hdev, vector_id);
4687 
4688 	return 0;
4689 }
4690 
4691 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4692 {
4693 	return HCLGE_RSS_KEY_SIZE;
4694 }
4695 
4696 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4697 				  const u8 hfunc, const u8 *key)
4698 {
4699 	struct hclge_rss_config_cmd *req;
4700 	unsigned int key_offset = 0;
4701 	struct hclge_desc desc;
4702 	int key_counts;
4703 	int key_size;
4704 	int ret;
4705 
4706 	key_counts = HCLGE_RSS_KEY_SIZE;
4707 	req = (struct hclge_rss_config_cmd *)desc.data;
4708 
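	/* the key is programmed in chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
	 * one descriptor per chunk, with the chunk index carried in the
	 * hash_config field
	 */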
4709 	while (key_counts) {
4710 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4711 					   false);
4712 
4713 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4714 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4715 
4716 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4717 		memcpy(req->hash_key,
4718 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4719 
4720 		key_counts -= key_size;
4721 		key_offset++;
4722 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4723 		if (ret) {
4724 			dev_err(&hdev->pdev->dev,
4725 				"Configure RSS config fail, status = %d\n",
4726 				ret);
4727 			return ret;
4728 		}
4729 	}
4730 	return 0;
4731 }
4732 
4733 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4734 {
4735 	struct hclge_rss_indirection_table_cmd *req;
4736 	struct hclge_desc desc;
4737 	int rss_cfg_tbl_num;
4738 	u8 rss_msb_oft;
4739 	u8 rss_msb_val;
4740 	int ret;
4741 	u16 qid;
4742 	int i;
4743 	u32 j;
4744 
4745 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4746 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4747 			  HCLGE_RSS_CFG_TBL_SIZE;
4748 
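	/* each descriptor carries HCLGE_RSS_CFG_TBL_SIZE entries; the low byte
	 * of each queue id goes into rss_qid_l and the remaining high bit is
	 * packed into rss_qid_h
	 */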
4749 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4750 		hclge_cmd_setup_basic_desc
4751 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4752 
4753 		req->start_table_index =
4754 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4755 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4756 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4757 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4758 			req->rss_qid_l[j] = qid & 0xff;
4759 			rss_msb_oft =
4760 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4761 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4762 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4763 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4764 		}
4765 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4766 		if (ret) {
4767 			dev_err(&hdev->pdev->dev,
4768 				"Configure rss indir table fail, status = %d\n",
4769 				ret);
4770 			return ret;
4771 		}
4772 	}
4773 	return 0;
4774 }
4775 
4776 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4777 				 u16 *tc_size, u16 *tc_offset)
4778 {
4779 	struct hclge_rss_tc_mode_cmd *req;
4780 	struct hclge_desc desc;
4781 	int ret;
4782 	int i;
4783 
4784 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4785 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4786 
4787 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4788 		u16 mode = 0;
4789 
4790 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4791 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4792 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4793 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4794 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4795 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4796 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4797 
4798 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4799 	}
4800 
4801 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4802 	if (ret)
4803 		dev_err(&hdev->pdev->dev,
4804 			"Configure rss tc mode fail, status = %d\n", ret);
4805 
4806 	return ret;
4807 }
4808 
4809 static void hclge_get_rss_type(struct hclge_vport *vport)
4810 {
4811 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4812 	    vport->rss_tuple_sets.ipv4_udp_en ||
4813 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4814 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4815 	    vport->rss_tuple_sets.ipv6_udp_en ||
4816 	    vport->rss_tuple_sets.ipv6_sctp_en)
4817 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4818 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4819 		 vport->rss_tuple_sets.ipv6_fragment_en)
4820 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4821 	else
4822 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4823 }
4824 
4825 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4826 {
4827 	struct hclge_rss_input_tuple_cmd *req;
4828 	struct hclge_desc desc;
4829 	int ret;
4830 
4831 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4832 
4833 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4834 
4835 	/* Get the tuple cfg from pf */
4836 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4837 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4838 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4839 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4840 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4841 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4842 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4843 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4844 	hclge_get_rss_type(&hdev->vport[0]);
4845 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4846 	if (ret)
4847 		dev_err(&hdev->pdev->dev,
4848 			"Configure rss input fail, status = %d\n", ret);
4849 	return ret;
4850 }
4851 
4852 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4853 			 u8 *key, u8 *hfunc)
4854 {
4855 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4856 	struct hclge_vport *vport = hclge_get_vport(handle);
4857 	int i;
4858 
4859 	/* Get hash algorithm */
4860 	if (hfunc) {
4861 		switch (vport->rss_algo) {
4862 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4863 			*hfunc = ETH_RSS_HASH_TOP;
4864 			break;
4865 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4866 			*hfunc = ETH_RSS_HASH_XOR;
4867 			break;
4868 		default:
4869 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4870 			break;
4871 		}
4872 	}
4873 
4874 	/* Get the RSS Key required by the user */
4875 	if (key)
4876 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4877 
4878 	/* Get indirect table */
4879 	if (indir)
4880 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4881 			indir[i] =  vport->rss_indirection_tbl[i];
4882 
4883 	return 0;
4884 }
4885 
4886 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4887 				 u8 *hash_algo)
4888 {
4889 	switch (hfunc) {
4890 	case ETH_RSS_HASH_TOP:
4891 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4892 		return 0;
4893 	case ETH_RSS_HASH_XOR:
4894 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4895 		return 0;
4896 	case ETH_RSS_HASH_NO_CHANGE:
4897 		*hash_algo = vport->rss_algo;
4898 		return 0;
4899 	default:
4900 		return -EINVAL;
4901 	}
4902 }
4903 
4904 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4905 			 const  u8 *key, const  u8 hfunc)
4906 {
4907 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4908 	struct hclge_vport *vport = hclge_get_vport(handle);
4909 	struct hclge_dev *hdev = vport->back;
4910 	u8 hash_algo;
4911 	int ret, i;
4912 
4913 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4914 	if (ret) {
4915 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4916 		return ret;
4917 	}
4918 
4919 	/* Set the RSS Hash Key if specified by the user */
4920 	if (key) {
4921 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4922 		if (ret)
4923 			return ret;
4924 
4925 		/* Update the shadow RSS key with the user specified key */
4926 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4927 	} else {
4928 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4929 					     vport->rss_hash_key);
4930 		if (ret)
4931 			return ret;
4932 	}
4933 	vport->rss_algo = hash_algo;
4934 
4935 	/* Update the shadow RSS table with user specified qids */
4936 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4937 		vport->rss_indirection_tbl[i] = indir[i];
4938 
4939 	/* Update the hardware */
4940 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4941 }
4942 
4943 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4944 {
4945 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4946 
4947 	if (nfc->data & RXH_L4_B_2_3)
4948 		hash_sets |= HCLGE_D_PORT_BIT;
4949 	else
4950 		hash_sets &= ~HCLGE_D_PORT_BIT;
4951 
4952 	if (nfc->data & RXH_IP_SRC)
4953 		hash_sets |= HCLGE_S_IP_BIT;
4954 	else
4955 		hash_sets &= ~HCLGE_S_IP_BIT;
4956 
4957 	if (nfc->data & RXH_IP_DST)
4958 		hash_sets |= HCLGE_D_IP_BIT;
4959 	else
4960 		hash_sets &= ~HCLGE_D_IP_BIT;
4961 
4962 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4963 		hash_sets |= HCLGE_V_TAG_BIT;
4964 
4965 	return hash_sets;
4966 }
4967 
4968 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4969 				    struct ethtool_rxnfc *nfc,
4970 				    struct hclge_rss_input_tuple_cmd *req)
4971 {
4972 	struct hclge_dev *hdev = vport->back;
4973 	u8 tuple_sets;
4974 
4975 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4976 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4977 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4978 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4979 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4980 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4981 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4982 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4983 
4984 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4985 	switch (nfc->flow_type) {
4986 	case TCP_V4_FLOW:
4987 		req->ipv4_tcp_en = tuple_sets;
4988 		break;
4989 	case TCP_V6_FLOW:
4990 		req->ipv6_tcp_en = tuple_sets;
4991 		break;
4992 	case UDP_V4_FLOW:
4993 		req->ipv4_udp_en = tuple_sets;
4994 		break;
4995 	case UDP_V6_FLOW:
4996 		req->ipv6_udp_en = tuple_sets;
4997 		break;
4998 	case SCTP_V4_FLOW:
4999 		req->ipv4_sctp_en = tuple_sets;
5000 		break;
5001 	case SCTP_V6_FLOW:
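		/* devices of version V2 or earlier apparently cannot hash
		 * IPv6 SCTP flows on L4 ports, so reject such a request here
		 */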
5002 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
5003 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
5004 			return -EINVAL;
5005 
5006 		req->ipv6_sctp_en = tuple_sets;
5007 		break;
5008 	case IPV4_FLOW:
5009 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5010 		break;
5011 	case IPV6_FLOW:
5012 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5013 		break;
5014 	default:
5015 		return -EINVAL;
5016 	}
5017 
5018 	return 0;
5019 }
5020 
5021 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
5022 			       struct ethtool_rxnfc *nfc)
5023 {
5024 	struct hclge_vport *vport = hclge_get_vport(handle);
5025 	struct hclge_dev *hdev = vport->back;
5026 	struct hclge_rss_input_tuple_cmd *req;
5027 	struct hclge_desc desc;
5028 	int ret;
5029 
5030 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
5031 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
5032 		return -EINVAL;
5033 
5034 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
5035 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
5036 
5037 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
5038 	if (ret) {
5039 		dev_err(&hdev->pdev->dev,
5040 			"failed to init rss tuple cmd, ret = %d\n", ret);
5041 		return ret;
5042 	}
5043 
5044 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5045 	if (ret) {
5046 		dev_err(&hdev->pdev->dev,
5047 			"Set rss tuple fail, status = %d\n", ret);
5048 		return ret;
5049 	}
5050 
5051 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
5052 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
5053 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
5054 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
5055 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
5056 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
5057 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
5058 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
5059 	hclge_get_rss_type(vport);
5060 	return 0;
5061 }
5062 
5063 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
5064 				     u8 *tuple_sets)
5065 {
5066 	switch (flow_type) {
5067 	case TCP_V4_FLOW:
5068 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
5069 		break;
5070 	case UDP_V4_FLOW:
5071 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
5072 		break;
5073 	case TCP_V6_FLOW:
5074 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
5075 		break;
5076 	case UDP_V6_FLOW:
5077 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
5078 		break;
5079 	case SCTP_V4_FLOW:
5080 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
5081 		break;
5082 	case SCTP_V6_FLOW:
5083 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
5084 		break;
5085 	case IPV4_FLOW:
5086 	case IPV6_FLOW:
5087 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
5088 		break;
5089 	default:
5090 		return -EINVAL;
5091 	}
5092 
5093 	return 0;
5094 }
5095 
5096 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
5097 {
5098 	u64 tuple_data = 0;
5099 
5100 	if (tuple_sets & HCLGE_D_PORT_BIT)
5101 		tuple_data |= RXH_L4_B_2_3;
5102 	if (tuple_sets & HCLGE_S_PORT_BIT)
5103 		tuple_data |= RXH_L4_B_0_1;
5104 	if (tuple_sets & HCLGE_D_IP_BIT)
5105 		tuple_data |= RXH_IP_DST;
5106 	if (tuple_sets & HCLGE_S_IP_BIT)
5107 		tuple_data |= RXH_IP_SRC;
5108 
5109 	return tuple_data;
5110 }
5111 
5112 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
5113 			       struct ethtool_rxnfc *nfc)
5114 {
5115 	struct hclge_vport *vport = hclge_get_vport(handle);
5116 	u8 tuple_sets;
5117 	int ret;
5118 
5119 	nfc->data = 0;
5120 
5121 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
5122 	if (ret || !tuple_sets)
5123 		return ret;
5124 
5125 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
5126 
5127 	return 0;
5128 }
5129 
5130 static int hclge_get_tc_size(struct hnae3_handle *handle)
5131 {
5132 	struct hclge_vport *vport = hclge_get_vport(handle);
5133 	struct hclge_dev *hdev = vport->back;
5134 
5135 	return hdev->pf_rss_size_max;
5136 }
5137 
5138 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5139 {
5140 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5141 	struct hclge_vport *vport = hdev->vport;
5142 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5143 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5144 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5145 	struct hnae3_tc_info *tc_info;
5146 	u16 roundup_size;
5147 	u16 rss_size;
5148 	int i;
5149 
5150 	tc_info = &vport->nic.kinfo.tc_info;
5151 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5152 		rss_size = tc_info->tqp_count[i];
5153 		tc_valid[i] = 0;
5154 
5155 		if (!(hdev->hw_tc_map & BIT(i)))
5156 			continue;
5157 
5158 		/* The tc_size written to hardware is the log2 of the roundup
5159 		 * power of two of rss_size; the actual queue size is limited
5160 		 * by the indirection table.
5161 		 */
5162 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5163 		    rss_size == 0) {
5164 			dev_err(&hdev->pdev->dev,
5165 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
5166 				rss_size);
5167 			return -EINVAL;
5168 		}
5169 
5170 		roundup_size = roundup_pow_of_two(rss_size);
5171 		roundup_size = ilog2(roundup_size);
5172 
5173 		tc_valid[i] = 1;
5174 		tc_size[i] = roundup_size;
5175 		tc_offset[i] = tc_info->tqp_offset[i];
5176 	}
5177 
5178 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5179 }
5180 
5181 int hclge_rss_init_hw(struct hclge_dev *hdev)
5182 {
5183 	struct hclge_vport *vport = hdev->vport;
5184 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5185 	u8 *key = vport[0].rss_hash_key;
5186 	u8 hfunc = vport[0].rss_algo;
5187 	int ret;
5188 
5189 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5190 	if (ret)
5191 		return ret;
5192 
5193 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5194 	if (ret)
5195 		return ret;
5196 
5197 	ret = hclge_set_rss_input_tuple(hdev);
5198 	if (ret)
5199 		return ret;
5200 
5201 	return hclge_init_rss_tc_mode(hdev);
5202 }
5203 
5204 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5205 {
5206 	struct hclge_vport *vport = &hdev->vport[0];
5207 	int i;
5208 
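	/* fill the indirection table in a round-robin manner so that the
	 * queues of the vport are spread evenly across the table
	 */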
5209 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5210 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5211 }
5212 
5213 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5214 {
5215 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5216 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5217 	struct hclge_vport *vport = &hdev->vport[0];
5218 	u16 *rss_ind_tbl;
5219 
5220 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5221 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5222 
5223 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5224 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5225 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5226 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5227 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5228 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
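	/* devices of version V2 or earlier use the no-port sctp tuple for
	 * ipv6 (HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT)
	 */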
5229 	vport->rss_tuple_sets.ipv6_sctp_en =
5230 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5231 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5232 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5233 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5234 
5235 	vport->rss_algo = rss_algo;
5236 
5237 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5238 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5239 	if (!rss_ind_tbl)
5240 		return -ENOMEM;
5241 
5242 	vport->rss_indirection_tbl = rss_ind_tbl;
5243 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5244 
5245 	hclge_rss_indir_init_cfg(hdev);
5246 
5247 	return 0;
5248 }
5249 
5250 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5251 				int vector_id, bool en,
5252 				struct hnae3_ring_chain_node *ring_chain)
5253 {
5254 	struct hclge_dev *hdev = vport->back;
5255 	struct hnae3_ring_chain_node *node;
5256 	struct hclge_desc desc;
5257 	struct hclge_ctrl_vector_chain_cmd *req =
5258 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5259 	enum hclge_cmd_status status;
5260 	enum hclge_opcode_type op;
5261 	u16 tqp_type_and_id;
5262 	int i;
5263 
5264 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5265 	hclge_cmd_setup_basic_desc(&desc, op, false);
5266 	req->int_vector_id_l = hnae3_get_field(vector_id,
5267 					       HCLGE_VECTOR_ID_L_M,
5268 					       HCLGE_VECTOR_ID_L_S);
5269 	req->int_vector_id_h = hnae3_get_field(vector_id,
5270 					       HCLGE_VECTOR_ID_H_M,
5271 					       HCLGE_VECTOR_ID_H_S);
5272 
5273 	i = 0;
5274 	for (node = ring_chain; node; node = node->next) {
5275 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5276 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5277 				HCLGE_INT_TYPE_S,
5278 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5279 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5280 				HCLGE_TQP_ID_S, node->tqp_index);
5281 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5282 				HCLGE_INT_GL_IDX_S,
5283 				hnae3_get_field(node->int_gl_idx,
5284 						HNAE3_RING_GL_IDX_M,
5285 						HNAE3_RING_GL_IDX_S));
5286 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
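		/* one descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD
		 * ring entries, so send it and start a new descriptor for the
		 * remaining rings once it is full
		 */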
5287 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5288 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5289 			req->vfid = vport->vport_id;
5290 
5291 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5292 			if (status) {
5293 				dev_err(&hdev->pdev->dev,
5294 					"Map TQP fail, status is %d.\n",
5295 					status);
5296 				return -EIO;
5297 			}
5298 			i = 0;
5299 
5300 			hclge_cmd_setup_basic_desc(&desc,
5301 						   op,
5302 						   false);
5303 			req->int_vector_id_l =
5304 				hnae3_get_field(vector_id,
5305 						HCLGE_VECTOR_ID_L_M,
5306 						HCLGE_VECTOR_ID_L_S);
5307 			req->int_vector_id_h =
5308 				hnae3_get_field(vector_id,
5309 						HCLGE_VECTOR_ID_H_M,
5310 						HCLGE_VECTOR_ID_H_S);
5311 		}
5312 	}
5313 
5314 	if (i > 0) {
5315 		req->int_cause_num = i;
5316 		req->vfid = vport->vport_id;
5317 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5318 		if (status) {
5319 			dev_err(&hdev->pdev->dev,
5320 				"Map TQP fail, status is %d.\n", status);
5321 			return -EIO;
5322 		}
5323 	}
5324 
5325 	return 0;
5326 }
5327 
5328 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5329 				    struct hnae3_ring_chain_node *ring_chain)
5330 {
5331 	struct hclge_vport *vport = hclge_get_vport(handle);
5332 	struct hclge_dev *hdev = vport->back;
5333 	int vector_id;
5334 
5335 	vector_id = hclge_get_vector_index(hdev, vector);
5336 	if (vector_id < 0) {
5337 		dev_err(&hdev->pdev->dev,
5338 			"failed to get vector index. vector=%d\n", vector);
5339 		return vector_id;
5340 	}
5341 
5342 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5343 }
5344 
5345 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5346 				       struct hnae3_ring_chain_node *ring_chain)
5347 {
5348 	struct hclge_vport *vport = hclge_get_vport(handle);
5349 	struct hclge_dev *hdev = vport->back;
5350 	int vector_id, ret;
5351 
5352 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5353 		return 0;
5354 
5355 	vector_id = hclge_get_vector_index(hdev, vector);
5356 	if (vector_id < 0) {
5357 		dev_err(&handle->pdev->dev,
5358 			"Get vector index fail, ret = %d\n", vector_id);
5359 		return vector_id;
5360 	}
5361 
5362 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5363 	if (ret)
5364 		dev_err(&handle->pdev->dev,
5365 			"Unmap ring from vector fail, vector_id = %d, ret = %d\n",
5366 			vector_id, ret);
5367 
5368 	return ret;
5369 }
5370 
5371 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5372 				      bool en_uc, bool en_mc, bool en_bc)
5373 {
5374 	struct hclge_vport *vport = &hdev->vport[vf_id];
5375 	struct hnae3_handle *handle = &vport->nic;
5376 	struct hclge_promisc_cfg_cmd *req;
5377 	struct hclge_desc desc;
5378 	bool uc_tx_en = en_uc;
5379 	u8 promisc_cfg = 0;
5380 	int ret;
5381 
5382 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5383 
5384 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5385 	req->vf_id = vf_id;
5386 
5387 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5388 		uc_tx_en = false;
5389 
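	/* the extended promisc field controls rx and tx separately for
	 * unicast, multicast and broadcast packets
	 */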
5390 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5391 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5392 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5393 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5394 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5395 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5396 	req->extend_promisc = promisc_cfg;
5397 
5398 	/* to be compatible with DEVICE_VERSION_V1/2 */
5399 	promisc_cfg = 0;
5400 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5401 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5402 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5403 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5404 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5405 	req->promisc = promisc_cfg;
5406 
5407 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5408 	if (ret)
5409 		dev_err(&hdev->pdev->dev,
5410 			"failed to set vport %u promisc mode, ret = %d.\n",
5411 			vf_id, ret);
5412 
5413 	return ret;
5414 }
5415 
5416 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5417 				 bool en_mc_pmc, bool en_bc_pmc)
5418 {
5419 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5420 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5421 }
5422 
5423 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5424 				  bool en_mc_pmc)
5425 {
5426 	struct hclge_vport *vport = hclge_get_vport(handle);
5427 	struct hclge_dev *hdev = vport->back;
5428 	bool en_bc_pmc = true;
5429 
5430 	/* For devices whose version is below V2, the vlan filter is always
5431 	 * bypassed when broadcast promisc is enabled. So broadcast promisc
5432 	 * should be disabled until the user enables promisc mode.
5433 	 */
5434 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5435 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5436 
5437 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5438 					    en_bc_pmc);
5439 }
5440 
5441 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5442 {
5443 	struct hclge_vport *vport = hclge_get_vport(handle);
5444 
5445 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5446 }
5447 
5448 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5449 {
5450 	if (hlist_empty(&hdev->fd_rule_list))
5451 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5452 }
5453 
5454 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5455 {
5456 	if (!test_bit(location, hdev->fd_bmap)) {
5457 		set_bit(location, hdev->fd_bmap);
5458 		hdev->hclge_fd_rule_num++;
5459 	}
5460 }
5461 
5462 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5463 {
5464 	if (test_bit(location, hdev->fd_bmap)) {
5465 		clear_bit(location, hdev->fd_bmap);
5466 		hdev->hclge_fd_rule_num--;
5467 	}
5468 }
5469 
5470 static void hclge_fd_free_node(struct hclge_dev *hdev,
5471 			       struct hclge_fd_rule *rule)
5472 {
5473 	hlist_del(&rule->rule_node);
5474 	kfree(rule);
5475 	hclge_sync_fd_state(hdev);
5476 }
5477 
5478 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5479 				      struct hclge_fd_rule *old_rule,
5480 				      struct hclge_fd_rule *new_rule,
5481 				      enum HCLGE_FD_NODE_STATE state)
5482 {
5483 	switch (state) {
5484 	case HCLGE_FD_TO_ADD:
5485 	case HCLGE_FD_ACTIVE:
5486 		/* 1) if the new state is TO_ADD, just replace the old rule
5487 		 * at the same location, no matter what its state is, because
5488 		 * the new rule will be configured to the hardware.
5489 		 * 2) if the new state is ACTIVE, it means the new rule has
5490 		 * already been configured to the hardware, so just replace
5491 		 * the old rule node at the same location.
5492 		 * 3) neither case adds a new node to the list, so it's
5493 		 * unnecessary to update the rule number and fd_bmap.
5494 		 */
5495 		new_rule->rule_node.next = old_rule->rule_node.next;
5496 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5497 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5498 		kfree(new_rule);
5499 		break;
5500 	case HCLGE_FD_DELETED:
5501 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5502 		hclge_fd_free_node(hdev, old_rule);
5503 		break;
5504 	case HCLGE_FD_TO_DEL:
5505 		/* if the new request is TO_DEL and the old rule exists:
5506 		 * 1) if the state of the old rule is TO_DEL, do nothing,
5507 		 * because rules are deleted by location and the other rule
5508 		 * content is irrelevant.
5509 		 * 2) if the state of the old rule is ACTIVE, change its state
5510 		 * to TO_DEL, so the rule will be deleted when the periodic
5511 		 * task is scheduled.
5512 		 * 3) if the state of the old rule is TO_ADD, the rule hasn't
5513 		 * been added to hardware yet, so just delete the rule node
5514 		 * from fd_rule_list directly.
5515 		 */
5516 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5517 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5518 			hclge_fd_free_node(hdev, old_rule);
5519 			return;
5520 		}
5521 		old_rule->state = HCLGE_FD_TO_DEL;
5522 		break;
5523 	}
5524 }
5525 
5526 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5527 						u16 location,
5528 						struct hclge_fd_rule **parent)
5529 {
5530 	struct hclge_fd_rule *rule;
5531 	struct hlist_node *node;
5532 
5533 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5534 		if (rule->location == location)
5535 			return rule;
5536 		else if (rule->location > location)
5537 			return NULL;
5538 		/* record the parent node, used to keep the nodes in
5539 		 * fd_rule_list in ascending order.
5540 		 */
5541 		*parent = rule;
5542 	}
5543 
5544 	return NULL;
5545 }
5546 
5547 /* insert fd rule node in ascending order according to rule->location */
5548 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5549 				      struct hclge_fd_rule *rule,
5550 				      struct hclge_fd_rule *parent)
5551 {
5552 	INIT_HLIST_NODE(&rule->rule_node);
5553 
5554 	if (parent)
5555 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5556 	else
5557 		hlist_add_head(&rule->rule_node, hlist);
5558 }
5559 
5560 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5561 				     struct hclge_fd_user_def_cfg *cfg)
5562 {
5563 	struct hclge_fd_user_def_cfg_cmd *req;
5564 	struct hclge_desc desc;
5565 	u16 data = 0;
5566 	int ret;
5567 
5568 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5569 
5570 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5571 
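	/* cfg[0]/cfg[1]/cfg[2] correspond to the L2/L3/L4 user-def layers and
	 * are written to the ol2/ol3/ol4 fields respectively
	 */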
5572 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5573 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5574 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5575 	req->ol2_cfg = cpu_to_le16(data);
5576 
5577 	data = 0;
5578 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5579 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5580 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5581 	req->ol3_cfg = cpu_to_le16(data);
5582 
5583 	data = 0;
5584 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5585 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5586 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5587 	req->ol4_cfg = cpu_to_le16(data);
5588 
5589 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5590 	if (ret)
5591 		dev_err(&hdev->pdev->dev,
5592 			"failed to set fd user def data, ret = %d\n", ret);
5593 	return ret;
5594 }
5595 
5596 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5597 {
5598 	int ret;
5599 
5600 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5601 		return;
5602 
5603 	if (!locked)
5604 		spin_lock_bh(&hdev->fd_rule_lock);
5605 
5606 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
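	/* if the command fails, restore the changed flag so the
	 * configuration will be retried later
	 */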
5607 	if (ret)
5608 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5609 
5610 	if (!locked)
5611 		spin_unlock_bh(&hdev->fd_rule_lock);
5612 }
5613 
5614 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5615 					  struct hclge_fd_rule *rule)
5616 {
5617 	struct hlist_head *hlist = &hdev->fd_rule_list;
5618 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5619 	struct hclge_fd_user_def_info *info, *old_info;
5620 	struct hclge_fd_user_def_cfg *cfg;
5621 
5622 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5623 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5624 		return 0;
5625 
5626 	/* valid layers start from 1, so subtract 1 to get the cfg index */
5627 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5628 	info = &rule->ep.user_def;
5629 
5630 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5631 		return 0;
5632 
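	/* the offset of a layer can only be changed when its single existing
	 * reference belongs to the rule being replaced at the same location,
	 * since that reference will be released
	 */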
5633 	if (cfg->ref_cnt > 1)
5634 		goto error;
5635 
5636 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5637 	if (fd_rule) {
5638 		old_info = &fd_rule->ep.user_def;
5639 		if (info->layer == old_info->layer)
5640 			return 0;
5641 	}
5642 
5643 error:
5644 	dev_err(&hdev->pdev->dev,
5645 		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5646 		info->layer + 1);
5647 	return -ENOSPC;
5648 }
5649 
5650 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5651 					 struct hclge_fd_rule *rule)
5652 {
5653 	struct hclge_fd_user_def_cfg *cfg;
5654 
5655 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5656 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5657 		return;
5658 
5659 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5660 	if (!cfg->ref_cnt) {
5661 		cfg->offset = rule->ep.user_def.offset;
5662 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5663 	}
5664 	cfg->ref_cnt++;
5665 }
5666 
5667 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5668 					 struct hclge_fd_rule *rule)
5669 {
5670 	struct hclge_fd_user_def_cfg *cfg;
5671 
5672 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5673 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5674 		return;
5675 
5676 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5677 	if (!cfg->ref_cnt)
5678 		return;
5679 
5680 	cfg->ref_cnt--;
5681 	if (!cfg->ref_cnt) {
5682 		cfg->offset = 0;
5683 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5684 	}
5685 }
5686 
5687 static void hclge_update_fd_list(struct hclge_dev *hdev,
5688 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5689 				 struct hclge_fd_rule *new_rule)
5690 {
5691 	struct hlist_head *hlist = &hdev->fd_rule_list;
5692 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5693 
5694 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5695 	if (fd_rule) {
5696 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5697 		if (state == HCLGE_FD_ACTIVE)
5698 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5699 		hclge_sync_fd_user_def_cfg(hdev, true);
5700 
5701 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5702 		return;
5703 	}
5704 
5705 	/* it's unlikely to fail here, because we have already checked
5706 	 * whether the rule exists.
5707 	 */
5708 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5709 		dev_warn(&hdev->pdev->dev,
5710 			 "failed to delete fd rule %u, it does not exist\n",
5711 			 location);
5712 		return;
5713 	}
5714 
5715 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5716 	hclge_sync_fd_user_def_cfg(hdev, true);
5717 
5718 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5719 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5720 
5721 	if (state == HCLGE_FD_TO_ADD) {
5722 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5723 		hclge_task_schedule(hdev, 0);
5724 	}
5725 }
5726 
5727 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5728 {
5729 	struct hclge_get_fd_mode_cmd *req;
5730 	struct hclge_desc desc;
5731 	int ret;
5732 
5733 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5734 
5735 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5736 
5737 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5738 	if (ret) {
5739 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5740 		return ret;
5741 	}
5742 
5743 	*fd_mode = req->mode;
5744 
5745 	return ret;
5746 }
5747 
5748 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5749 				   u32 *stage1_entry_num,
5750 				   u32 *stage2_entry_num,
5751 				   u16 *stage1_counter_num,
5752 				   u16 *stage2_counter_num)
5753 {
5754 	struct hclge_get_fd_allocation_cmd *req;
5755 	struct hclge_desc desc;
5756 	int ret;
5757 
5758 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5759 
5760 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5761 
5762 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5763 	if (ret) {
5764 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5765 			ret);
5766 		return ret;
5767 	}
5768 
5769 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5770 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5771 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5772 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5773 
5774 	return ret;
5775 }
5776 
5777 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5778 				   enum HCLGE_FD_STAGE stage_num)
5779 {
5780 	struct hclge_set_fd_key_config_cmd *req;
5781 	struct hclge_fd_key_cfg *stage;
5782 	struct hclge_desc desc;
5783 	int ret;
5784 
5785 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5786 
5787 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5788 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5789 	req->stage = stage_num;
5790 	req->key_select = stage->key_sel;
5791 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5792 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5793 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5794 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5795 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5796 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5797 
5798 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5799 	if (ret)
5800 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5801 
5802 	return ret;
5803 }
5804 
5805 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5806 {
5807 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5808 
5809 	spin_lock_bh(&hdev->fd_rule_lock);
5810 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5811 	spin_unlock_bh(&hdev->fd_rule_lock);
5812 
5813 	hclge_fd_set_user_def_cmd(hdev, cfg);
5814 }
5815 
5816 static int hclge_init_fd_config(struct hclge_dev *hdev)
5817 {
5818 #define LOW_2_WORDS		0x03
5819 	struct hclge_fd_key_cfg *key_cfg;
5820 	int ret;
5821 
5822 	if (!hnae3_dev_fd_supported(hdev))
5823 		return 0;
5824 
5825 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5826 	if (ret)
5827 		return ret;
5828 
5829 	switch (hdev->fd_cfg.fd_mode) {
5830 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5831 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5832 		break;
5833 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5834 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5835 		break;
5836 	default:
5837 		dev_err(&hdev->pdev->dev,
5838 			"Unsupported flow director mode %u\n",
5839 			hdev->fd_cfg.fd_mode);
5840 		return -EOPNOTSUPP;
5841 	}
5842 
5843 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5844 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5845 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5846 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5847 	key_cfg->outer_sipv6_word_en = 0;
5848 	key_cfg->outer_dipv6_word_en = 0;
5849 
5850 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5851 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5852 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5853 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5854 
5855 	/* with the max 400-bit key, ether type tuples are also supported */
5856 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5857 		key_cfg->tuple_active |=
5858 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5859 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5860 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5861 	}
5862 
5863 	/* roce_type is used to filter roce frames
5864 	 * dst_vport is used to specify the rule
5865 	 */
5866 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5867 
5868 	ret = hclge_get_fd_allocation(hdev,
5869 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5870 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5871 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5872 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5873 	if (ret)
5874 		return ret;
5875 
5876 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5877 }
5878 
5879 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5880 				int loc, u8 *key, bool is_add)
5881 {
5882 	struct hclge_fd_tcam_config_1_cmd *req1;
5883 	struct hclge_fd_tcam_config_2_cmd *req2;
5884 	struct hclge_fd_tcam_config_3_cmd *req3;
5885 	struct hclge_desc desc[3];
5886 	int ret;
5887 
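	/* a TCAM entry is written with three descriptors chained by the NEXT
	 * flag, the key bytes being split across them
	 */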
5888 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5889 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5890 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5891 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5892 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5893 
5894 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5895 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5896 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5897 
5898 	req1->stage = stage;
5899 	req1->xy_sel = sel_x ? 1 : 0;
5900 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5901 	req1->index = cpu_to_le32(loc);
5902 	req1->entry_vld = sel_x ? is_add : 0;
5903 
5904 	if (key) {
5905 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5906 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5907 		       sizeof(req2->tcam_data));
5908 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5909 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5910 	}
5911 
5912 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5913 	if (ret)
5914 		dev_err(&hdev->pdev->dev,
5915 			"config tcam key fail, ret=%d\n",
5916 			ret);
5917 
5918 	return ret;
5919 }
5920 
5921 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5922 			      struct hclge_fd_ad_data *action)
5923 {
5924 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5925 	struct hclge_fd_ad_config_cmd *req;
5926 	struct hclge_desc desc;
5927 	u64 ad_data = 0;
5928 	int ret;
5929 
5930 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5931 
5932 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5933 	req->index = cpu_to_le32(loc);
5934 	req->stage = stage;
5935 
5936 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5937 		      action->write_rule_id_to_bd);
5938 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5939 			action->rule_id);
5940 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5941 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5942 			      action->override_tc);
5943 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5944 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5945 	}
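	/* the rule id and tc fields above occupy the high 32 bits of ad_data,
	 * shift them up before filling the action fields in the low 32 bits
	 */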
5946 	ad_data <<= 32;
5947 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5948 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5949 		      action->forward_to_direct_queue);
5950 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5951 			action->queue_id);
5952 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5953 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5954 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5955 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5956 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5957 			action->counter_id);
5958 
5959 	req->ad_data = cpu_to_le64(ad_data);
5960 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5961 	if (ret)
5962 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5963 
5964 	return ret;
5965 }
5966 
5967 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5968 				   struct hclge_fd_rule *rule)
5969 {
5970 	int offset, moffset, ip_offset;
5971 	enum HCLGE_FD_KEY_OPT key_opt;
5972 	u16 tmp_x_s, tmp_y_s;
5973 	u32 tmp_x_l, tmp_y_l;
5974 	u8 *p = (u8 *)rule;
5975 	int i;
5976 
5977 	if (rule->unused_tuple & BIT(tuple_bit))
5978 		return true;
5979 
5980 	key_opt = tuple_key_info[tuple_bit].key_opt;
5981 	offset = tuple_key_info[tuple_bit].offset;
5982 	moffset = tuple_key_info[tuple_bit].moffset;
5983 
5984 	switch (key_opt) {
5985 	case KEY_OPT_U8:
5986 		calc_x(*key_x, p[offset], p[moffset]);
5987 		calc_y(*key_y, p[offset], p[moffset]);
5988 
5989 		return true;
5990 	case KEY_OPT_LE16:
5991 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5992 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5993 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5994 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5995 
5996 		return true;
5997 	case KEY_OPT_LE32:
5998 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5999 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
6000 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
6001 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
6002 
6003 		return true;
6004 	case KEY_OPT_MAC:
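		/* the mac address bytes are written to the key in reversed
		 * order
		 */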
6005 		for (i = 0; i < ETH_ALEN; i++) {
6006 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
6007 			       p[moffset + i]);
6008 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
6009 			       p[moffset + i]);
6010 		}
6011 
6012 		return true;
6013 	case KEY_OPT_IP:
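		/* the ip address is stored as a u32 array, use the word at
		 * IPV4_INDEX for the key
		 */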
6014 		ip_offset = IPV4_INDEX * sizeof(u32);
6015 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
6016 		       *(u32 *)(&p[moffset + ip_offset]));
6017 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
6018 		       *(u32 *)(&p[moffset + ip_offset]));
6019 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
6020 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
6021 
6022 		return true;
6023 	default:
6024 		return false;
6025 	}
6026 }
6027 
6028 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
6029 				 u8 vf_id, u8 network_port_id)
6030 {
6031 	u32 port_number = 0;
6032 
6033 	if (port_type == HOST_PORT) {
6034 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
6035 				pf_id);
6036 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
6037 				vf_id);
6038 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
6039 	} else {
6040 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
6041 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
6042 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
6043 	}
6044 
6045 	return port_number;
6046 }
6047 
6048 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
6049 				       __le32 *key_x, __le32 *key_y,
6050 				       struct hclge_fd_rule *rule)
6051 {
6052 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
6053 	u8 cur_pos = 0, tuple_size, shift_bits;
6054 	unsigned int i;
6055 
6056 	for (i = 0; i < MAX_META_DATA; i++) {
6057 		tuple_size = meta_data_key_info[i].key_length;
6058 		tuple_bit = key_cfg->meta_data_active & BIT(i);
6059 
6060 		switch (tuple_bit) {
6061 		case BIT(ROCE_TYPE):
6062 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
6063 			cur_pos += tuple_size;
6064 			break;
6065 		case BIT(DST_VPORT):
6066 			port_number = hclge_get_port_number(HOST_PORT, 0,
6067 							    rule->vf_id, 0);
6068 			hnae3_set_field(meta_data,
6069 					GENMASK(cur_pos + tuple_size, cur_pos),
6070 					cur_pos, port_number);
6071 			cur_pos += tuple_size;
6072 			break;
6073 		default:
6074 			break;
6075 		}
6076 	}
6077 
6078 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
6079 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
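	/* meta data occupies the most significant bits of the key word, so
	 * shift the valid bits up to the top
	 */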
6080 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
6081 
6082 	*key_x = cpu_to_le32(tmp_x << shift_bits);
6083 	*key_y = cpu_to_le32(tmp_y << shift_bits);
6084 }
6085 
6086 /* A complete key consists of a meta data key and a tuple key.
6087  * The meta data key is stored in the MSB region and the tuple key in the
6088  * LSB region; unused bits are filled with 0.
6089  */
6090 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
6091 			    struct hclge_fd_rule *rule)
6092 {
6093 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
6094 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
6095 	u8 *cur_key_x, *cur_key_y;
6096 	u8 meta_data_region;
6097 	u8 tuple_size;
6098 	int ret;
6099 	u32 i;
6100 
6101 	memset(key_x, 0, sizeof(key_x));
6102 	memset(key_y, 0, sizeof(key_y));
6103 	cur_key_x = key_x;
6104 	cur_key_y = key_y;
6105 
6106 	for (i = 0; i < MAX_TUPLE; i++) {
6107 		bool tuple_valid;
6108 
6109 		tuple_size = tuple_key_info[i].key_length / 8;
6110 		if (!(key_cfg->tuple_active & BIT(i)))
6111 			continue;
6112 
6113 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
6114 						     cur_key_y, rule);
6115 		if (tuple_valid) {
6116 			cur_key_x += tuple_size;
6117 			cur_key_y += tuple_size;
6118 		}
6119 	}
6120 
6121 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
6122 			MAX_META_DATA_LENGTH / 8;
6123 
6124 	hclge_fd_convert_meta_data(key_cfg,
6125 				   (__le32 *)(key_x + meta_data_region),
6126 				   (__le32 *)(key_y + meta_data_region),
6127 				   rule);
6128 
6129 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
6130 				   true);
6131 	if (ret) {
6132 		dev_err(&hdev->pdev->dev,
6133 			"fd key_y config fail, loc=%u, ret=%d\n",
6134 			rule->location, ret);
6135 		return ret;
6136 	}
6137 
6138 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6139 				   true);
6140 	if (ret)
6141 		dev_err(&hdev->pdev->dev,
6142 			"fd key_x config fail, loc=%u, ret=%d\n",
6143 			rule->location, ret);
6144 	return ret;
6145 }
6146 
6147 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6148 			       struct hclge_fd_rule *rule)
6149 {
6150 	struct hclge_vport *vport = hdev->vport;
6151 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6152 	struct hclge_fd_ad_data ad_data;
6153 
6154 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6155 	ad_data.ad_id = rule->location;
6156 
6157 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6158 		ad_data.drop_packet = true;
6159 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6160 		ad_data.override_tc = true;
6161 		ad_data.queue_id =
6162 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6163 		ad_data.tc_size =
6164 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6165 	} else {
6166 		ad_data.forward_to_direct_queue = true;
6167 		ad_data.queue_id = rule->queue_id;
6168 	}
6169 
6170 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6171 		ad_data.use_counter = true;
6172 		ad_data.counter_id = rule->vf_id %
6173 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6174 	} else {
6175 		ad_data.use_counter = false;
6176 		ad_data.counter_id = 0;
6177 	}
6178 
6179 	ad_data.use_next_stage = false;
6180 	ad_data.next_input_key = 0;
6181 
6182 	ad_data.write_rule_id_to_bd = true;
6183 	ad_data.rule_id = rule->location;
6184 
6185 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6186 }
6187 
6188 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6189 				       u32 *unused_tuple)
6190 {
6191 	if (!spec || !unused_tuple)
6192 		return -EINVAL;
6193 
6194 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6195 
6196 	if (!spec->ip4src)
6197 		*unused_tuple |= BIT(INNER_SRC_IP);
6198 
6199 	if (!spec->ip4dst)
6200 		*unused_tuple |= BIT(INNER_DST_IP);
6201 
6202 	if (!spec->psrc)
6203 		*unused_tuple |= BIT(INNER_SRC_PORT);
6204 
6205 	if (!spec->pdst)
6206 		*unused_tuple |= BIT(INNER_DST_PORT);
6207 
6208 	if (!spec->tos)
6209 		*unused_tuple |= BIT(INNER_IP_TOS);
6210 
6211 	return 0;
6212 }
6213 
6214 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6215 				    u32 *unused_tuple)
6216 {
6217 	if (!spec || !unused_tuple)
6218 		return -EINVAL;
6219 
6220 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6221 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6222 
6223 	if (!spec->ip4src)
6224 		*unused_tuple |= BIT(INNER_SRC_IP);
6225 
6226 	if (!spec->ip4dst)
6227 		*unused_tuple |= BIT(INNER_DST_IP);
6228 
6229 	if (!spec->tos)
6230 		*unused_tuple |= BIT(INNER_IP_TOS);
6231 
6232 	if (!spec->proto)
6233 		*unused_tuple |= BIT(INNER_IP_PROTO);
6234 
6235 	if (spec->l4_4_bytes)
6236 		return -EOPNOTSUPP;
6237 
6238 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6239 		return -EOPNOTSUPP;
6240 
6241 	return 0;
6242 }
6243 
6244 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6245 				       u32 *unused_tuple)
6246 {
6247 	if (!spec || !unused_tuple)
6248 		return -EINVAL;
6249 
6250 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6251 
6252 	/* check whether the src/dst ip address is used */
6253 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6254 		*unused_tuple |= BIT(INNER_SRC_IP);
6255 
6256 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6257 		*unused_tuple |= BIT(INNER_DST_IP);
6258 
6259 	if (!spec->psrc)
6260 		*unused_tuple |= BIT(INNER_SRC_PORT);
6261 
6262 	if (!spec->pdst)
6263 		*unused_tuple |= BIT(INNER_DST_PORT);
6264 
6265 	if (!spec->tclass)
6266 		*unused_tuple |= BIT(INNER_IP_TOS);
6267 
6268 	return 0;
6269 }
6270 
6271 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6272 				    u32 *unused_tuple)
6273 {
6274 	if (!spec || !unused_tuple)
6275 		return -EINVAL;
6276 
6277 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6278 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6279 
6280 	/* check whether the src/dst ip address is used */
6281 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6282 		*unused_tuple |= BIT(INNER_SRC_IP);
6283 
6284 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6285 		*unused_tuple |= BIT(INNER_DST_IP);
6286 
6287 	if (!spec->l4_proto)
6288 		*unused_tuple |= BIT(INNER_IP_PROTO);
6289 
6290 	if (!spec->tclass)
6291 		*unused_tuple |= BIT(INNER_IP_TOS);
6292 
6293 	if (spec->l4_4_bytes)
6294 		return -EOPNOTSUPP;
6295 
6296 	return 0;
6297 }
6298 
6299 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6300 {
6301 	if (!spec || !unused_tuple)
6302 		return -EINVAL;
6303 
6304 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6305 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6306 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6307 
6308 	if (is_zero_ether_addr(spec->h_source))
6309 		*unused_tuple |= BIT(INNER_SRC_MAC);
6310 
6311 	if (is_zero_ether_addr(spec->h_dest))
6312 		*unused_tuple |= BIT(INNER_DST_MAC);
6313 
6314 	if (!spec->h_proto)
6315 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6316 
6317 	return 0;
6318 }
6319 
6320 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6321 				    struct ethtool_rx_flow_spec *fs,
6322 				    u32 *unused_tuple)
6323 {
6324 	if (fs->flow_type & FLOW_EXT) {
6325 		if (fs->h_ext.vlan_etype) {
6326 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6327 			return -EOPNOTSUPP;
6328 		}
6329 
6330 		if (!fs->h_ext.vlan_tci)
6331 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6332 
6333 		if (fs->m_ext.vlan_tci &&
6334 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6335 			dev_err(&hdev->pdev->dev,
6336 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6337 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6338 			return -EINVAL;
6339 		}
6340 	} else {
6341 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6342 	}
6343 
6344 	if (fs->flow_type & FLOW_MAC_EXT) {
6345 		if (hdev->fd_cfg.fd_mode !=
6346 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6347 			dev_err(&hdev->pdev->dev,
6348 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6349 			return -EOPNOTSUPP;
6350 		}
6351 
6352 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6353 			*unused_tuple |= BIT(INNER_DST_MAC);
6354 		else
6355 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6356 	}
6357 
6358 	return 0;
6359 }
6360 
6361 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6362 				       struct hclge_fd_user_def_info *info)
6363 {
6364 	switch (flow_type) {
6365 	case ETHER_FLOW:
6366 		info->layer = HCLGE_FD_USER_DEF_L2;
6367 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6368 		break;
6369 	case IP_USER_FLOW:
6370 	case IPV6_USER_FLOW:
6371 		info->layer = HCLGE_FD_USER_DEF_L3;
6372 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6373 		break;
6374 	case TCP_V4_FLOW:
6375 	case UDP_V4_FLOW:
6376 	case TCP_V6_FLOW:
6377 	case UDP_V6_FLOW:
6378 		info->layer = HCLGE_FD_USER_DEF_L4;
6379 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6380 		break;
6381 	default:
6382 		return -EOPNOTSUPP;
6383 	}
6384 
6385 	return 0;
6386 }
6387 
6388 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6389 {
6390 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6391 }
6392 
6393 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6394 					 struct ethtool_rx_flow_spec *fs,
6395 					 u32 *unused_tuple,
6396 					 struct hclge_fd_user_def_info *info)
6397 {
6398 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6399 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6400 	u16 data, offset, data_mask, offset_mask;
6401 	int ret;
6402 
6403 	info->layer = HCLGE_FD_USER_DEF_NONE;
6404 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6405 
6406 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6407 		return 0;
6408 
6409 	/* the user-def data from ethtool is a 64-bit value; bits 0~15 are
6410 	 * used for data, and bits 32~47 are used for offset.
6411 	 */
6412 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6413 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6414 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6415 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6416 
6417 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6418 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6419 		return -EOPNOTSUPP;
6420 	}
6421 
6422 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6423 		dev_err(&hdev->pdev->dev,
6424 			"user-def offset[%u] should be no more than %u\n",
6425 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6426 		return -EINVAL;
6427 	}
6428 
6429 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6430 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6431 		return -EINVAL;
6432 	}
6433 
6434 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6435 	if (ret) {
6436 		dev_err(&hdev->pdev->dev,
6437 			"unsupported flow type for user-def bytes, ret = %d\n",
6438 			ret);
6439 		return ret;
6440 	}
6441 
6442 	info->data = data;
6443 	info->data_mask = data_mask;
6444 	info->offset = offset;
6445 
6446 	return 0;
6447 }
6448 
6449 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6450 			       struct ethtool_rx_flow_spec *fs,
6451 			       u32 *unused_tuple,
6452 			       struct hclge_fd_user_def_info *info)
6453 {
6454 	u32 flow_type;
6455 	int ret;
6456 
6457 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6458 		dev_err(&hdev->pdev->dev,
6459 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6460 			fs->location,
6461 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6462 		return -EINVAL;
6463 	}
6464 
6465 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6466 	if (ret)
6467 		return ret;
6468 
6469 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6470 	switch (flow_type) {
6471 	case SCTP_V4_FLOW:
6472 	case TCP_V4_FLOW:
6473 	case UDP_V4_FLOW:
6474 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6475 						  unused_tuple);
6476 		break;
6477 	case IP_USER_FLOW:
6478 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6479 					       unused_tuple);
6480 		break;
6481 	case SCTP_V6_FLOW:
6482 	case TCP_V6_FLOW:
6483 	case UDP_V6_FLOW:
6484 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6485 						  unused_tuple);
6486 		break;
6487 	case IPV6_USER_FLOW:
6488 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6489 					       unused_tuple);
6490 		break;
6491 	case ETHER_FLOW:
6492 		if (hdev->fd_cfg.fd_mode !=
6493 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6494 			dev_err(&hdev->pdev->dev,
6495 				"ETHER_FLOW is not supported in current fd mode!\n");
6496 			return -EOPNOTSUPP;
6497 		}
6498 
6499 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6500 						 unused_tuple);
6501 		break;
6502 	default:
6503 		dev_err(&hdev->pdev->dev,
6504 			"unsupported protocol type, protocol type = %#x\n",
6505 			flow_type);
6506 		return -EOPNOTSUPP;
6507 	}
6508 
6509 	if (ret) {
6510 		dev_err(&hdev->pdev->dev,
6511 			"failed to check flow union tuple, ret = %d\n",
6512 			ret);
6513 		return ret;
6514 	}
6515 
6516 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6517 }
6518 
6519 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6520 				      struct ethtool_rx_flow_spec *fs,
6521 				      struct hclge_fd_rule *rule, u8 ip_proto)
6522 {
6523 	rule->tuples.src_ip[IPV4_INDEX] =
6524 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6525 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6526 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6527 
6528 	rule->tuples.dst_ip[IPV4_INDEX] =
6529 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6530 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6531 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6532 
6533 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6534 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6535 
6536 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6537 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6538 
6539 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6540 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6541 
6542 	rule->tuples.ether_proto = ETH_P_IP;
6543 	rule->tuples_mask.ether_proto = 0xFFFF;
6544 
6545 	rule->tuples.ip_proto = ip_proto;
6546 	rule->tuples_mask.ip_proto = 0xFF;
6547 }
6548 
6549 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6550 				   struct ethtool_rx_flow_spec *fs,
6551 				   struct hclge_fd_rule *rule)
6552 {
6553 	rule->tuples.src_ip[IPV4_INDEX] =
6554 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6555 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6556 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6557 
6558 	rule->tuples.dst_ip[IPV4_INDEX] =
6559 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6560 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6561 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6562 
6563 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6564 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6565 
6566 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6567 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6568 
6569 	rule->tuples.ether_proto = ETH_P_IP;
6570 	rule->tuples_mask.ether_proto = 0xFFFF;
6571 }
6572 
6573 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6574 				      struct ethtool_rx_flow_spec *fs,
6575 				      struct hclge_fd_rule *rule, u8 ip_proto)
6576 {
6577 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6578 			  IPV6_SIZE);
6579 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6580 			  IPV6_SIZE);
6581 
6582 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6583 			  IPV6_SIZE);
6584 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6585 			  IPV6_SIZE);
6586 
6587 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6588 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6589 
6590 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6591 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6592 
6593 	rule->tuples.ether_proto = ETH_P_IPV6;
6594 	rule->tuples_mask.ether_proto = 0xFFFF;
6595 
6596 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6597 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6598 
6599 	rule->tuples.ip_proto = ip_proto;
6600 	rule->tuples_mask.ip_proto = 0xFF;
6601 }
6602 
6603 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6604 				   struct ethtool_rx_flow_spec *fs,
6605 				   struct hclge_fd_rule *rule)
6606 {
6607 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6608 			  IPV6_SIZE);
6609 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6610 			  IPV6_SIZE);
6611 
6612 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6613 			  IPV6_SIZE);
6614 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6615 			  IPV6_SIZE);
6616 
6617 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6618 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6619 
6620 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6621 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6622 
6623 	rule->tuples.ether_proto = ETH_P_IPV6;
6624 	rule->tuples_mask.ether_proto = 0xFFFF;
6625 }
6626 
6627 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6628 				     struct ethtool_rx_flow_spec *fs,
6629 				     struct hclge_fd_rule *rule)
6630 {
6631 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6632 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6633 
6634 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6635 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6636 
6637 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6638 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6639 }
6640 
6641 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6642 					struct hclge_fd_rule *rule)
6643 {
6644 	switch (info->layer) {
6645 	case HCLGE_FD_USER_DEF_L2:
6646 		rule->tuples.l2_user_def = info->data;
6647 		rule->tuples_mask.l2_user_def = info->data_mask;
6648 		break;
6649 	case HCLGE_FD_USER_DEF_L3:
6650 		rule->tuples.l3_user_def = info->data;
6651 		rule->tuples_mask.l3_user_def = info->data_mask;
6652 		break;
6653 	case HCLGE_FD_USER_DEF_L4:
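		/* the L4 user-def data occupies the high 16 bits of the
		 * 32-bit tuple
		 */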
6654 		rule->tuples.l4_user_def = (u32)info->data << 16;
6655 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6656 		break;
6657 	default:
6658 		break;
6659 	}
6660 
6661 	rule->ep.user_def = *info;
6662 }
6663 
6664 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6665 			      struct ethtool_rx_flow_spec *fs,
6666 			      struct hclge_fd_rule *rule,
6667 			      struct hclge_fd_user_def_info *info)
6668 {
6669 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6670 
6671 	switch (flow_type) {
6672 	case SCTP_V4_FLOW:
6673 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6674 		break;
6675 	case TCP_V4_FLOW:
6676 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6677 		break;
6678 	case UDP_V4_FLOW:
6679 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6680 		break;
6681 	case IP_USER_FLOW:
6682 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6683 		break;
6684 	case SCTP_V6_FLOW:
6685 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6686 		break;
6687 	case TCP_V6_FLOW:
6688 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6689 		break;
6690 	case UDP_V6_FLOW:
6691 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6692 		break;
6693 	case IPV6_USER_FLOW:
6694 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6695 		break;
6696 	case ETHER_FLOW:
6697 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6698 		break;
6699 	default:
6700 		return -EOPNOTSUPP;
6701 	}
6702 
6703 	if (fs->flow_type & FLOW_EXT) {
6704 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6705 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6706 		hclge_fd_get_user_def_tuple(info, rule);
6707 	}
6708 
6709 	if (fs->flow_type & FLOW_MAC_EXT) {
6710 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6711 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6712 	}
6713 
6714 	return 0;
6715 }
6716 
6717 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6718 				struct hclge_fd_rule *rule)
6719 {
6720 	int ret;
6721 
6722 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6723 	if (ret)
6724 		return ret;
6725 
6726 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6727 }
6728 
6729 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6730 				     struct hclge_fd_rule *rule)
6731 {
6732 	int ret;
6733 
6734 	spin_lock_bh(&hdev->fd_rule_lock);
6735 
6736 	if (hdev->fd_active_type != rule->rule_type &&
6737 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6738 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6739 		dev_err(&hdev->pdev->dev,
6740 			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6741 			rule->rule_type, hdev->fd_active_type);
6742 		spin_unlock_bh(&hdev->fd_rule_lock);
6743 		return -EINVAL;
6744 	}
6745 
6746 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6747 	if (ret)
6748 		goto out;
6749 
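	/* ethtool/tc rules cannot coexist with arfs rules, so clear any
	 * existing arfs rules before adding the new rule
	 */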
6750 	ret = hclge_clear_arfs_rules(hdev);
6751 	if (ret)
6752 		goto out;
6753 
6754 	ret = hclge_fd_config_rule(hdev, rule);
6755 	if (ret)
6756 		goto out;
6757 
6758 	rule->state = HCLGE_FD_ACTIVE;
6759 	hdev->fd_active_type = rule->rule_type;
6760 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6761 
6762 out:
6763 	spin_unlock_bh(&hdev->fd_rule_lock);
6764 	return ret;
6765 }
6766 
6767 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6768 {
6769 	struct hclge_vport *vport = hclge_get_vport(handle);
6770 	struct hclge_dev *hdev = vport->back;
6771 
6772 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6773 }
6774 
6775 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6776 				      u16 *vport_id, u8 *action, u16 *queue_id)
6777 {
6778 	struct hclge_vport *vport = hdev->vport;
6779 
6780 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6781 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6782 	} else {
6783 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6784 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6785 		u16 tqps;
6786 
6787 		/* To keep consistent with the user's configuration, subtract 1
6788 		 * when printing 'vf', because the ethtool vf id is offset by 1.
6789 		 */
6790 		if (vf > hdev->num_req_vfs) {
6791 			dev_err(&hdev->pdev->dev,
6792 				"Error: vf id (%u) should be less than %u\n",
6793 				vf - 1, hdev->num_req_vfs);
6794 			return -EINVAL;
6795 		}
6796 
6797 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6798 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6799 
6800 		if (ring >= tqps) {
6801 			dev_err(&hdev->pdev->dev,
6802 				"Error: queue id (%u) > max tqp num (%u)\n",
6803 				ring, tqps - 1);
6804 			return -EINVAL;
6805 		}
6806 
6807 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6808 		*queue_id = ring;
6809 	}
6810 
6811 	return 0;
6812 }
6813 
6814 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6815 			      struct ethtool_rxnfc *cmd)
6816 {
6817 	struct hclge_vport *vport = hclge_get_vport(handle);
6818 	struct hclge_dev *hdev = vport->back;
6819 	struct hclge_fd_user_def_info info;
6820 	u16 dst_vport_id = 0, q_index = 0;
6821 	struct ethtool_rx_flow_spec *fs;
6822 	struct hclge_fd_rule *rule;
6823 	u32 unused = 0;
6824 	u8 action;
6825 	int ret;
6826 
6827 	if (!hnae3_dev_fd_supported(hdev)) {
6828 		dev_err(&hdev->pdev->dev,
6829 			"flow director is not supported\n");
6830 		return -EOPNOTSUPP;
6831 	}
6832 
6833 	if (!hdev->fd_en) {
6834 		dev_err(&hdev->pdev->dev,
6835 			"please enable flow director first\n");
6836 		return -EOPNOTSUPP;
6837 	}
6838 
6839 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6840 
6841 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6842 	if (ret)
6843 		return ret;
6844 
6845 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6846 					 &action, &q_index);
6847 	if (ret)
6848 		return ret;
6849 
6850 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6851 	if (!rule)
6852 		return -ENOMEM;
6853 
6854 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6855 	if (ret) {
6856 		kfree(rule);
6857 		return ret;
6858 	}
6859 
6860 	rule->flow_type = fs->flow_type;
6861 	rule->location = fs->location;
6862 	rule->unused_tuple = unused;
6863 	rule->vf_id = dst_vport_id;
6864 	rule->queue_id = q_index;
6865 	rule->action = action;
6866 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6867 
6868 	ret = hclge_add_fd_entry_common(hdev, rule);
6869 	if (ret)
6870 		kfree(rule);
6871 
6872 	return ret;
6873 }
6874 
6875 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6876 			      struct ethtool_rxnfc *cmd)
6877 {
6878 	struct hclge_vport *vport = hclge_get_vport(handle);
6879 	struct hclge_dev *hdev = vport->back;
6880 	struct ethtool_rx_flow_spec *fs;
6881 	int ret;
6882 
6883 	if (!hnae3_dev_fd_supported(hdev))
6884 		return -EOPNOTSUPP;
6885 
6886 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6887 
6888 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6889 		return -EINVAL;
6890 
6891 	spin_lock_bh(&hdev->fd_rule_lock);
6892 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6893 	    !test_bit(fs->location, hdev->fd_bmap)) {
6894 		dev_err(&hdev->pdev->dev,
6895 			"Delete fail, rule %u does not exist\n", fs->location);
6896 		spin_unlock_bh(&hdev->fd_rule_lock);
6897 		return -ENOENT;
6898 	}
6899 
6900 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6901 				   NULL, false);
6902 	if (ret)
6903 		goto out;
6904 
6905 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6906 
6907 out:
6908 	spin_unlock_bh(&hdev->fd_rule_lock);
6909 	return ret;
6910 }
6911 
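/* Remove all stage-1 flow director rules from the hardware TCAM; when
 * clear_list is true, also free the software rule list and reset the rule
 * counters and the active rule type.
 */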
6912 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6913 					 bool clear_list)
6914 {
6915 	struct hclge_fd_rule *rule;
6916 	struct hlist_node *node;
6917 	u16 location;
6918 
6919 	if (!hnae3_dev_fd_supported(hdev))
6920 		return;
6921 
6922 	spin_lock_bh(&hdev->fd_rule_lock);
6923 
6924 	for_each_set_bit(location, hdev->fd_bmap,
6925 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6926 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6927 				     NULL, false);
6928 
6929 	if (clear_list) {
6930 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6931 					  rule_node) {
6932 			hlist_del(&rule->rule_node);
6933 			kfree(rule);
6934 		}
6935 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6936 		hdev->hclge_fd_rule_num = 0;
6937 		bitmap_zero(hdev->fd_bmap,
6938 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6939 	}
6940 
6941 	spin_unlock_bh(&hdev->fd_rule_lock);
6942 }
6943 
6944 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6945 {
6946 	hclge_clear_fd_rules_in_list(hdev, true);
6947 	hclge_fd_disable_user_def(hdev);
6948 }
6949 
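/* Mark all active flow director rules as TO_ADD so that the periodic service
 * task re-programs them into hardware after a reset.
 */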
6950 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6951 {
6952 	struct hclge_vport *vport = hclge_get_vport(handle);
6953 	struct hclge_dev *hdev = vport->back;
6954 	struct hclge_fd_rule *rule;
6955 	struct hlist_node *node;
6956 
6957 	/* Return 0 here, because the reset error handling checks this
6958 	 * return value. If an error is returned here, the reset process
6959 	 * will fail.
6960 	 */
6961 	if (!hnae3_dev_fd_supported(hdev))
6962 		return 0;
6963 
6964 	/* if fd is disabled, the rules should not be restored during reset */
6965 	if (!hdev->fd_en)
6966 		return 0;
6967 
6968 	spin_lock_bh(&hdev->fd_rule_lock);
6969 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6970 		if (rule->state == HCLGE_FD_ACTIVE)
6971 			rule->state = HCLGE_FD_TO_ADD;
6972 	}
6973 	spin_unlock_bh(&hdev->fd_rule_lock);
6974 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6975 
6976 	return 0;
6977 }
6978 
6979 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6980 				 struct ethtool_rxnfc *cmd)
6981 {
6982 	struct hclge_vport *vport = hclge_get_vport(handle);
6983 	struct hclge_dev *hdev = vport->back;
6984 
6985 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6986 		return -EOPNOTSUPP;
6987 
6988 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6989 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6990 
6991 	return 0;
6992 }
6993 
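/* The helpers below convert a stored fd rule back into the ethtool spec/mask
 * layout for ETHTOOL_GRXCLSRULE; tuples marked as unused are reported with an
 * all-zero mask.
 */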
6994 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6995 				     struct ethtool_tcpip4_spec *spec,
6996 				     struct ethtool_tcpip4_spec *spec_mask)
6997 {
6998 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6999 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
7000 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
7001 
7002 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
7003 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
7004 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
7005 
7006 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
7007 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7008 			0 : cpu_to_be16(rule->tuples_mask.src_port);
7009 
7010 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7011 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7012 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
7013 
7014 	spec->tos = rule->tuples.ip_tos;
7015 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7016 			0 : rule->tuples_mask.ip_tos;
7017 }
7018 
7019 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
7020 				  struct ethtool_usrip4_spec *spec,
7021 				  struct ethtool_usrip4_spec *spec_mask)
7022 {
7023 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
7024 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
7025 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
7026 
7027 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
7028 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
7029 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
7030 
7031 	spec->tos = rule->tuples.ip_tos;
7032 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7033 			0 : rule->tuples_mask.ip_tos;
7034 
7035 	spec->proto = rule->tuples.ip_proto;
7036 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7037 			0 : rule->tuples_mask.ip_proto;
7038 
7039 	spec->ip_ver = ETH_RX_NFC_IP4;
7040 }
7041 
7042 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
7043 				     struct ethtool_tcpip6_spec *spec,
7044 				     struct ethtool_tcpip6_spec *spec_mask)
7045 {
7046 	cpu_to_be32_array(spec->ip6src,
7047 			  rule->tuples.src_ip, IPV6_SIZE);
7048 	cpu_to_be32_array(spec->ip6dst,
7049 			  rule->tuples.dst_ip, IPV6_SIZE);
7050 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
7051 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7052 	else
7053 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
7054 				  IPV6_SIZE);
7055 
7056 	if (rule->unused_tuple & BIT(INNER_DST_IP))
7057 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7058 	else
7059 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
7060 				  IPV6_SIZE);
7061 
7062 	spec->tclass = rule->tuples.ip_tos;
7063 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7064 			0 : rule->tuples_mask.ip_tos;
7065 
7066 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
7067 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7068 			0 : cpu_to_be16(rule->tuples_mask.src_port);
7069 
7070 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7071 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7072 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
7073 }
7074 
7075 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
7076 				  struct ethtool_usrip6_spec *spec,
7077 				  struct ethtool_usrip6_spec *spec_mask)
7078 {
7079 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
7080 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
7081 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
7082 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7083 	else
7084 		cpu_to_be32_array(spec_mask->ip6src,
7085 				  rule->tuples_mask.src_ip, IPV6_SIZE);
7086 
7087 	if (rule->unused_tuple & BIT(INNER_DST_IP))
7088 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7089 	else
7090 		cpu_to_be32_array(spec_mask->ip6dst,
7091 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
7092 
7093 	spec->tclass = rule->tuples.ip_tos;
7094 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7095 			0 : rule->tuples_mask.ip_tos;
7096 
7097 	spec->l4_proto = rule->tuples.ip_proto;
7098 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7099 			0 : rule->tuples_mask.ip_proto;
7100 }
7101 
7102 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
7103 				    struct ethhdr *spec,
7104 				    struct ethhdr *spec_mask)
7105 {
7106 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
7107 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
7108 
7109 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
7110 		eth_zero_addr(spec_mask->h_source);
7111 	else
7112 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
7113 
7114 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
7115 		eth_zero_addr(spec_mask->h_dest);
7116 	else
7117 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
7118 
7119 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
7120 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
7121 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
7122 }
7123 
7124 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
7125 				       struct hclge_fd_rule *rule)
7126 {
7127 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
7128 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
7129 		fs->h_ext.data[0] = 0;
7130 		fs->h_ext.data[1] = 0;
7131 		fs->m_ext.data[0] = 0;
7132 		fs->m_ext.data[1] = 0;
7133 	} else {
7134 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7135 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7136 		fs->m_ext.data[0] =
7137 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7138 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7139 	}
7140 }
7141 
7142 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7143 				  struct hclge_fd_rule *rule)
7144 {
7145 	if (fs->flow_type & FLOW_EXT) {
7146 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7147 		fs->m_ext.vlan_tci =
7148 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7149 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7150 
7151 		hclge_fd_get_user_def_info(fs, rule);
7152 	}
7153 
7154 	if (fs->flow_type & FLOW_MAC_EXT) {
7155 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7156 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
7157 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
7158 		else
7159 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
7160 					rule->tuples_mask.dst_mac);
7161 	}
7162 }
7163 
7164 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7165 				  struct ethtool_rxnfc *cmd)
7166 {
7167 	struct hclge_vport *vport = hclge_get_vport(handle);
7168 	struct hclge_fd_rule *rule = NULL;
7169 	struct hclge_dev *hdev = vport->back;
7170 	struct ethtool_rx_flow_spec *fs;
7171 	struct hlist_node *node2;
7172 
7173 	if (!hnae3_dev_fd_supported(hdev))
7174 		return -EOPNOTSUPP;
7175 
7176 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7177 
7178 	spin_lock_bh(&hdev->fd_rule_lock);
7179 
7180 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7181 		if (rule->location >= fs->location)
7182 			break;
7183 	}
7184 
7185 	if (!rule || fs->location != rule->location) {
7186 		spin_unlock_bh(&hdev->fd_rule_lock);
7187 
7188 		return -ENOENT;
7189 	}
7190 
7191 	fs->flow_type = rule->flow_type;
7192 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7193 	case SCTP_V4_FLOW:
7194 	case TCP_V4_FLOW:
7195 	case UDP_V4_FLOW:
7196 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7197 					 &fs->m_u.tcp_ip4_spec);
7198 		break;
7199 	case IP_USER_FLOW:
7200 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7201 				      &fs->m_u.usr_ip4_spec);
7202 		break;
7203 	case SCTP_V6_FLOW:
7204 	case TCP_V6_FLOW:
7205 	case UDP_V6_FLOW:
7206 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7207 					 &fs->m_u.tcp_ip6_spec);
7208 		break;
7209 	case IPV6_USER_FLOW:
7210 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7211 				      &fs->m_u.usr_ip6_spec);
7212 		break;
7213 	/* The flow type of the fd rule has been checked before it is added to
7214 	 * the rule list. As all other flow types have been handled above, the
7215 	 * default case must be ETHER_FLOW.
7216 	 */
7217 	default:
7218 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7219 					&fs->m_u.ether_spec);
7220 		break;
7221 	}
7222 
7223 	hclge_fd_get_ext_info(fs, rule);
7224 
7225 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7226 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7227 	} else {
7228 		u64 vf_id;
7229 
7230 		fs->ring_cookie = rule->queue_id;
7231 		vf_id = rule->vf_id;
7232 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7233 		fs->ring_cookie |= vf_id;
7234 	}
7235 
7236 	spin_unlock_bh(&hdev->fd_rule_lock);
7237 
7238 	return 0;
7239 }
7240 
7241 static int hclge_get_all_rules(struct hnae3_handle *handle,
7242 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7243 {
7244 	struct hclge_vport *vport = hclge_get_vport(handle);
7245 	struct hclge_dev *hdev = vport->back;
7246 	struct hclge_fd_rule *rule;
7247 	struct hlist_node *node2;
7248 	int cnt = 0;
7249 
7250 	if (!hnae3_dev_fd_supported(hdev))
7251 		return -EOPNOTSUPP;
7252 
7253 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7254 
7255 	spin_lock_bh(&hdev->fd_rule_lock);
7256 	hlist_for_each_entry_safe(rule, node2,
7257 				  &hdev->fd_rule_list, rule_node) {
7258 		if (cnt == cmd->rule_cnt) {
7259 			spin_unlock_bh(&hdev->fd_rule_lock);
7260 			return -EMSGSIZE;
7261 		}
7262 
7263 		if (rule->state == HCLGE_FD_TO_DEL)
7264 			continue;
7265 
7266 		rule_locs[cnt] = rule->location;
7267 		cnt++;
7268 	}
7269 
7270 	spin_unlock_bh(&hdev->fd_rule_lock);
7271 
7272 	cmd->rule_cnt = cnt;
7273 
7274 	return 0;
7275 }
7276 
7277 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7278 				     struct hclge_fd_rule_tuples *tuples)
7279 {
7280 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7281 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7282 
7283 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7284 	tuples->ip_proto = fkeys->basic.ip_proto;
7285 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7286 
7287 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7288 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7289 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7290 	} else {
7291 		int i;
7292 
7293 		for (i = 0; i < IPV6_SIZE; i++) {
7294 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7295 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7296 		}
7297 	}
7298 }
7299 
7300 /* traverse all rules and check whether an existing rule has the same tuples */
7301 static struct hclge_fd_rule *
7302 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7303 			  const struct hclge_fd_rule_tuples *tuples)
7304 {
7305 	struct hclge_fd_rule *rule = NULL;
7306 	struct hlist_node *node;
7307 
7308 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7309 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7310 			return rule;
7311 	}
7312 
7313 	return NULL;
7314 }
7315 
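/* Build an aRFS rule from the parsed flow tuples: MAC, VLAN, TOS and source
 * port are marked as unused tuples, the remaining tuples are matched exactly.
 */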
7316 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7317 				     struct hclge_fd_rule *rule)
7318 {
7319 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7320 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7321 			     BIT(INNER_SRC_PORT);
7322 	rule->action = 0;
7323 	rule->vf_id = 0;
7324 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7325 	rule->state = HCLGE_FD_TO_ADD;
7326 	if (tuples->ether_proto == ETH_P_IP) {
7327 		if (tuples->ip_proto == IPPROTO_TCP)
7328 			rule->flow_type = TCP_V4_FLOW;
7329 		else
7330 			rule->flow_type = UDP_V4_FLOW;
7331 	} else {
7332 		if (tuples->ip_proto == IPPROTO_TCP)
7333 			rule->flow_type = TCP_V6_FLOW;
7334 		else
7335 			rule->flow_type = UDP_V6_FLOW;
7336 	}
7337 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7338 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7339 }
7340 
7341 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7342 				      u16 flow_id, struct flow_keys *fkeys)
7343 {
7344 	struct hclge_vport *vport = hclge_get_vport(handle);
7345 	struct hclge_fd_rule_tuples new_tuples = {};
7346 	struct hclge_dev *hdev = vport->back;
7347 	struct hclge_fd_rule *rule;
7348 	u16 bit_id;
7349 
7350 	if (!hnae3_dev_fd_supported(hdev))
7351 		return -EOPNOTSUPP;
7352 
7353 	/* arfs should not work when an fd rule added by the user
7354 	 * already exists
7355 	 */
7356 	spin_lock_bh(&hdev->fd_rule_lock);
7357 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7358 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7359 		spin_unlock_bh(&hdev->fd_rule_lock);
7360 		return -EOPNOTSUPP;
7361 	}
7362 
7363 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7364 
7365 	/* check whether a flow director filter already exists for this flow:
7366 	 * if not, create a new filter for it;
7367 	 * if one exists with a different queue id, modify the filter;
7368 	 * if one exists with the same queue id, do nothing
7369 	 */
7370 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7371 	if (!rule) {
7372 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7373 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7374 			spin_unlock_bh(&hdev->fd_rule_lock);
7375 			return -ENOSPC;
7376 		}
7377 
7378 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7379 		if (!rule) {
7380 			spin_unlock_bh(&hdev->fd_rule_lock);
7381 			return -ENOMEM;
7382 		}
7383 
7384 		rule->location = bit_id;
7385 		rule->arfs.flow_id = flow_id;
7386 		rule->queue_id = queue_id;
7387 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7388 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7389 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7390 	} else if (rule->queue_id != queue_id) {
7391 		rule->queue_id = queue_id;
7392 		rule->state = HCLGE_FD_TO_ADD;
7393 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7394 		hclge_task_schedule(hdev, 0);
7395 	}
7396 	spin_unlock_bh(&hdev->fd_rule_lock);
7397 	return rule->location;
7398 }
7399 
7400 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7401 {
7402 #ifdef CONFIG_RFS_ACCEL
7403 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7404 	struct hclge_fd_rule *rule;
7405 	struct hlist_node *node;
7406 
7407 	spin_lock_bh(&hdev->fd_rule_lock);
7408 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7409 		spin_unlock_bh(&hdev->fd_rule_lock);
7410 		return;
7411 	}
7412 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7413 		if (rule->state != HCLGE_FD_ACTIVE)
7414 			continue;
7415 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7416 					rule->arfs.flow_id, rule->location)) {
7417 			rule->state = HCLGE_FD_TO_DEL;
7418 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7419 		}
7420 	}
7421 	spin_unlock_bh(&hdev->fd_rule_lock);
7422 #endif
7423 }
7424 
7425 /* must be called with fd_rule_lock held */
7426 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7427 {
7428 #ifdef CONFIG_RFS_ACCEL
7429 	struct hclge_fd_rule *rule;
7430 	struct hlist_node *node;
7431 	int ret;
7432 
7433 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7434 		return 0;
7435 
7436 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7437 		switch (rule->state) {
7438 		case HCLGE_FD_TO_DEL:
7439 		case HCLGE_FD_ACTIVE:
7440 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7441 						   rule->location, NULL, false);
7442 			if (ret)
7443 				return ret;
7444 			fallthrough;
7445 		case HCLGE_FD_TO_ADD:
7446 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7447 			hlist_del(&rule->rule_node);
7448 			kfree(rule);
7449 			break;
7450 		default:
7451 			break;
7452 		}
7453 	}
7454 	hclge_sync_fd_state(hdev);
7455 
7456 #endif
7457 	return 0;
7458 }
7459 
7460 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7461 				    struct hclge_fd_rule *rule)
7462 {
7463 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7464 		struct flow_match_basic match;
7465 		u16 ethtype_key, ethtype_mask;
7466 
7467 		flow_rule_match_basic(flow, &match);
7468 		ethtype_key = ntohs(match.key->n_proto);
7469 		ethtype_mask = ntohs(match.mask->n_proto);
7470 
7471 		if (ethtype_key == ETH_P_ALL) {
7472 			ethtype_key = 0;
7473 			ethtype_mask = 0;
7474 		}
7475 		rule->tuples.ether_proto = ethtype_key;
7476 		rule->tuples_mask.ether_proto = ethtype_mask;
7477 		rule->tuples.ip_proto = match.key->ip_proto;
7478 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7479 	} else {
7480 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7481 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7482 	}
7483 }
7484 
7485 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7486 				  struct hclge_fd_rule *rule)
7487 {
7488 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7489 		struct flow_match_eth_addrs match;
7490 
7491 		flow_rule_match_eth_addrs(flow, &match);
7492 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7493 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7494 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7495 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7496 	} else {
7497 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7498 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7499 	}
7500 }
7501 
7502 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7503 				   struct hclge_fd_rule *rule)
7504 {
7505 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7506 		struct flow_match_vlan match;
7507 
7508 		flow_rule_match_vlan(flow, &match);
7509 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7510 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7511 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7512 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7513 	} else {
7514 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7515 	}
7516 }
7517 
7518 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7519 				 struct hclge_fd_rule *rule)
7520 {
7521 	u16 addr_type = 0;
7522 
7523 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7524 		struct flow_match_control match;
7525 
7526 		flow_rule_match_control(flow, &match);
7527 		addr_type = match.key->addr_type;
7528 	}
7529 
7530 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7531 		struct flow_match_ipv4_addrs match;
7532 
7533 		flow_rule_match_ipv4_addrs(flow, &match);
7534 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7535 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7536 						be32_to_cpu(match.mask->src);
7537 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7538 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7539 						be32_to_cpu(match.mask->dst);
7540 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7541 		struct flow_match_ipv6_addrs match;
7542 
7543 		flow_rule_match_ipv6_addrs(flow, &match);
7544 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7545 				  IPV6_SIZE);
7546 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7547 				  match.mask->src.s6_addr32, IPV6_SIZE);
7548 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7549 				  IPV6_SIZE);
7550 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7551 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7552 	} else {
7553 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7554 		rule->unused_tuple |= BIT(INNER_DST_IP);
7555 	}
7556 }
7557 
7558 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7559 				   struct hclge_fd_rule *rule)
7560 {
7561 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7562 		struct flow_match_ports match;
7563 
7564 		flow_rule_match_ports(flow, &match);
7565 
7566 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7567 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7568 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7569 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7570 	} else {
7571 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7572 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7573 	}
7574 }
7575 
7576 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7577 				  struct flow_cls_offload *cls_flower,
7578 				  struct hclge_fd_rule *rule)
7579 {
7580 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7581 	struct flow_dissector *dissector = flow->match.dissector;
7582 
7583 	if (dissector->used_keys &
7584 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7585 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7586 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7587 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7588 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7589 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7590 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7591 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7592 			dissector->used_keys);
7593 		return -EOPNOTSUPP;
7594 	}
7595 
7596 	hclge_get_cls_key_basic(flow, rule);
7597 	hclge_get_cls_key_mac(flow, rule);
7598 	hclge_get_cls_key_vlan(flow, rule);
7599 	hclge_get_cls_key_ip(flow, rule);
7600 	hclge_get_cls_key_port(flow, rule);
7601 
7602 	return 0;
7603 }
7604 
7605 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7606 				  struct flow_cls_offload *cls_flower, int tc)
7607 {
7608 	u32 prio = cls_flower->common.prio;
7609 
7610 	if (tc < 0 || tc > hdev->tc_max) {
7611 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7612 		return -EINVAL;
7613 	}
7614 
7615 	if (prio == 0 ||
7616 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7617 		dev_err(&hdev->pdev->dev,
7618 			"prio %u should be in range[1, %u]\n",
7619 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7620 		return -EINVAL;
7621 	}
7622 
7623 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7624 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7625 		return -EINVAL;
7626 	}
7627 	return 0;
7628 }
7629 
7630 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7631 				struct flow_cls_offload *cls_flower,
7632 				int tc)
7633 {
7634 	struct hclge_vport *vport = hclge_get_vport(handle);
7635 	struct hclge_dev *hdev = vport->back;
7636 	struct hclge_fd_rule *rule;
7637 	int ret;
7638 
7639 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7640 	if (ret) {
7641 		dev_err(&hdev->pdev->dev,
7642 			"failed to check cls flower params, ret = %d\n", ret);
7643 		return ret;
7644 	}
7645 
7646 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7647 	if (!rule)
7648 		return -ENOMEM;
7649 
7650 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7651 	if (ret) {
7652 		kfree(rule);
7653 		return ret;
7654 	}
7655 
7656 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7657 	rule->cls_flower.tc = tc;
7658 	rule->location = cls_flower->common.prio - 1;
7659 	rule->vf_id = 0;
7660 	rule->cls_flower.cookie = cls_flower->cookie;
7661 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7662 
7663 	ret = hclge_add_fd_entry_common(hdev, rule);
7664 	if (ret)
7665 		kfree(rule);
7666 
7667 	return ret;
7668 }
7669 
7670 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7671 						   unsigned long cookie)
7672 {
7673 	struct hclge_fd_rule *rule;
7674 	struct hlist_node *node;
7675 
7676 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7677 		if (rule->cls_flower.cookie == cookie)
7678 			return rule;
7679 	}
7680 
7681 	return NULL;
7682 }
7683 
7684 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7685 				struct flow_cls_offload *cls_flower)
7686 {
7687 	struct hclge_vport *vport = hclge_get_vport(handle);
7688 	struct hclge_dev *hdev = vport->back;
7689 	struct hclge_fd_rule *rule;
7690 	int ret;
7691 
7692 	spin_lock_bh(&hdev->fd_rule_lock);
7693 
7694 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7695 	if (!rule) {
7696 		spin_unlock_bh(&hdev->fd_rule_lock);
7697 		return -EINVAL;
7698 	}
7699 
7700 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7701 				   NULL, false);
7702 	if (ret) {
7703 		spin_unlock_bh(&hdev->fd_rule_lock);
7704 		return ret;
7705 	}
7706 
7707 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7708 	spin_unlock_bh(&hdev->fd_rule_lock);
7709 
7710 	return 0;
7711 }
7712 
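/* Flush pending flow director changes to hardware: program rules in TO_ADD
 * state and remove rules in TO_DEL state. On failure, the FD_TBL_CHANGED flag
 * is set again so that the next service task run retries.
 */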
7713 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7714 {
7715 	struct hclge_fd_rule *rule;
7716 	struct hlist_node *node;
7717 	int ret = 0;
7718 
7719 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7720 		return;
7721 
7722 	spin_lock_bh(&hdev->fd_rule_lock);
7723 
7724 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7725 		switch (rule->state) {
7726 		case HCLGE_FD_TO_ADD:
7727 			ret = hclge_fd_config_rule(hdev, rule);
7728 			if (ret)
7729 				goto out;
7730 			rule->state = HCLGE_FD_ACTIVE;
7731 			break;
7732 		case HCLGE_FD_TO_DEL:
7733 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7734 						   rule->location, NULL, false);
7735 			if (ret)
7736 				goto out;
7737 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7738 			hclge_fd_free_node(hdev, rule);
7739 			break;
7740 		default:
7741 			break;
7742 		}
7743 	}
7744 
7745 out:
7746 	if (ret)
7747 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7748 
7749 	spin_unlock_bh(&hdev->fd_rule_lock);
7750 }
7751 
7752 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7753 {
7754 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7755 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7756 
7757 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7758 	}
7759 
7760 	hclge_sync_fd_user_def_cfg(hdev, false);
7761 
7762 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7763 }
7764 
7765 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7766 {
7767 	struct hclge_vport *vport = hclge_get_vport(handle);
7768 	struct hclge_dev *hdev = vport->back;
7769 
7770 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7771 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7772 }
7773 
7774 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7775 {
7776 	struct hclge_vport *vport = hclge_get_vport(handle);
7777 	struct hclge_dev *hdev = vport->back;
7778 
7779 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7780 }
7781 
7782 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7783 {
7784 	struct hclge_vport *vport = hclge_get_vport(handle);
7785 	struct hclge_dev *hdev = vport->back;
7786 
7787 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7788 }
7789 
7790 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7791 {
7792 	struct hclge_vport *vport = hclge_get_vport(handle);
7793 	struct hclge_dev *hdev = vport->back;
7794 
7795 	return hdev->rst_stats.hw_reset_done_cnt;
7796 }
7797 
7798 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7799 {
7800 	struct hclge_vport *vport = hclge_get_vport(handle);
7801 	struct hclge_dev *hdev = vport->back;
7802 
7803 	hdev->fd_en = enable;
7804 
7805 	if (!enable)
7806 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7807 	else
7808 		hclge_restore_fd_entries(handle);
7809 
7810 	hclge_task_schedule(hdev, 0);
7811 }
7812 
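/* Enable or disable MAC TX/RX, together with padding, FCS and
 * oversize/undersize handling, via the CONFIG_MAC_MODE command.
 */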
7813 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7814 {
7815 	struct hclge_desc desc;
7816 	struct hclge_config_mac_mode_cmd *req =
7817 		(struct hclge_config_mac_mode_cmd *)desc.data;
7818 	u32 loop_en = 0;
7819 	int ret;
7820 
7821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7822 
7823 	if (enable) {
7824 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7825 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7826 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7827 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7828 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7829 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7830 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7831 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7832 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7833 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7834 	}
7835 
7836 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7837 
7838 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7839 	if (ret)
7840 		dev_err(&hdev->pdev->dev,
7841 			"mac enable fail, ret =%d.\n", ret);
7842 }
7843 
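/* Update the MAC VLAN switch parameter of the given function: the current
 * value is read back, modified under param_mask and then written again.
 */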
7844 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7845 				     u8 switch_param, u8 param_mask)
7846 {
7847 	struct hclge_mac_vlan_switch_cmd *req;
7848 	struct hclge_desc desc;
7849 	u32 func_id;
7850 	int ret;
7851 
7852 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7853 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7854 
7855 	/* read current config parameter */
7856 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7857 				   true);
7858 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7859 	req->func_id = cpu_to_le32(func_id);
7860 
7861 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7862 	if (ret) {
7863 		dev_err(&hdev->pdev->dev,
7864 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7865 		return ret;
7866 	}
7867 
7868 	/* modify and write new config parameter */
7869 	hclge_cmd_reuse_desc(&desc, false);
7870 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7871 	req->param_mask = param_mask;
7872 
7873 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7874 	if (ret)
7875 		dev_err(&hdev->pdev->dev,
7876 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7877 	return ret;
7878 }
7879 
7880 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7881 				       int link_ret)
7882 {
7883 #define HCLGE_PHY_LINK_STATUS_NUM  200
7884 
7885 	struct phy_device *phydev = hdev->hw.mac.phydev;
7886 	int i = 0;
7887 	int ret;
7888 
7889 	do {
7890 		ret = phy_read_status(phydev);
7891 		if (ret) {
7892 			dev_err(&hdev->pdev->dev,
7893 				"phy update link status fail, ret = %d\n", ret);
7894 			return;
7895 		}
7896 
7897 		if (phydev->link == link_ret)
7898 			break;
7899 
7900 		msleep(HCLGE_LINK_STATUS_MS);
7901 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7902 }
7903 
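/* Poll the MAC link status until it matches link_ret, or return -EBUSY after
 * HCLGE_MAC_LINK_STATUS_NUM retries.
 */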
7904 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7905 {
7906 #define HCLGE_MAC_LINK_STATUS_NUM  100
7907 
7908 	int link_status;
7909 	int i = 0;
7910 	int ret;
7911 
7912 	do {
7913 		ret = hclge_get_mac_link_status(hdev, &link_status);
7914 		if (ret)
7915 			return ret;
7916 		if (link_status == link_ret)
7917 			return 0;
7918 
7919 		msleep(HCLGE_LINK_STATUS_MS);
7920 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7921 	return -EBUSY;
7922 }
7923 
7924 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7925 					  bool is_phy)
7926 {
7927 	int link_ret;
7928 
7929 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7930 
7931 	if (is_phy)
7932 		hclge_phy_link_status_wait(hdev, link_ret);
7933 
7934 	return hclge_mac_link_status_wait(hdev, link_ret);
7935 }
7936 
7937 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7938 {
7939 	struct hclge_config_mac_mode_cmd *req;
7940 	struct hclge_desc desc;
7941 	u32 loop_en;
7942 	int ret;
7943 
7944 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7945 	/* 1 Read out the MAC mode config first */
7946 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7947 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7948 	if (ret) {
7949 		dev_err(&hdev->pdev->dev,
7950 			"mac loopback get fail, ret =%d.\n", ret);
7951 		return ret;
7952 	}
7953 
7954 	/* 2 Then setup the loopback flag */
7955 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7956 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7957 
7958 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7959 
7960 	/* 3 Config the mac work mode with the loopback flag
7961 	 * and its original configuration parameters
7962 	 */
7963 	hclge_cmd_reuse_desc(&desc, false);
7964 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7965 	if (ret)
7966 		dev_err(&hdev->pdev->dev,
7967 			"mac loopback set fail, ret =%d.\n", ret);
7968 	return ret;
7969 }
7970 
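/* Configure serdes or phy (common) loopback through the firmware, then poll
 * the command result until the firmware reports that the request is done.
 */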
7971 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7972 				     enum hnae3_loop loop_mode)
7973 {
7974 #define HCLGE_COMMON_LB_RETRY_MS	10
7975 #define HCLGE_COMMON_LB_RETRY_NUM	100
7976 
7977 	struct hclge_common_lb_cmd *req;
7978 	struct hclge_desc desc;
7979 	int ret, i = 0;
7980 	u8 loop_mode_b;
7981 
7982 	req = (struct hclge_common_lb_cmd *)desc.data;
7983 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7984 
7985 	switch (loop_mode) {
7986 	case HNAE3_LOOP_SERIAL_SERDES:
7987 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7988 		break;
7989 	case HNAE3_LOOP_PARALLEL_SERDES:
7990 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7991 		break;
7992 	case HNAE3_LOOP_PHY:
7993 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7994 		break;
7995 	default:
7996 		dev_err(&hdev->pdev->dev,
7997 			"unsupported common loopback mode %d\n", loop_mode);
7998 		return -ENOTSUPP;
7999 	}
8000 
8001 	if (en) {
8002 		req->enable = loop_mode_b;
8003 		req->mask = loop_mode_b;
8004 	} else {
8005 		req->mask = loop_mode_b;
8006 	}
8007 
8008 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8009 	if (ret) {
8010 		dev_err(&hdev->pdev->dev,
8011 			"common loopback set fail, ret = %d\n", ret);
8012 		return ret;
8013 	}
8014 
8015 	do {
8016 		msleep(HCLGE_COMMON_LB_RETRY_MS);
8017 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
8018 					   true);
8019 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8020 		if (ret) {
8021 			dev_err(&hdev->pdev->dev,
8022 				"common loopback get fail, ret = %d\n", ret);
8023 			return ret;
8024 		}
8025 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
8026 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
8027 
8028 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
8029 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
8030 		return -EBUSY;
8031 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
8032 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
8033 		return -EIO;
8034 	}
8035 	return ret;
8036 }
8037 
8038 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
8039 				     enum hnae3_loop loop_mode)
8040 {
8041 	int ret;
8042 
8043 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
8044 	if (ret)
8045 		return ret;
8046 
8047 	hclge_cfg_mac_mode(hdev, en);
8048 
8049 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
8050 	if (ret)
8051 		dev_err(&hdev->pdev->dev,
8052 			"serdes loopback config mac mode timeout\n");
8053 
8054 	return ret;
8055 }
8056 
8057 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
8058 				     struct phy_device *phydev)
8059 {
8060 	int ret;
8061 
8062 	if (!phydev->suspended) {
8063 		ret = phy_suspend(phydev);
8064 		if (ret)
8065 			return ret;
8066 	}
8067 
8068 	ret = phy_resume(phydev);
8069 	if (ret)
8070 		return ret;
8071 
8072 	return phy_loopback(phydev, true);
8073 }
8074 
8075 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
8076 				      struct phy_device *phydev)
8077 {
8078 	int ret;
8079 
8080 	ret = phy_loopback(phydev, false);
8081 	if (ret)
8082 		return ret;
8083 
8084 	return phy_suspend(phydev);
8085 }
8086 
8087 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
8088 {
8089 	struct phy_device *phydev = hdev->hw.mac.phydev;
8090 	int ret;
8091 
8092 	if (!phydev) {
8093 		if (hnae3_dev_phy_imp_supported(hdev))
8094 			return hclge_set_common_loopback(hdev, en,
8095 							 HNAE3_LOOP_PHY);
8096 		return -ENOTSUPP;
8097 	}
8098 
8099 	if (en)
8100 		ret = hclge_enable_phy_loopback(hdev, phydev);
8101 	else
8102 		ret = hclge_disable_phy_loopback(hdev, phydev);
8103 	if (ret) {
8104 		dev_err(&hdev->pdev->dev,
8105 			"set phy loopback fail, ret = %d\n", ret);
8106 		return ret;
8107 	}
8108 
8109 	hclge_cfg_mac_mode(hdev, en);
8110 
8111 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
8112 	if (ret)
8113 		dev_err(&hdev->pdev->dev,
8114 			"phy loopback config mac mode timeout\n");
8115 
8116 	return ret;
8117 }
8118 
8119 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
8120 				     u16 stream_id, bool enable)
8121 {
8122 	struct hclge_desc desc;
8123 	struct hclge_cfg_com_tqp_queue_cmd *req =
8124 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
8125 
8126 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
8127 	req->tqp_id = cpu_to_le16(tqp_id);
8128 	req->stream_id = cpu_to_le16(stream_id);
8129 	if (enable)
8130 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
8131 
8132 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8133 }
8134 
8135 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8136 {
8137 	struct hclge_vport *vport = hclge_get_vport(handle);
8138 	struct hclge_dev *hdev = vport->back;
8139 	int ret;
8140 	u16 i;
8141 
8142 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
8143 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8144 		if (ret)
8145 			return ret;
8146 	}
8147 	return 0;
8148 }
8149 
8150 static int hclge_set_loopback(struct hnae3_handle *handle,
8151 			      enum hnae3_loop loop_mode, bool en)
8152 {
8153 	struct hclge_vport *vport = hclge_get_vport(handle);
8154 	struct hclge_dev *hdev = vport->back;
8155 	int ret;
8156 
8157 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8158 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8159 	 * the same, the packets are looped back in the SSU. If SSU loopback
8160 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8161 	 */
8162 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8163 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8164 
8165 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8166 						HCLGE_SWITCH_ALW_LPBK_MASK);
8167 		if (ret)
8168 			return ret;
8169 	}
8170 
8171 	switch (loop_mode) {
8172 	case HNAE3_LOOP_APP:
8173 		ret = hclge_set_app_loopback(hdev, en);
8174 		break;
8175 	case HNAE3_LOOP_SERIAL_SERDES:
8176 	case HNAE3_LOOP_PARALLEL_SERDES:
8177 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8178 		break;
8179 	case HNAE3_LOOP_PHY:
8180 		ret = hclge_set_phy_loopback(hdev, en);
8181 		break;
8182 	default:
8183 		ret = -ENOTSUPP;
8184 		dev_err(&hdev->pdev->dev,
8185 			"loop_mode %d is not supported\n", loop_mode);
8186 		break;
8187 	}
8188 
8189 	if (ret)
8190 		return ret;
8191 
8192 	ret = hclge_tqp_enable(handle, en);
8193 	if (ret)
8194 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8195 			en ? "enable" : "disable", ret);
8196 
8197 	return ret;
8198 }
8199 
8200 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8201 {
8202 	int ret;
8203 
8204 	ret = hclge_set_app_loopback(hdev, false);
8205 	if (ret)
8206 		return ret;
8207 
8208 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8209 	if (ret)
8210 		return ret;
8211 
8212 	return hclge_cfg_common_loopback(hdev, false,
8213 					 HNAE3_LOOP_PARALLEL_SERDES);
8214 }
8215 
8216 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8217 {
8218 	struct hclge_vport *vport = hclge_get_vport(handle);
8219 	struct hnae3_knic_private_info *kinfo;
8220 	struct hnae3_queue *queue;
8221 	struct hclge_tqp *tqp;
8222 	int i;
8223 
8224 	kinfo = &vport->nic.kinfo;
8225 	for (i = 0; i < kinfo->num_tqps; i++) {
8226 		queue = handle->kinfo.tqp[i];
8227 		tqp = container_of(queue, struct hclge_tqp, q);
8228 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8229 	}
8230 }
8231 
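/* Wait for an in-flight link update in the service task to finish; the wait
 * is bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations and ends early once the
 * service task makes progress.
 */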
8232 static void hclge_flush_link_update(struct hclge_dev *hdev)
8233 {
8234 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8235 
8236 	unsigned long last = hdev->serv_processed_cnt;
8237 	int i = 0;
8238 
8239 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8240 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8241 	       last == hdev->serv_processed_cnt)
8242 		usleep_range(1, 1);
8243 }
8244 
8245 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8246 {
8247 	struct hclge_vport *vport = hclge_get_vport(handle);
8248 	struct hclge_dev *hdev = vport->back;
8249 
8250 	if (enable) {
8251 		hclge_task_schedule(hdev, 0);
8252 	} else {
8253 		/* Set the DOWN flag here to disable link updating */
8254 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8255 
8256 		/* flush memory to make sure DOWN is seen by service task */
8257 		smp_mb__before_atomic();
8258 		hclge_flush_link_update(hdev);
8259 	}
8260 }
8261 
8262 static int hclge_ae_start(struct hnae3_handle *handle)
8263 {
8264 	struct hclge_vport *vport = hclge_get_vport(handle);
8265 	struct hclge_dev *hdev = vport->back;
8266 
8267 	/* mac enable */
8268 	hclge_cfg_mac_mode(hdev, true);
8269 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8270 	hdev->hw.mac.link = 0;
8271 
8272 	/* reset tqp stats */
8273 	hclge_reset_tqp_stats(handle);
8274 
8275 	hclge_mac_start_phy(hdev);
8276 
8277 	return 0;
8278 }
8279 
8280 static void hclge_ae_stop(struct hnae3_handle *handle)
8281 {
8282 	struct hclge_vport *vport = hclge_get_vport(handle);
8283 	struct hclge_dev *hdev = vport->back;
8284 
8285 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8286 	spin_lock_bh(&hdev->fd_rule_lock);
8287 	hclge_clear_arfs_rules(hdev);
8288 	spin_unlock_bh(&hdev->fd_rule_lock);
8289 
8290 	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
8291 	 * so we only need to stop the phy here.
8292 	 */
8293 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8294 	    hdev->reset_type != HNAE3_FUNC_RESET &&
8295 	    hdev->reset_type != HNAE3_FLR_RESET) {
8296 		hclge_mac_stop_phy(hdev);
8297 		hclge_update_link_status(hdev);
8298 		return;
8299 	}
8300 
8301 	hclge_reset_tqp(handle);
8302 
8303 	hclge_config_mac_tnl_int(hdev, false);
8304 
8305 	/* Mac disable */
8306 	hclge_cfg_mac_mode(hdev, false);
8307 
8308 	hclge_mac_stop_phy(hdev);
8309 
8310 	/* reset tqp stats */
8311 	hclge_reset_tqp_stats(handle);
8312 	hclge_update_link_status(hdev);
8313 }
8314 
8315 int hclge_vport_start(struct hclge_vport *vport)
8316 {
8317 	struct hclge_dev *hdev = vport->back;
8318 
8319 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8320 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8321 	vport->last_active_jiffies = jiffies;
8322 
8323 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8324 		if (vport->vport_id) {
8325 			hclge_restore_mac_table_common(vport);
8326 			hclge_restore_vport_vlan_table(vport);
8327 		} else {
8328 			hclge_restore_hw_table(hdev);
8329 		}
8330 	}
8331 
8332 	clear_bit(vport->vport_id, hdev->vport_config_block);
8333 
8334 	return 0;
8335 }
8336 
8337 void hclge_vport_stop(struct hclge_vport *vport)
8338 {
8339 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8340 }
8341 
8342 static int hclge_client_start(struct hnae3_handle *handle)
8343 {
8344 	struct hclge_vport *vport = hclge_get_vport(handle);
8345 
8346 	return hclge_vport_start(vport);
8347 }
8348 
8349 static void hclge_client_stop(struct hnae3_handle *handle)
8350 {
8351 	struct hclge_vport *vport = hclge_get_vport(handle);
8352 
8353 	hclge_vport_stop(vport);
8354 }
8355 
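/* Convert the command queue status and the MAC/VLAN table response code of an
 * add, remove or lookup operation into a standard errno value.
 */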
8356 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8357 					 u16 cmdq_resp, u8  resp_code,
8358 					 enum hclge_mac_vlan_tbl_opcode op)
8359 {
8360 	struct hclge_dev *hdev = vport->back;
8361 
8362 	if (cmdq_resp) {
8363 		dev_err(&hdev->pdev->dev,
8364 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8365 			cmdq_resp);
8366 		return -EIO;
8367 	}
8368 
8369 	if (op == HCLGE_MAC_VLAN_ADD) {
8370 		if (!resp_code || resp_code == 1)
8371 			return 0;
8372 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8373 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8374 			return -ENOSPC;
8375 
8376 		dev_err(&hdev->pdev->dev,
8377 			"add mac addr failed for undefined, code=%u.\n",
8378 			resp_code);
8379 		return -EIO;
8380 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8381 		if (!resp_code) {
8382 			return 0;
8383 		} else if (resp_code == 1) {
8384 			dev_dbg(&hdev->pdev->dev,
8385 				"remove mac addr failed for miss.\n");
8386 			return -ENOENT;
8387 		}
8388 
8389 		dev_err(&hdev->pdev->dev,
8390 			"remove mac addr failed for undefined, code=%u.\n",
8391 			resp_code);
8392 		return -EIO;
8393 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8394 		if (!resp_code) {
8395 			return 0;
8396 		} else if (resp_code == 1) {
8397 			dev_dbg(&hdev->pdev->dev,
8398 				"lookup mac addr failed for miss.\n");
8399 			return -ENOENT;
8400 		}
8401 
8402 		dev_err(&hdev->pdev->dev,
8403 			"lookup mac addr failed for undefined, code=%u.\n",
8404 			resp_code);
8405 		return -EIO;
8406 	}
8407 
8408 	dev_err(&hdev->pdev->dev,
8409 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8410 
8411 	return -EINVAL;
8412 }
8413 
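/* Set or clear the bit for the given function id in the vf bitmap spread over
 * descriptors 1 and 2; the first 192 function ids live in descriptor 1.
 */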
8414 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8415 {
8416 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8417 
8418 	unsigned int word_num;
8419 	unsigned int bit_num;
8420 
8421 	if (vfid > 255 || vfid < 0)
8422 		return -EIO;
8423 
8424 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8425 		word_num = vfid / 32;
8426 		bit_num  = vfid % 32;
8427 		if (clr)
8428 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8429 		else
8430 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8431 	} else {
8432 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8433 		bit_num  = vfid % 32;
8434 		if (clr)
8435 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8436 		else
8437 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8438 	}
8439 
8440 	return 0;
8441 }
8442 
8443 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8444 {
8445 #define HCLGE_DESC_NUMBER 3
8446 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8447 	int i, j;
8448 
8449 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8450 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8451 			if (desc[i].data[j])
8452 				return false;
8453 
8454 	return true;
8455 }
8456 
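/* Pack bytes 0-3 of the MAC address into mac_addr_hi32 and bytes 4-5 into
 * mac_addr_lo16, and set the entry flag; multicast entries also get the
 * multicast entry type and enable bits set.
 */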
8457 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8458 				   const u8 *addr, bool is_mc)
8459 {
8460 	const unsigned char *mac_addr = addr;
8461 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8462 		       (mac_addr[0]) | (mac_addr[1] << 8);
8463 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8464 
8465 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8466 	if (is_mc) {
8467 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8468 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8469 	}
8470 
8471 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8472 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8473 }
8474 
8475 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8476 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8477 {
8478 	struct hclge_dev *hdev = vport->back;
8479 	struct hclge_desc desc;
8480 	u8 resp_code;
8481 	u16 retval;
8482 	int ret;
8483 
8484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8485 
8486 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8487 
8488 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8489 	if (ret) {
8490 		dev_err(&hdev->pdev->dev,
8491 			"del mac addr failed for cmd_send, ret =%d.\n",
8492 			ret);
8493 		return ret;
8494 	}
8495 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8496 	retval = le16_to_cpu(desc.retval);
8497 
8498 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8499 					     HCLGE_MAC_VLAN_REMOVE);
8500 }
8501 
8502 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8503 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8504 				     struct hclge_desc *desc,
8505 				     bool is_mc)
8506 {
8507 	struct hclge_dev *hdev = vport->back;
8508 	u8 resp_code;
8509 	u16 retval;
8510 	int ret;
8511 
8512 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8513 	if (is_mc) {
8514 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8515 		memcpy(desc[0].data,
8516 		       req,
8517 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8518 		hclge_cmd_setup_basic_desc(&desc[1],
8519 					   HCLGE_OPC_MAC_VLAN_ADD,
8520 					   true);
8521 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8522 		hclge_cmd_setup_basic_desc(&desc[2],
8523 					   HCLGE_OPC_MAC_VLAN_ADD,
8524 					   true);
8525 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8526 	} else {
8527 		memcpy(desc[0].data,
8528 		       req,
8529 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8530 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8531 	}
8532 	if (ret) {
8533 		dev_err(&hdev->pdev->dev,
8534 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8535 			ret);
8536 		return ret;
8537 	}
8538 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8539 	retval = le16_to_cpu(desc[0].retval);
8540 
8541 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8542 					     HCLGE_MAC_VLAN_LKUP);
8543 }
8544 
8545 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8546 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8547 				  struct hclge_desc *mc_desc)
8548 {
8549 	struct hclge_dev *hdev = vport->back;
8550 	int cfg_status;
8551 	u8 resp_code;
8552 	u16 retval;
8553 	int ret;
8554 
8555 	if (!mc_desc) {
8556 		struct hclge_desc desc;
8557 
8558 		hclge_cmd_setup_basic_desc(&desc,
8559 					   HCLGE_OPC_MAC_VLAN_ADD,
8560 					   false);
8561 		memcpy(desc.data, req,
8562 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8563 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8564 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8565 		retval = le16_to_cpu(desc.retval);
8566 
8567 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8568 							   resp_code,
8569 							   HCLGE_MAC_VLAN_ADD);
8570 	} else {
8571 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8572 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8573 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8574 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8575 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8576 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8577 		memcpy(mc_desc[0].data, req,
8578 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8579 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8580 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8581 		retval = le16_to_cpu(mc_desc[0].retval);
8582 
8583 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8584 							   resp_code,
8585 							   HCLGE_MAC_VLAN_ADD);
8586 	}
8587 
8588 	if (ret) {
8589 		dev_err(&hdev->pdev->dev,
8590 			"add mac addr failed for cmd_send, ret =%d.\n",
8591 			ret);
8592 		return ret;
8593 	}
8594 
8595 	return cfg_status;
8596 }
8597 
8598 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8599 			       u16 *allocated_size)
8600 {
8601 	struct hclge_umv_spc_alc_cmd *req;
8602 	struct hclge_desc desc;
8603 	int ret;
8604 
8605 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8606 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8607 
8608 	req->space_size = cpu_to_le32(space_size);
8609 
8610 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8611 	if (ret) {
8612 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8613 			ret);
8614 		return ret;
8615 	}
8616 
8617 	*allocated_size = le32_to_cpu(desc.data[1]);
8618 
8619 	return 0;
8620 }
8621 
8622 static int hclge_init_umv_space(struct hclge_dev *hdev)
8623 {
8624 	u16 allocated_size = 0;
8625 	int ret;
8626 
8627 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8628 	if (ret)
8629 		return ret;
8630 
8631 	if (allocated_size < hdev->wanted_umv_size)
8632 		dev_warn(&hdev->pdev->dev,
8633 			 "failed to alloc umv space, want %u, get %u\n",
8634 			 hdev->wanted_umv_size, allocated_size);
8635 
8636 	hdev->max_umv_size = allocated_size;
8637 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8638 	hdev->share_umv_size = hdev->priv_umv_size +
8639 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8640 
8641 	if (hdev->ae_dev->dev_specs.mc_mac_size)
8642 		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8643 
8644 	return 0;
8645 }
8646 
8647 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8648 {
8649 	struct hclge_vport *vport;
8650 	int i;
8651 
8652 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8653 		vport = &hdev->vport[i];
8654 		vport->used_umv_num = 0;
8655 	}
8656 
8657 	mutex_lock(&hdev->vport_lock);
8658 	hdev->share_umv_size = hdev->priv_umv_size +
8659 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8660 	mutex_unlock(&hdev->vport_lock);
8661 
8662 	hdev->used_mc_mac_num = 0;
8663 }
8664 
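/* A vport's UMV space is full only when its private quota is used up and
 * there is no shared space left to borrow from.
 */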
8665 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8666 {
8667 	struct hclge_dev *hdev = vport->back;
8668 	bool is_full;
8669 
8670 	if (need_lock)
8671 		mutex_lock(&hdev->vport_lock);
8672 
8673 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8674 		   hdev->share_umv_size == 0);
8675 
8676 	if (need_lock)
8677 		mutex_unlock(&hdev->vport_lock);
8678 
8679 	return is_full;
8680 }
8681 
8682 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8683 {
8684 	struct hclge_dev *hdev = vport->back;
8685 
8686 	if (is_free) {
8687 		if (vport->used_umv_num > hdev->priv_umv_size)
8688 			hdev->share_umv_size++;
8689 
8690 		if (vport->used_umv_num > 0)
8691 			vport->used_umv_num--;
8692 	} else {
8693 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8694 		    hdev->share_umv_size > 0)
8695 			hdev->share_umv_size--;
8696 		vport->used_umv_num++;
8697 	}
8698 }
8699 
8700 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8701 						  const u8 *mac_addr)
8702 {
8703 	struct hclge_mac_node *mac_node, *tmp;
8704 
8705 	list_for_each_entry_safe(mac_node, tmp, list, node)
8706 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8707 			return mac_node;
8708 
8709 	return NULL;
8710 }
8711 
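/* Merge a new request state into an existing mac node: TO_ADD on a TO_DEL
 * node cancels the pending delete (the node becomes ACTIVE); TO_DEL on a
 * TO_ADD node cancels the pending add (the node is freed), otherwise the
 * node is marked TO_DEL; ACTIVE (only used from tmp_add_list) promotes a
 * TO_ADD node to ACTIVE.
 */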
8712 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8713 				  enum HCLGE_MAC_NODE_STATE state)
8714 {
8715 	switch (state) {
8716 	/* from set_rx_mode or tmp_add_list */
8717 	case HCLGE_MAC_TO_ADD:
8718 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8719 			mac_node->state = HCLGE_MAC_ACTIVE;
8720 		break;
8721 	/* only from set_rx_mode */
8722 	case HCLGE_MAC_TO_DEL:
8723 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8724 			list_del(&mac_node->node);
8725 			kfree(mac_node);
8726 		} else {
8727 			mac_node->state = HCLGE_MAC_TO_DEL;
8728 		}
8729 		break;
8730 	/* only from tmp_add_list, the mac_node->state won't be
8731 	 * ACTIVE.
8732 	 */
8733 	case HCLGE_MAC_ACTIVE:
8734 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8735 			mac_node->state = HCLGE_MAC_ACTIVE;
8736 
8737 		break;
8738 	}
8739 }
8740 
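/* Record a mac address add/delete request in the vport's uc/mc mac list.
 * The hardware table is not touched here; the service task applies the
 * pending changes later, triggered by HCLGE_VPORT_STATE_MAC_TBL_CHANGE.
 */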
8741 int hclge_update_mac_list(struct hclge_vport *vport,
8742 			  enum HCLGE_MAC_NODE_STATE state,
8743 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8744 			  const unsigned char *addr)
8745 {
8746 	struct hclge_dev *hdev = vport->back;
8747 	struct hclge_mac_node *mac_node;
8748 	struct list_head *list;
8749 
8750 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8751 		&vport->uc_mac_list : &vport->mc_mac_list;
8752 
8753 	spin_lock_bh(&vport->mac_list_lock);
8754 
8755 	/* if the mac addr is already in the mac list, no need to add a new
8756 	 * one into it, just check the mac addr state, convert it to a new
8757 	 * state, or just remove it, or do nothing.
8758 	 */
8759 	mac_node = hclge_find_mac_node(list, addr);
8760 	if (mac_node) {
8761 		hclge_update_mac_node(mac_node, state);
8762 		spin_unlock_bh(&vport->mac_list_lock);
8763 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8764 		return 0;
8765 	}
8766 
8767 	/* if this address is never added, unnecessary to delete */
8768 	if (state == HCLGE_MAC_TO_DEL) {
8769 		spin_unlock_bh(&vport->mac_list_lock);
8770 		dev_err(&hdev->pdev->dev,
8771 			"failed to delete address %pM from mac list\n",
8772 			addr);
8773 		return -ENOENT;
8774 	}
8775 
8776 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8777 	if (!mac_node) {
8778 		spin_unlock_bh(&vport->mac_list_lock);
8779 		return -ENOMEM;
8780 	}
8781 
8782 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8783 
8784 	mac_node->state = state;
8785 	ether_addr_copy(mac_node->mac_addr, addr);
8786 	list_add_tail(&mac_node->node, list);
8787 
8788 	spin_unlock_bh(&vport->mac_list_lock);
8789 
8790 	return 0;
8791 }
8792 
8793 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8794 			     const unsigned char *addr)
8795 {
8796 	struct hclge_vport *vport = hclge_get_vport(handle);
8797 
8798 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8799 				     addr);
8800 }
8801 
8802 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8803 			     const unsigned char *addr)
8804 {
8805 	struct hclge_dev *hdev = vport->back;
8806 	struct hclge_mac_vlan_tbl_entry_cmd req;
8807 	struct hclge_desc desc;
8808 	u16 egress_port = 0;
8809 	int ret;
8810 
8811 	/* mac addr check */
8812 	if (is_zero_ether_addr(addr) ||
8813 	    is_broadcast_ether_addr(addr) ||
8814 	    is_multicast_ether_addr(addr)) {
8815 		dev_err(&hdev->pdev->dev,
8816 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8817 			 addr, is_zero_ether_addr(addr),
8818 			 is_broadcast_ether_addr(addr),
8819 			 is_multicast_ether_addr(addr));
8820 		return -EINVAL;
8821 	}
8822 
8823 	memset(&req, 0, sizeof(req));
8824 
8825 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8826 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8827 
8828 	req.egress_port = cpu_to_le16(egress_port);
8829 
8830 	hclge_prepare_mac_addr(&req, addr, false);
8831 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
8836 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8837 	if (ret == -ENOENT) {
8838 		mutex_lock(&hdev->vport_lock);
8839 		if (!hclge_is_umv_space_full(vport, false)) {
8840 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8841 			if (!ret)
8842 				hclge_update_umv_space(vport, false);
8843 			mutex_unlock(&hdev->vport_lock);
8844 			return ret;
8845 		}
8846 		mutex_unlock(&hdev->vport_lock);
8847 
8848 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8849 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8850 				hdev->priv_umv_size);
8851 
8852 		return -ENOSPC;
8853 	}
8854 
8855 	/* check if we just hit the duplicate */
8856 	if (!ret)
8857 		return -EEXIST;
8858 
8859 	return ret;
8860 }
8861 
8862 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8863 			    const unsigned char *addr)
8864 {
8865 	struct hclge_vport *vport = hclge_get_vport(handle);
8866 
8867 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8868 				     addr);
8869 }
8870 
8871 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8872 			    const unsigned char *addr)
8873 {
8874 	struct hclge_dev *hdev = vport->back;
8875 	struct hclge_mac_vlan_tbl_entry_cmd req;
8876 	int ret;
8877 
8878 	/* mac addr check */
8879 	if (is_zero_ether_addr(addr) ||
8880 	    is_broadcast_ether_addr(addr) ||
8881 	    is_multicast_ether_addr(addr)) {
8882 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8883 			addr);
8884 		return -EINVAL;
8885 	}
8886 
8887 	memset(&req, 0, sizeof(req));
8888 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8889 	hclge_prepare_mac_addr(&req, addr, false);
8890 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8891 	if (!ret) {
8892 		mutex_lock(&hdev->vport_lock);
8893 		hclge_update_umv_space(vport, true);
8894 		mutex_unlock(&hdev->vport_lock);
8895 	} else if (ret == -ENOENT) {
8896 		ret = 0;
8897 	}
8898 
8899 	return ret;
8900 }
8901 
8902 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8903 			     const unsigned char *addr)
8904 {
8905 	struct hclge_vport *vport = hclge_get_vport(handle);
8906 
8907 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8908 				     addr);
8909 }
8910 
8911 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8912 			     const unsigned char *addr)
8913 {
8914 	struct hclge_dev *hdev = vport->back;
8915 	struct hclge_mac_vlan_tbl_entry_cmd req;
8916 	struct hclge_desc desc[3];
8917 	bool is_new_addr = false;
8918 	int status;
8919 
8920 	/* mac addr check */
8921 	if (!is_multicast_ether_addr(addr)) {
8922 		dev_err(&hdev->pdev->dev,
8923 			"Add mc mac err! invalid mac:%pM.\n",
8924 			 addr);
8925 		return -EINVAL;
8926 	}
8927 	memset(&req, 0, sizeof(req));
8928 	hclge_prepare_mac_addr(&req, addr, true);
8929 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8930 	if (status) {
8931 		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8932 		    hdev->used_mc_mac_num >=
8933 		    hdev->ae_dev->dev_specs.mc_mac_size)
8934 			goto err_no_space;
8935 
8936 		is_new_addr = true;
8937 
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
8942 	}
8943 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8944 	if (status)
8945 		return status;
8946 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8947 	if (status == -ENOSPC)
8948 		goto err_no_space;
8949 	else if (!status && is_new_addr)
8950 		hdev->used_mc_mac_num++;
8951 
8952 	return status;
8953 
8954 err_no_space:
	/* if already overflowed, do not print the message each time */
8956 	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8957 		vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8958 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8959 	}
8960 
8961 	return -ENOSPC;
8962 }
8963 
8964 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8965 			    const unsigned char *addr)
8966 {
8967 	struct hclge_vport *vport = hclge_get_vport(handle);
8968 
8969 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8970 				     addr);
8971 }
8972 
8973 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8974 			    const unsigned char *addr)
8975 {
8976 	struct hclge_dev *hdev = vport->back;
8977 	struct hclge_mac_vlan_tbl_entry_cmd req;
	int status;
8979 	struct hclge_desc desc[3];
8980 
8981 	/* mac addr check */
8982 	if (!is_multicast_ether_addr(addr)) {
8983 		dev_dbg(&hdev->pdev->dev,
8984 			"Remove mc mac err! invalid mac:%pM.\n",
8985 			 addr);
8986 		return -EINVAL;
8987 	}
8988 
8989 	memset(&req, 0, sizeof(req));
8990 	hclge_prepare_mac_addr(&req, addr, true);
8991 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8992 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8994 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8995 		if (status)
8996 			return status;
8997 
8998 		if (hclge_is_all_function_id_zero(desc)) {
			/* All the vfids are zero, so delete this entry */
9000 			status = hclge_remove_mac_vlan_tbl(vport, &req);
9001 			if (!status)
9002 				hdev->used_mc_mac_num--;
9003 		} else {
			/* Not all the vfids are zero, update the vfids */
9005 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
9006 		}
9007 	} else if (status == -ENOENT) {
9008 		status = 0;
9009 	}
9010 
9011 	return status;
9012 }
9013 
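/* Write the addresses in @list to the hardware mac table. The loop keeps
 * going on -EEXIST for unicast (duplicate entry) and on -ENOSPC for
 * multicast (existing entries may still be reused), and stops on any other
 * error, leaving the change flag set so the sync is retried later.
 */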
9014 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
9015 				      struct list_head *list,
9016 				      enum HCLGE_MAC_ADDR_TYPE mac_type)
9017 {
9018 	int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
9019 	struct hclge_mac_node *mac_node, *tmp;
9020 	int ret;
9021 
9022 	if (mac_type == HCLGE_MAC_ADDR_UC)
9023 		sync = hclge_add_uc_addr_common;
9024 	else
9025 		sync = hclge_add_mc_addr_common;
9026 
9027 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9028 		ret = sync(vport, mac_node->mac_addr);
9029 		if (!ret) {
9030 			mac_node->state = HCLGE_MAC_ACTIVE;
9031 		} else {
9032 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
9033 				&vport->state);
9034 
			/* If a unicast mac address already exists in hardware,
			 * keep trying the remaining unicast addresses, since
			 * they may be new ones that can still be added.
			 * Multicast mac addresses are reusable: even when
			 * there is no space to add a new multicast address,
			 * keep checking whether the remaining addresses
			 * already exist in hardware and can be reused.
			 */
9043 			if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
9044 			    (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
9045 				break;
9046 		}
9047 	}
9048 }
9049 
9050 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
9051 					struct list_head *list,
9052 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9053 {
9054 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9055 	struct hclge_mac_node *mac_node, *tmp;
9056 	int ret;
9057 
9058 	if (mac_type == HCLGE_MAC_ADDR_UC)
9059 		unsync = hclge_rm_uc_addr_common;
9060 	else
9061 		unsync = hclge_rm_mc_addr_common;
9062 
9063 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9064 		ret = unsync(vport, mac_node->mac_addr);
9065 		if (!ret || ret == -ENOENT) {
9066 			list_del(&mac_node->node);
9067 			kfree(mac_node);
9068 		} else {
9069 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
9070 				&vport->state);
9071 			break;
9072 		}
9073 	}
9074 }
9075 
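/* Merge tmp_add_list back into the vport mac list after the hardware update
 * and report whether every address was added successfully; the result is
 * used to refresh the overflow promiscuous flags.
 */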
9076 static bool hclge_sync_from_add_list(struct list_head *add_list,
9077 				     struct list_head *mac_list)
9078 {
9079 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9080 	bool all_added = true;
9081 
9082 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
9083 		if (mac_node->state == HCLGE_MAC_TO_ADD)
9084 			all_added = false;
9085 
		/* If the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received while the
		 * address was being added to the mac table. If the mac_node
		 * state is ACTIVE, change it to TO_DEL so it will be removed
		 * next time; otherwise it must be TO_ADD, which means the
		 * address has not been added to the mac table yet, so just
		 * remove the mac node.
		 */
9094 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9095 		if (new_node) {
9096 			hclge_update_mac_node(new_node, mac_node->state);
9097 			list_del(&mac_node->node);
9098 			kfree(mac_node);
9099 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
9100 			mac_node->state = HCLGE_MAC_TO_DEL;
9101 			list_move_tail(&mac_node->node, mac_list);
9102 		} else {
9103 			list_del(&mac_node->node);
9104 			kfree(mac_node);
9105 		}
9106 	}
9107 
9108 	return all_added;
9109 }
9110 
9111 static void hclge_sync_from_del_list(struct list_head *del_list,
9112 				     struct list_head *mac_list)
9113 {
9114 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9115 
9116 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
9117 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9118 		if (new_node) {
			/* If the mac addr exists in the mac list, a new TO_ADD
			 * request was received while the address was being
			 * configured. The mac node state is TO_ADD and the
			 * address is still in the hardware (the delete failed),
			 * so just change the mac node state to ACTIVE.
			 */
9126 			new_node->state = HCLGE_MAC_ACTIVE;
9127 			list_del(&mac_node->node);
9128 			kfree(mac_node);
9129 		} else {
9130 			list_move_tail(&mac_node->node, mac_list);
9131 		}
9132 	}
9133 }
9134 
9135 static void hclge_update_overflow_flags(struct hclge_vport *vport,
9136 					enum HCLGE_MAC_ADDR_TYPE mac_type,
9137 					bool is_all_added)
9138 {
9139 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9140 		if (is_all_added)
9141 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
9142 		else
9143 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
9144 	} else {
9145 		if (is_all_added)
9146 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
9147 		else
9148 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
9149 	}
9150 }
9151 
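/* Two-phase sync of a vport mac list with hardware: under the list lock,
 * pending changes are copied or moved to temporary add/del lists; the
 * hardware is then updated without holding the lock; finally the results
 * are merged back and the overflow promiscuous flags are refreshed.
 */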
9152 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9153 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
9154 {
9155 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9156 	struct list_head tmp_add_list, tmp_del_list;
9157 	struct list_head *list;
9158 	bool all_added;
9159 
9160 	INIT_LIST_HEAD(&tmp_add_list);
9161 	INIT_LIST_HEAD(&tmp_del_list);
9162 
	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * they can be added/deleted outside the spin lock
	 */
9166 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9167 		&vport->uc_mac_list : &vport->mc_mac_list;
9168 
9169 	spin_lock_bh(&vport->mac_list_lock);
9170 
9171 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9172 		switch (mac_node->state) {
9173 		case HCLGE_MAC_TO_DEL:
9174 			list_move_tail(&mac_node->node, &tmp_del_list);
9175 			break;
9176 		case HCLGE_MAC_TO_ADD:
9177 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9178 			if (!new_node)
9179 				goto stop_traverse;
9180 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9181 			new_node->state = mac_node->state;
9182 			list_add_tail(&new_node->node, &tmp_add_list);
9183 			break;
9184 		default:
9185 			break;
9186 		}
9187 	}
9188 
9189 stop_traverse:
9190 	spin_unlock_bh(&vport->mac_list_lock);
9191 
9192 	/* delete first, in order to get max mac table space for adding */
9193 	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9194 	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
9195 
	/* if adding or deleting some mac addresses failed, move them back
	 * to the mac_list and retry next time.
	 */
9199 	spin_lock_bh(&vport->mac_list_lock);
9200 
9201 	hclge_sync_from_del_list(&tmp_del_list, list);
9202 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9203 
9204 	spin_unlock_bh(&vport->mac_list_lock);
9205 
9206 	hclge_update_overflow_flags(vport, mac_type, all_added);
9207 }
9208 
9209 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9210 {
9211 	struct hclge_dev *hdev = vport->back;
9212 
9213 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9214 		return false;
9215 
9216 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9217 		return true;
9218 
9219 	return false;
9220 }
9221 
9222 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9223 {
9224 	int i;
9225 
9226 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9227 		struct hclge_vport *vport = &hdev->vport[i];
9228 
9229 		if (!hclge_need_sync_mac_table(vport))
9230 			continue;
9231 
9232 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9233 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9234 	}
9235 }
9236 
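/* Collect the entries to be removed from hardware into tmp_del_list.
 * TO_ADD entries were never written to hardware, so they are simply freed
 * when deleting the whole list.
 */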
9237 static void hclge_build_del_list(struct list_head *list,
9238 				 bool is_del_list,
9239 				 struct list_head *tmp_del_list)
9240 {
9241 	struct hclge_mac_node *mac_cfg, *tmp;
9242 
9243 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9244 		switch (mac_cfg->state) {
9245 		case HCLGE_MAC_TO_DEL:
9246 		case HCLGE_MAC_ACTIVE:
9247 			list_move_tail(&mac_cfg->node, tmp_del_list);
9248 			break;
9249 		case HCLGE_MAC_TO_ADD:
9250 			if (is_del_list) {
9251 				list_del(&mac_cfg->node);
9252 				kfree(mac_cfg);
9253 			}
9254 			break;
9255 		}
9256 	}
9257 }
9258 
9259 static void hclge_unsync_del_list(struct hclge_vport *vport,
9260 				  int (*unsync)(struct hclge_vport *vport,
9261 						const unsigned char *addr),
9262 				  bool is_del_list,
9263 				  struct list_head *tmp_del_list)
9264 {
9265 	struct hclge_mac_node *mac_cfg, *tmp;
9266 	int ret;
9267 
9268 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9269 		ret = unsync(vport, mac_cfg->mac_addr);
9270 		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep them in
			 * the mac list so they can be restored after the vf
			 * reset finishes.
			 */
9275 			if (!is_del_list &&
9276 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9277 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9278 			} else {
9279 				list_del(&mac_cfg->node);
9280 				kfree(mac_cfg);
9281 			}
9282 		} else if (is_del_list) {
9283 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9284 		}
9285 	}
9286 }
9287 
9288 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9289 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9290 {
9291 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9292 	struct hclge_dev *hdev = vport->back;
9293 	struct list_head tmp_del_list, *list;
9294 
9295 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9296 		list = &vport->uc_mac_list;
9297 		unsync = hclge_rm_uc_addr_common;
9298 	} else {
9299 		list = &vport->mc_mac_list;
9300 		unsync = hclge_rm_mc_addr_common;
9301 	}
9302 
9303 	INIT_LIST_HEAD(&tmp_del_list);
9304 
9305 	if (!is_del_list)
9306 		set_bit(vport->vport_id, hdev->vport_config_block);
9307 
9308 	spin_lock_bh(&vport->mac_list_lock);
9309 
9310 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9311 
9312 	spin_unlock_bh(&vport->mac_list_lock);
9313 
9314 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9315 
9316 	spin_lock_bh(&vport->mac_list_lock);
9317 
9318 	hclge_sync_from_del_list(&tmp_del_list, list);
9319 
9320 	spin_unlock_bh(&vport->mac_list_lock);
9321 }
9322 
/* remove all mac addresses when uninitializing */
9324 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9325 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9326 {
9327 	struct hclge_mac_node *mac_node, *tmp;
9328 	struct hclge_dev *hdev = vport->back;
9329 	struct list_head tmp_del_list, *list;
9330 
9331 	INIT_LIST_HEAD(&tmp_del_list);
9332 
9333 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9334 		&vport->uc_mac_list : &vport->mc_mac_list;
9335 
9336 	spin_lock_bh(&vport->mac_list_lock);
9337 
9338 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9339 		switch (mac_node->state) {
9340 		case HCLGE_MAC_TO_DEL:
9341 		case HCLGE_MAC_ACTIVE:
9342 			list_move_tail(&mac_node->node, &tmp_del_list);
9343 			break;
9344 		case HCLGE_MAC_TO_ADD:
9345 			list_del(&mac_node->node);
9346 			kfree(mac_node);
9347 			break;
9348 		}
9349 	}
9350 
9351 	spin_unlock_bh(&vport->mac_list_lock);
9352 
9353 	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9354 
9355 	if (!list_empty(&tmp_del_list))
9356 		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
9358 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9359 			 vport->vport_id);
9360 
9361 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9362 		list_del(&mac_node->node);
9363 		kfree(mac_node);
9364 	}
9365 }
9366 
9367 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9368 {
9369 	struct hclge_vport *vport;
9370 	int i;
9371 
9372 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9373 		vport = &hdev->vport[i];
9374 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9375 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9376 	}
9377 }
9378 
9379 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9380 					      u16 cmdq_resp, u8 resp_code)
9381 {
9382 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9383 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9384 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9385 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9386 
9387 	int return_status;
9388 
9389 	if (cmdq_resp) {
9390 		dev_err(&hdev->pdev->dev,
9391 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9392 			cmdq_resp);
9393 		return -EIO;
9394 	}
9395 
9396 	switch (resp_code) {
9397 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9398 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9399 		return_status = 0;
9400 		break;
9401 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9402 		dev_err(&hdev->pdev->dev,
9403 			"add mac ethertype failed for manager table overflow.\n");
9404 		return_status = -EIO;
9405 		break;
9406 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9407 		dev_err(&hdev->pdev->dev,
9408 			"add mac ethertype failed for key conflict.\n");
9409 		return_status = -EIO;
9410 		break;
9411 	default:
9412 		dev_err(&hdev->pdev->dev,
9413 			"add mac ethertype failed for undefined, code=%u.\n",
9414 			resp_code);
9415 		return_status = -EIO;
9416 	}
9417 
9418 	return return_status;
9419 }
9420 
9421 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9422 			    u8 *mac_addr)
9423 {
9424 	struct hclge_vport *vport = hclge_get_vport(handle);
9425 	struct hclge_dev *hdev = vport->back;
9426 
9427 	vport = hclge_get_vf_vport(hdev, vf);
9428 	if (!vport)
9429 		return -EINVAL;
9430 
9431 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9432 		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%pM) is the same as before, no change committed!\n",
9434 			 mac_addr);
9435 		return 0;
9436 	}
9437 
9438 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9439 
9440 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9441 		dev_info(&hdev->pdev->dev,
9442 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9443 			 vf, mac_addr);
9444 		return hclge_inform_reset_assert_to_vf(vport);
9445 	}
9446 
9447 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9448 		 vf, mac_addr);
9449 	return 0;
9450 }
9451 
9452 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9453 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9454 {
9455 	struct hclge_desc desc;
9456 	u8 resp_code;
9457 	u16 retval;
9458 	int ret;
9459 
9460 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9461 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9462 
9463 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9464 	if (ret) {
9465 		dev_err(&hdev->pdev->dev,
9466 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9467 			ret);
9468 		return ret;
9469 	}
9470 
9471 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9472 	retval = le16_to_cpu(desc.retval);
9473 
9474 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9475 }
9476 
9477 static int init_mgr_tbl(struct hclge_dev *hdev)
9478 {
9479 	int ret;
9480 	int i;
9481 
9482 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9483 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9484 		if (ret) {
9485 			dev_err(&hdev->pdev->dev,
9486 				"add mac ethertype failed, ret =%d.\n",
9487 				ret);
9488 			return ret;
9489 		}
9490 	}
9491 
9492 	return 0;
9493 }
9494 
9495 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9496 {
9497 	struct hclge_vport *vport = hclge_get_vport(handle);
9498 	struct hclge_dev *hdev = vport->back;
9499 
9500 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9501 }
9502 
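/* Update the unicast mac list when the device mac address changes: queue
 * the new address for adding (or revive it if it was pending deletion) and
 * queue the old address for deletion unless it was still pending add.
 * Called with mac_list_lock held.
 */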
9503 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9504 				       const u8 *old_addr, const u8 *new_addr)
9505 {
9506 	struct list_head *list = &vport->uc_mac_list;
9507 	struct hclge_mac_node *old_node, *new_node;
9508 
9509 	new_node = hclge_find_mac_node(list, new_addr);
9510 	if (!new_node) {
9511 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9512 		if (!new_node)
9513 			return -ENOMEM;
9514 
9515 		new_node->state = HCLGE_MAC_TO_ADD;
9516 		ether_addr_copy(new_node->mac_addr, new_addr);
9517 		list_add(&new_node->node, list);
9518 	} else {
9519 		if (new_node->state == HCLGE_MAC_TO_DEL)
9520 			new_node->state = HCLGE_MAC_ACTIVE;
9521 
		/* Keep the new addr at the head of the list; otherwise, after
		 * a global/imp reset clears the mac table in hardware, the dev
		 * addr might not be re-added because of the umv space
		 * limitation.
		 */
9527 		list_move(&new_node->node, list);
9528 	}
9529 
9530 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9531 		old_node = hclge_find_mac_node(list, old_addr);
9532 		if (old_node) {
9533 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9534 				list_del(&old_node->node);
9535 				kfree(old_node);
9536 			} else {
9537 				old_node->state = HCLGE_MAC_TO_DEL;
9538 			}
9539 		}
9540 	}
9541 
9542 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9543 
9544 	return 0;
9545 }
9546 
9547 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9548 			      bool is_first)
9549 {
9550 	const unsigned char *new_addr = (const unsigned char *)p;
9551 	struct hclge_vport *vport = hclge_get_vport(handle);
9552 	struct hclge_dev *hdev = vport->back;
9553 	unsigned char *old_addr = NULL;
9554 	int ret;
9555 
9556 	/* mac addr check */
9557 	if (is_zero_ether_addr(new_addr) ||
9558 	    is_broadcast_ether_addr(new_addr) ||
9559 	    is_multicast_ether_addr(new_addr)) {
9560 		dev_err(&hdev->pdev->dev,
9561 			"change uc mac err! invalid mac: %pM.\n",
9562 			 new_addr);
9563 		return -EINVAL;
9564 	}
9565 
9566 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9567 	if (ret) {
9568 		dev_err(&hdev->pdev->dev,
9569 			"failed to configure mac pause address, ret = %d\n",
9570 			ret);
9571 		return ret;
9572 	}
9573 
9574 	if (!is_first)
9575 		old_addr = hdev->hw.mac.mac_addr;
9576 
9577 	spin_lock_bh(&vport->mac_list_lock);
9578 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9579 	if (ret) {
9580 		dev_err(&hdev->pdev->dev,
9581 			"failed to change the mac addr:%pM, ret = %d\n",
9582 			new_addr, ret);
9583 		spin_unlock_bh(&vport->mac_list_lock);
9584 
9585 		if (!is_first)
9586 			hclge_pause_addr_cfg(hdev, old_addr);
9587 
9588 		return ret;
9589 	}
	/* the dev addr must be updated under the spin lock to prevent it
	 * from being removed by the set_rx_mode path.
	 */
9593 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9594 	spin_unlock_bh(&vport->mac_list_lock);
9595 
9596 	hclge_task_schedule(hdev, 0);
9597 
9598 	return 0;
9599 }
9600 
9601 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9602 {
9603 	struct mii_ioctl_data *data = if_mii(ifr);
9604 
9605 	if (!hnae3_dev_phy_imp_supported(hdev))
9606 		return -EOPNOTSUPP;
9607 
9608 	switch (cmd) {
9609 	case SIOCGMIIPHY:
9610 		data->phy_id = hdev->hw.mac.phy_addr;
9611 		/* this command reads phy id and register at the same time */
9612 		fallthrough;
9613 	case SIOCGMIIREG:
9614 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9615 		return 0;
9616 
9617 	case SIOCSMIIREG:
9618 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9619 	default:
9620 		return -EOPNOTSUPP;
9621 	}
9622 }
9623 
9624 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9625 			  int cmd)
9626 {
9627 	struct hclge_vport *vport = hclge_get_vport(handle);
9628 	struct hclge_dev *hdev = vport->back;
9629 
9630 	switch (cmd) {
9631 	case SIOCGHWTSTAMP:
9632 		return hclge_ptp_get_cfg(hdev, ifr);
9633 	case SIOCSHWTSTAMP:
9634 		return hclge_ptp_set_cfg(hdev, ifr);
9635 	default:
9636 		if (!hdev->hw.mac.phydev)
9637 			return hclge_mii_ioctl(hdev, ifr, cmd);
9638 	}
9639 
9640 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9641 }
9642 
9643 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9644 					     bool bypass_en)
9645 {
9646 	struct hclge_port_vlan_filter_bypass_cmd *req;
9647 	struct hclge_desc desc;
9648 	int ret;
9649 
9650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9651 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9652 	req->vf_id = vf_id;
9653 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9654 		      bypass_en ? 1 : 0);
9655 
9656 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9657 	if (ret)
9658 		dev_err(&hdev->pdev->dev,
9659 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9660 			vf_id, ret);
9661 
9662 	return ret;
9663 }
9664 
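/* Read-modify-write of the vlan filter control setting for the given filter
 * type and vport: only the bits in @fe_type are changed, the rest of
 * vlan_fe is preserved.
 */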
9665 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9666 				      u8 fe_type, bool filter_en, u8 vf_id)
9667 {
9668 	struct hclge_vlan_filter_ctrl_cmd *req;
9669 	struct hclge_desc desc;
9670 	int ret;
9671 
9672 	/* read current vlan filter parameter */
9673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9674 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9675 	req->vlan_type = vlan_type;
9676 	req->vf_id = vf_id;
9677 
9678 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9679 	if (ret) {
9680 		dev_err(&hdev->pdev->dev,
9681 			"failed to get vlan filter config, ret = %d.\n", ret);
9682 		return ret;
9683 	}
9684 
9685 	/* modify and write new config parameter */
9686 	hclge_cmd_reuse_desc(&desc, false);
9687 	req->vlan_fe = filter_en ?
9688 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9689 
9690 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9691 	if (ret)
9692 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9693 			ret);
9694 
9695 	return ret;
9696 }
9697 
9698 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9699 {
9700 	struct hclge_dev *hdev = vport->back;
9701 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9702 	int ret;
9703 
9704 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9705 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9706 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9707 						  enable, vport->vport_id);
9708 
9709 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9710 					 HCLGE_FILTER_FE_EGRESS, enable,
9711 					 vport->vport_id);
9712 	if (ret)
9713 		return ret;
9714 
9715 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9716 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9717 							!enable);
9718 	} else if (!vport->vport_id) {
9719 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9720 			enable = false;
9721 
9722 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9723 						 HCLGE_FILTER_FE_INGRESS,
9724 						 enable, 0);
9725 	}
9726 
9727 	return ret;
9728 }
9729 
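/* Decide whether a vport's vlan filter should be enabled: it must stay on
 * while a port based vlan is active, and is turned off for trusted VFs that
 * requested unicast promiscuous mode or for the PF in user promiscuous
 * mode. Beyond that, the requested state is honoured; on devices that
 * support modifying the vlan filter it is additionally enabled only while
 * some non-zero vlan is in use.
 */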
9730 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9731 {
9732 	struct hnae3_handle *handle = &vport->nic;
9733 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9734 	struct hclge_dev *hdev = vport->back;
9735 
9736 	if (vport->vport_id) {
9737 		if (vport->port_base_vlan_cfg.state !=
9738 			HNAE3_PORT_BASE_VLAN_DISABLE)
9739 			return true;
9740 
9741 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9742 			return false;
9743 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9744 		return false;
9745 	}
9746 
9747 	if (!vport->req_vlan_fltr_en)
9748 		return false;
9749 
	/* for compatibility with older devices, always enable vlan filter */
9751 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9752 		return true;
9753 
9754 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9755 		if (vlan->vlan_id != 0)
9756 			return true;
9757 
9758 	return false;
9759 }
9760 
9761 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9762 {
9763 	struct hclge_dev *hdev = vport->back;
9764 	bool need_en;
9765 	int ret;
9766 
9767 	mutex_lock(&hdev->vport_lock);
9768 
9769 	vport->req_vlan_fltr_en = request_en;
9770 
9771 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9772 	if (need_en == vport->cur_vlan_fltr_en) {
9773 		mutex_unlock(&hdev->vport_lock);
9774 		return 0;
9775 	}
9776 
9777 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9778 	if (ret) {
9779 		mutex_unlock(&hdev->vport_lock);
9780 		return ret;
9781 	}
9782 
9783 	vport->cur_vlan_fltr_en = need_en;
9784 
9785 	mutex_unlock(&hdev->vport_lock);
9786 
9787 	return 0;
9788 }
9789 
9790 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9791 {
9792 	struct hclge_vport *vport = hclge_get_vport(handle);
9793 
9794 	return hclge_enable_vport_vlan_filter(vport, enable);
9795 }
9796 
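/* The VF vlan filter command always uses two descriptors: the VF bitmap
 * spans both of them, so the bit for @vfid is set in whichever descriptor
 * covers its byte offset.
 */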
9797 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9798 					bool is_kill, u16 vlan,
9799 					struct hclge_desc *desc)
9800 {
9801 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9802 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9803 	u8 vf_byte_val;
9804 	u8 vf_byte_off;
9805 	int ret;
9806 
9807 	hclge_cmd_setup_basic_desc(&desc[0],
9808 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9809 	hclge_cmd_setup_basic_desc(&desc[1],
9810 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9811 
9812 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9813 
9814 	vf_byte_off = vfid / 8;
9815 	vf_byte_val = 1 << (vfid % 8);
9816 
9817 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9818 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9819 
9820 	req0->vlan_id  = cpu_to_le16(vlan);
9821 	req0->vlan_cfg = is_kill;
9822 
9823 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9824 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9825 	else
9826 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9827 
9828 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9829 	if (ret) {
9830 		dev_err(&hdev->pdev->dev,
9831 			"Send vf vlan command fail, ret =%d.\n",
9832 			ret);
9833 		return ret;
9834 	}
9835 
9836 	return 0;
9837 }
9838 
9839 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9840 					  bool is_kill, struct hclge_desc *desc)
9841 {
9842 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9843 
9844 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9845 
9846 	if (!is_kill) {
9847 #define HCLGE_VF_VLAN_NO_ENTRY	2
9848 		if (!req->resp_code || req->resp_code == 1)
9849 			return 0;
9850 
9851 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9852 			set_bit(vfid, hdev->vf_vlan_full);
9853 			dev_warn(&hdev->pdev->dev,
9854 				 "vf vlan table is full, vf vlan filter is disabled\n");
9855 			return 0;
9856 		}
9857 
9858 		dev_err(&hdev->pdev->dev,
9859 			"Add vf vlan filter fail, ret =%u.\n",
9860 			req->resp_code);
9861 	} else {
9862 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9863 		if (!req->resp_code)
9864 			return 0;
9865 
		/* When the vf vlan table is full, the vf vlan filter is
		 * disabled and new vlan ids are not added to the table, so
		 * just return 0 without warning to avoid flooding the log
		 * at unload time.
		 */
9871 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9872 			return 0;
9873 
9874 		dev_err(&hdev->pdev->dev,
9875 			"Kill vf vlan filter fail, ret =%u.\n",
9876 			req->resp_code);
9877 	}
9878 
9879 	return -EIO;
9880 }
9881 
9882 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9883 				    bool is_kill, u16 vlan)
9884 {
9885 	struct hclge_vport *vport = &hdev->vport[vfid];
9886 	struct hclge_desc desc[2];
9887 	int ret;
9888 
	/* If the vf vlan table is full, the firmware disables the vf vlan
	 * filter, so it is neither possible nor necessary to add a new vlan
	 * id to it. However, if spoof check is enabled while the vf vlan
	 * table is full, the new vlan must not be added, because tx packets
	 * with that vlan id would be dropped.
	 */
9894 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9895 		if (vport->vf_info.spoofchk && vlan) {
9896 			dev_err(&hdev->pdev->dev,
				"Can't add vlan: spoof check is on and vf vlan table is full\n");
9898 			return -EPERM;
9899 		}
9900 		return 0;
9901 	}
9902 
9903 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9904 	if (ret)
9905 		return ret;
9906 
9907 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9908 }
9909 
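/* The port vlan filter is programmed as a bitmap: the vlan id selects a
 * block of HCLGE_VLAN_ID_OFFSET_STEP vlans (vlan_offset), then a byte and
 * a bit within that block.
 */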
9910 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9911 				      u16 vlan_id, bool is_kill)
9912 {
9913 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9914 	struct hclge_desc desc;
9915 	u8 vlan_offset_byte_val;
9916 	u8 vlan_offset_byte;
9917 	u8 vlan_offset_160;
9918 	int ret;
9919 
9920 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9921 
9922 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9923 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9924 			   HCLGE_VLAN_BYTE_SIZE;
9925 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9926 
9927 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9928 	req->vlan_offset = vlan_offset_160;
9929 	req->vlan_cfg = is_kill;
9930 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9931 
9932 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9933 	if (ret)
9934 		dev_err(&hdev->pdev->dev,
9935 			"port vlan command, send fail, ret =%d.\n", ret);
9936 	return ret;
9937 }
9938 
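/* Program a vlan id for a vport: update the per-VF vlan table first, then
 * track vport membership in hdev->vlan_table and only touch the port vlan
 * filter when the first vport joins or the last vport leaves that vlan.
 * Vlan 0 is never removed from hardware.
 */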
9939 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9940 				    u16 vport_id, u16 vlan_id,
9941 				    bool is_kill)
9942 {
9943 	u16 vport_idx, vport_num = 0;
9944 	int ret;
9945 
9946 	if (is_kill && !vlan_id)
9947 		return 0;
9948 
9949 	if (vlan_id >= VLAN_N_VID)
9950 		return -EINVAL;
9951 
9952 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9953 	if (ret) {
9954 		dev_err(&hdev->pdev->dev,
9955 			"Set %u vport vlan filter config fail, ret =%d.\n",
9956 			vport_id, ret);
9957 		return ret;
9958 	}
9959 
9960 	/* vlan 0 may be added twice when 8021q module is enabled */
9961 	if (!is_kill && !vlan_id &&
9962 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9963 		return 0;
9964 
9965 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9966 		dev_err(&hdev->pdev->dev,
9967 			"Add port vlan failed, vport %u is already in vlan %u\n",
9968 			vport_id, vlan_id);
9969 		return -EINVAL;
9970 	}
9971 
9972 	if (is_kill &&
9973 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9974 		dev_err(&hdev->pdev->dev,
9975 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9976 			vport_id, vlan_id);
9977 		return -EINVAL;
9978 	}
9979 
9980 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9981 		vport_num++;
9982 
9983 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9984 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9985 						 is_kill);
9986 
9987 	return ret;
9988 }
9989 
9990 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9991 {
9992 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9993 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9994 	struct hclge_dev *hdev = vport->back;
9995 	struct hclge_desc desc;
9996 	u16 bmap_index;
9997 	int status;
9998 
9999 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
10000 
10001 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
10002 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
10003 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
10004 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
10005 		      vcfg->accept_tag1 ? 1 : 0);
10006 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
10007 		      vcfg->accept_untag1 ? 1 : 0);
10008 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
10009 		      vcfg->accept_tag2 ? 1 : 0);
10010 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
10011 		      vcfg->accept_untag2 ? 1 : 0);
10012 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
10013 		      vcfg->insert_tag1_en ? 1 : 0);
10014 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
10015 		      vcfg->insert_tag2_en ? 1 : 0);
10016 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
10017 		      vcfg->tag_shift_mode_en ? 1 : 0);
10018 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
10019 
10020 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10021 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10022 			HCLGE_VF_NUM_PER_BYTE;
10023 	req->vf_bitmap[bmap_index] =
10024 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10025 
10026 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10027 	if (status)
10028 		dev_err(&hdev->pdev->dev,
10029 			"Send port txvlan cfg command fail, ret =%d\n",
10030 			status);
10031 
10032 	return status;
10033 }
10034 
10035 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
10036 {
10037 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
10038 	struct hclge_vport_vtag_rx_cfg_cmd *req;
10039 	struct hclge_dev *hdev = vport->back;
10040 	struct hclge_desc desc;
10041 	u16 bmap_index;
10042 	int status;
10043 
10044 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
10045 
10046 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
10047 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
10048 		      vcfg->strip_tag1_en ? 1 : 0);
10049 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
10050 		      vcfg->strip_tag2_en ? 1 : 0);
10051 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
10052 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
10053 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
10054 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
10055 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
10056 		      vcfg->strip_tag1_discard_en ? 1 : 0);
10057 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
10058 		      vcfg->strip_tag2_discard_en ? 1 : 0);
10059 
10060 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10061 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10062 			HCLGE_VF_NUM_PER_BYTE;
10063 	req->vf_bitmap[bmap_index] =
10064 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10065 
10066 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10067 	if (status)
10068 		dev_err(&hdev->pdev->dev,
10069 			"Send port rxvlan cfg command fail, ret =%d\n",
10070 			status);
10071 
10072 	return status;
10073 }
10074 
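/* Configure tx vlan tag insertion and rx vlan tag stripping for a vport
 * according to its port based vlan state: when a port based vlan is in use
 * it is inserted on tx and stripped and discarded on rx, while stripping of
 * the user vlan follows rx_vlan_offload_en in both cases.
 */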
10075 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
10076 				  u16 port_base_vlan_state,
10077 				  u16 vlan_tag, u8 qos)
10078 {
10079 	int ret;
10080 
10081 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10082 		vport->txvlan_cfg.accept_tag1 = true;
10083 		vport->txvlan_cfg.insert_tag1_en = false;
10084 		vport->txvlan_cfg.default_tag1 = 0;
10085 	} else {
10086 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
10087 
10088 		vport->txvlan_cfg.accept_tag1 =
10089 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
10090 		vport->txvlan_cfg.insert_tag1_en = true;
10091 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
10092 						 vlan_tag;
10093 	}
10094 
10095 	vport->txvlan_cfg.accept_untag1 = true;
10096 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision 0x20; newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
10101 	vport->txvlan_cfg.accept_tag2 = true;
10102 	vport->txvlan_cfg.accept_untag2 = true;
10103 	vport->txvlan_cfg.insert_tag2_en = false;
10104 	vport->txvlan_cfg.default_tag2 = 0;
10105 	vport->txvlan_cfg.tag_shift_mode_en = true;
10106 
10107 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10108 		vport->rxvlan_cfg.strip_tag1_en = false;
10109 		vport->rxvlan_cfg.strip_tag2_en =
10110 				vport->rxvlan_cfg.rx_vlan_offload_en;
10111 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10112 	} else {
10113 		vport->rxvlan_cfg.strip_tag1_en =
10114 				vport->rxvlan_cfg.rx_vlan_offload_en;
10115 		vport->rxvlan_cfg.strip_tag2_en = true;
10116 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10117 	}
10118 
10119 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10120 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10121 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10122 
10123 	ret = hclge_set_vlan_tx_offload_cfg(vport);
10124 	if (ret)
10125 		return ret;
10126 
10127 	return hclge_set_vlan_rx_offload_cfg(vport);
10128 }
10129 
10130 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10131 {
10132 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10133 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10134 	struct hclge_desc desc;
10135 	int status;
10136 
10137 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10138 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10139 	rx_req->ot_fst_vlan_type =
10140 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10141 	rx_req->ot_sec_vlan_type =
10142 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10143 	rx_req->in_fst_vlan_type =
10144 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10145 	rx_req->in_sec_vlan_type =
10146 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10147 
10148 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10149 	if (status) {
10150 		dev_err(&hdev->pdev->dev,
10151 			"Send rxvlan protocol type command fail, ret =%d\n",
10152 			status);
10153 		return status;
10154 	}
10155 
10156 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10157 
10158 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10159 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10160 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10161 
10162 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10163 	if (status)
10164 		dev_err(&hdev->pdev->dev,
10165 			"Send txvlan protocol type command fail, ret =%d\n",
10166 			status);
10167 
10168 	return status;
10169 }
10170 
10171 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10172 {
10173 #define HCLGE_DEF_VLAN_TYPE		0x8100
10174 
10175 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10176 	struct hclge_vport *vport;
10177 	int ret;
10178 	int i;
10179 
10180 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10181 		/* for revision 0x21, vf vlan filter is per function */
10182 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10183 			vport = &hdev->vport[i];
10184 			ret = hclge_set_vlan_filter_ctrl(hdev,
10185 							 HCLGE_FILTER_TYPE_VF,
10186 							 HCLGE_FILTER_FE_EGRESS,
10187 							 true,
10188 							 vport->vport_id);
10189 			if (ret)
10190 				return ret;
10191 			vport->cur_vlan_fltr_en = true;
10192 		}
10193 
10194 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10195 						 HCLGE_FILTER_FE_INGRESS, true,
10196 						 0);
10197 		if (ret)
10198 			return ret;
10199 	} else {
10200 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10201 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10202 						 true, 0);
10203 		if (ret)
10204 			return ret;
10205 	}
10206 
10207 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10208 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10209 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10210 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10211 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10212 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10213 
10214 	ret = hclge_set_vlan_protocol_type(hdev);
10215 	if (ret)
10216 		return ret;
10217 
10218 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10219 		u16 vlan_tag;
10220 		u8 qos;
10221 
10222 		vport = &hdev->vport[i];
10223 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10224 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10225 
10226 		ret = hclge_vlan_offload_cfg(vport,
10227 					     vport->port_base_vlan_cfg.state,
10228 					     vlan_tag, qos);
10229 		if (ret)
10230 			return ret;
10231 	}
10232 
10233 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10234 }
10235 
10236 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool written_to_tbl)
10238 {
10239 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10240 
10241 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10242 		if (vlan->vlan_id == vlan_id)
10243 			return;
10244 
10245 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10246 	if (!vlan)
10247 		return;
10248 
	vlan->hd_tbl_status = written_to_tbl;
10250 	vlan->vlan_id = vlan_id;
10251 
10252 	list_add_tail(&vlan->node, &vport->vlan_list);
10253 }
10254 
10255 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10256 {
10257 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10258 	struct hclge_dev *hdev = vport->back;
10259 	int ret;
10260 
10261 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10262 		if (!vlan->hd_tbl_status) {
10263 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10264 						       vport->vport_id,
10265 						       vlan->vlan_id, false);
10266 			if (ret) {
10267 				dev_err(&hdev->pdev->dev,
10268 					"restore vport vlan list failed, ret=%d\n",
10269 					ret);
10270 				return ret;
10271 			}
10272 		}
10273 		vlan->hd_tbl_status = true;
10274 	}
10275 
10276 	return 0;
10277 }
10278 
10279 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10280 				      bool is_write_tbl)
10281 {
10282 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10283 	struct hclge_dev *hdev = vport->back;
10284 
10285 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10286 		if (vlan->vlan_id == vlan_id) {
10287 			if (is_write_tbl && vlan->hd_tbl_status)
10288 				hclge_set_vlan_filter_hw(hdev,
10289 							 htons(ETH_P_8021Q),
10290 							 vport->vport_id,
10291 							 vlan_id,
10292 							 true);
10293 
10294 			list_del(&vlan->node);
10295 			kfree(vlan);
10296 			break;
10297 		}
10298 	}
10299 }
10300 
10301 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10302 {
10303 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10304 	struct hclge_dev *hdev = vport->back;
10305 
10306 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10307 		if (vlan->hd_tbl_status)
10308 			hclge_set_vlan_filter_hw(hdev,
10309 						 htons(ETH_P_8021Q),
10310 						 vport->vport_id,
10311 						 vlan->vlan_id,
10312 						 true);
10313 
10314 		vlan->hd_tbl_status = false;
10315 		if (is_del_list) {
10316 			list_del(&vlan->node);
10317 			kfree(vlan);
10318 		}
10319 	}
10320 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10321 }
10322 
10323 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10324 {
10325 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10326 	struct hclge_vport *vport;
10327 	int i;
10328 
10329 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10330 		vport = &hdev->vport[i];
10331 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10332 			list_del(&vlan->node);
10333 			kfree(vlan);
10334 		}
10335 	}
10336 }
10337 
10338 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10339 {
10340 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10341 	struct hclge_dev *hdev = vport->back;
10342 	u16 vlan_proto;
10343 	u16 vlan_id;
10344 	u16 state;
10345 	int ret;
10346 
10347 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10348 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10349 	state = vport->port_base_vlan_cfg.state;
10350 
10351 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10352 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10353 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10354 					 vport->vport_id, vlan_id,
10355 					 false);
10356 		return;
10357 	}
10358 
10359 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10360 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10361 					       vport->vport_id,
10362 					       vlan->vlan_id, false);
10363 		if (ret)
10364 			break;
10365 		vlan->hd_tbl_status = true;
10366 	}
10367 }
10368 
/* For global reset and imp reset, the hardware clears the mac table, so
 * change the mac address state from ACTIVE to TO_ADD; the addresses can
 * then be restored in the service task after the reset completes.
 * Furthermore, mac addresses in the TO_DEL state do not need to be
 * restored after reset, so just remove those mac nodes from the mac_list.
 */
10375 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10376 {
10377 	struct hclge_mac_node *mac_node, *tmp;
10378 
10379 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10380 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10381 			mac_node->state = HCLGE_MAC_TO_ADD;
10382 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10383 			list_del(&mac_node->node);
10384 			kfree(mac_node);
10385 		}
10386 	}
10387 }
10388 
10389 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10390 {
10391 	spin_lock_bh(&vport->mac_list_lock);
10392 
10393 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10394 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10395 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10396 
10397 	spin_unlock_bh(&vport->mac_list_lock);
10398 }
10399 
10400 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10401 {
10402 	struct hclge_vport *vport = &hdev->vport[0];
10403 	struct hnae3_handle *handle = &vport->nic;
10404 
10405 	hclge_restore_mac_table_common(vport);
10406 	hclge_restore_vport_vlan_table(vport);
10407 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10408 	hclge_restore_fd_entries(handle);
10409 }
10410 
10411 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10412 {
10413 	struct hclge_vport *vport = hclge_get_vport(handle);
10414 
10415 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10416 		vport->rxvlan_cfg.strip_tag1_en = false;
10417 		vport->rxvlan_cfg.strip_tag2_en = enable;
10418 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10419 	} else {
10420 		vport->rxvlan_cfg.strip_tag1_en = enable;
10421 		vport->rxvlan_cfg.strip_tag2_en = true;
10422 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10423 	}
10424 
10425 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10426 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10427 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10428 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10429 
10430 	return hclge_set_vlan_rx_offload_cfg(vport);
10431 }
10432 
10433 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10434 {
10435 	struct hclge_dev *hdev = vport->back;
10436 
10437 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10438 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10439 }
10440 
10441 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10442 					    u16 port_base_vlan_state,
10443 					    struct hclge_vlan_info *new_info,
10444 					    struct hclge_vlan_info *old_info)
10445 {
10446 	struct hclge_dev *hdev = vport->back;
10447 	int ret;
10448 
10449 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10450 		hclge_rm_vport_all_vlan_table(vport, false);
10451 		/* force clear VLAN 0 */
10452 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10453 		if (ret)
10454 			return ret;
10455 		return hclge_set_vlan_filter_hw(hdev,
10456 						 htons(new_info->vlan_proto),
10457 						 vport->vport_id,
10458 						 new_info->vlan_tag,
10459 						 false);
10460 	}
10461 
10462 	/* force add VLAN 0 */
10463 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10464 	if (ret)
10465 		return ret;
10466 
10467 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10468 				       vport->vport_id, old_info->vlan_tag,
10469 				       true);
10470 	if (ret)
10471 		return ret;
10472 
10473 	return hclge_add_vport_all_vlan_table(vport);
10474 }
10475 
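/* Decide whether the port base vlan entry in hardware must be rewritten:
 * it must when the vlan id changes, and also for vlan 0 whenever the old
 * or the new qos is 0 (presumably because the entry toggles between
 * untagged and priority-tagged use).
 */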
10476 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10477 					  const struct hclge_vlan_info *old_cfg)
10478 {
10479 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10480 		return true;
10481 
10482 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10483 		return true;
10484 
10485 	return false;
10486 }
10487 
10488 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10489 				    struct hclge_vlan_info *vlan_info)
10490 {
10491 	struct hnae3_handle *nic = &vport->nic;
10492 	struct hclge_vlan_info *old_vlan_info;
10493 	struct hclge_dev *hdev = vport->back;
10494 	int ret;
10495 
10496 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10497 
10498 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10499 				     vlan_info->qos);
10500 	if (ret)
10501 		return ret;
10502 
10503 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10504 		goto out;
10505 
10506 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10507 		/* add new VLAN tag */
10508 		ret = hclge_set_vlan_filter_hw(hdev,
10509 					       htons(vlan_info->vlan_proto),
10510 					       vport->vport_id,
10511 					       vlan_info->vlan_tag,
10512 					       false);
10513 		if (ret)
10514 			return ret;
10515 
10516 		/* remove old VLAN tag */
10517 		if (old_vlan_info->vlan_tag == 0)
10518 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10519 						       true, 0);
10520 		else
10521 			ret = hclge_set_vlan_filter_hw(hdev,
10522 						       htons(ETH_P_8021Q),
10523 						       vport->vport_id,
10524 						       old_vlan_info->vlan_tag,
10525 						       true);
10526 		if (ret) {
10527 			dev_err(&hdev->pdev->dev,
10528 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10529 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10530 			return ret;
10531 		}
10532 
10533 		goto out;
10534 	}
10535 
10536 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10537 					       old_vlan_info);
10538 	if (ret)
10539 		return ret;
10540 
10541 out:
10542 	vport->port_base_vlan_cfg.state = state;
10543 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10544 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10545 	else
10546 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10547 
10548 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10549 	hclge_set_vport_vlan_fltr_change(vport);
10550 
10551 	return 0;
10552 }
10553 
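/* Derive the next port base vlan state from the current state and the
 * requested vlan/qos pair: a zero vlan and qos disables it, a non-zero
 * pair enables or modifies it, and an unchanged pair yields NOCHANGE.
 */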
10554 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10555 					  enum hnae3_port_base_vlan_state state,
10556 					  u16 vlan, u8 qos)
10557 {
10558 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10559 		if (!vlan && !qos)
10560 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10561 
10562 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10563 	}
10564 
10565 	if (!vlan && !qos)
10566 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10567 
10568 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10569 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10570 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10571 
10572 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10573 }
10574 
10575 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10576 				    u16 vlan, u8 qos, __be16 proto)
10577 {
10578 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10579 	struct hclge_vport *vport = hclge_get_vport(handle);
10580 	struct hclge_dev *hdev = vport->back;
10581 	struct hclge_vlan_info vlan_info;
10582 	u16 state;
10583 	int ret;
10584 
10585 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10586 		return -EOPNOTSUPP;
10587 
10588 	vport = hclge_get_vf_vport(hdev, vfid);
10589 	if (!vport)
10590 		return -EINVAL;
10591 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10593 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10594 		return -EINVAL;
10595 	if (proto != htons(ETH_P_8021Q))
10596 		return -EPROTONOSUPPORT;
10597 
10598 	state = hclge_get_port_base_vlan_state(vport,
10599 					       vport->port_base_vlan_cfg.state,
10600 					       vlan, qos);
10601 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10602 		return 0;
10603 
10604 	vlan_info.vlan_tag = vlan;
10605 	vlan_info.qos = qos;
10606 	vlan_info.vlan_proto = ntohs(proto);
10607 
10608 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10609 	if (ret) {
10610 		dev_err(&hdev->pdev->dev,
10611 			"failed to update port base vlan for vf %d, ret = %d\n",
10612 			vfid, ret);
10613 		return ret;
10614 	}
10615 
10616 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10617 	 * VLAN state.
10618 	 */
10619 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10620 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10621 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10622 						  vport->vport_id, state,
10623 						  &vlan_info);
10624 
10625 	return 0;
10626 }
10627 
10628 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10629 {
10630 	struct hclge_vlan_info *vlan_info;
10631 	struct hclge_vport *vport;
10632 	int ret;
10633 	int vf;
10634 
	/* clear port base vlan for all VFs */
10636 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10637 		vport = &hdev->vport[vf];
10638 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10639 
10640 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10641 					       vport->vport_id,
10642 					       vlan_info->vlan_tag, true);
10643 		if (ret)
10644 			dev_err(&hdev->pdev->dev,
10645 				"failed to clear vf vlan for vf%d, ret = %d\n",
10646 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10647 	}
10648 }
10649 
10650 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10651 			  u16 vlan_id, bool is_kill)
10652 {
10653 	struct hclge_vport *vport = hclge_get_vport(handle);
10654 	struct hclge_dev *hdev = vport->back;
10655 	bool writen_to_tbl = false;
10656 	int ret = 0;
10657 
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the vlan id, and
	 * remove it after the reset finishes.
	 */
10662 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10663 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10664 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10665 		return -EBUSY;
10666 	}
10667 
	/* When port base vlan is enabled, it is used as the vlan filter
	 * entry. In this case the vlan filter table is not updated when the
	 * user adds a new vlan or removes an existing one; only the vport
	 * vlan list is updated. The vlan ids in the vlan list are written to
	 * the vlan filter table once port base vlan is disabled.
	 */
10674 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10675 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10676 					       vlan_id, is_kill);
10677 		writen_to_tbl = true;
10678 	}
10679 
10680 	if (!ret) {
10681 		if (is_kill)
10682 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10683 		else
10684 			hclge_add_vport_vlan_table(vport, vlan_id,
10685 						   writen_to_tbl);
10686 	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to stay consistent
		 * with the stack
		 */
10691 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10692 	}
10693 
10694 	hclge_set_vport_vlan_fltr_change(vport);
10695 
10696 	return ret;
10697 }
10698 
10699 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10700 {
10701 	struct hclge_vport *vport;
10702 	int ret;
10703 	u16 i;
10704 
10705 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10706 		vport = &hdev->vport[i];
10707 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10708 					&vport->state))
10709 			continue;
10710 
10711 		ret = hclge_enable_vport_vlan_filter(vport,
10712 						     vport->req_vlan_fltr_en);
10713 		if (ret) {
10714 			dev_err(&hdev->pdev->dev,
10715 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10716 				vport->vport_id, ret);
10717 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10718 				&vport->state);
10719 			return;
10720 		}
10721 	}
10722 }
10723 
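/* Retry the vlan deletions recorded in vlan_del_fail_bmap, handling at
 * most HCLGE_MAX_SYNC_COUNT entries per invocation, and, when the budget
 * is not exhausted, synchronize each vport's vlan filter enable state.
 */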
10724 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10725 {
10726 #define HCLGE_MAX_SYNC_COUNT	60
10727 
10728 	int i, ret, sync_cnt = 0;
10729 	u16 vlan_id;
10730 
10731 	/* start from vport 1 for PF is always alive */
10732 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10733 		struct hclge_vport *vport = &hdev->vport[i];
10734 
10735 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10736 					 VLAN_N_VID);
10737 		while (vlan_id != VLAN_N_VID) {
10738 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10739 						       vport->vport_id, vlan_id,
10740 						       true);
10741 			if (ret && ret != -EINVAL)
10742 				return;
10743 
10744 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10745 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10746 			hclge_set_vport_vlan_fltr_change(vport);
10747 
10748 			sync_cnt++;
10749 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10750 				return;
10751 
10752 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10753 						 VLAN_N_VID);
10754 		}
10755 	}
10756 
10757 	hclge_sync_vlan_fltr_state(hdev);
10758 }
10759 
10760 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10761 {
10762 	struct hclge_config_max_frm_size_cmd *req;
10763 	struct hclge_desc desc;
10764 
10765 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10766 
10767 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10768 	req->max_frm_size = cpu_to_le16(new_mps);
10769 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10770 
10771 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10772 }
10773 
10774 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10775 {
10776 	struct hclge_vport *vport = hclge_get_vport(handle);
10777 
10778 	return hclge_set_vport_mtu(vport, new_mtu);
10779 }
10780 
10781 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10782 {
10783 	struct hclge_dev *hdev = vport->back;
10784 	int i, max_frm_size, ret;
10785 
	/* HW supports 2 layers of VLAN */
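	/* e.g. an MTU of 1500 gives a max frame size of
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 8 (2 * VLAN_HLEN) = 1526
	 */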
10787 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10788 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10789 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10790 		return -EINVAL;
10791 
10792 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10793 	mutex_lock(&hdev->vport_lock);
10794 	/* VF's mps must fit within hdev->mps */
10795 	if (vport->vport_id && max_frm_size > hdev->mps) {
10796 		mutex_unlock(&hdev->vport_lock);
10797 		return -EINVAL;
10798 	} else if (vport->vport_id) {
10799 		vport->mps = max_frm_size;
10800 		mutex_unlock(&hdev->vport_lock);
10801 		return 0;
10802 	}
10803 
	/* PF's mps must not be less than any VF's mps */
10805 	for (i = 1; i < hdev->num_alloc_vport; i++)
10806 		if (max_frm_size < hdev->vport[i].mps) {
10807 			mutex_unlock(&hdev->vport_lock);
10808 			return -EINVAL;
10809 		}
10810 
10811 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10812 
10813 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10814 	if (ret) {
10815 		dev_err(&hdev->pdev->dev,
10816 			"Change mtu fail, ret =%d\n", ret);
10817 		goto out;
10818 	}
10819 
10820 	hdev->mps = max_frm_size;
10821 	vport->mps = max_frm_size;
10822 
10823 	ret = hclge_buffer_alloc(hdev);
10824 	if (ret)
10825 		dev_err(&hdev->pdev->dev,
10826 			"Allocate buffer fail, ret =%d\n", ret);
10827 
10828 out:
10829 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10830 	mutex_unlock(&hdev->vport_lock);
10831 	return ret;
10832 }
10833 
10834 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10835 				    bool enable)
10836 {
10837 	struct hclge_reset_tqp_queue_cmd *req;
10838 	struct hclge_desc desc;
10839 	int ret;
10840 
10841 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10842 
10843 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10844 	req->tqp_id = cpu_to_le16(queue_id);
10845 	if (enable)
10846 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10847 
10848 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10849 	if (ret) {
10850 		dev_err(&hdev->pdev->dev,
10851 			"Send tqp reset cmd error, status =%d\n", ret);
10852 		return ret;
10853 	}
10854 
10855 	return 0;
10856 }
10857 
10858 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10859 				  u8 *reset_status)
10860 {
10861 	struct hclge_reset_tqp_queue_cmd *req;
10862 	struct hclge_desc desc;
10863 	int ret;
10864 
10865 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10866 
10867 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10868 	req->tqp_id = cpu_to_le16(queue_id);
10869 
10870 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10871 	if (ret) {
10872 		dev_err(&hdev->pdev->dev,
10873 			"Get reset status error, status =%d\n", ret);
10874 		return ret;
10875 	}
10876 
10877 	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10878 
10879 	return 0;
10880 }
10881 
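/* Convert a queue id that is local to the handle into the global TQP
 * index expected by the queue commands sent to the firmware.
 */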
10882 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10883 {
10884 	struct hnae3_queue *queue;
10885 	struct hclge_tqp *tqp;
10886 
10887 	queue = handle->kinfo.tqp[queue_id];
10888 	tqp = container_of(queue, struct hclge_tqp, q);
10889 
10890 	return tqp->index;
10891 }
10892 
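/* Reset every TQP of the handle one by one: assert the per-queue reset,
 * poll the ready status up to HCLGE_TQP_RESET_TRY_TIMES times with a
 * roughly 1ms sleep between polls, then deassert the reset again.
 */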
10893 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10894 {
10895 	struct hclge_vport *vport = hclge_get_vport(handle);
10896 	struct hclge_dev *hdev = vport->back;
10897 	u16 reset_try_times = 0;
10898 	u8 reset_status;
10899 	u16 queue_gid;
10900 	int ret;
10901 	u16 i;
10902 
10903 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10904 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10905 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10906 		if (ret) {
10907 			dev_err(&hdev->pdev->dev,
10908 				"failed to send reset tqp cmd, ret = %d\n",
10909 				ret);
10910 			return ret;
10911 		}
10912 
10913 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10914 			ret = hclge_get_reset_status(hdev, queue_gid,
10915 						     &reset_status);
10916 			if (ret)
10917 				return ret;
10918 
10919 			if (reset_status)
10920 				break;
10921 
10922 			/* Wait for tqp hw reset */
10923 			usleep_range(1000, 1200);
10924 		}
10925 
10926 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10927 			dev_err(&hdev->pdev->dev,
10928 				"wait for tqp hw reset timeout\n");
10929 			return -ETIME;
10930 		}
10931 
10932 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10933 		if (ret) {
10934 			dev_err(&hdev->pdev->dev,
10935 				"failed to deassert soft reset, ret = %d\n",
10936 				ret);
10937 			return ret;
10938 		}
10939 		reset_try_times = 0;
10940 	}
10941 	return 0;
10942 }
10943 
10944 static int hclge_reset_rcb(struct hnae3_handle *handle)
10945 {
10946 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10947 #define HCLGE_RESET_RCB_SUCCESS		1U
10948 
10949 	struct hclge_vport *vport = hclge_get_vport(handle);
10950 	struct hclge_dev *hdev = vport->back;
10951 	struct hclge_reset_cmd *req;
10952 	struct hclge_desc desc;
10953 	u8 return_status;
10954 	u16 queue_gid;
10955 	int ret;
10956 
10957 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10958 
10959 	req = (struct hclge_reset_cmd *)desc.data;
10960 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10961 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10962 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10963 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10964 
10965 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10966 	if (ret) {
10967 		dev_err(&hdev->pdev->dev,
10968 			"failed to send rcb reset cmd, ret = %d\n", ret);
10969 		return ret;
10970 	}
10971 
10972 	return_status = req->fun_reset_rcb_return_status;
10973 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10974 		return 0;
10975 
10976 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10977 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10978 			return_status);
10979 		return -EIO;
10980 	}
10981 
10982 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10983 	 * again to reset all tqps
10984 	 */
10985 	return hclge_reset_tqp_cmd(handle);
10986 }
10987 
10988 int hclge_reset_tqp(struct hnae3_handle *handle)
10989 {
10990 	struct hclge_vport *vport = hclge_get_vport(handle);
10991 	struct hclge_dev *hdev = vport->back;
10992 	int ret;
10993 
10994 	/* only need to disable PF's tqp */
10995 	if (!vport->vport_id) {
10996 		ret = hclge_tqp_enable(handle, false);
10997 		if (ret) {
10998 			dev_err(&hdev->pdev->dev,
10999 				"failed to disable tqp, ret = %d\n", ret);
11000 			return ret;
11001 		}
11002 	}
11003 
11004 	return hclge_reset_rcb(handle);
11005 }
11006 
11007 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
11008 {
11009 	struct hclge_vport *vport = hclge_get_vport(handle);
11010 	struct hclge_dev *hdev = vport->back;
11011 
11012 	return hdev->fw_version;
11013 }
11014 
11015 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11016 {
11017 	struct phy_device *phydev = hdev->hw.mac.phydev;
11018 
11019 	if (!phydev)
11020 		return;
11021 
11022 	phy_set_asym_pause(phydev, rx_en, tx_en);
11023 }
11024 
11025 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11026 {
11027 	int ret;
11028 
11029 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
11030 		return 0;
11031 
11032 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
11033 	if (ret)
11034 		dev_err(&hdev->pdev->dev,
11035 			"configure pauseparam error, ret = %d.\n", ret);
11036 
11037 	return ret;
11038 }
11039 
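/* Resolve the pause configuration from the local and link partner autoneg
 * advertisements via mii_resolve_flowctrl_fdx() and apply it to the MAC;
 * pause is forced off when the link is half duplex.
 */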
11040 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
11041 {
11042 	struct phy_device *phydev = hdev->hw.mac.phydev;
11043 	u16 remote_advertising = 0;
11044 	u16 local_advertising;
11045 	u32 rx_pause, tx_pause;
11046 	u8 flowctl;
11047 
11048 	if (!phydev->link || !phydev->autoneg)
11049 		return 0;
11050 
11051 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
11052 
11053 	if (phydev->pause)
11054 		remote_advertising = LPA_PAUSE_CAP;
11055 
11056 	if (phydev->asym_pause)
11057 		remote_advertising |= LPA_PAUSE_ASYM;
11058 
11059 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
11060 					   remote_advertising);
11061 	tx_pause = flowctl & FLOW_CTRL_TX;
11062 	rx_pause = flowctl & FLOW_CTRL_RX;
11063 
11064 	if (phydev->duplex == HCLGE_MAC_HALF) {
11065 		tx_pause = 0;
11066 		rx_pause = 0;
11067 	}
11068 
11069 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
11070 }
11071 
11072 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
11073 				 u32 *rx_en, u32 *tx_en)
11074 {
11075 	struct hclge_vport *vport = hclge_get_vport(handle);
11076 	struct hclge_dev *hdev = vport->back;
11077 	u8 media_type = hdev->hw.mac.media_type;
11078 
11079 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11080 		    hclge_get_autoneg(handle) : 0;
11081 
11082 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11083 		*rx_en = 0;
11084 		*tx_en = 0;
11085 		return;
11086 	}
11087 
11088 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11089 		*rx_en = 1;
11090 		*tx_en = 0;
11091 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11092 		*tx_en = 1;
11093 		*rx_en = 0;
11094 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11095 		*rx_en = 1;
11096 		*tx_en = 1;
11097 	} else {
11098 		*rx_en = 0;
11099 		*tx_en = 0;
11100 	}
11101 }
11102 
11103 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11104 					 u32 rx_en, u32 tx_en)
11105 {
11106 	if (rx_en && tx_en)
11107 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
11108 	else if (rx_en && !tx_en)
11109 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11110 	else if (!rx_en && tx_en)
11111 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11112 	else
11113 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
11114 
11115 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11116 }
11117 
11118 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11119 				u32 rx_en, u32 tx_en)
11120 {
11121 	struct hclge_vport *vport = hclge_get_vport(handle);
11122 	struct hclge_dev *hdev = vport->back;
11123 	struct phy_device *phydev = hdev->hw.mac.phydev;
11124 	u32 fc_autoneg;
11125 
11126 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11127 		fc_autoneg = hclge_get_autoneg(handle);
11128 		if (auto_neg != fc_autoneg) {
11129 			dev_info(&hdev->pdev->dev,
11130 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11131 			return -EOPNOTSUPP;
11132 		}
11133 	}
11134 
11135 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11136 		dev_info(&hdev->pdev->dev,
11137 			 "Priority flow control enabled. Cannot set link flow control.\n");
11138 		return -EOPNOTSUPP;
11139 	}
11140 
11141 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11142 
11143 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11144 
11145 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11146 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11147 
11148 	if (phydev)
11149 		return phy_start_aneg(phydev);
11150 
11151 	return -EOPNOTSUPP;
11152 }
11153 
11154 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11155 					  u8 *auto_neg, u32 *speed, u8 *duplex)
11156 {
11157 	struct hclge_vport *vport = hclge_get_vport(handle);
11158 	struct hclge_dev *hdev = vport->back;
11159 
11160 	if (speed)
11161 		*speed = hdev->hw.mac.speed;
11162 	if (duplex)
11163 		*duplex = hdev->hw.mac.duplex;
11164 	if (auto_neg)
11165 		*auto_neg = hdev->hw.mac.autoneg;
11166 }
11167 
11168 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11169 				 u8 *module_type)
11170 {
11171 	struct hclge_vport *vport = hclge_get_vport(handle);
11172 	struct hclge_dev *hdev = vport->back;
11173 
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type to ensure it is up to
	 * date.
	 */
11178 	hclge_update_port_info(hdev);
11179 
11180 	if (media_type)
11181 		*media_type = hdev->hw.mac.media_type;
11182 
11183 	if (module_type)
11184 		*module_type = hdev->hw.mac.module_type;
11185 }
11186 
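/* Report the MDI-X control and status of the PHY: switch to the MDIX page,
 * read the control and status registers, switch back to the copper page,
 * then translate the raw fields into the ETH_TP_MDI_* values.
 */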
11187 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11188 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11189 {
11190 	struct hclge_vport *vport = hclge_get_vport(handle);
11191 	struct hclge_dev *hdev = vport->back;
11192 	struct phy_device *phydev = hdev->hw.mac.phydev;
11193 	int mdix_ctrl, mdix, is_resolved;
11194 	unsigned int retval;
11195 
11196 	if (!phydev) {
11197 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11198 		*tp_mdix = ETH_TP_MDI_INVALID;
11199 		return;
11200 	}
11201 
11202 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11203 
11204 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11205 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11206 				    HCLGE_PHY_MDIX_CTRL_S);
11207 
11208 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11209 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11210 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11211 
11212 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11213 
11214 	switch (mdix_ctrl) {
11215 	case 0x0:
11216 		*tp_mdix_ctrl = ETH_TP_MDI;
11217 		break;
11218 	case 0x1:
11219 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11220 		break;
11221 	case 0x3:
11222 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11223 		break;
11224 	default:
11225 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11226 		break;
11227 	}
11228 
11229 	if (!is_resolved)
11230 		*tp_mdix = ETH_TP_MDI_INVALID;
11231 	else if (mdix)
11232 		*tp_mdix = ETH_TP_MDI_X;
11233 	else
11234 		*tp_mdix = ETH_TP_MDI;
11235 }
11236 
11237 static void hclge_info_show(struct hclge_dev *hdev)
11238 {
11239 	struct device *dev = &hdev->pdev->dev;
11240 
11241 	dev_info(dev, "PF info begin:\n");
11242 
11243 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11244 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11245 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11246 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11247 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11248 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11249 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11250 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11251 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11252 	dev_info(dev, "This is %s PF\n",
11253 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11254 	dev_info(dev, "DCB %s\n",
11255 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11256 	dev_info(dev, "MQPRIO %s\n",
11257 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11258 	dev_info(dev, "Default tx spare buffer size: %u\n",
11259 		 hdev->tx_spare_buf_size);
11260 
11261 	dev_info(dev, "PF info end.\n");
11262 }
11263 
11264 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11265 					  struct hclge_vport *vport)
11266 {
11267 	struct hnae3_client *client = vport->nic.client;
11268 	struct hclge_dev *hdev = ae_dev->priv;
11269 	int rst_cnt = hdev->rst_stats.reset_cnt;
11270 	int ret;
11271 
11272 	ret = client->ops->init_instance(&vport->nic);
11273 	if (ret)
11274 		return ret;
11275 
11276 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11277 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11278 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11279 		ret = -EBUSY;
11280 		goto init_nic_err;
11281 	}
11282 
11283 	/* Enable nic hw error interrupts */
11284 	ret = hclge_config_nic_hw_error(hdev, true);
11285 	if (ret) {
11286 		dev_err(&ae_dev->pdev->dev,
11287 			"fail(%d) to enable hw error interrupts\n", ret);
11288 		goto init_nic_err;
11289 	}
11290 
11291 	hnae3_set_client_init_flag(client, ae_dev, 1);
11292 
11293 	if (netif_msg_drv(&hdev->vport->nic))
11294 		hclge_info_show(hdev);
11295 
11296 	return ret;
11297 
11298 init_nic_err:
11299 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11300 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11301 		msleep(HCLGE_WAIT_RESET_DONE);
11302 
11303 	client->ops->uninit_instance(&vport->nic, 0);
11304 
11305 	return ret;
11306 }
11307 
11308 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11309 					   struct hclge_vport *vport)
11310 {
11311 	struct hclge_dev *hdev = ae_dev->priv;
11312 	struct hnae3_client *client;
11313 	int rst_cnt;
11314 	int ret;
11315 
11316 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11317 	    !hdev->nic_client)
11318 		return 0;
11319 
11320 	client = hdev->roce_client;
11321 	ret = hclge_init_roce_base_info(vport);
11322 	if (ret)
11323 		return ret;
11324 
11325 	rst_cnt = hdev->rst_stats.reset_cnt;
11326 	ret = client->ops->init_instance(&vport->roce);
11327 	if (ret)
11328 		return ret;
11329 
11330 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11331 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11332 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11333 		ret = -EBUSY;
11334 		goto init_roce_err;
11335 	}
11336 
11337 	/* Enable roce ras interrupts */
11338 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11339 	if (ret) {
11340 		dev_err(&ae_dev->pdev->dev,
11341 			"fail(%d) to enable roce ras interrupts\n", ret);
11342 		goto init_roce_err;
11343 	}
11344 
11345 	hnae3_set_client_init_flag(client, ae_dev, 1);
11346 
11347 	return 0;
11348 
11349 init_roce_err:
11350 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11351 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11352 		msleep(HCLGE_WAIT_RESET_DONE);
11353 
11354 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11355 
11356 	return ret;
11357 }
11358 
11359 static int hclge_init_client_instance(struct hnae3_client *client,
11360 				      struct hnae3_ae_dev *ae_dev)
11361 {
11362 	struct hclge_dev *hdev = ae_dev->priv;
11363 	struct hclge_vport *vport = &hdev->vport[0];
11364 	int ret;
11365 
11366 	switch (client->type) {
11367 	case HNAE3_CLIENT_KNIC:
11368 		hdev->nic_client = client;
11369 		vport->nic.client = client;
11370 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11371 		if (ret)
11372 			goto clear_nic;
11373 
11374 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11375 		if (ret)
11376 			goto clear_roce;
11377 
11378 		break;
11379 	case HNAE3_CLIENT_ROCE:
11380 		if (hnae3_dev_roce_supported(hdev)) {
11381 			hdev->roce_client = client;
11382 			vport->roce.client = client;
11383 		}
11384 
11385 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11386 		if (ret)
11387 			goto clear_roce;
11388 
11389 		break;
11390 	default:
11391 		return -EINVAL;
11392 	}
11393 
11394 	return 0;
11395 
11396 clear_nic:
11397 	hdev->nic_client = NULL;
11398 	vport->nic.client = NULL;
11399 	return ret;
11400 clear_roce:
11401 	hdev->roce_client = NULL;
11402 	vport->roce.client = NULL;
11403 	return ret;
11404 }
11405 
11406 static void hclge_uninit_client_instance(struct hnae3_client *client,
11407 					 struct hnae3_ae_dev *ae_dev)
11408 {
11409 	struct hclge_dev *hdev = ae_dev->priv;
11410 	struct hclge_vport *vport = &hdev->vport[0];
11411 
11412 	if (hdev->roce_client) {
11413 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11414 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11415 			msleep(HCLGE_WAIT_RESET_DONE);
11416 
11417 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11418 		hdev->roce_client = NULL;
11419 		vport->roce.client = NULL;
11420 	}
11421 	if (client->type == HNAE3_CLIENT_ROCE)
11422 		return;
11423 	if (hdev->nic_client && client->ops->uninit_instance) {
11424 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11425 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11426 			msleep(HCLGE_WAIT_RESET_DONE);
11427 
11428 		client->ops->uninit_instance(&vport->nic, 0);
11429 		hdev->nic_client = NULL;
11430 		vport->nic.client = NULL;
11431 	}
11432 }
11433 
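/* Map the optional device memory BAR (BAR 4) with write combining; devices
 * that do not expose this BAR simply skip the mapping.
 */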
11434 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11435 {
11436 #define HCLGE_MEM_BAR		4
11437 
11438 	struct pci_dev *pdev = hdev->pdev;
11439 	struct hclge_hw *hw = &hdev->hw;
11440 
	/* if the device does not have device memory, return directly */
11442 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11443 		return 0;
11444 
11445 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11446 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11447 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11448 	if (!hw->mem_base) {
11449 		dev_err(&pdev->dev, "failed to map device memory\n");
11450 		return -EFAULT;
11451 	}
11452 
11453 	return 0;
11454 }
11455 
11456 static int hclge_pci_init(struct hclge_dev *hdev)
11457 {
11458 	struct pci_dev *pdev = hdev->pdev;
11459 	struct hclge_hw *hw;
11460 	int ret;
11461 
11462 	ret = pci_enable_device(pdev);
11463 	if (ret) {
11464 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11465 		return ret;
11466 	}
11467 
11468 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11469 	if (ret) {
11470 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11471 		if (ret) {
11472 			dev_err(&pdev->dev,
11473 				"can't set consistent PCI DMA");
11474 			goto err_disable_device;
11475 		}
11476 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11477 	}
11478 
11479 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11480 	if (ret) {
11481 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11482 		goto err_disable_device;
11483 	}
11484 
11485 	pci_set_master(pdev);
11486 	hw = &hdev->hw;
11487 	hw->io_base = pcim_iomap(pdev, 2, 0);
11488 	if (!hw->io_base) {
11489 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11490 		ret = -ENOMEM;
11491 		goto err_clr_master;
11492 	}
11493 
11494 	ret = hclge_dev_mem_map(hdev);
11495 	if (ret)
11496 		goto err_unmap_io_base;
11497 
11498 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11499 
11500 	return 0;
11501 
11502 err_unmap_io_base:
11503 	pcim_iounmap(pdev, hdev->hw.io_base);
11504 err_clr_master:
11505 	pci_clear_master(pdev);
11506 	pci_release_regions(pdev);
11507 err_disable_device:
11508 	pci_disable_device(pdev);
11509 
11510 	return ret;
11511 }
11512 
11513 static void hclge_pci_uninit(struct hclge_dev *hdev)
11514 {
11515 	struct pci_dev *pdev = hdev->pdev;
11516 
11517 	if (hdev->hw.mem_base)
11518 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11519 
11520 	pcim_iounmap(pdev, hdev->hw.io_base);
11521 	pci_free_irq_vectors(pdev);
11522 	pci_clear_master(pdev);
11523 	pci_release_mem_regions(pdev);
11524 	pci_disable_device(pdev);
11525 }
11526 
11527 static void hclge_state_init(struct hclge_dev *hdev)
11528 {
11529 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11530 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11531 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11532 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11533 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11534 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11535 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11536 }
11537 
11538 static void hclge_state_uninit(struct hclge_dev *hdev)
11539 {
11540 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11541 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11542 
11543 	if (hdev->reset_timer.function)
11544 		del_timer_sync(&hdev->reset_timer);
11545 	if (hdev->service_task.work.func)
11546 		cancel_delayed_work_sync(&hdev->service_task);
11547 }
11548 
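/* Prepare the device for a reset requested from outside the driver (e.g.
 * an FLR): take the reset semaphore, retry the prepare step when it fails
 * or a reset is still pending, then disable the misc vector and command
 * queue until hclge_reset_done() runs.
 */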
11549 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11550 					enum hnae3_reset_type rst_type)
11551 {
11552 #define HCLGE_RESET_RETRY_WAIT_MS	500
11553 #define HCLGE_RESET_RETRY_CNT	5
11554 
11555 	struct hclge_dev *hdev = ae_dev->priv;
11556 	int retry_cnt = 0;
11557 	int ret;
11558 
11559 retry:
11560 	down(&hdev->reset_sem);
11561 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11562 	hdev->reset_type = rst_type;
11563 	ret = hclge_reset_prepare(hdev);
11564 	if (ret || hdev->reset_pending) {
11565 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11566 			ret);
11567 		if (hdev->reset_pending ||
11568 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11569 			dev_err(&hdev->pdev->dev,
11570 				"reset_pending:0x%lx, retry_cnt:%d\n",
11571 				hdev->reset_pending, retry_cnt);
11572 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11573 			up(&hdev->reset_sem);
11574 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11575 			goto retry;
11576 		}
11577 	}
11578 
11579 	/* disable misc vector before reset done */
11580 	hclge_enable_vector(&hdev->misc_vector, false);
11581 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11582 
11583 	if (hdev->reset_type == HNAE3_FLR_RESET)
11584 		hdev->rst_stats.flr_rst_cnt++;
11585 }
11586 
11587 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11588 {
11589 	struct hclge_dev *hdev = ae_dev->priv;
11590 	int ret;
11591 
11592 	hclge_enable_vector(&hdev->misc_vector, true);
11593 
11594 	ret = hclge_reset_rebuild(hdev);
11595 	if (ret)
11596 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11597 
11598 	hdev->reset_type = HNAE3_NONE_RESET;
11599 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11600 	up(&hdev->reset_sem);
11601 }
11602 
11603 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11604 {
11605 	u16 i;
11606 
11607 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11608 		struct hclge_vport *vport = &hdev->vport[i];
11609 		int ret;
11610 
		/* Send cmd to clear vport's FUNC_RST_ING */
11612 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11613 		if (ret)
11614 			dev_warn(&hdev->pdev->dev,
11615 				 "clear vport(%u) rst failed %d!\n",
11616 				 vport->vport_id, ret);
11617 	}
11618 }
11619 
11620 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11621 {
11622 	struct hclge_desc desc;
11623 	int ret;
11624 
11625 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11626 
11627 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. Since only older firmware can return
	 * -EOPNOTSUPP for this command, override that value and return
	 * success to keep the code backward compatible.
	 */
11634 	if (ret && ret != -EOPNOTSUPP) {
11635 		dev_err(&hdev->pdev->dev,
11636 			"failed to clear hw resource, ret = %d\n", ret);
11637 		return ret;
11638 	}
11639 	return 0;
11640 }
11641 
11642 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11643 {
11644 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11645 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11646 }
11647 
11648 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11649 {
11650 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11651 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11652 }
11653 
11654 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11655 {
11656 	struct pci_dev *pdev = ae_dev->pdev;
11657 	struct hclge_dev *hdev;
11658 	int ret;
11659 
11660 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11661 	if (!hdev)
11662 		return -ENOMEM;
11663 
11664 	hdev->pdev = pdev;
11665 	hdev->ae_dev = ae_dev;
11666 	hdev->reset_type = HNAE3_NONE_RESET;
11667 	hdev->reset_level = HNAE3_FUNC_RESET;
11668 	ae_dev->priv = hdev;
11669 
	/* HW supports 2 layers of VLAN */
11671 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11672 
11673 	mutex_init(&hdev->vport_lock);
11674 	spin_lock_init(&hdev->fd_rule_lock);
11675 	sema_init(&hdev->reset_sem, 1);
11676 
11677 	ret = hclge_pci_init(hdev);
11678 	if (ret)
11679 		goto out;
11680 
11681 	ret = hclge_devlink_init(hdev);
11682 	if (ret)
11683 		goto err_pci_uninit;
11684 
11685 	/* Firmware command queue initialize */
11686 	ret = hclge_cmd_queue_init(hdev);
11687 	if (ret)
11688 		goto err_devlink_uninit;
11689 
11690 	/* Firmware command initialize */
11691 	ret = hclge_cmd_init(hdev);
11692 	if (ret)
11693 		goto err_cmd_uninit;
11694 
11695 	ret  = hclge_clear_hw_resource(hdev);
11696 	if (ret)
11697 		goto err_cmd_uninit;
11698 
11699 	ret = hclge_get_cap(hdev);
11700 	if (ret)
11701 		goto err_cmd_uninit;
11702 
11703 	ret = hclge_query_dev_specs(hdev);
11704 	if (ret) {
11705 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11706 			ret);
11707 		goto err_cmd_uninit;
11708 	}
11709 
11710 	ret = hclge_configure(hdev);
11711 	if (ret) {
11712 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11713 		goto err_cmd_uninit;
11714 	}
11715 
11716 	ret = hclge_init_msi(hdev);
11717 	if (ret) {
11718 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11719 		goto err_cmd_uninit;
11720 	}
11721 
11722 	ret = hclge_misc_irq_init(hdev);
11723 	if (ret)
11724 		goto err_msi_uninit;
11725 
11726 	ret = hclge_alloc_tqps(hdev);
11727 	if (ret) {
11728 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11729 		goto err_msi_irq_uninit;
11730 	}
11731 
11732 	ret = hclge_alloc_vport(hdev);
11733 	if (ret)
11734 		goto err_msi_irq_uninit;
11735 
11736 	ret = hclge_map_tqp(hdev);
11737 	if (ret)
11738 		goto err_msi_irq_uninit;
11739 
11740 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11741 	    !hnae3_dev_phy_imp_supported(hdev)) {
11742 		ret = hclge_mac_mdio_config(hdev);
11743 		if (ret)
11744 			goto err_msi_irq_uninit;
11745 	}
11746 
11747 	ret = hclge_init_umv_space(hdev);
11748 	if (ret)
11749 		goto err_mdiobus_unreg;
11750 
11751 	ret = hclge_mac_init(hdev);
11752 	if (ret) {
11753 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11754 		goto err_mdiobus_unreg;
11755 	}
11756 
11757 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11758 	if (ret) {
11759 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11760 		goto err_mdiobus_unreg;
11761 	}
11762 
11763 	ret = hclge_config_gro(hdev);
11764 	if (ret)
11765 		goto err_mdiobus_unreg;
11766 
11767 	ret = hclge_init_vlan_config(hdev);
11768 	if (ret) {
11769 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11770 		goto err_mdiobus_unreg;
11771 	}
11772 
11773 	ret = hclge_tm_schd_init(hdev);
11774 	if (ret) {
11775 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11776 		goto err_mdiobus_unreg;
11777 	}
11778 
11779 	ret = hclge_rss_init_cfg(hdev);
11780 	if (ret) {
11781 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11782 		goto err_mdiobus_unreg;
11783 	}
11784 
11785 	ret = hclge_rss_init_hw(hdev);
11786 	if (ret) {
11787 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11788 		goto err_mdiobus_unreg;
11789 	}
11790 
11791 	ret = init_mgr_tbl(hdev);
11792 	if (ret) {
11793 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11794 		goto err_mdiobus_unreg;
11795 	}
11796 
11797 	ret = hclge_init_fd_config(hdev);
11798 	if (ret) {
11799 		dev_err(&pdev->dev,
11800 			"fd table init fail, ret=%d\n", ret);
11801 		goto err_mdiobus_unreg;
11802 	}
11803 
11804 	ret = hclge_ptp_init(hdev);
11805 	if (ret)
11806 		goto err_mdiobus_unreg;
11807 
11808 	INIT_KFIFO(hdev->mac_tnl_log);
11809 
11810 	hclge_dcb_ops_set(hdev);
11811 
11812 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11813 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11814 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
11818 	hclge_misc_affinity_setup(hdev);
11819 
11820 	hclge_clear_all_event_cause(hdev);
11821 	hclge_clear_resetting_state(hdev);
11822 
	/* Log and clear the hw errors that have already occurred */
11824 	if (hnae3_dev_ras_imp_supported(hdev))
11825 		hclge_handle_occurred_error(hdev);
11826 	else
11827 		hclge_handle_all_hns_hw_errors(ae_dev);
11828 
	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs.
	 */
11832 	if (ae_dev->hw_err_reset_req) {
11833 		enum hnae3_reset_type reset_level;
11834 
11835 		reset_level = hclge_get_reset_level(ae_dev,
11836 						    &ae_dev->hw_err_reset_req);
11837 		hclge_set_def_reset_request(ae_dev, reset_level);
11838 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11839 	}
11840 
11841 	hclge_init_rxd_adv_layout(hdev);
11842 
11843 	/* Enable MISC vector(vector0) */
11844 	hclge_enable_vector(&hdev->misc_vector, true);
11845 
11846 	hclge_state_init(hdev);
11847 	hdev->last_reset_time = jiffies;
11848 
11849 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11850 		 HCLGE_DRIVER_NAME);
11851 
11852 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11853 
11854 	return 0;
11855 
11856 err_mdiobus_unreg:
11857 	if (hdev->hw.mac.phydev)
11858 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11859 err_msi_irq_uninit:
11860 	hclge_misc_irq_uninit(hdev);
11861 err_msi_uninit:
11862 	pci_free_irq_vectors(pdev);
11863 err_cmd_uninit:
11864 	hclge_cmd_uninit(hdev);
11865 err_devlink_uninit:
11866 	hclge_devlink_uninit(hdev);
11867 err_pci_uninit:
11868 	pcim_iounmap(pdev, hdev->hw.io_base);
11869 	pci_clear_master(pdev);
11870 	pci_release_regions(pdev);
11871 	pci_disable_device(pdev);
11872 out:
11873 	mutex_destroy(&hdev->vport_lock);
11874 	return ret;
11875 }
11876 
11877 static void hclge_stats_clear(struct hclge_dev *hdev)
11878 {
11879 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11880 }
11881 
11882 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11883 {
11884 	return hclge_config_switch_param(hdev, vf, enable,
11885 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11886 }
11887 
11888 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11889 {
11890 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11891 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11892 					  enable, vf);
11893 }
11894 
11895 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11896 {
11897 	int ret;
11898 
11899 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11900 	if (ret) {
11901 		dev_err(&hdev->pdev->dev,
11902 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11903 			vf, enable ? "on" : "off", ret);
11904 		return ret;
11905 	}
11906 
11907 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11908 	if (ret)
11909 		dev_err(&hdev->pdev->dev,
11910 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11911 			vf, enable ? "on" : "off", ret);
11912 
11913 	return ret;
11914 }
11915 
11916 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11917 				 bool enable)
11918 {
11919 	struct hclge_vport *vport = hclge_get_vport(handle);
11920 	struct hclge_dev *hdev = vport->back;
11921 	u32 new_spoofchk = enable ? 1 : 0;
11922 	int ret;
11923 
11924 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11925 		return -EOPNOTSUPP;
11926 
11927 	vport = hclge_get_vf_vport(hdev, vf);
11928 	if (!vport)
11929 		return -EINVAL;
11930 
11931 	if (vport->vf_info.spoofchk == new_spoofchk)
11932 		return 0;
11933 
11934 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11935 		dev_warn(&hdev->pdev->dev,
11936 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11937 			 vf);
11938 	else if (enable && hclge_is_umv_space_full(vport, true))
11939 		dev_warn(&hdev->pdev->dev,
11940 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11941 			 vf);
11942 
11943 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11944 	if (ret)
11945 		return ret;
11946 
11947 	vport->vf_info.spoofchk = new_spoofchk;
11948 	return 0;
11949 }
11950 
11951 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11952 {
11953 	struct hclge_vport *vport = hdev->vport;
11954 	int ret;
11955 	int i;
11956 
11957 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11958 		return 0;
11959 
11960 	/* resume the vf spoof check state after reset */
11961 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11962 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11963 					       vport->vf_info.spoofchk);
11964 		if (ret)
11965 			return ret;
11966 
11967 		vport++;
11968 	}
11969 
11970 	return 0;
11971 }
11972 
11973 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11974 {
11975 	struct hclge_vport *vport = hclge_get_vport(handle);
11976 	struct hclge_dev *hdev = vport->back;
11977 	u32 new_trusted = enable ? 1 : 0;
11978 
11979 	vport = hclge_get_vf_vport(hdev, vf);
11980 	if (!vport)
11981 		return -EINVAL;
11982 
11983 	if (vport->vf_info.trusted == new_trusted)
11984 		return 0;
11985 
11986 	vport->vf_info.trusted = new_trusted;
11987 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11988 	hclge_task_schedule(hdev, 0);
11989 
11990 	return 0;
11991 }
11992 
11993 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11994 {
11995 	int ret;
11996 	int vf;
11997 
11998 	/* reset vf rate to default value */
11999 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12000 		struct hclge_vport *vport = &hdev->vport[vf];
12001 
12002 		vport->vf_info.max_tx_rate = 0;
12003 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
12004 		if (ret)
12005 			dev_err(&hdev->pdev->dev,
12006 				"vf%d failed to reset to default, ret=%d\n",
12007 				vf - HCLGE_VF_VPORT_START_NUM, ret);
12008 	}
12009 }
12010 
12011 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12012 				     int min_tx_rate, int max_tx_rate)
12013 {
12014 	if (min_tx_rate != 0 ||
12015 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12016 		dev_err(&hdev->pdev->dev,
12017 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12018 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12019 		return -EINVAL;
12020 	}
12021 
12022 	return 0;
12023 }
12024 
12025 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12026 			     int min_tx_rate, int max_tx_rate, bool force)
12027 {
12028 	struct hclge_vport *vport = hclge_get_vport(handle);
12029 	struct hclge_dev *hdev = vport->back;
12030 	int ret;
12031 
12032 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12033 	if (ret)
12034 		return ret;
12035 
12036 	vport = hclge_get_vf_vport(hdev, vf);
12037 	if (!vport)
12038 		return -EINVAL;
12039 
12040 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12041 		return 0;
12042 
12043 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12044 	if (ret)
12045 		return ret;
12046 
12047 	vport->vf_info.max_tx_rate = max_tx_rate;
12048 
12049 	return 0;
12050 }
12051 
12052 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12053 {
12054 	struct hnae3_handle *handle = &hdev->vport->nic;
12055 	struct hclge_vport *vport;
12056 	int ret;
12057 	int vf;
12058 
12059 	/* resume the vf max_tx_rate after reset */
12060 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12061 		vport = hclge_get_vf_vport(hdev, vf);
12062 		if (!vport)
12063 			return -EINVAL;
12064 
		/* zero means max rate; after reset, the firmware has already
		 * set it to max rate, so just continue.
		 */
12068 		if (!vport->vf_info.max_tx_rate)
12069 			continue;
12070 
12071 		ret = hclge_set_vf_rate(handle, vf, 0,
12072 					vport->vf_info.max_tx_rate, true);
12073 		if (ret) {
12074 			dev_err(&hdev->pdev->dev,
12075 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
12076 				vf, vport->vf_info.max_tx_rate, ret);
12077 			return ret;
12078 		}
12079 	}
12080 
12081 	return 0;
12082 }
12083 
12084 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12085 {
12086 	struct hclge_vport *vport = hdev->vport;
12087 	int i;
12088 
12089 	for (i = 0; i < hdev->num_alloc_vport; i++) {
12090 		hclge_vport_stop(vport);
12091 		vport++;
12092 	}
12093 }
12094 
12095 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12096 {
12097 	struct hclge_dev *hdev = ae_dev->priv;
12098 	struct pci_dev *pdev = ae_dev->pdev;
12099 	int ret;
12100 
12101 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
12102 
12103 	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the pf and vf
	 * table entries, so the tables in memory should not be cleaned here.
	 */
12107 	if (hdev->reset_type == HNAE3_IMP_RESET ||
12108 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
12109 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12110 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12111 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12112 		hclge_reset_umv_space(hdev);
12113 	}
12114 
12115 	ret = hclge_cmd_init(hdev);
12116 	if (ret) {
12117 		dev_err(&pdev->dev, "Cmd queue init failed\n");
12118 		return ret;
12119 	}
12120 
12121 	ret = hclge_map_tqp(hdev);
12122 	if (ret) {
12123 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12124 		return ret;
12125 	}
12126 
12127 	ret = hclge_mac_init(hdev);
12128 	if (ret) {
12129 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12130 		return ret;
12131 	}
12132 
12133 	ret = hclge_tp_port_init(hdev);
12134 	if (ret) {
12135 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12136 			ret);
12137 		return ret;
12138 	}
12139 
12140 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12141 	if (ret) {
12142 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12143 		return ret;
12144 	}
12145 
12146 	ret = hclge_config_gro(hdev);
12147 	if (ret)
12148 		return ret;
12149 
12150 	ret = hclge_init_vlan_config(hdev);
12151 	if (ret) {
12152 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12153 		return ret;
12154 	}
12155 
12156 	ret = hclge_tm_init_hw(hdev, true);
12157 	if (ret) {
12158 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12159 		return ret;
12160 	}
12161 
12162 	ret = hclge_rss_init_hw(hdev);
12163 	if (ret) {
12164 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12165 		return ret;
12166 	}
12167 
12168 	ret = init_mgr_tbl(hdev);
12169 	if (ret) {
12170 		dev_err(&pdev->dev,
12171 			"failed to reinit manager table, ret = %d\n", ret);
12172 		return ret;
12173 	}
12174 
12175 	ret = hclge_init_fd_config(hdev);
12176 	if (ret) {
12177 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12178 		return ret;
12179 	}
12180 
12181 	ret = hclge_ptp_init(hdev);
12182 	if (ret)
12183 		return ret;
12184 
	/* Log and clear the hw errors that have already occurred */
12186 	if (hnae3_dev_ras_imp_supported(hdev))
12187 		hclge_handle_occurred_error(hdev);
12188 	else
12189 		hclge_handle_all_hns_hw_errors(ae_dev);
12190 
12191 	/* Re-enable the hw error interrupts because
12192 	 * the interrupts get disabled on global reset.
12193 	 */
12194 	ret = hclge_config_nic_hw_error(hdev, true);
12195 	if (ret) {
12196 		dev_err(&pdev->dev,
12197 			"fail(%d) to re-enable NIC hw error interrupts\n",
12198 			ret);
12199 		return ret;
12200 	}
12201 
12202 	if (hdev->roce_client) {
12203 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12204 		if (ret) {
12205 			dev_err(&pdev->dev,
12206 				"fail(%d) to re-enable roce ras interrupts\n",
12207 				ret);
12208 			return ret;
12209 		}
12210 	}
12211 
12212 	hclge_reset_vport_state(hdev);
12213 	ret = hclge_reset_vport_spoofchk(hdev);
12214 	if (ret)
12215 		return ret;
12216 
12217 	ret = hclge_resume_vf_rate(hdev);
12218 	if (ret)
12219 		return ret;
12220 
12221 	hclge_init_rxd_adv_layout(hdev);
12222 
12223 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12224 		 HCLGE_DRIVER_NAME);
12225 
12226 	return 0;
12227 }
12228 
12229 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12230 {
12231 	struct hclge_dev *hdev = ae_dev->priv;
12232 	struct hclge_mac *mac = &hdev->hw.mac;
12233 
12234 	hclge_reset_vf_rate(hdev);
12235 	hclge_clear_vf_vlan(hdev);
12236 	hclge_misc_affinity_teardown(hdev);
12237 	hclge_state_uninit(hdev);
12238 	hclge_ptp_uninit(hdev);
12239 	hclge_uninit_rxd_adv_layout(hdev);
12240 	hclge_uninit_mac_table(hdev);
12241 	hclge_del_all_fd_entries(hdev);
12242 
12243 	if (mac->phydev)
12244 		mdiobus_unregister(mac->mdio_bus);
12245 
12246 	/* Disable MISC vector(vector0) */
12247 	hclge_enable_vector(&hdev->misc_vector, false);
12248 	synchronize_irq(hdev->misc_vector.vector_irq);
12249 
12250 	/* Disable all hw interrupts */
12251 	hclge_config_mac_tnl_int(hdev, false);
12252 	hclge_config_nic_hw_error(hdev, false);
12253 	hclge_config_rocee_ras_interrupt(hdev, false);
12254 
12255 	hclge_cmd_uninit(hdev);
12256 	hclge_misc_irq_uninit(hdev);
12257 	hclge_devlink_uninit(hdev);
12258 	hclge_pci_uninit(hdev);
12259 	mutex_destroy(&hdev->vport_lock);
12260 	hclge_uninit_vport_vlan_table(hdev);
12261 	ae_dev->priv = NULL;
12262 }
12263 
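/* the maximum number of combined channels is bounded by both the PF's
 * maximum RSS size and the number of TQPs allocated to this vport
 */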
12264 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12265 {
12266 	struct hclge_vport *vport = hclge_get_vport(handle);
12267 	struct hclge_dev *hdev = vport->back;
12268 
12269 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12270 }
12271 
12272 static void hclge_get_channels(struct hnae3_handle *handle,
12273 			       struct ethtool_channels *ch)
12274 {
12275 	ch->max_combined = hclge_get_max_channels(handle);
12276 	ch->other_count = 1;
12277 	ch->max_other = 1;
12278 	ch->combined_count = handle->kinfo.rss_size;
12279 }
12280 
12281 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12282 					u16 *alloc_tqps, u16 *max_rss_size)
12283 {
12284 	struct hclge_vport *vport = hclge_get_vport(handle);
12285 	struct hclge_dev *hdev = vport->back;
12286 
12287 	*alloc_tqps = vport->alloc_tqps;
12288 	*max_rss_size = hdev->pf_rss_size_max;
12289 }
12290 
12291 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12292 			      bool rxfh_configured)
12293 {
12294 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12295 	struct hclge_vport *vport = hclge_get_vport(handle);
12296 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12297 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12298 	struct hclge_dev *hdev = vport->back;
12299 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12300 	u16 cur_rss_size = kinfo->rss_size;
12301 	u16 cur_tqps = kinfo->num_tqps;
12302 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12303 	u16 roundup_size;
12304 	u32 *rss_indir;
12305 	unsigned int i;
12306 	int ret;
12307 
12308 	kinfo->req_rss_size = new_tqps_num;
12309 
12310 	ret = hclge_tm_vport_map_update(hdev);
12311 	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
12313 		return ret;
12314 	}
12315 
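	/* tc_size is the log2 of the per-TC queue number: round rss_size up
	 * to a power of two before taking ilog2
	 */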
12316 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12317 	roundup_size = ilog2(roundup_size);
12318 	/* Set the RSS TC mode according to the new RSS size */
12319 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12320 		tc_valid[i] = 0;
12321 
12322 		if (!(hdev->hw_tc_map & BIT(i)))
12323 			continue;
12324 
12325 		tc_valid[i] = 1;
12326 		tc_size[i] = roundup_size;
12327 		tc_offset[i] = kinfo->rss_size * i;
12328 	}
12329 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12330 	if (ret)
12331 		return ret;
12332 
12333 	/* RSS indirection table has been configured by user */
12334 	if (rxfh_configured)
12335 		goto out;
12336 
	/* Reinitialize the RSS indirection table according to the new RSS size */
12338 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12339 			    GFP_KERNEL);
12340 	if (!rss_indir)
12341 		return -ENOMEM;
12342 
12343 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12344 		rss_indir[i] = i % kinfo->rss_size;
12345 
12346 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12347 	if (ret)
12348 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12349 			ret);
12350 
12351 	kfree(rss_indir);
12352 
12353 out:
12354 	if (!ret)
12355 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12357 			 cur_rss_size, kinfo->rss_size,
12358 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12359 
12360 	return ret;
12361 }
12362 
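/* query the firmware for the number of 32-bit and 64-bit registers that
 * can be dumped; both counts are needed to size the register snapshot
 */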
12363 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12364 			      u32 *regs_num_64_bit)
12365 {
12366 	struct hclge_desc desc;
12367 	u32 total_num;
12368 	int ret;
12369 
12370 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12371 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12372 	if (ret) {
12373 		dev_err(&hdev->pdev->dev,
12374 			"Query register number cmd failed, ret = %d.\n", ret);
12375 		return ret;
12376 	}
12377 
12378 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12379 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12380 
12381 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12382 	if (!total_num)
12383 		return -EINVAL;
12384 
12385 	return 0;
12386 }
12387 
12388 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12389 				 void *data)
12390 {
12391 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12392 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12393 
12394 	struct hclge_desc *desc;
12395 	u32 *reg_val = data;
12396 	__le32 *desc_data;
12397 	int nodata_num;
12398 	int cmd_num;
12399 	int i, k, n;
12400 	int ret;
12401 
12402 	if (regs_num == 0)
12403 		return 0;
12404 
12405 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12406 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12407 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12408 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12409 	if (!desc)
12410 		return -ENOMEM;
12411 
12412 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12413 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12414 	if (ret) {
12415 		dev_err(&hdev->pdev->dev,
12416 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12417 		kfree(desc);
12418 		return ret;
12419 	}
12420 
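	/* the first BD returns HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num
	 * register values in its data field (the command header occupies the
	 * rest); every following BD is read as pure data, header dwords
	 * included
	 */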
12421 	for (i = 0; i < cmd_num; i++) {
12422 		if (i == 0) {
12423 			desc_data = (__le32 *)(&desc[i].data[0]);
12424 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12425 		} else {
12426 			desc_data = (__le32 *)(&desc[i]);
12427 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12428 		}
12429 		for (k = 0; k < n; k++) {
12430 			*reg_val++ = le32_to_cpu(*desc_data++);
12431 
12432 			regs_num--;
12433 			if (!regs_num)
12434 				break;
12435 		}
12436 	}
12437 
12438 	kfree(desc);
12439 	return 0;
12440 }
12441 
12442 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12443 				 void *data)
12444 {
12445 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12446 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12447 
12448 	struct hclge_desc *desc;
12449 	u64 *reg_val = data;
12450 	__le64 *desc_data;
12451 	int nodata_len;
12452 	int cmd_num;
12453 	int i, k, n;
12454 	int ret;
12455 
12456 	if (regs_num == 0)
12457 		return 0;
12458 
12459 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12460 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12461 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12462 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12463 	if (!desc)
12464 		return -ENOMEM;
12465 
12466 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12467 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12468 	if (ret) {
12469 		dev_err(&hdev->pdev->dev,
12470 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12471 		kfree(desc);
12472 		return ret;
12473 	}
12474 
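	/* as in the 32-bit case: skip the command header of the first BD and
	 * read the following BDs as pure data
	 */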
12475 	for (i = 0; i < cmd_num; i++) {
12476 		if (i == 0) {
12477 			desc_data = (__le64 *)(&desc[i].data[0]);
12478 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12479 		} else {
12480 			desc_data = (__le64 *)(&desc[i]);
12481 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12482 		}
12483 		for (k = 0; k < n; k++) {
12484 			*reg_val++ = le64_to_cpu(*desc_data++);
12485 
12486 			regs_num--;
12487 			if (!regs_num)
12488 				break;
12489 		}
12490 	}
12491 
12492 	kfree(desc);
12493 	return 0;
12494 }
12495 
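/* layout of the register dump: values are emitted in lines of
 * REG_NUM_PER_LINE u32 words, and each section is terminated with
 * SEPARATOR_VALUE padding up to a line boundary
 */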
12496 #define MAX_SEPARATE_NUM	4
12497 #define SEPARATOR_VALUE		0xFDFCFBFA
12498 #define REG_NUM_PER_LINE	4
12499 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12500 #define REG_SEPARATOR_LINE	1
12501 #define REG_NUM_REMAIN_MASK	3
12502 
12503 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12504 {
12505 	int i;
12506 
	/* initialize all command BDs except the last one */
12508 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12509 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12510 					   true);
12511 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12512 	}
12513 
12514 	/* initialize the last command BD */
12515 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12516 
12517 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12518 }
12519 
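/* query the BD count of every DFX register type; each count sits in the
 * returned descriptors at the dword offset given by
 * hclge_dfx_bd_offset_list
 */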
12520 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12521 				    int *bd_num_list,
12522 				    u32 type_num)
12523 {
12524 	u32 entries_per_desc, desc_index, index, offset, i;
12525 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12526 	int ret;
12527 
12528 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12529 	if (ret) {
12530 		dev_err(&hdev->pdev->dev,
12531 			"Get dfx bd num fail, status is %d.\n", ret);
12532 		return ret;
12533 	}
12534 
12535 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12536 	for (i = 0; i < type_num; i++) {
12537 		offset = hclge_dfx_bd_offset_list[i];
12538 		index = offset % entries_per_desc;
12539 		desc_index = offset / entries_per_desc;
12540 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12541 	}
12542 
12543 	return ret;
12544 }
12545 
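/* issue a multi-BD read for one DFX register type; every BD except the
 * last one carries the NEXT flag
 */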
12546 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12547 				  struct hclge_desc *desc_src, int bd_num,
12548 				  enum hclge_opcode_type cmd)
12549 {
12550 	struct hclge_desc *desc = desc_src;
12551 	int i, ret;
12552 
12553 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12554 	for (i = 0; i < bd_num - 1; i++) {
12555 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12556 		desc++;
12557 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12558 	}
12559 
12560 	desc = desc_src;
12561 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12562 	if (ret)
12563 		dev_err(&hdev->pdev->dev,
12564 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12565 			cmd, ret);
12566 
12567 	return ret;
12568 }
12569 
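/* copy the register values out of the descriptors and append
 * SEPARATOR_VALUE padding up to a line boundary; returns the number of
 * u32 words written
 */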
12570 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12571 				    void *data)
12572 {
12573 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12574 	struct hclge_desc *desc = desc_src;
12575 	u32 *reg = data;
12576 
12577 	entries_per_desc = ARRAY_SIZE(desc->data);
12578 	reg_num = entries_per_desc * bd_num;
12579 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12580 	for (i = 0; i < reg_num; i++) {
12581 		index = i % entries_per_desc;
12582 		desc_index = i / entries_per_desc;
12583 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12584 	}
12585 	for (i = 0; i < separator_num; i++)
12586 		*reg++ = SEPARATOR_VALUE;
12587 
12588 	return reg_num + separator_num;
12589 }
12590 
12591 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12592 {
12593 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12594 	int data_len_per_desc, bd_num, i;
12595 	int *bd_num_list;
12596 	u32 data_len;
12597 	int ret;
12598 
12599 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12600 	if (!bd_num_list)
12601 		return -ENOMEM;
12602 
12603 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12604 	if (ret) {
12605 		dev_err(&hdev->pdev->dev,
12606 			"Get dfx reg bd num fail, status is %d.\n", ret);
12607 		goto out;
12608 	}
12609 
12610 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12611 	*len = 0;
12612 	for (i = 0; i < dfx_reg_type_num; i++) {
12613 		bd_num = bd_num_list[i];
12614 		data_len = data_len_per_desc * bd_num;
12615 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12616 	}
12617 
12618 out:
12619 	kfree(bd_num_list);
12620 	return ret;
12621 }
12622 
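/* dump all DFX registers: get the BD count per type, allocate a
 * descriptor buffer large enough for the biggest type, then read and
 * flatten each type into the output buffer in turn
 */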
12623 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12624 {
12625 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12626 	int bd_num, bd_num_max, buf_len, i;
12627 	struct hclge_desc *desc_src;
12628 	int *bd_num_list;
12629 	u32 *reg = data;
12630 	int ret;
12631 
12632 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12633 	if (!bd_num_list)
12634 		return -ENOMEM;
12635 
12636 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12637 	if (ret) {
12638 		dev_err(&hdev->pdev->dev,
12639 			"Get dfx reg bd num fail, status is %d.\n", ret);
12640 		goto out;
12641 	}
12642 
12643 	bd_num_max = bd_num_list[0];
12644 	for (i = 1; i < dfx_reg_type_num; i++)
12645 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12646 
12647 	buf_len = sizeof(*desc_src) * bd_num_max;
12648 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12649 	if (!desc_src) {
12650 		ret = -ENOMEM;
12651 		goto out;
12652 	}
12653 
12654 	for (i = 0; i < dfx_reg_type_num; i++) {
12655 		bd_num = bd_num_list[i];
12656 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12657 					     hclge_dfx_reg_opcode_list[i]);
12658 		if (ret) {
12659 			dev_err(&hdev->pdev->dev,
12660 				"Get dfx reg fail, status is %d.\n", ret);
12661 			break;
12662 		}
12663 
12664 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12665 	}
12666 
12667 	kfree(desc_src);
12668 out:
12669 	kfree(bd_num_list);
12670 	return ret;
12671 }
12672 
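/* dump the directly readable PF registers in four sections: cmdq
 * registers, common registers, per-TQP ring registers (stride
 * HCLGE_RING_REG_OFFSET) and TQP interrupt registers for each of the
 * (num_msi_used - 1) vectors (stride HCLGE_RING_INT_REG_OFFSET); each
 * section is padded with SEPARATOR_VALUE words and the total number of
 * u32 words written is returned
 */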
12673 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12674 			      struct hnae3_knic_private_info *kinfo)
12675 {
12676 #define HCLGE_RING_REG_OFFSET		0x200
12677 #define HCLGE_RING_INT_REG_OFFSET	0x4
12678 
12679 	int i, j, reg_num, separator_num;
12680 	int data_num_sum;
12681 	u32 *reg = data;
12682 
	/* fetch per-PF register values from the PF PCIe register space */
12684 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12685 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12686 	for (i = 0; i < reg_num; i++)
12687 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12688 	for (i = 0; i < separator_num; i++)
12689 		*reg++ = SEPARATOR_VALUE;
12690 	data_num_sum = reg_num + separator_num;
12691 
12692 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12693 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12694 	for (i = 0; i < reg_num; i++)
12695 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12696 	for (i = 0; i < separator_num; i++)
12697 		*reg++ = SEPARATOR_VALUE;
12698 	data_num_sum += reg_num + separator_num;
12699 
12700 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12701 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12702 	for (j = 0; j < kinfo->num_tqps; j++) {
12703 		for (i = 0; i < reg_num; i++)
12704 			*reg++ = hclge_read_dev(&hdev->hw,
12705 						ring_reg_addr_list[i] +
12706 						HCLGE_RING_REG_OFFSET * j);
12707 		for (i = 0; i < separator_num; i++)
12708 			*reg++ = SEPARATOR_VALUE;
12709 	}
12710 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12711 
12712 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12713 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12714 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12715 		for (i = 0; i < reg_num; i++)
12716 			*reg++ = hclge_read_dev(&hdev->hw,
12717 						tqp_intr_reg_addr_list[i] +
12718 						HCLGE_RING_INT_REG_OFFSET * j);
12719 		for (i = 0; i < separator_num; i++)
12720 			*reg++ = SEPARATOR_VALUE;
12721 	}
12722 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12723 
12724 	return data_num_sum;
12725 }
12726 
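/* the dump length is counted in REG_LEN_PER_LINE units: every address
 * list contributes its data lines plus one separator line, the ring and
 * TQP interrupt sections are repeated per TQP / per vector, and the DFX
 * register length reported by the firmware is appended
 */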
12727 static int hclge_get_regs_len(struct hnae3_handle *handle)
12728 {
12729 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12730 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12731 	struct hclge_vport *vport = hclge_get_vport(handle);
12732 	struct hclge_dev *hdev = vport->back;
12733 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12734 	int regs_lines_32_bit, regs_lines_64_bit;
12735 	int ret;
12736 
12737 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12738 	if (ret) {
12739 		dev_err(&hdev->pdev->dev,
12740 			"Get register number failed, ret = %d.\n", ret);
12741 		return ret;
12742 	}
12743 
12744 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12745 	if (ret) {
12746 		dev_err(&hdev->pdev->dev,
12747 			"Get dfx reg len failed, ret = %d.\n", ret);
12748 		return ret;
12749 	}
12750 
12751 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12752 		REG_SEPARATOR_LINE;
12753 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12754 		REG_SEPARATOR_LINE;
12755 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12756 		REG_SEPARATOR_LINE;
12757 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12758 		REG_SEPARATOR_LINE;
12759 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12760 		REG_SEPARATOR_LINE;
12761 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12762 		REG_SEPARATOR_LINE;
12763 
12764 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12765 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12766 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12767 }
12768 
12769 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12770 			   void *data)
12771 {
12772 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12773 	struct hclge_vport *vport = hclge_get_vport(handle);
12774 	struct hclge_dev *hdev = vport->back;
12775 	u32 regs_num_32_bit, regs_num_64_bit;
12776 	int i, reg_num, separator_num, ret;
12777 	u32 *reg = data;
12778 
12779 	*version = hdev->fw_version;
12780 
12781 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12782 	if (ret) {
12783 		dev_err(&hdev->pdev->dev,
12784 			"Get register number failed, ret = %d.\n", ret);
12785 		return;
12786 	}
12787 
12788 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12789 
12790 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12791 	if (ret) {
12792 		dev_err(&hdev->pdev->dev,
12793 			"Get 32 bit register failed, ret = %d.\n", ret);
12794 		return;
12795 	}
12796 	reg_num = regs_num_32_bit;
12797 	reg += reg_num;
12798 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12799 	for (i = 0; i < separator_num; i++)
12800 		*reg++ = SEPARATOR_VALUE;
12801 
12802 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12803 	if (ret) {
12804 		dev_err(&hdev->pdev->dev,
12805 			"Get 64 bit register failed, ret = %d.\n", ret);
12806 		return;
12807 	}
12808 	reg_num = regs_num_64_bit * 2;
12809 	reg += reg_num;
12810 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12811 	for (i = 0; i < separator_num; i++)
12812 		*reg++ = SEPARATOR_VALUE;
12813 
12814 	ret = hclge_get_dfx_reg(hdev, reg);
12815 	if (ret)
12816 		dev_err(&hdev->pdev->dev,
12817 			"Get dfx register failed, ret = %d.\n", ret);
12818 }
12819 
12820 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12821 {
12822 	struct hclge_set_led_state_cmd *req;
12823 	struct hclge_desc desc;
12824 	int ret;
12825 
12826 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12827 
12828 	req = (struct hclge_set_led_state_cmd *)desc.data;
12829 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12830 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12831 
12832 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12833 	if (ret)
12834 		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);
12836 
12837 	return ret;
12838 }
12839 
12840 enum hclge_led_status {
12841 	HCLGE_LED_OFF,
12842 	HCLGE_LED_ON,
12843 	HCLGE_LED_NO_CHANGE = 0xFF,
12844 };
12845 
12846 static int hclge_set_led_id(struct hnae3_handle *handle,
12847 			    enum ethtool_phys_id_state status)
12848 {
12849 	struct hclge_vport *vport = hclge_get_vport(handle);
12850 	struct hclge_dev *hdev = vport->back;
12851 
12852 	switch (status) {
12853 	case ETHTOOL_ID_ACTIVE:
12854 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12855 	case ETHTOOL_ID_INACTIVE:
12856 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12857 	default:
12858 		return -EINVAL;
12859 	}
12860 }
12861 
12862 static void hclge_get_link_mode(struct hnae3_handle *handle,
12863 				unsigned long *supported,
12864 				unsigned long *advertising)
12865 {
12866 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12867 	struct hclge_vport *vport = hclge_get_vport(handle);
12868 	struct hclge_dev *hdev = vport->back;
12869 	unsigned int idx = 0;
12870 
12871 	for (; idx < size; idx++) {
12872 		supported[idx] = hdev->hw.mac.supported[idx];
12873 		advertising[idx] = hdev->hw.mac.advertising[idx];
12874 	}
12875 }
12876 
12877 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12878 {
12879 	struct hclge_vport *vport = hclge_get_vport(handle);
12880 	struct hclge_dev *hdev = vport->back;
12881 	bool gro_en_old = hdev->gro_en;
12882 	int ret;
12883 
12884 	hdev->gro_en = enable;
12885 	ret = hclge_config_gro(hdev);
12886 	if (ret)
12887 		hdev->gro_en = gro_en_old;
12888 
12889 	return ret;
12890 }
12891 
12892 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12893 {
12894 	struct hclge_vport *vport = &hdev->vport[0];
12895 	struct hnae3_handle *handle = &vport->nic;
12896 	u8 tmp_flags;
12897 	int ret;
12898 	u16 i;
12899 
12900 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12901 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12902 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12903 	}
12904 
12905 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12906 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12907 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12908 					     tmp_flags & HNAE3_MPE);
12909 		if (!ret) {
12910 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12911 				  &vport->state);
12912 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12913 				&vport->state);
12914 		}
12915 	}
12916 
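	/* vport 0 is the PF and was handled above; the remaining vports
	 * belong to VFs, whose unicast/multicast promiscuous requests are
	 * only honoured when the VF is trusted
	 */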
12917 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12918 		bool uc_en = false;
12919 		bool mc_en = false;
12920 		bool bc_en;
12921 
12922 		vport = &hdev->vport[i];
12923 
12924 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12925 					&vport->state))
12926 			continue;
12927 
12928 		if (vport->vf_info.trusted) {
12929 			uc_en = vport->vf_info.request_uc_en > 0 ||
12930 				vport->overflow_promisc_flags &
12931 				HNAE3_OVERFLOW_UPE;
12932 			mc_en = vport->vf_info.request_mc_en > 0 ||
12933 				vport->overflow_promisc_flags &
12934 				HNAE3_OVERFLOW_MPE;
12935 		}
12936 		bc_en = vport->vf_info.request_bc_en > 0;
12937 
12938 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12939 						 mc_en, bc_en);
12940 		if (ret) {
12941 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12942 				&vport->state);
12943 			return;
12944 		}
12945 		hclge_set_vport_vlan_fltr_change(vport);
12946 	}
12947 }
12948 
12949 static bool hclge_module_existed(struct hclge_dev *hdev)
12950 {
12951 	struct hclge_desc desc;
12952 	u32 existed;
12953 	int ret;
12954 
12955 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12956 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12957 	if (ret) {
12958 		dev_err(&hdev->pdev->dev,
12959 			"failed to get SFP exist state, ret = %d\n", ret);
12960 		return false;
12961 	}
12962 
12963 	existed = le32_to_cpu(desc.data[0]);
12964 
12965 	return existed != 0;
12966 }
12967 
/* need 6 BDs (140 bytes in total) in one reading;
 * return the number of bytes actually read, 0 means the read failed.
 */
12971 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12972 				     u32 len, u8 *data)
12973 {
12974 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12975 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12976 	u16 read_len;
12977 	u16 copy_len;
12978 	int ret;
12979 	int i;
12980 
12981 	/* setup all 6 bds to read module eeprom info. */
12982 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12983 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12984 					   true);
12985 
12986 		/* bd0~bd4 need next flag */
12987 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12988 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12989 	}
12990 
	/* set up bd0; this bd contains the offset and read length. */
12992 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12993 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12994 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12995 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12996 
12997 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12998 	if (ret) {
12999 		dev_err(&hdev->pdev->dev,
13000 			"failed to get SFP eeprom info, ret = %d\n", ret);
13001 		return 0;
13002 	}
13003 
13004 	/* copy sfp info from bd0 to out buffer. */
13005 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
13006 	memcpy(data, sfp_info_bd0->data, copy_len);
13007 	read_len = copy_len;
13008 
13009 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
13010 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
13011 		if (read_len >= len)
13012 			return read_len;
13013 
13014 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
13015 		memcpy(data + read_len, desc[i].data, copy_len);
13016 		read_len += copy_len;
13017 	}
13018 
13019 	return read_len;
13020 }
13021 
13022 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
13023 				   u32 len, u8 *data)
13024 {
13025 	struct hclge_vport *vport = hclge_get_vport(handle);
13026 	struct hclge_dev *hdev = vport->back;
13027 	u32 read_len = 0;
13028 	u16 data_len;
13029 
13030 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
13031 		return -EOPNOTSUPP;
13032 
13033 	if (!hclge_module_existed(hdev))
13034 		return -ENXIO;
13035 
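	/* the firmware returns at most HCLGE_SFP_INFO_MAX_LEN bytes per
	 * command, so read the eeprom in chunks until the requested length
	 * has been filled
	 */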
13036 	while (read_len < len) {
13037 		data_len = hclge_get_sfp_eeprom_info(hdev,
13038 						     offset + read_len,
13039 						     len - read_len,
13040 						     data + read_len);
13041 		if (!data_len)
13042 			return -EIO;
13043 
13044 		read_len += data_len;
13045 	}
13046 
13047 	return 0;
13048 }
13049 
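/* link diagnosis information is only supported on device versions newer
 * than V2
 */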
13050 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
13051 					 u32 *status_code)
13052 {
13053 	struct hclge_vport *vport = hclge_get_vport(handle);
13054 	struct hclge_dev *hdev = vport->back;
13055 	struct hclge_desc desc;
13056 	int ret;
13057 
13058 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
13059 		return -EOPNOTSUPP;
13060 
13061 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
13062 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
13063 	if (ret) {
13064 		dev_err(&hdev->pdev->dev,
13065 			"failed to query link diagnosis info, ret = %d\n", ret);
13066 		return ret;
13067 	}
13068 
13069 	*status_code = le32_to_cpu(desc.data[0]);
13070 	return 0;
13071 }
13072 
13073 static const struct hnae3_ae_ops hclge_ops = {
13074 	.init_ae_dev = hclge_init_ae_dev,
13075 	.uninit_ae_dev = hclge_uninit_ae_dev,
13076 	.reset_prepare = hclge_reset_prepare_general,
13077 	.reset_done = hclge_reset_done,
13078 	.init_client_instance = hclge_init_client_instance,
13079 	.uninit_client_instance = hclge_uninit_client_instance,
13080 	.map_ring_to_vector = hclge_map_ring_to_vector,
13081 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
13082 	.get_vector = hclge_get_vector,
13083 	.put_vector = hclge_put_vector,
13084 	.set_promisc_mode = hclge_set_promisc_mode,
13085 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
13086 	.set_loopback = hclge_set_loopback,
13087 	.start = hclge_ae_start,
13088 	.stop = hclge_ae_stop,
13089 	.client_start = hclge_client_start,
13090 	.client_stop = hclge_client_stop,
13091 	.get_status = hclge_get_status,
13092 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
13093 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
13094 	.get_media_type = hclge_get_media_type,
13095 	.check_port_speed = hclge_check_port_speed,
13096 	.get_fec = hclge_get_fec,
13097 	.set_fec = hclge_set_fec,
13098 	.get_rss_key_size = hclge_get_rss_key_size,
13099 	.get_rss = hclge_get_rss,
13100 	.set_rss = hclge_set_rss,
13101 	.set_rss_tuple = hclge_set_rss_tuple,
13102 	.get_rss_tuple = hclge_get_rss_tuple,
13103 	.get_tc_size = hclge_get_tc_size,
13104 	.get_mac_addr = hclge_get_mac_addr,
13105 	.set_mac_addr = hclge_set_mac_addr,
13106 	.do_ioctl = hclge_do_ioctl,
13107 	.add_uc_addr = hclge_add_uc_addr,
13108 	.rm_uc_addr = hclge_rm_uc_addr,
13109 	.add_mc_addr = hclge_add_mc_addr,
13110 	.rm_mc_addr = hclge_rm_mc_addr,
13111 	.set_autoneg = hclge_set_autoneg,
13112 	.get_autoneg = hclge_get_autoneg,
13113 	.restart_autoneg = hclge_restart_autoneg,
13114 	.halt_autoneg = hclge_halt_autoneg,
13115 	.get_pauseparam = hclge_get_pauseparam,
13116 	.set_pauseparam = hclge_set_pauseparam,
13117 	.set_mtu = hclge_set_mtu,
13118 	.reset_queue = hclge_reset_tqp,
13119 	.get_stats = hclge_get_stats,
13120 	.get_mac_stats = hclge_get_mac_stat,
13121 	.update_stats = hclge_update_stats,
13122 	.get_strings = hclge_get_strings,
13123 	.get_sset_count = hclge_get_sset_count,
13124 	.get_fw_version = hclge_get_fw_version,
13125 	.get_mdix_mode = hclge_get_mdix_mode,
13126 	.enable_vlan_filter = hclge_enable_vlan_filter,
13127 	.set_vlan_filter = hclge_set_vlan_filter,
13128 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13129 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13130 	.reset_event = hclge_reset_event,
13131 	.get_reset_level = hclge_get_reset_level,
13132 	.set_default_reset_request = hclge_set_def_reset_request,
13133 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13134 	.set_channels = hclge_set_channels,
13135 	.get_channels = hclge_get_channels,
13136 	.get_regs_len = hclge_get_regs_len,
13137 	.get_regs = hclge_get_regs,
13138 	.set_led_id = hclge_set_led_id,
13139 	.get_link_mode = hclge_get_link_mode,
13140 	.add_fd_entry = hclge_add_fd_entry,
13141 	.del_fd_entry = hclge_del_fd_entry,
13142 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13143 	.get_fd_rule_info = hclge_get_fd_rule_info,
13144 	.get_fd_all_rules = hclge_get_all_rules,
13145 	.enable_fd = hclge_enable_fd,
13146 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
13147 	.dbg_read_cmd = hclge_dbg_read_cmd,
13148 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
13149 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
13150 	.ae_dev_resetting = hclge_ae_dev_resetting,
13151 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13152 	.set_gro_en = hclge_gro_en,
13153 	.get_global_queue_id = hclge_covert_handle_qid_global,
13154 	.set_timer_task = hclge_set_timer_task,
13155 	.mac_connect_phy = hclge_mac_connect_phy,
13156 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
13157 	.get_vf_config = hclge_get_vf_config,
13158 	.set_vf_link_state = hclge_set_vf_link_state,
13159 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
13160 	.set_vf_trust = hclge_set_vf_trust,
13161 	.set_vf_rate = hclge_set_vf_rate,
13162 	.set_vf_mac = hclge_set_vf_mac,
13163 	.get_module_eeprom = hclge_get_module_eeprom,
13164 	.get_cmdq_stat = hclge_get_cmdq_stat,
13165 	.add_cls_flower = hclge_add_cls_flower,
13166 	.del_cls_flower = hclge_del_cls_flower,
13167 	.cls_flower_active = hclge_is_cls_flower_active,
13168 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13169 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13170 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
13171 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
13172 	.get_ts_info = hclge_ptp_get_ts_info,
13173 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13174 };
13175 
13176 static struct hnae3_ae_algo ae_algo = {
13177 	.ops = &hclge_ops,
13178 	.pdev_id_table = ae_algo_pci_tbl,
13179 };
13180 
13181 static int hclge_init(void)
13182 {
13183 	pr_info("%s is initializing\n", HCLGE_NAME);
13184 
13185 	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
13186 	if (!hclge_wq) {
13187 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13188 		return -ENOMEM;
13189 	}
13190 
13191 	hnae3_register_ae_algo(&ae_algo);
13192 
13193 	return 0;
13194 }
13195 
13196 static void hclge_exit(void)
13197 {
13198 	hnae3_unregister_ae_algo_prepare(&ae_algo);
13199 	hnae3_unregister_ae_algo(&ae_algo);
13200 	destroy_workqueue(hclge_wq);
13201 }
13202 module_init(hclge_init);
13203 module_exit(hclge_exit);
13204 
13205 MODULE_LICENSE("GPL");
13206 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13207 MODULE_DESCRIPTION("HCLGE Driver");
13208 MODULE_VERSION(HCLGE_MOD_VERSION);
13209