1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27 
28 #define HCLGE_NAME			"hclge"
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 
32 #define HCLGE_BUF_SIZE_UNIT	256U
33 #define HCLGE_BUF_MUL_BY	2
34 #define HCLGE_BUF_DIV_BY	2
35 #define NEED_RESERVE_TC_NUM	2
36 #define BUF_MAX_PERCENT		100
37 #define BUF_RESERVE_PERCENT	90
38 
39 #define HCLGE_RESET_MAX_FAIL_CNT	5
40 #define HCLGE_RESET_SYNC_TIME		100
41 #define HCLGE_PF_RESET_SYNC_TIME	20
42 #define HCLGE_PF_RESET_SYNC_CNT		1500
43 
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57 
58 #define HCLGE_LINK_STATUS_MS	10
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 
76 static struct hnae3_ae_algo ae_algo;
77 
78 static struct workqueue_struct *hclge_wq;
79 
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 	/* required last entry */
90 	{0, }
91 };
92 
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 					 HCLGE_NIC_CSQ_DEPTH_REG,
98 					 HCLGE_NIC_CSQ_TAIL_REG,
99 					 HCLGE_NIC_CSQ_HEAD_REG,
100 					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 					 HCLGE_NIC_CRQ_DEPTH_REG,
103 					 HCLGE_NIC_CRQ_TAIL_REG,
104 					 HCLGE_NIC_CRQ_HEAD_REG,
105 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 					 HCLGE_CMDQ_INTR_STS_REG,
107 					 HCLGE_CMDQ_INTR_EN_REG,
108 					 HCLGE_CMDQ_INTR_GEN_REG};
109 
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 					   HCLGE_PF_OTHER_INT_REG,
112 					   HCLGE_MISC_RESET_STS_REG,
113 					   HCLGE_MISC_VECTOR_INT_STS,
114 					   HCLGE_GLOBAL_RESET_REG,
115 					   HCLGE_FUN_RST_ING,
116 					   HCLGE_GRO_EN_REG};
117 
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 					 HCLGE_RING_RX_ADDR_H_REG,
120 					 HCLGE_RING_RX_BD_NUM_REG,
121 					 HCLGE_RING_RX_BD_LENGTH_REG,
122 					 HCLGE_RING_RX_MERGE_EN_REG,
123 					 HCLGE_RING_RX_TAIL_REG,
124 					 HCLGE_RING_RX_HEAD_REG,
125 					 HCLGE_RING_RX_FBD_NUM_REG,
126 					 HCLGE_RING_RX_OFFSET_REG,
127 					 HCLGE_RING_RX_FBD_OFFSET_REG,
128 					 HCLGE_RING_RX_STASH_REG,
129 					 HCLGE_RING_RX_BD_ERR_REG,
130 					 HCLGE_RING_TX_ADDR_L_REG,
131 					 HCLGE_RING_TX_ADDR_H_REG,
132 					 HCLGE_RING_TX_BD_NUM_REG,
133 					 HCLGE_RING_TX_PRIORITY_REG,
134 					 HCLGE_RING_TX_TC_REG,
135 					 HCLGE_RING_TX_MERGE_EN_REG,
136 					 HCLGE_RING_TX_TAIL_REG,
137 					 HCLGE_RING_TX_HEAD_REG,
138 					 HCLGE_RING_TX_FBD_NUM_REG,
139 					 HCLGE_RING_TX_OFFSET_REG,
140 					 HCLGE_RING_TX_EBD_NUM_REG,
141 					 HCLGE_RING_TX_EBD_OFFSET_REG,
142 					 HCLGE_RING_TX_BD_ERR_REG,
143 					 HCLGE_RING_EN_REG};
144 
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 					     HCLGE_TQP_INTR_GL0_REG,
147 					     HCLGE_TQP_INTR_GL1_REG,
148 					     HCLGE_TQP_INTR_GL2_REG,
149 					     HCLGE_TQP_INTR_RL_REG};
150 
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 	"App    Loopback test",
153 	"Serdes serial Loopback test",
154 	"Serdes parallel Loopback test",
155 	"Phy    Loopback test"
156 };
157 
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
165 	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
167 	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
169 	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
171 	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
173 	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
175 	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
177 	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
179 	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
181 	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
183 	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
185 	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
187 	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
189 	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
191 	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
193 	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
195 	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
197 	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
199 	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
201 	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
203 	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
205 	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
207 	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
209 	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
211 	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
213 	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
215 	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
217 	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
219 	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
221 	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
223 	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
225 	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
227 	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
229 	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
231 	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
233 	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
235 	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
237 	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
239 	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
241 	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
243 	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
245 	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
247 	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
249 	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
251 	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
253 	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
255 	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
257 	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
259 	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
261 	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
263 	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
265 	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
267 	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
269 	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
271 	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
273 	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
275 	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
277 	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
279 	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
281 	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
283 	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
285 	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
287 	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
289 	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
291 	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
293 	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
295 	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
297 	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
299 	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
301 	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
303 	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
305 	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
307 	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
309 	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
311 	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
313 	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
315 	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
317 	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
319 	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
321 	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
323 	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
325 	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
327 	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
328 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
329 	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
330 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
331 	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
332 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
333 	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
334 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
335 	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
336 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
337 	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
338 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
339 
340 	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
341 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
342 	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
343 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
344 	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
345 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
346 	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
347 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
348 	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
349 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
350 	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
351 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
352 	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
353 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
354 	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
355 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
356 	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
357 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
358 	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
359 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
360 	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
361 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
362 	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
363 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
364 };
365 
366 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
367 	{
368 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
369 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
370 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
371 		.i_port_bitmap = 0x1,
372 	},
373 };
374 
375 static const u8 hclge_hash_key[] = {
376 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
377 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
378 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
379 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
380 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
381 };
382 
383 static const u32 hclge_dfx_bd_offset_list[] = {
384 	HCLGE_DFX_BIOS_BD_OFFSET,
385 	HCLGE_DFX_SSU_0_BD_OFFSET,
386 	HCLGE_DFX_SSU_1_BD_OFFSET,
387 	HCLGE_DFX_IGU_BD_OFFSET,
388 	HCLGE_DFX_RPU_0_BD_OFFSET,
389 	HCLGE_DFX_RPU_1_BD_OFFSET,
390 	HCLGE_DFX_NCSI_BD_OFFSET,
391 	HCLGE_DFX_RTC_BD_OFFSET,
392 	HCLGE_DFX_PPP_BD_OFFSET,
393 	HCLGE_DFX_RCB_BD_OFFSET,
394 	HCLGE_DFX_TQP_BD_OFFSET,
395 	HCLGE_DFX_SSU_2_BD_OFFSET
396 };
397 
398 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
399 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
400 	HCLGE_OPC_DFX_SSU_REG_0,
401 	HCLGE_OPC_DFX_SSU_REG_1,
402 	HCLGE_OPC_DFX_IGU_EGU_REG,
403 	HCLGE_OPC_DFX_RPU_REG_0,
404 	HCLGE_OPC_DFX_RPU_REG_1,
405 	HCLGE_OPC_DFX_NCSI_REG,
406 	HCLGE_OPC_DFX_RTC_REG,
407 	HCLGE_OPC_DFX_PPP_REG,
408 	HCLGE_OPC_DFX_RCB_REG,
409 	HCLGE_OPC_DFX_TQP_REG,
410 	HCLGE_OPC_DFX_SSU_REG_2
411 };
412 
413 static const struct key_info meta_data_key_info[] = {
414 	{ PACKET_TYPE_ID, 6 },
415 	{ IP_FRAGEMENT, 1 },
416 	{ ROCE_TYPE, 1 },
417 	{ NEXT_KEY, 5 },
418 	{ VLAN_NUMBER, 2 },
419 	{ SRC_VPORT, 12 },
420 	{ DST_VPORT, 12 },
421 	{ TUNNEL_PACKET, 1 },
422 };
423 
424 static const struct key_info tuple_key_info[] = {
425 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
426 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
427 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
428 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
429 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
430 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
431 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
432 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
433 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
434 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
435 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
436 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
437 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
438 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
439 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
440 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
441 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
442 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
444 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
445 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
446 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
447 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
448 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
449 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
450 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
451 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
452 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
453 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
454 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
455 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
456 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
457 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
458 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
459 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
460 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
461 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
462 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
463 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
464 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
465 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
466 	{ INNER_DST_IP, 32, KEY_OPT_IP,
467 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
468 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
469 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
470 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
471 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
472 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
473 	  offsetof(struct hclge_fd_rule, tuples.src_port),
474 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
475 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
476 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
477 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
478 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
479 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
480 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
481 };
482 
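/* Legacy MAC statistics path: read the counters with a fixed number of
 * descriptors (HCLGE_MAC_CMD_NUM) and accumulate each returned 64-bit
 * value into hdev->mac_stats. Used when the firmware does not report
 * how many MAC statistics registers it supports (see
 * hclge_mac_update_stats()).
 */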
483 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
484 {
485 #define HCLGE_MAC_CMD_NUM 21
486 
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
489 	__le64 *desc_data;
490 	u32 data_size;
491 	int ret;
492 	u32 i;
493 
494 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
495 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
496 	if (ret) {
497 		dev_err(&hdev->pdev->dev,
498 			"Get MAC pkt stats fail, status = %d.\n", ret);
499 
500 		return ret;
501 	}
502 
503 	/* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */
504 	data_size = sizeof(desc) / (sizeof(u64)) - 1;
505 
506 	desc_data = (__le64 *)(&desc[0].data[0]);
507 	for (i = 0; i < data_size; i++) {
508 		/* data memory is contiguous because only the first desc has a
509 		 * header in this command
510 		 */
511 		*data += le64_to_cpu(*desc_data);
512 		data++;
513 		desc_data++;
514 	}
515 
516 	return 0;
517 }
518 
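/* Full MAC statistics path: the firmware reports the number of MAC
 * statistics registers in dev_specs.mac_stats_num, so allocate just
 * enough descriptors to read them all and accumulate the counters into
 * hdev->mac_stats.
 */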
519 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
520 {
521 #define HCLGE_REG_NUM_PER_DESC		4
522 
523 	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
524 	u64 *data = (u64 *)(&hdev->mac_stats);
525 	struct hclge_desc *desc;
526 	__le64 *desc_data;
527 	u32 data_size;
528 	u32 desc_num;
529 	int ret;
530 	u32 i;
531 
532 	/* The first desc has a 64-bit header, so it must be accounted for */
533 	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
534 
535 	/* This may be called inside atomic sections,
536 	 * so GFP_ATOMIC is more suitable here
537 	 */
538 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
539 	if (!desc)
540 		return -ENOMEM;
541 
542 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
543 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
544 	if (ret) {
545 		kfree(desc);
546 		return ret;
547 	}
548 
549 	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
550 
551 	desc_data = (__le64 *)(&desc[0].data[0]);
552 	for (i = 0; i < data_size; i++) {
553 		/* data memory is contiguous because only the first desc has a
554 		 * header in this command
555 		 */
556 		*data += le64_to_cpu(*desc_data);
557 		data++;
558 		desc_data++;
559 	}
560 
561 	kfree(desc);
562 
563 	return 0;
564 }
565 
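/* Query how many MAC statistics registers the firmware supports.
 * A reported value of zero is treated as invalid.
 */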
566 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
567 {
568 	struct hclge_desc desc;
569 	int ret;
570 
571 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
572 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
573 	if (ret) {
574 		dev_err(&hdev->pdev->dev,
575 			"failed to query mac statistics reg number, ret = %d\n",
576 			ret);
577 		return ret;
578 	}
579 
580 	*reg_num = le32_to_cpu(desc.data[0]);
581 	if (*reg_num == 0) {
582 		dev_err(&hdev->pdev->dev,
583 			"mac statistics reg number is invalid!\n");
584 		return -ENODATA;
585 	}
586 
587 	return 0;
588 }
589 
590 static int hclge_mac_update_stats(struct hclge_dev *hdev)
591 {
592 	/* The firmware supports the new statistics acquisition method */
593 	if (hdev->ae_dev->dev_specs.mac_stats_num)
594 		return hclge_mac_update_stats_complete(hdev);
595 	else
596 		return hclge_mac_update_stats_defective(hdev);
597 }
598 
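/* Accumulate the per-queue packet counters. Each queue is queried with
 * its own descriptor, RX stats first and then TX stats.
 */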
599 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
600 {
601 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
602 	struct hclge_vport *vport = hclge_get_vport(handle);
603 	struct hclge_dev *hdev = vport->back;
604 	struct hnae3_queue *queue;
605 	struct hclge_desc desc[1];
606 	struct hclge_tqp *tqp;
607 	int ret, i;
608 
609 	for (i = 0; i < kinfo->num_tqps; i++) {
610 		queue = handle->kinfo.tqp[i];
611 		tqp = container_of(queue, struct hclge_tqp, q);
612 		/* command : HCLGE_OPC_QUERY_RX_STATS */
613 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
614 					   true);
615 
616 		desc[0].data[0] = cpu_to_le32(tqp->index);
617 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
618 		if (ret) {
619 			dev_err(&hdev->pdev->dev,
620 				"Query tqp stat fail, status = %d, queue = %d\n",
621 				ret, i);
622 			return ret;
623 		}
624 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
625 			le32_to_cpu(desc[0].data[1]);
626 	}
627 
628 	for (i = 0; i < kinfo->num_tqps; i++) {
629 		queue = handle->kinfo.tqp[i];
630 		tqp = container_of(queue, struct hclge_tqp, q);
631 		/* command : HCLGE_OPC_QUERY_TX_STATS */
632 		hclge_cmd_setup_basic_desc(&desc[0],
633 					   HCLGE_OPC_QUERY_TX_STATS,
634 					   true);
635 
636 		desc[0].data[0] = cpu_to_le32(tqp->index);
637 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
638 		if (ret) {
639 			dev_err(&hdev->pdev->dev,
640 				"Query tqp stat fail, status = %d, queue = %d\n",
641 				ret, i);
642 			return ret;
643 		}
644 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
645 			le32_to_cpu(desc[0].data[1]);
646 	}
647 
648 	return 0;
649 }
650 
651 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
652 {
653 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
654 	struct hclge_tqp *tqp;
655 	u64 *buff = data;
656 	int i;
657 
658 	for (i = 0; i < kinfo->num_tqps; i++) {
659 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
660 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
661 	}
662 
663 	for (i = 0; i < kinfo->num_tqps; i++) {
664 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
665 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
666 	}
667 
668 	return buff;
669 }
670 
671 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
672 {
673 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
674 
675 	/* each tqp has both a TX and an RX queue */
676 	return kinfo->num_tqps * 2;
677 }
678 
679 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
680 {
681 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
682 	u8 *buff = data;
683 	int i;
684 
685 	for (i = 0; i < kinfo->num_tqps; i++) {
686 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
687 			struct hclge_tqp, q);
688 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
689 			 tqp->index);
690 		buff = buff + ETH_GSTRING_LEN;
691 	}
692 
693 	for (i = 0; i < kinfo->num_tqps; i++) {
694 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
695 			struct hclge_tqp, q);
696 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
697 			 tqp->index);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	return buff;
702 }
703 
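/* Count how many of the MAC statistics strings are supported by this
 * hardware, i.e. those whose stats_num does not exceed the number of
 * MAC statistics registers reported by the firmware.
 */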
704 static int hclge_comm_get_count(struct hclge_dev *hdev,
705 				const struct hclge_comm_stats_str strs[],
706 				u32 size)
707 {
708 	int count = 0;
709 	u32 i;
710 
711 	for (i = 0; i < size; i++)
712 		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
713 			count++;
714 
715 	return count;
716 }
717 
718 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
719 				 const struct hclge_comm_stats_str strs[],
720 				 int size, u64 *data)
721 {
722 	u64 *buf = data;
723 	u32 i;
724 
725 	for (i = 0; i < size; i++) {
726 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
727 			continue;
728 
729 		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
730 		buf++;
731 	}
732 
733 	return buf;
734 }
735 
736 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
737 				  const struct hclge_comm_stats_str strs[],
738 				  int size, u8 *data)
739 {
740 	char *buff = (char *)data;
741 	u32 i;
742 
743 	if (stringset != ETH_SS_STATS)
744 		return buff;
745 
746 	for (i = 0; i < size; i++) {
747 		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
748 			continue;
749 
750 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
751 		buff = buff + ETH_GSTRING_LEN;
752 	}
753 
754 	return (u8 *)buff;
755 }
756 
757 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
758 {
759 	struct hnae3_handle *handle;
760 	int status;
761 
762 	handle = &hdev->vport[0].nic;
763 	if (handle->client) {
764 		status = hclge_tqps_update_stats(handle);
765 		if (status) {
766 			dev_err(&hdev->pdev->dev,
767 				"Update TQPS stats fail, status = %d.\n",
768 				status);
769 		}
770 	}
771 
772 	status = hclge_mac_update_stats(hdev);
773 	if (status)
774 		dev_err(&hdev->pdev->dev,
775 			"Update MAC stats fail, status = %d.\n", status);
776 }
777 
778 static void hclge_update_stats(struct hnae3_handle *handle,
779 			       struct net_device_stats *net_stats)
780 {
781 	struct hclge_vport *vport = hclge_get_vport(handle);
782 	struct hclge_dev *hdev = vport->back;
783 	int status;
784 
785 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
786 		return;
787 
788 	status = hclge_mac_update_stats(hdev);
789 	if (status)
790 		dev_err(&hdev->pdev->dev,
791 			"Update MAC stats fail, status = %d.\n",
792 			status);
793 
794 	status = hclge_tqps_update_stats(handle);
795 	if (status)
796 		dev_err(&hdev->pdev->dev,
797 			"Update TQPS stats fail, status = %d.\n",
798 			status);
799 
800 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
801 }
802 
803 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
804 {
805 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
806 		HNAE3_SUPPORT_PHY_LOOPBACK | \
807 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
808 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
809 
810 	struct hclge_vport *vport = hclge_get_vport(handle);
811 	struct hclge_dev *hdev = vport->back;
812 	int count = 0;
813 
814 	/* Loopback test support rules:
815 	 * mac: only supported in GE mode
816 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
817 	 * phy: only supported when a phy device exists on the board
818 	 */
819 	if (stringset == ETH_SS_TEST) {
820 		/* clear the loopback bit flags first */
821 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
822 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
823 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
824 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
825 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
826 			count += 1;
827 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
828 		}
829 
830 		count += 2;
831 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
832 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
833 
834 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
835 		     hdev->hw.mac.phydev->drv->set_loopback) ||
836 		    hnae3_dev_phy_imp_supported(hdev)) {
837 			count += 1;
838 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
839 		}
840 	} else if (stringset == ETH_SS_STATS) {
841 		count = hclge_comm_get_count(hdev, g_mac_stats_string,
842 					     ARRAY_SIZE(g_mac_stats_string)) +
843 			hclge_tqps_get_sset_count(handle, stringset);
844 	}
845 
846 	return count;
847 }
848 
849 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
850 			      u8 *data)
851 {
852 	struct hclge_vport *vport = hclge_get_vport(handle);
853 	struct hclge_dev *hdev = vport->back;
854 	u8 *p = (char *)data;
855 	int size;
856 
857 	if (stringset == ETH_SS_STATS) {
858 		size = ARRAY_SIZE(g_mac_stats_string);
859 		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
860 					   size, p);
861 		p = hclge_tqps_get_strings(handle, p);
862 	} else if (stringset == ETH_SS_TEST) {
863 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
864 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
865 			       ETH_GSTRING_LEN);
866 			p += ETH_GSTRING_LEN;
867 		}
868 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
869 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
870 			       ETH_GSTRING_LEN);
871 			p += ETH_GSTRING_LEN;
872 		}
873 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
874 			memcpy(p,
875 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
876 			       ETH_GSTRING_LEN);
877 			p += ETH_GSTRING_LEN;
878 		}
879 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
880 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
881 			       ETH_GSTRING_LEN);
882 			p += ETH_GSTRING_LEN;
883 		}
884 	}
885 }
886 
887 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
888 {
889 	struct hclge_vport *vport = hclge_get_vport(handle);
890 	struct hclge_dev *hdev = vport->back;
891 	u64 *p;
892 
893 	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
894 				 ARRAY_SIZE(g_mac_stats_string), data);
895 	p = hclge_tqps_get_stats(handle, p);
896 }
897 
898 static void hclge_get_mac_stat(struct hnae3_handle *handle,
899 			       struct hns3_mac_stats *mac_stats)
900 {
901 	struct hclge_vport *vport = hclge_get_vport(handle);
902 	struct hclge_dev *hdev = vport->back;
903 
904 	hclge_update_stats(handle, NULL);
905 
906 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
907 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
908 }
909 
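/* Parse the function status response: the PF state must be DONE, then
 * record whether this PF is the main PF and extract the mac_id.
 */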
910 static int hclge_parse_func_status(struct hclge_dev *hdev,
911 				   struct hclge_func_status_cmd *status)
912 {
913 #define HCLGE_MAC_ID_MASK	0xF
914 
915 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
916 		return -EINVAL;
917 
918 	/* Record whether this pf is the main pf */
919 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
920 		hdev->flag |= HCLGE_FLAG_MAIN;
921 	else
922 		hdev->flag &= ~HCLGE_FLAG_MAIN;
923 
924 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
925 	return 0;
926 }
927 
928 static int hclge_query_function_status(struct hclge_dev *hdev)
929 {
930 #define HCLGE_QUERY_MAX_CNT	5
931 
932 	struct hclge_func_status_cmd *req;
933 	struct hclge_desc desc;
934 	int timeout = 0;
935 	int ret;
936 
937 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
938 	req = (struct hclge_func_status_cmd *)desc.data;
939 
940 	do {
941 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
942 		if (ret) {
943 			dev_err(&hdev->pdev->dev,
944 				"query function status failed %d.\n", ret);
945 			return ret;
946 		}
947 
948 		/* Check whether pf reset is done */
949 		if (req->pf_state)
950 			break;
951 		usleep_range(1000, 2000);
952 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
953 
954 	return hclge_parse_func_status(hdev, req);
955 }
956 
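/* Query the PF resources assigned by firmware: the number of tqps, the
 * packet/tx/dv buffer sizes and the NIC/RoCE MSI vector counts.
 */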
957 static int hclge_query_pf_resource(struct hclge_dev *hdev)
958 {
959 	struct hclge_pf_res_cmd *req;
960 	struct hclge_desc desc;
961 	int ret;
962 
963 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
964 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
965 	if (ret) {
966 		dev_err(&hdev->pdev->dev,
967 			"query pf resource failed %d.\n", ret);
968 		return ret;
969 	}
970 
971 	req = (struct hclge_pf_res_cmd *)desc.data;
972 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
973 			 le16_to_cpu(req->ext_tqp_num);
974 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
975 
976 	if (req->tx_buf_size)
977 		hdev->tx_buf_size =
978 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
979 	else
980 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
981 
982 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
983 
984 	if (req->dv_buf_size)
985 		hdev->dv_buf_size =
986 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
987 	else
988 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
989 
990 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
991 
992 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
993 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
994 		dev_err(&hdev->pdev->dev,
995 			"only %u msi resources available, not enough for pf(min:2).\n",
996 			hdev->num_nic_msi);
997 		return -EINVAL;
998 	}
999 
1000 	if (hnae3_dev_roce_supported(hdev)) {
1001 		hdev->num_roce_msi =
1002 			le16_to_cpu(req->pf_intr_vector_number_roce);
1003 
1004 		/* PF should have both NIC and RoCE vectors;
1005 		 * the NIC vectors are queued before the RoCE vectors.
1006 		 */
1007 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
1008 	} else {
1009 		hdev->num_msi = hdev->num_nic_msi;
1010 	}
1011 
1012 	return 0;
1013 }
1014 
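/* Translate the firmware speed code into the driver's MAC speed value. */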
1015 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
1016 {
1017 	switch (speed_cmd) {
1018 	case HCLGE_FW_MAC_SPEED_10M:
1019 		*speed = HCLGE_MAC_SPEED_10M;
1020 		break;
1021 	case HCLGE_FW_MAC_SPEED_100M:
1022 		*speed = HCLGE_MAC_SPEED_100M;
1023 		break;
1024 	case HCLGE_FW_MAC_SPEED_1G:
1025 		*speed = HCLGE_MAC_SPEED_1G;
1026 		break;
1027 	case HCLGE_FW_MAC_SPEED_10G:
1028 		*speed = HCLGE_MAC_SPEED_10G;
1029 		break;
1030 	case HCLGE_FW_MAC_SPEED_25G:
1031 		*speed = HCLGE_MAC_SPEED_25G;
1032 		break;
1033 	case HCLGE_FW_MAC_SPEED_40G:
1034 		*speed = HCLGE_MAC_SPEED_40G;
1035 		break;
1036 	case HCLGE_FW_MAC_SPEED_50G:
1037 		*speed = HCLGE_MAC_SPEED_50G;
1038 		break;
1039 	case HCLGE_FW_MAC_SPEED_100G:
1040 		*speed = HCLGE_MAC_SPEED_100G;
1041 		break;
1042 	case HCLGE_FW_MAC_SPEED_200G:
1043 		*speed = HCLGE_MAC_SPEED_200G;
1044 		break;
1045 	default:
1046 		return -EINVAL;
1047 	}
1048 
1049 	return 0;
1050 }
1051 
1052 static const struct hclge_speed_bit_map speed_bit_map[] = {
1053 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
1054 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
1055 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1056 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1057 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1058 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1059 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1060 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1061 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1062 };
1063 
1064 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1065 {
1066 	u16 i;
1067 
1068 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1069 		if (speed == speed_bit_map[i].speed) {
1070 			*speed_bit = speed_bit_map[i].speed_bit;
1071 			return 0;
1072 		}
1073 	}
1074 
1075 	return -EINVAL;
1076 }
1077 
1078 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1079 {
1080 	struct hclge_vport *vport = hclge_get_vport(handle);
1081 	struct hclge_dev *hdev = vport->back;
1082 	u32 speed_ability = hdev->hw.mac.speed_ability;
1083 	u32 speed_bit = 0;
1084 	int ret;
1085 
1086 	ret = hclge_get_speed_bit(speed, &speed_bit);
1087 	if (ret)
1088 		return ret;
1089 
1090 	if (speed_bit & speed_ability)
1091 		return 0;
1092 
1093 	return -EINVAL;
1094 }
1095 
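/* The hclge_convert_setting_*() helpers below translate the firmware
 * speed ability bits into the corresponding ethtool link mode bits for
 * SR, LR, CR and KR media respectively.
 */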
1096 static void hclge_convert_setting_sr(u16 speed_ability,
1097 				     unsigned long *link_mode)
1098 {
1099 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1101 				 link_mode);
1102 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1104 				 link_mode);
1105 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1106 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1107 				 link_mode);
1108 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1109 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1110 				 link_mode);
1111 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1113 				 link_mode);
1114 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1115 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1116 				 link_mode);
1117 }
1118 
1119 static void hclge_convert_setting_lr(u16 speed_ability,
1120 				     unsigned long *link_mode)
1121 {
1122 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1123 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1124 				 link_mode);
1125 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1126 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1127 				 link_mode);
1128 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1129 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1130 				 link_mode);
1131 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1132 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1133 				 link_mode);
1134 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1135 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1136 				 link_mode);
1137 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1138 		linkmode_set_bit(
1139 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1140 			link_mode);
1141 }
1142 
1143 static void hclge_convert_setting_cr(u16 speed_ability,
1144 				     unsigned long *link_mode)
1145 {
1146 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1148 				 link_mode);
1149 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1150 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1151 				 link_mode);
1152 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1153 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1154 				 link_mode);
1155 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1156 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1157 				 link_mode);
1158 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1159 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1160 				 link_mode);
1161 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1162 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1163 				 link_mode);
1164 }
1165 
1166 static void hclge_convert_setting_kr(u16 speed_ability,
1167 				     unsigned long *link_mode)
1168 {
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1171 				 link_mode);
1172 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1173 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1174 				 link_mode);
1175 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1177 				 link_mode);
1178 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1179 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1180 				 link_mode);
1181 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1182 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1183 				 link_mode);
1184 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1185 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1186 				 link_mode);
1187 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1188 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1189 				 link_mode);
1190 }
1191 
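/* Derive the supported FEC modes from the current MAC speed, updating
 * both the supported link mode bits and mac->fec_ability.
 */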
1192 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1193 {
1194 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1195 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1196 
1197 	switch (mac->speed) {
1198 	case HCLGE_MAC_SPEED_10G:
1199 	case HCLGE_MAC_SPEED_40G:
1200 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1201 				 mac->supported);
1202 		mac->fec_ability =
1203 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1204 		break;
1205 	case HCLGE_MAC_SPEED_25G:
1206 	case HCLGE_MAC_SPEED_50G:
1207 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1208 				 mac->supported);
1209 		mac->fec_ability =
1210 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1211 			BIT(HNAE3_FEC_AUTO);
1212 		break;
1213 	case HCLGE_MAC_SPEED_100G:
1214 	case HCLGE_MAC_SPEED_200G:
1215 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1216 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1217 		break;
1218 	default:
1219 		mac->fec_ability = 0;
1220 		break;
1221 	}
1222 }
1223 
1224 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1225 					u16 speed_ability)
1226 {
1227 	struct hclge_mac *mac = &hdev->hw.mac;
1228 
1229 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1230 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1231 				 mac->supported);
1232 
1233 	hclge_convert_setting_sr(speed_ability, mac->supported);
1234 	hclge_convert_setting_lr(speed_ability, mac->supported);
1235 	hclge_convert_setting_cr(speed_ability, mac->supported);
1236 	if (hnae3_dev_fec_supported(hdev))
1237 		hclge_convert_setting_fec(mac);
1238 
1239 	if (hnae3_dev_pause_supported(hdev))
1240 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1241 
1242 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1243 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1244 }
1245 
1246 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1247 					    u16 speed_ability)
1248 {
1249 	struct hclge_mac *mac = &hdev->hw.mac;
1250 
1251 	hclge_convert_setting_kr(speed_ability, mac->supported);
1252 	if (hnae3_dev_fec_supported(hdev))
1253 		hclge_convert_setting_fec(mac);
1254 
1255 	if (hnae3_dev_pause_supported(hdev))
1256 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1257 
1258 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1259 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1260 }
1261 
1262 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1263 					 u16 speed_ability)
1264 {
1265 	unsigned long *supported = hdev->hw.mac.supported;
1266 
1267 	/* default to supporting all speeds for a GE port */
1268 	if (!speed_ability)
1269 		speed_ability = HCLGE_SUPPORT_GE;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1272 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1273 				 supported);
1274 
1275 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1276 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1277 				 supported);
1278 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1279 				 supported);
1280 	}
1281 
1282 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1283 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1284 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1285 	}
1286 
1287 	if (hnae3_dev_pause_supported(hdev)) {
1288 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1289 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1290 	}
1291 
1292 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1293 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1294 }
1295 
1296 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1297 {
1298 	u8 media_type = hdev->hw.mac.media_type;
1299 
1300 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1301 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1302 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1303 		hclge_parse_copper_link_mode(hdev, speed_ability);
1304 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1305 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1306 }
1307 
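/* Return the highest speed present in the ability bitmap, falling back
 * to 1G if no bit is set.
 */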
1308 static u32 hclge_get_max_speed(u16 speed_ability)
1309 {
1310 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1311 		return HCLGE_MAC_SPEED_200G;
1312 
1313 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1314 		return HCLGE_MAC_SPEED_100G;
1315 
1316 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1317 		return HCLGE_MAC_SPEED_50G;
1318 
1319 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1320 		return HCLGE_MAC_SPEED_40G;
1321 
1322 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1323 		return HCLGE_MAC_SPEED_25G;
1324 
1325 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1326 		return HCLGE_MAC_SPEED_10G;
1327 
1328 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1329 		return HCLGE_MAC_SPEED_1G;
1330 
1331 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1332 		return HCLGE_MAC_SPEED_100M;
1333 
1334 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1335 		return HCLGE_MAC_SPEED_10M;
1336 
1337 	return HCLGE_MAC_SPEED_1G;
1338 }
1339 
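/* Unpack the configuration parameters returned in the two descriptors
 * of the HCLGE_OPC_GET_CFG_PARAM command into struct hclge_cfg.
 */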
1340 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1341 {
1342 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1343 #define SPEED_ABILITY_EXT_SHIFT			8
1344 
1345 	struct hclge_cfg_param_cmd *req;
1346 	u64 mac_addr_tmp_high;
1347 	u16 speed_ability_ext;
1348 	u64 mac_addr_tmp;
1349 	unsigned int i;
1350 
1351 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1352 
1353 	/* get the configuration */
1354 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1355 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1356 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1357 					    HCLGE_CFG_TQP_DESC_N_M,
1358 					    HCLGE_CFG_TQP_DESC_N_S);
1359 
1360 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1361 					HCLGE_CFG_PHY_ADDR_M,
1362 					HCLGE_CFG_PHY_ADDR_S);
1363 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1364 					  HCLGE_CFG_MEDIA_TP_M,
1365 					  HCLGE_CFG_MEDIA_TP_S);
1366 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1367 					  HCLGE_CFG_RX_BUF_LEN_M,
1368 					  HCLGE_CFG_RX_BUF_LEN_S);
1369 	/* get mac_address */
1370 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1371 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1372 					    HCLGE_CFG_MAC_ADDR_H_M,
1373 					    HCLGE_CFG_MAC_ADDR_H_S);
1374 
1375 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1376 
1377 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1378 					     HCLGE_CFG_DEFAULT_SPEED_M,
1379 					     HCLGE_CFG_DEFAULT_SPEED_S);
1380 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1381 					       HCLGE_CFG_RSS_SIZE_M,
1382 					       HCLGE_CFG_RSS_SIZE_S);
1383 
1384 	for (i = 0; i < ETH_ALEN; i++)
1385 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1386 
1387 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1388 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1389 
1390 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1391 					     HCLGE_CFG_SPEED_ABILITY_M,
1392 					     HCLGE_CFG_SPEED_ABILITY_S);
1393 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1394 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1395 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1396 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1397 
1398 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1399 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1400 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1401 
1402 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1403 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1404 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1405 
1406 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1407 					       HCLGE_CFG_PF_RSS_SIZE_M,
1408 					       HCLGE_CFG_PF_RSS_SIZE_S);
1409 
1410 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
1411 	 * of 2 rather than the value itself, which is more flexible for
1412 	 * future changes and expansions.
1413 	 * A value of 0 in the PF max rss size field is not meaningful,
1414 	 * so in that case the PF shares the VF max rss size field:
1415 	 * HCLGE_CFG_RSS_SIZE_S.
1416 	 */
1417 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1418 			       1U << cfg->pf_rss_size_max :
1419 			       cfg->vf_rss_size_max;
1420 
1421 	/* The unit of the tx spare buffer size queried from configuration
1422 	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1423 	 * needed here.
1424 	 */
1425 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1426 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1427 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1428 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1429 }
1430 
1431 /* hclge_get_cfg: query the static parameters from flash
1432  * @hdev: pointer to struct hclge_dev
1433  * @hcfg: the config structure to be filled in
1434  */
1435 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1436 {
1437 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1438 	struct hclge_cfg_param_cmd *req;
1439 	unsigned int i;
1440 	int ret;
1441 
1442 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1443 		u32 offset = 0;
1444 
1445 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1446 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1447 					   true);
1448 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1449 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1450 		/* The length should be in units of 4 bytes when sent to hardware */
1451 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1452 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1453 		req->offset = cpu_to_le32(offset);
1454 	}
1455 
1456 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1457 	if (ret) {
1458 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1459 		return ret;
1460 	}
1461 
1462 	hclge_parse_cfg(hcfg, desc);
1463 
1464 	return 0;
1465 }
1466 
1467 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1468 {
1469 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1470 
1471 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1472 
1473 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1474 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1475 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1476 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1477 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1478 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1479 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1480 	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1481 }
1482 
1483 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1484 				  struct hclge_desc *desc)
1485 {
1486 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1487 	struct hclge_dev_specs_0_cmd *req0;
1488 	struct hclge_dev_specs_1_cmd *req1;
1489 
1490 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1491 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1492 
1493 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1494 	ae_dev->dev_specs.rss_ind_tbl_size =
1495 		le16_to_cpu(req0->rss_ind_tbl_size);
1496 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1497 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1498 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1499 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1500 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1501 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1502 	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1503 	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1504 }
1505 
1506 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1507 {
1508 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1509 
1510 	if (!dev_specs->max_non_tso_bd_num)
1511 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1512 	if (!dev_specs->rss_ind_tbl_size)
1513 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1514 	if (!dev_specs->rss_key_size)
1515 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1516 	if (!dev_specs->max_tm_rate)
1517 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1518 	if (!dev_specs->max_qset_num)
1519 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1520 	if (!dev_specs->max_int_gl)
1521 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1522 	if (!dev_specs->max_frm_size)
1523 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1524 	if (!dev_specs->umv_size)
1525 		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1526 }
1527 
1528 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1529 {
1530 	u32 reg_num = 0;
1531 	int ret;
1532 
1533 	ret = hclge_mac_query_reg_num(hdev, &reg_num);
1534 	if (ret && ret != -EOPNOTSUPP)
1535 		return ret;
1536 
1537 	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1538 	return 0;
1539 }
1540 
1541 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1542 {
1543 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1544 	int ret;
1545 	int i;
1546 
1547 	ret = hclge_query_mac_stats_num(hdev);
1548 	if (ret)
1549 		return ret;
1550 
1551 	/* set default specifications as devices lower than version V3 do not
1552 	 * support querying specifications from firmware.
1553 	 */
1554 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1555 		hclge_set_default_dev_specs(hdev);
1556 		return 0;
1557 	}
1558 
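	/* all descriptors except the last one are chained via the NEXT flag */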
1559 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1560 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1561 					   true);
1562 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1563 	}
1564 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1565 
1566 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1567 	if (ret)
1568 		return ret;
1569 
1570 	hclge_parse_dev_specs(hdev, desc);
1571 	hclge_check_dev_specs(hdev);
1572 
1573 	return 0;
1574 }
1575 
1576 static int hclge_get_cap(struct hclge_dev *hdev)
1577 {
1578 	int ret;
1579 
1580 	ret = hclge_query_function_status(hdev);
1581 	if (ret) {
1582 		dev_err(&hdev->pdev->dev,
1583 			"query function status error %d.\n", ret);
1584 		return ret;
1585 	}
1586 
1587 	/* get pf resource */
1588 	return hclge_query_pf_resource(hdev);
1589 }
1590 
1591 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1592 {
1593 #define HCLGE_MIN_TX_DESC	64
1594 #define HCLGE_MIN_RX_DESC	64
1595 
1596 	if (!is_kdump_kernel())
1597 		return;
1598 
1599 	dev_info(&hdev->pdev->dev,
1600 		 "Running kdump kernel. Using minimal resources\n");
1601 
1602 	/* the minimum number of queue pairs equals the number of vports */
1603 	hdev->num_tqps = hdev->num_req_vfs + 1;
1604 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1605 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1606 }
1607 
1608 static int hclge_configure(struct hclge_dev *hdev)
1609 {
1610 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1611 	const struct cpumask *cpumask = cpu_online_mask;
1612 	struct hclge_cfg cfg;
1613 	unsigned int i;
1614 	int node, ret;
1615 
1616 	ret = hclge_get_cfg(hdev, &cfg);
1617 	if (ret)
1618 		return ret;
1619 
1620 	hdev->base_tqp_pid = 0;
1621 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1622 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1623 	hdev->rx_buf_len = cfg.rx_buf_len;
1624 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1625 	hdev->hw.mac.media_type = cfg.media_type;
1626 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1627 	hdev->num_tx_desc = cfg.tqp_desc_num;
1628 	hdev->num_rx_desc = cfg.tqp_desc_num;
1629 	hdev->tm_info.num_pg = 1;
1630 	hdev->tc_max = cfg.tc_num;
1631 	hdev->tm_info.hw_pfc_map = 0;
1632 	if (cfg.umv_space)
1633 		hdev->wanted_umv_size = cfg.umv_space;
1634 	else
1635 		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1636 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1637 	hdev->gro_en = true;
1638 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1639 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1640 
1641 	if (hnae3_dev_fd_supported(hdev)) {
1642 		hdev->fd_en = true;
1643 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1644 	}
1645 
1646 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1647 	if (ret) {
1648 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1649 			cfg.default_speed, ret);
1650 		return ret;
1651 	}
1652 
1653 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1654 
1655 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1656 
1657 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1658 	    (hdev->tc_max < 1)) {
1659 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1660 			 hdev->tc_max);
1661 		hdev->tc_max = 1;
1662 	}
1663 
1664 	/* Dev does not support DCB */
1665 	if (!hnae3_dev_dcb_supported(hdev)) {
1666 		hdev->tc_max = 1;
1667 		hdev->pfc_max = 0;
1668 	} else {
1669 		hdev->pfc_max = hdev->tc_max;
1670 	}
1671 
1672 	hdev->tm_info.num_tc = 1;
1673 
1674 	/* Non-contiguous TCs are currently not supported */
1675 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1676 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1677 
1678 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1679 
1680 	hclge_init_kdump_kernel_config(hdev);
1681 
1682 	/* Set the affinity based on numa node */
1683 	node = dev_to_node(&hdev->pdev->dev);
1684 	if (node != NUMA_NO_NODE)
1685 		cpumask = cpumask_of_node(node);
1686 
1687 	cpumask_copy(&hdev->affinity_mask, cpumask);
1688 
1689 	return ret;
1690 }
1691 
1692 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1693 			    u16 tso_mss_max)
1694 {
1695 	struct hclge_cfg_tso_status_cmd *req;
1696 	struct hclge_desc desc;
1697 
1698 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1699 
1700 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1701 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1702 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1703 
1704 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1705 }
1706 
1707 static int hclge_config_gro(struct hclge_dev *hdev)
1708 {
1709 	struct hclge_cfg_gro_status_cmd *req;
1710 	struct hclge_desc desc;
1711 	int ret;
1712 
1713 	if (!hnae3_dev_gro_supported(hdev))
1714 		return 0;
1715 
1716 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1717 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1718 
1719 	req->gro_en = hdev->gro_en ? 1 : 0;
1720 
1721 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1722 	if (ret)
1723 		dev_err(&hdev->pdev->dev,
1724 			"GRO hardware config cmd failed, ret = %d\n", ret);
1725 
1726 	return ret;
1727 }
1728 
1729 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1730 {
1731 	struct hclge_tqp *tqp;
1732 	int i;
1733 
1734 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1735 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1736 	if (!hdev->htqp)
1737 		return -ENOMEM;
1738 
1739 	tqp = hdev->htqp;
1740 
1741 	for (i = 0; i < hdev->num_tqps; i++) {
1742 		tqp->dev = &hdev->pdev->dev;
1743 		tqp->index = i;
1744 
1745 		tqp->q.ae_algo = &ae_algo;
1746 		tqp->q.buf_size = hdev->rx_buf_len;
1747 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1748 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1749 
1750 		/* need an extended offset to configure queues >=
1751 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1752 		 */
1753 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1754 			tqp->q.io_base = hdev->hw.io_base +
1755 					 HCLGE_TQP_REG_OFFSET +
1756 					 i * HCLGE_TQP_REG_SIZE;
1757 		else
1758 			tqp->q.io_base = hdev->hw.io_base +
1759 					 HCLGE_TQP_REG_OFFSET +
1760 					 HCLGE_TQP_EXT_REG_OFFSET +
1761 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1762 					 HCLGE_TQP_REG_SIZE;
1763 
1764 		tqp++;
1765 	}
1766 
1767 	return 0;
1768 }
1769 
1770 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1771 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1772 {
1773 	struct hclge_tqp_map_cmd *req;
1774 	struct hclge_desc desc;
1775 	int ret;
1776 
1777 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1778 
1779 	req = (struct hclge_tqp_map_cmd *)desc.data;
1780 	req->tqp_id = cpu_to_le16(tqp_pid);
1781 	req->tqp_vf = func_id;
1782 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1783 	if (!is_pf)
1784 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1785 	req->tqp_vid = cpu_to_le16(tqp_vid);
1786 
1787 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1788 	if (ret)
1789 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1790 
1791 	return ret;
1792 }
1793 
1794 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1795 {
1796 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1797 	struct hclge_dev *hdev = vport->back;
1798 	int i, alloced;
1799 
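	/* hand out the first unallocated TQPs in order until num_tqps are assigned */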
1800 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1801 	     alloced < num_tqps; i++) {
1802 		if (!hdev->htqp[i].alloced) {
1803 			hdev->htqp[i].q.handle = &vport->nic;
1804 			hdev->htqp[i].q.tqp_index = alloced;
1805 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1806 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1807 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1808 			hdev->htqp[i].alloced = true;
1809 			alloced++;
1810 		}
1811 	}
1812 	vport->alloc_tqps = alloced;
1813 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1814 				vport->alloc_tqps / hdev->tm_info.num_tc);
1815 
1816 	/* ensure a one-to-one mapping between irq and queue by default */
1817 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1818 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1819 
1820 	return 0;
1821 }
1822 
1823 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1824 			    u16 num_tx_desc, u16 num_rx_desc)
1825 
1826 {
1827 	struct hnae3_handle *nic = &vport->nic;
1828 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1829 	struct hclge_dev *hdev = vport->back;
1830 	int ret;
1831 
1832 	kinfo->num_tx_desc = num_tx_desc;
1833 	kinfo->num_rx_desc = num_rx_desc;
1834 
1835 	kinfo->rx_buf_len = hdev->rx_buf_len;
1836 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1837 
1838 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1839 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1840 	if (!kinfo->tqp)
1841 		return -ENOMEM;
1842 
1843 	ret = hclge_assign_tqp(vport, num_tqps);
1844 	if (ret)
1845 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1846 
1847 	return ret;
1848 }
1849 
1850 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1851 				  struct hclge_vport *vport)
1852 {
1853 	struct hnae3_handle *nic = &vport->nic;
1854 	struct hnae3_knic_private_info *kinfo;
1855 	u16 i;
1856 
1857 	kinfo = &nic->kinfo;
1858 	for (i = 0; i < vport->alloc_tqps; i++) {
1859 		struct hclge_tqp *q =
1860 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1861 		bool is_pf;
1862 		int ret;
1863 
1864 		is_pf = !(vport->vport_id);
1865 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1866 					     i, is_pf);
1867 		if (ret)
1868 			return ret;
1869 	}
1870 
1871 	return 0;
1872 }
1873 
1874 static int hclge_map_tqp(struct hclge_dev *hdev)
1875 {
1876 	struct hclge_vport *vport = hdev->vport;
1877 	u16 i, num_vport;
1878 
1879 	num_vport = hdev->num_req_vfs + 1;
1880 	for (i = 0; i < num_vport; i++)	{
1881 		int ret;
1882 
1883 		ret = hclge_map_tqp_to_vport(hdev, vport);
1884 		if (ret)
1885 			return ret;
1886 
1887 		vport++;
1888 	}
1889 
1890 	return 0;
1891 }
1892 
1893 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1894 {
1895 	struct hnae3_handle *nic = &vport->nic;
1896 	struct hclge_dev *hdev = vport->back;
1897 	int ret;
1898 
1899 	nic->pdev = hdev->pdev;
1900 	nic->ae_algo = &ae_algo;
1901 	nic->numa_node_mask = hdev->numa_node_mask;
1902 	nic->kinfo.io_base = hdev->hw.io_base;
1903 
1904 	ret = hclge_knic_setup(vport, num_tqps,
1905 			       hdev->num_tx_desc, hdev->num_rx_desc);
1906 	if (ret)
1907 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1908 
1909 	return ret;
1910 }
1911 
1912 static int hclge_alloc_vport(struct hclge_dev *hdev)
1913 {
1914 	struct pci_dev *pdev = hdev->pdev;
1915 	struct hclge_vport *vport;
1916 	u32 tqp_main_vport;
1917 	u32 tqp_per_vport;
1918 	int num_vport, i;
1919 	int ret;
1920 
1921 	/* We need to alloc a vport for the main NIC of the PF */
1922 	num_vport = hdev->num_req_vfs + 1;
1923 
1924 	if (hdev->num_tqps < num_vport) {
1925 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1926 			hdev->num_tqps, num_vport);
1927 		return -EINVAL;
1928 	}
1929 
1930 	/* Alloc the same number of TQPs for every vport */
1931 	tqp_per_vport = hdev->num_tqps / num_vport;
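	/* the main vport of the PF additionally takes the remainder TQPs */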
1932 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1933 
1934 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1935 			     GFP_KERNEL);
1936 	if (!vport)
1937 		return -ENOMEM;
1938 
1939 	hdev->vport = vport;
1940 	hdev->num_alloc_vport = num_vport;
1941 
1942 	if (IS_ENABLED(CONFIG_PCI_IOV))
1943 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1944 
1945 	for (i = 0; i < num_vport; i++) {
1946 		vport->back = hdev;
1947 		vport->vport_id = i;
1948 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1949 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1950 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1951 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1952 		vport->req_vlan_fltr_en = true;
1953 		INIT_LIST_HEAD(&vport->vlan_list);
1954 		INIT_LIST_HEAD(&vport->uc_mac_list);
1955 		INIT_LIST_HEAD(&vport->mc_mac_list);
1956 		spin_lock_init(&vport->mac_list_lock);
1957 
1958 		if (i == 0)
1959 			ret = hclge_vport_setup(vport, tqp_main_vport);
1960 		else
1961 			ret = hclge_vport_setup(vport, tqp_per_vport);
1962 		if (ret) {
1963 			dev_err(&pdev->dev,
1964 				"vport setup failed for vport %d, %d\n",
1965 				i, ret);
1966 			return ret;
1967 		}
1968 
1969 		vport++;
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1976 				    struct hclge_pkt_buf_alloc *buf_alloc)
1977 {
1978 /* TX buffer size is in units of 128 bytes */
1979 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1980 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1981 	struct hclge_tx_buff_alloc_cmd *req;
1982 	struct hclge_desc desc;
1983 	int ret;
1984 	u8 i;
1985 
1986 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1987 
1988 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1989 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1990 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1991 
1992 		req->tx_pkt_buff[i] =
1993 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1994 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1995 	}
1996 
1997 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1998 	if (ret)
1999 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
2000 			ret);
2001 
2002 	return ret;
2003 }
2004 
2005 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
2006 				 struct hclge_pkt_buf_alloc *buf_alloc)
2007 {
2008 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
2009 
2010 	if (ret)
2011 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
2012 
2013 	return ret;
2014 }
2015 
2016 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
2017 {
2018 	unsigned int i;
2019 	u32 cnt = 0;
2020 
2021 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2022 		if (hdev->hw_tc_map & BIT(i))
2023 			cnt++;
2024 	return cnt;
2025 }
2026 
2027 /* Get the number of PFC-enabled TCs that have a private buffer */
2028 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
2029 				  struct hclge_pkt_buf_alloc *buf_alloc)
2030 {
2031 	struct hclge_priv_buf *priv;
2032 	unsigned int i;
2033 	int cnt = 0;
2034 
2035 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2036 		priv = &buf_alloc->priv_buf[i];
2037 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
2038 		    priv->enable)
2039 			cnt++;
2040 	}
2041 
2042 	return cnt;
2043 }
2044 
2045 /* Get the number of PFC-disabled TCs that have a private buffer */
2046 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
2047 				     struct hclge_pkt_buf_alloc *buf_alloc)
2048 {
2049 	struct hclge_priv_buf *priv;
2050 	unsigned int i;
2051 	int cnt = 0;
2052 
2053 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2054 		priv = &buf_alloc->priv_buf[i];
2055 		if (hdev->hw_tc_map & BIT(i) &&
2056 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2057 		    priv->enable)
2058 			cnt++;
2059 	}
2060 
2061 	return cnt;
2062 }
2063 
2064 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2065 {
2066 	struct hclge_priv_buf *priv;
2067 	u32 rx_priv = 0;
2068 	int i;
2069 
2070 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2071 		priv = &buf_alloc->priv_buf[i];
2072 		if (priv->enable)
2073 			rx_priv += priv->buf_size;
2074 	}
2075 	return rx_priv;
2076 }
2077 
2078 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2079 {
2080 	u32 i, total_tx_size = 0;
2081 
2082 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2083 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2084 
2085 	return total_tx_size;
2086 }
2087 
2088 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2089 				struct hclge_pkt_buf_alloc *buf_alloc,
2090 				u32 rx_all)
2091 {
2092 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2093 	u32 tc_num = hclge_get_tc_num(hdev);
2094 	u32 shared_buf, aligned_mps;
2095 	u32 rx_priv;
2096 	int i;
2097 
2098 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2099 
2100 	if (hnae3_dev_dcb_supported(hdev))
2101 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2102 					hdev->dv_buf_size;
2103 	else
2104 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2105 					+ hdev->dv_buf_size;
2106 
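	/* the shared buffer must also hold one aligned MPS per TC plus one extra MPS */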
2107 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2108 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2109 			     HCLGE_BUF_SIZE_UNIT);
2110 
2111 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2112 	if (rx_all < rx_priv + shared_std)
2113 		return false;
2114 
2115 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2116 	buf_alloc->s_buf.buf_size = shared_buf;
2117 	if (hnae3_dev_dcb_supported(hdev)) {
2118 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2119 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2120 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2121 				  HCLGE_BUF_SIZE_UNIT);
2122 	} else {
2123 		buf_alloc->s_buf.self.high = aligned_mps +
2124 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2125 		buf_alloc->s_buf.self.low = aligned_mps;
2126 	}
2127 
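	/* work out the per-TC high/low thresholds for the shared buffer */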
2128 	if (hnae3_dev_dcb_supported(hdev)) {
2129 		hi_thrd = shared_buf - hdev->dv_buf_size;
2130 
2131 		if (tc_num <= NEED_RESERVE_TC_NUM)
2132 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2133 					/ BUF_MAX_PERCENT;
2134 
2135 		if (tc_num)
2136 			hi_thrd = hi_thrd / tc_num;
2137 
2138 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2139 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2140 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2141 	} else {
2142 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2143 		lo_thrd = aligned_mps;
2144 	}
2145 
2146 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2147 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2148 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2149 	}
2150 
2151 	return true;
2152 }
2153 
2154 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2155 				struct hclge_pkt_buf_alloc *buf_alloc)
2156 {
2157 	u32 i, total_size;
2158 
2159 	total_size = hdev->pkt_buf_size;
2160 
2161 	/* alloc tx buffer for every enabled TC */
2162 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2163 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2164 
2165 		if (hdev->hw_tc_map & BIT(i)) {
2166 			if (total_size < hdev->tx_buf_size)
2167 				return -ENOMEM;
2168 
2169 			priv->tx_buf_size = hdev->tx_buf_size;
2170 		} else {
2171 			priv->tx_buf_size = 0;
2172 		}
2173 
2174 		total_size -= priv->tx_buf_size;
2175 	}
2176 
2177 	return 0;
2178 }
2179 
2180 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2181 				  struct hclge_pkt_buf_alloc *buf_alloc)
2182 {
2183 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2184 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2185 	unsigned int i;
2186 
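	/* PFC-enabled TCs get a non-zero low waterline; other enabled TCs only set the high waterline */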
2187 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2188 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2189 
2190 		priv->enable = 0;
2191 		priv->wl.low = 0;
2192 		priv->wl.high = 0;
2193 		priv->buf_size = 0;
2194 
2195 		if (!(hdev->hw_tc_map & BIT(i)))
2196 			continue;
2197 
2198 		priv->enable = 1;
2199 
2200 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2201 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2202 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2203 						HCLGE_BUF_SIZE_UNIT);
2204 		} else {
2205 			priv->wl.low = 0;
2206 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2207 					aligned_mps;
2208 		}
2209 
2210 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2211 	}
2212 
2213 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2214 }
2215 
2216 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2217 					  struct hclge_pkt_buf_alloc *buf_alloc)
2218 {
2219 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2220 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2221 	int i;
2222 
2223 	/* clear from the last TC first */
2224 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2225 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2226 		unsigned int mask = BIT((unsigned int)i);
2227 
2228 		if (hdev->hw_tc_map & mask &&
2229 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2230 			/* Clear the private buffer of the non-PFC TC */
2231 			priv->wl.low = 0;
2232 			priv->wl.high = 0;
2233 			priv->buf_size = 0;
2234 			priv->enable = 0;
2235 			no_pfc_priv_num--;
2236 		}
2237 
2238 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2239 		    no_pfc_priv_num == 0)
2240 			break;
2241 	}
2242 
2243 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2244 }
2245 
2246 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2247 					struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2250 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2251 	int i;
2252 
2253 	/* clear from the last TC first */
2254 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2255 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2256 		unsigned int mask = BIT((unsigned int)i);
2257 
2258 		if (hdev->hw_tc_map & mask &&
2259 		    hdev->tm_info.hw_pfc_map & mask) {
2260 			/* Reduce the number of PFC TCs with a private buffer */
2261 			priv->wl.low = 0;
2262 			priv->enable = 0;
2263 			priv->wl.high = 0;
2264 			priv->buf_size = 0;
2265 			pfc_priv_num--;
2266 		}
2267 
2268 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2269 		    pfc_priv_num == 0)
2270 			break;
2271 	}
2272 
2273 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2274 }
2275 
2276 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2277 				      struct hclge_pkt_buf_alloc *buf_alloc)
2278 {
2279 #define COMPENSATE_BUFFER	0x3C00
2280 #define COMPENSATE_HALF_MPS_NUM	5
2281 #define PRIV_WL_GAP		0x1800
2282 
2283 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2284 	u32 tc_num = hclge_get_tc_num(hdev);
2285 	u32 half_mps = hdev->mps >> 1;
2286 	u32 min_rx_priv;
2287 	unsigned int i;
2288 
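	/* split the remaining rx buffer evenly among the enabled TCs */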
2289 	if (tc_num)
2290 		rx_priv = rx_priv / tc_num;
2291 
2292 	if (tc_num <= NEED_RESERVE_TC_NUM)
2293 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2294 
2295 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2296 			COMPENSATE_HALF_MPS_NUM * half_mps;
2297 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2298 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2299 	if (rx_priv < min_rx_priv)
2300 		return false;
2301 
2302 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2303 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2304 
2305 		priv->enable = 0;
2306 		priv->wl.low = 0;
2307 		priv->wl.high = 0;
2308 		priv->buf_size = 0;
2309 
2310 		if (!(hdev->hw_tc_map & BIT(i)))
2311 			continue;
2312 
2313 		priv->enable = 1;
2314 		priv->buf_size = rx_priv;
2315 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2316 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2317 	}
2318 
2319 	buf_alloc->s_buf.buf_size = 0;
2320 
2321 	return true;
2322 }
2323 
2324 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2325  * @hdev: pointer to struct hclge_dev
2326  * @buf_alloc: pointer to buffer calculation data
2327  * @return: 0 on success, negative error code on failure
2328  */
2329 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2330 				struct hclge_pkt_buf_alloc *buf_alloc)
2331 {
2332 	/* When DCB is not supported, rx private buffer is not allocated. */
2333 	if (!hnae3_dev_dcb_supported(hdev)) {
2334 		u32 rx_all = hdev->pkt_buf_size;
2335 
2336 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2337 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2338 			return -ENOMEM;
2339 
2340 		return 0;
2341 	}
2342 
2343 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2344 		return 0;
2345 
2346 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2347 		return 0;
2348 
2349 	/* try to decrease the buffer size */
2350 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2351 		return 0;
2352 
2353 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2354 		return 0;
2355 
2356 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2357 		return 0;
2358 
2359 	return -ENOMEM;
2360 }
2361 
2362 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2363 				   struct hclge_pkt_buf_alloc *buf_alloc)
2364 {
2365 	struct hclge_rx_priv_buff_cmd *req;
2366 	struct hclge_desc desc;
2367 	int ret;
2368 	int i;
2369 
2370 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2371 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2372 
2373 	/* Alloc a private buffer for each TC */
2374 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2375 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2376 
2377 		req->buf_num[i] =
2378 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2379 		req->buf_num[i] |=
2380 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2381 	}
2382 
2383 	req->shared_buf =
2384 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2385 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2386 
2387 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2388 	if (ret)
2389 		dev_err(&hdev->pdev->dev,
2390 			"rx private buffer alloc cmd failed %d\n", ret);
2391 
2392 	return ret;
2393 }
2394 
2395 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2396 				   struct hclge_pkt_buf_alloc *buf_alloc)
2397 {
2398 	struct hclge_rx_priv_wl_buf *req;
2399 	struct hclge_priv_buf *priv;
2400 	struct hclge_desc desc[2];
2401 	int i, j;
2402 	int ret;
2403 
2404 	for (i = 0; i < 2; i++) {
2405 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2406 					   false);
2407 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2408 
2409 		/* The first descriptor sets the NEXT bit to 1 */
2410 		if (i == 0)
2411 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2412 		else
2413 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2414 
2415 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2416 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2417 
2418 			priv = &buf_alloc->priv_buf[idx];
2419 			req->tc_wl[j].high =
2420 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2421 			req->tc_wl[j].high |=
2422 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2423 			req->tc_wl[j].low =
2424 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2425 			req->tc_wl[j].low |=
2426 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2427 		}
2428 	}
2429 
2430 	/* Send 2 descriptors at one time */
2431 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2432 	if (ret)
2433 		dev_err(&hdev->pdev->dev,
2434 			"rx private waterline config cmd failed %d\n",
2435 			ret);
2436 	return ret;
2437 }
2438 
2439 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2440 				    struct hclge_pkt_buf_alloc *buf_alloc)
2441 {
2442 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2443 	struct hclge_rx_com_thrd *req;
2444 	struct hclge_desc desc[2];
2445 	struct hclge_tc_thrd *tc;
2446 	int i, j;
2447 	int ret;
2448 
2449 	for (i = 0; i < 2; i++) {
2450 		hclge_cmd_setup_basic_desc(&desc[i],
2451 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2452 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2453 
2454 		/* The first descriptor sets the NEXT bit to 1 */
2455 		if (i == 0)
2456 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2457 		else
2458 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2459 
2460 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2461 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2462 
2463 			req->com_thrd[j].high =
2464 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2465 			req->com_thrd[j].high |=
2466 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2467 			req->com_thrd[j].low =
2468 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2469 			req->com_thrd[j].low |=
2470 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2471 		}
2472 	}
2473 
2474 	/* Send 2 descriptors at one time */
2475 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2476 	if (ret)
2477 		dev_err(&hdev->pdev->dev,
2478 			"common threshold config cmd failed %d\n", ret);
2479 	return ret;
2480 }
2481 
2482 static int hclge_common_wl_config(struct hclge_dev *hdev,
2483 				  struct hclge_pkt_buf_alloc *buf_alloc)
2484 {
2485 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2486 	struct hclge_rx_com_wl *req;
2487 	struct hclge_desc desc;
2488 	int ret;
2489 
2490 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2491 
2492 	req = (struct hclge_rx_com_wl *)desc.data;
2493 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2494 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2495 
2496 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2497 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2498 
2499 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2500 	if (ret)
2501 		dev_err(&hdev->pdev->dev,
2502 			"common waterline config cmd failed %d\n", ret);
2503 
2504 	return ret;
2505 }
2506 
2507 int hclge_buffer_alloc(struct hclge_dev *hdev)
2508 {
2509 	struct hclge_pkt_buf_alloc *pkt_buf;
2510 	int ret;
2511 
2512 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2513 	if (!pkt_buf)
2514 		return -ENOMEM;
2515 
2516 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2517 	if (ret) {
2518 		dev_err(&hdev->pdev->dev,
2519 			"could not calc tx buffer size for all TCs %d\n", ret);
2520 		goto out;
2521 	}
2522 
2523 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2524 	if (ret) {
2525 		dev_err(&hdev->pdev->dev,
2526 			"could not alloc tx buffers %d\n", ret);
2527 		goto out;
2528 	}
2529 
2530 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2531 	if (ret) {
2532 		dev_err(&hdev->pdev->dev,
2533 			"could not calc rx priv buffer size for all TCs %d\n",
2534 			ret);
2535 		goto out;
2536 	}
2537 
2538 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2539 	if (ret) {
2540 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2541 			ret);
2542 		goto out;
2543 	}
2544 
2545 	if (hnae3_dev_dcb_supported(hdev)) {
2546 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2547 		if (ret) {
2548 			dev_err(&hdev->pdev->dev,
2549 				"could not configure rx private waterline %d\n",
2550 				ret);
2551 			goto out;
2552 		}
2553 
2554 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2555 		if (ret) {
2556 			dev_err(&hdev->pdev->dev,
2557 				"could not configure common threshold %d\n",
2558 				ret);
2559 			goto out;
2560 		}
2561 	}
2562 
2563 	ret = hclge_common_wl_config(hdev, pkt_buf);
2564 	if (ret)
2565 		dev_err(&hdev->pdev->dev,
2566 			"could not configure common waterline %d\n", ret);
2567 
2568 out:
2569 	kfree(pkt_buf);
2570 	return ret;
2571 }
2572 
2573 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2574 {
2575 	struct hnae3_handle *roce = &vport->roce;
2576 	struct hnae3_handle *nic = &vport->nic;
2577 	struct hclge_dev *hdev = vport->back;
2578 
2579 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2580 
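	/* both nic and roce vectors must fit within the allocated MSI range */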
2581 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2582 		return -EINVAL;
2583 
2584 	roce->rinfo.base_vector = hdev->roce_base_vector;
2585 
2586 	roce->rinfo.netdev = nic->kinfo.netdev;
2587 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2588 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2589 
2590 	roce->pdev = nic->pdev;
2591 	roce->ae_algo = nic->ae_algo;
2592 	roce->numa_node_mask = nic->numa_node_mask;
2593 
2594 	return 0;
2595 }
2596 
2597 static int hclge_init_msi(struct hclge_dev *hdev)
2598 {
2599 	struct pci_dev *pdev = hdev->pdev;
2600 	int vectors;
2601 	int i;
2602 
2603 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2604 					hdev->num_msi,
2605 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2606 	if (vectors < 0) {
2607 		dev_err(&pdev->dev,
2608 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2609 			vectors);
2610 		return vectors;
2611 	}
2612 	if (vectors < hdev->num_msi)
2613 		dev_warn(&hdev->pdev->dev,
2614 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2615 			 hdev->num_msi, vectors);
2616 
2617 	hdev->num_msi = vectors;
2618 	hdev->num_msi_left = vectors;
2619 
2620 	hdev->base_msi_vector = pdev->irq;
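	/* roce vectors are placed right after the nic vectors */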
2621 	hdev->roce_base_vector = hdev->base_msi_vector +
2622 				hdev->num_nic_msi;
2623 
2624 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2625 					   sizeof(u16), GFP_KERNEL);
2626 	if (!hdev->vector_status) {
2627 		pci_free_irq_vectors(pdev);
2628 		return -ENOMEM;
2629 	}
2630 
2631 	for (i = 0; i < hdev->num_msi; i++)
2632 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2633 
2634 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2635 					sizeof(int), GFP_KERNEL);
2636 	if (!hdev->vector_irq) {
2637 		pci_free_irq_vectors(pdev);
2638 		return -ENOMEM;
2639 	}
2640 
2641 	return 0;
2642 }
2643 
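/* half duplex is only valid for 10M and 100M links; force full duplex otherwise */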
2644 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2645 {
2646 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2647 		duplex = HCLGE_MAC_FULL;
2648 
2649 	return duplex;
2650 }
2651 
2652 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2653 				      u8 duplex)
2654 {
2655 	struct hclge_config_mac_speed_dup_cmd *req;
2656 	struct hclge_desc desc;
2657 	int ret;
2658 
2659 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2660 
2661 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2662 
2663 	if (duplex)
2664 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2665 
2666 	switch (speed) {
2667 	case HCLGE_MAC_SPEED_10M:
2668 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2669 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2670 		break;
2671 	case HCLGE_MAC_SPEED_100M:
2672 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2673 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2674 		break;
2675 	case HCLGE_MAC_SPEED_1G:
2676 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2677 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2678 		break;
2679 	case HCLGE_MAC_SPEED_10G:
2680 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2681 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2682 		break;
2683 	case HCLGE_MAC_SPEED_25G:
2684 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2685 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2686 		break;
2687 	case HCLGE_MAC_SPEED_40G:
2688 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2689 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2690 		break;
2691 	case HCLGE_MAC_SPEED_50G:
2692 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2693 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2694 		break;
2695 	case HCLGE_MAC_SPEED_100G:
2696 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2697 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2698 		break;
2699 	case HCLGE_MAC_SPEED_200G:
2700 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2701 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2702 		break;
2703 	default:
2704 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2705 		return -EINVAL;
2706 	}
2707 
2708 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2709 		      1);
2710 
2711 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2712 	if (ret) {
2713 		dev_err(&hdev->pdev->dev,
2714 			"mac speed/duplex config cmd failed %d.\n", ret);
2715 		return ret;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2722 {
2723 	struct hclge_mac *mac = &hdev->hw.mac;
2724 	int ret;
2725 
2726 	duplex = hclge_check_speed_dup(duplex, speed);
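	/* skip the hardware config if the port has no autoneg and speed/duplex are unchanged */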
2727 	if (!mac->support_autoneg && mac->speed == speed &&
2728 	    mac->duplex == duplex)
2729 		return 0;
2730 
2731 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2732 	if (ret)
2733 		return ret;
2734 
2735 	hdev->hw.mac.speed = speed;
2736 	hdev->hw.mac.duplex = duplex;
2737 
2738 	return 0;
2739 }
2740 
2741 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2742 				     u8 duplex)
2743 {
2744 	struct hclge_vport *vport = hclge_get_vport(handle);
2745 	struct hclge_dev *hdev = vport->back;
2746 
2747 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2748 }
2749 
2750 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2751 {
2752 	struct hclge_config_auto_neg_cmd *req;
2753 	struct hclge_desc desc;
2754 	u32 flag = 0;
2755 	int ret;
2756 
2757 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2758 
2759 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2760 	if (enable)
2761 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2762 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2763 
2764 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2765 	if (ret)
2766 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2767 			ret);
2768 
2769 	return ret;
2770 }
2771 
2772 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2773 {
2774 	struct hclge_vport *vport = hclge_get_vport(handle);
2775 	struct hclge_dev *hdev = vport->back;
2776 
2777 	if (!hdev->hw.mac.support_autoneg) {
2778 		if (enable) {
2779 			dev_err(&hdev->pdev->dev,
2780 				"autoneg is not supported by current port\n");
2781 			return -EOPNOTSUPP;
2782 		} else {
2783 			return 0;
2784 		}
2785 	}
2786 
2787 	return hclge_set_autoneg_en(hdev, enable);
2788 }
2789 
2790 static int hclge_get_autoneg(struct hnae3_handle *handle)
2791 {
2792 	struct hclge_vport *vport = hclge_get_vport(handle);
2793 	struct hclge_dev *hdev = vport->back;
2794 	struct phy_device *phydev = hdev->hw.mac.phydev;
2795 
2796 	if (phydev)
2797 		return phydev->autoneg;
2798 
2799 	return hdev->hw.mac.autoneg;
2800 }
2801 
2802 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2803 {
2804 	struct hclge_vport *vport = hclge_get_vport(handle);
2805 	struct hclge_dev *hdev = vport->back;
2806 	int ret;
2807 
2808 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2809 
2810 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2811 	if (ret)
2812 		return ret;
2813 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2814 }
2815 
2816 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2817 {
2818 	struct hclge_vport *vport = hclge_get_vport(handle);
2819 	struct hclge_dev *hdev = vport->back;
2820 
2821 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2822 		return hclge_set_autoneg_en(hdev, !halt);
2823 
2824 	return 0;
2825 }
2826 
2827 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2828 {
2829 	struct hclge_config_fec_cmd *req;
2830 	struct hclge_desc desc;
2831 	int ret;
2832 
2833 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2834 
2835 	req = (struct hclge_config_fec_cmd *)desc.data;
2836 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2837 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2838 	if (fec_mode & BIT(HNAE3_FEC_RS))
2839 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2840 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2841 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2842 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2843 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2844 
2845 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2846 	if (ret)
2847 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2848 
2849 	return ret;
2850 }
2851 
2852 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2853 {
2854 	struct hclge_vport *vport = hclge_get_vport(handle);
2855 	struct hclge_dev *hdev = vport->back;
2856 	struct hclge_mac *mac = &hdev->hw.mac;
2857 	int ret;
2858 
2859 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2860 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2861 		return -EINVAL;
2862 	}
2863 
2864 	ret = hclge_set_fec_hw(hdev, fec_mode);
2865 	if (ret)
2866 		return ret;
2867 
2868 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2869 	return 0;
2870 }
2871 
2872 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2873 			  u8 *fec_mode)
2874 {
2875 	struct hclge_vport *vport = hclge_get_vport(handle);
2876 	struct hclge_dev *hdev = vport->back;
2877 	struct hclge_mac *mac = &hdev->hw.mac;
2878 
2879 	if (fec_ability)
2880 		*fec_ability = mac->fec_ability;
2881 	if (fec_mode)
2882 		*fec_mode = mac->fec_mode;
2883 }
2884 
2885 static int hclge_mac_init(struct hclge_dev *hdev)
2886 {
2887 	struct hclge_mac *mac = &hdev->hw.mac;
2888 	int ret;
2889 
2890 	hdev->support_sfp_query = true;
2891 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2892 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2893 					 hdev->hw.mac.duplex);
2894 	if (ret)
2895 		return ret;
2896 
2897 	if (hdev->hw.mac.support_autoneg) {
2898 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2899 		if (ret)
2900 			return ret;
2901 	}
2902 
2903 	mac->link = 0;
2904 
2905 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2906 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2907 		if (ret)
2908 			return ret;
2909 	}
2910 
2911 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2912 	if (ret) {
2913 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2914 		return ret;
2915 	}
2916 
2917 	ret = hclge_set_default_loopback(hdev);
2918 	if (ret)
2919 		return ret;
2920 
2921 	ret = hclge_buffer_alloc(hdev);
2922 	if (ret)
2923 		dev_err(&hdev->pdev->dev,
2924 			"allocate buffer fail, ret=%d\n", ret);
2925 
2926 	return ret;
2927 }
2928 
2929 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2930 {
2931 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2932 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2933 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2934 }
2935 
2936 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2937 {
2938 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2939 	    test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2940 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2941 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2942 }
2943 
2944 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2945 {
2946 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2947 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2948 		mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2949 }
2950 
2951 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2952 {
2953 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2954 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2955 		mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2956 }
2957 
2958 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2959 {
2960 	struct hclge_link_status_cmd *req;
2961 	struct hclge_desc desc;
2962 	int ret;
2963 
2964 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2965 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2966 	if (ret) {
2967 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2968 			ret);
2969 		return ret;
2970 	}
2971 
2972 	req = (struct hclge_link_status_cmd *)desc.data;
2973 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2974 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2975 
2976 	return 0;
2977 }
2978 
2979 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2980 {
2981 	struct phy_device *phydev = hdev->hw.mac.phydev;
2982 
2983 	*link_status = HCLGE_LINK_STATUS_DOWN;
2984 
2985 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2986 		return 0;
2987 
2988 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2989 		return 0;
2990 
2991 	return hclge_get_mac_link_status(hdev, link_status);
2992 }
2993 
2994 static void hclge_push_link_status(struct hclge_dev *hdev)
2995 {
2996 	struct hclge_vport *vport;
2997 	int ret;
2998 	u16 i;
2999 
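	/* only push the link status to alive VFs whose link state is set to auto */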
3000 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3001 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3002 
3003 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3004 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3005 			continue;
3006 
3007 		ret = hclge_push_vf_link_status(vport);
3008 		if (ret) {
3009 			dev_err(&hdev->pdev->dev,
3010 				"failed to push link status to vf%u, ret = %d\n",
3011 				i, ret);
3012 		}
3013 	}
3014 }
3015 
3016 static void hclge_update_link_status(struct hclge_dev *hdev)
3017 {
3018 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3019 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3020 	struct hnae3_client *rclient = hdev->roce_client;
3021 	struct hnae3_client *client = hdev->nic_client;
3022 	int state;
3023 	int ret;
3024 
3025 	if (!client)
3026 		return;
3027 
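	/* the LINK_UPDATING bit serializes concurrent link status updates */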
3028 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3029 		return;
3030 
3031 	ret = hclge_get_mac_phy_link(hdev, &state);
3032 	if (ret) {
3033 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3034 		return;
3035 	}
3036 
3037 	if (state != hdev->hw.mac.link) {
3038 		hdev->hw.mac.link = state;
3039 		client->ops->link_status_change(handle, state);
3040 		hclge_config_mac_tnl_int(hdev, state);
3041 		if (rclient && rclient->ops->link_status_change)
3042 			rclient->ops->link_status_change(rhandle, state);
3043 
3044 		hclge_push_link_status(hdev);
3045 	}
3046 
3047 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3048 }
3049 
3050 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3051 {
3052 	u32 speed_ability;
3053 
3054 	if (hclge_get_speed_bit(mac->speed, &speed_ability))
3055 		return;
3056 
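	/* map the current speed ability to advertised modes according to the module type */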
3057 	switch (mac->module_type) {
3058 	case HNAE3_MODULE_TYPE_FIBRE_LR:
3059 		hclge_convert_setting_lr(speed_ability, mac->advertising);
3060 		break;
3061 	case HNAE3_MODULE_TYPE_FIBRE_SR:
3062 	case HNAE3_MODULE_TYPE_AOC:
3063 		hclge_convert_setting_sr(speed_ability, mac->advertising);
3064 		break;
3065 	case HNAE3_MODULE_TYPE_CR:
3066 		hclge_convert_setting_cr(speed_ability, mac->advertising);
3067 		break;
3068 	case HNAE3_MODULE_TYPE_KR:
3069 		hclge_convert_setting_kr(speed_ability, mac->advertising);
3070 		break;
3071 	default:
3072 		break;
3073 	}
3074 }
3075 
3076 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3077 {
3078 	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3079 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3080 				 mac->advertising);
3081 	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3082 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3083 				 mac->advertising);
3084 	else
3085 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3086 				 mac->advertising);
3087 }
3088 
3089 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3090 {
3091 	struct hclge_mac *mac = &hdev->hw.mac;
3092 	bool rx_en, tx_en;
3093 
3094 	switch (hdev->fc_mode_last_time) {
3095 	case HCLGE_FC_RX_PAUSE:
3096 		rx_en = true;
3097 		tx_en = false;
3098 		break;
3099 	case HCLGE_FC_TX_PAUSE:
3100 		rx_en = false;
3101 		tx_en = true;
3102 		break;
3103 	case HCLGE_FC_FULL:
3104 		rx_en = true;
3105 		tx_en = true;
3106 		break;
3107 	default:
3108 		rx_en = false;
3109 		tx_en = false;
3110 		break;
3111 	}
3112 
3113 	linkmode_set_pause(mac->advertising, tx_en, rx_en);
3114 }
3115 
3116 static void hclge_update_advertising(struct hclge_dev *hdev)
3117 {
3118 	struct hclge_mac *mac = &hdev->hw.mac;
3119 
3120 	linkmode_zero(mac->advertising);
3121 	hclge_update_speed_advertising(mac);
3122 	hclge_update_fec_advertising(mac);
3123 	hclge_update_pause_advertising(hdev);
3124 }
3125 
3126 static void hclge_update_port_capability(struct hclge_dev *hdev,
3127 					 struct hclge_mac *mac)
3128 {
3129 	if (hnae3_dev_fec_supported(hdev))
3130 		/* update fec ability by speed */
3131 		hclge_convert_setting_fec(mac);
3132 
3133 	/* firmware cannot identify the backplane type; the media type
3134 	 * read from the configuration can help to deal with it
3135 	 */
3136 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3137 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3138 		mac->module_type = HNAE3_MODULE_TYPE_KR;
3139 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3140 		mac->module_type = HNAE3_MODULE_TYPE_TP;
3141 
3142 	if (mac->support_autoneg) {
3143 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3144 		linkmode_copy(mac->advertising, mac->supported);
3145 	} else {
3146 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3147 				   mac->supported);
3148 		hclge_update_advertising(hdev);
3149 	}
3150 }
3151 
3152 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3153 {
3154 	struct hclge_sfp_info_cmd *resp;
3155 	struct hclge_desc desc;
3156 	int ret;
3157 
3158 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3159 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3160 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3161 	if (ret == -EOPNOTSUPP) {
3162 		dev_warn(&hdev->pdev->dev,
3163 			 "IMP does not support getting SFP speed %d\n", ret);
3164 		return ret;
3165 	} else if (ret) {
3166 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3167 		return ret;
3168 	}
3169 
3170 	*speed = le32_to_cpu(resp->speed);
3171 
3172 	return 0;
3173 }
3174 
3175 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3176 {
3177 	struct hclge_sfp_info_cmd *resp;
3178 	struct hclge_desc desc;
3179 	int ret;
3180 
3181 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3182 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3183 
3184 	resp->query_type = QUERY_ACTIVE_SPEED;
3185 
3186 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3187 	if (ret == -EOPNOTSUPP) {
3188 		dev_warn(&hdev->pdev->dev,
3189 			 "IMP does not support getting SFP info %d\n", ret);
3190 		return ret;
3191 	} else if (ret) {
3192 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3193 		return ret;
3194 	}
3195 
3196 	/* In some cases, the MAC speed fetched from the IMP may be 0; it
3197 	 * should not be written to mac->speed.
3198 	 */
3199 	if (!le32_to_cpu(resp->speed))
3200 		return 0;
3201 
3202 	mac->speed = le32_to_cpu(resp->speed);
3203 	/* if resp->speed_ability is 0, the firmware is an old version,
3204 	 * so do not update these parameters
3205 	 */
3206 	if (resp->speed_ability) {
3207 		mac->module_type = le32_to_cpu(resp->module_type);
3208 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3209 		mac->autoneg = resp->autoneg;
3210 		mac->support_autoneg = resp->autoneg_ability;
3211 		mac->speed_type = QUERY_ACTIVE_SPEED;
3212 		if (!resp->active_fec)
3213 			mac->fec_mode = 0;
3214 		else
3215 			mac->fec_mode = BIT(resp->active_fec);
3216 	} else {
3217 		mac->speed_type = QUERY_SFP_SPEED;
3218 	}
3219 
3220 	return 0;
3221 }
3222 
3223 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3224 					struct ethtool_link_ksettings *cmd)
3225 {
3226 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3227 	struct hclge_vport *vport = hclge_get_vport(handle);
3228 	struct hclge_phy_link_ksetting_0_cmd *req0;
3229 	struct hclge_phy_link_ksetting_1_cmd *req1;
3230 	u32 supported, advertising, lp_advertising;
3231 	struct hclge_dev *hdev = vport->back;
3232 	int ret;
3233 
3234 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3235 				   true);
3236 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3237 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3238 				   true);
3239 
3240 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3241 	if (ret) {
3242 		dev_err(&hdev->pdev->dev,
3243 			"failed to get phy link ksetting, ret = %d.\n", ret);
3244 		return ret;
3245 	}
3246 
3247 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3248 	cmd->base.autoneg = req0->autoneg;
3249 	cmd->base.speed = le32_to_cpu(req0->speed);
3250 	cmd->base.duplex = req0->duplex;
3251 	cmd->base.port = req0->port;
3252 	cmd->base.transceiver = req0->transceiver;
3253 	cmd->base.phy_address = req0->phy_address;
3254 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3255 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3256 	supported = le32_to_cpu(req0->supported);
3257 	advertising = le32_to_cpu(req0->advertising);
3258 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3259 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3260 						supported);
3261 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3262 						advertising);
3263 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3264 						lp_advertising);
3265 
3266 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3267 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3268 	cmd->base.master_slave_state = req1->master_slave_state;
3269 
3270 	return 0;
3271 }
3272 
3273 static int
3274 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3275 			     const struct ethtool_link_ksettings *cmd)
3276 {
3277 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3278 	struct hclge_vport *vport = hclge_get_vport(handle);
3279 	struct hclge_phy_link_ksetting_0_cmd *req0;
3280 	struct hclge_phy_link_ksetting_1_cmd *req1;
3281 	struct hclge_dev *hdev = vport->back;
3282 	u32 advertising;
3283 	int ret;
3284 
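	/* with autoneg disabled, only 10M/100M at half or full duplex is accepted */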
3285 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3286 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3287 	     (cmd->base.duplex != DUPLEX_HALF &&
3288 	      cmd->base.duplex != DUPLEX_FULL)))
3289 		return -EINVAL;
3290 
3291 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3292 				   false);
3293 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3294 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3295 				   false);
3296 
3297 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3298 	req0->autoneg = cmd->base.autoneg;
3299 	req0->speed = cpu_to_le32(cmd->base.speed);
3300 	req0->duplex = cmd->base.duplex;
3301 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3302 						cmd->link_modes.advertising);
3303 	req0->advertising = cpu_to_le32(advertising);
3304 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3305 
3306 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3307 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3308 
3309 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3310 	if (ret) {
3311 		dev_err(&hdev->pdev->dev,
3312 			"failed to set phy link ksettings, ret = %d.\n", ret);
3313 		return ret;
3314 	}
3315 
3316 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3317 	hdev->hw.mac.speed = cmd->base.speed;
3318 	hdev->hw.mac.duplex = cmd->base.duplex;
3319 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3320 
3321 	return 0;
3322 }
3323 
3324 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3325 {
3326 	struct ethtool_link_ksettings cmd;
3327 	int ret;
3328 
3329 	if (!hnae3_dev_phy_imp_supported(hdev))
3330 		return 0;
3331 
3332 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3333 	if (ret)
3334 		return ret;
3335 
3336 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3337 	hdev->hw.mac.speed = cmd.base.speed;
3338 	hdev->hw.mac.duplex = cmd.base.duplex;
3339 
3340 	return 0;
3341 }
3342 
3343 static int hclge_tp_port_init(struct hclge_dev *hdev)
3344 {
3345 	struct ethtool_link_ksettings cmd;
3346 
3347 	if (!hnae3_dev_phy_imp_supported(hdev))
3348 		return 0;
3349 
3350 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3351 	cmd.base.speed = hdev->hw.mac.speed;
3352 	cmd.base.duplex = hdev->hw.mac.duplex;
3353 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3354 
3355 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3356 }
3357 
3358 static int hclge_update_port_info(struct hclge_dev *hdev)
3359 {
3360 	struct hclge_mac *mac = &hdev->hw.mac;
3361 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3362 	int ret;
3363 
3364 	/* get the port info from SFP cmd if not copper port */
3365 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3366 		return hclge_update_tp_port_info(hdev);
3367 
3368 	/* if IMP does not support get SFP/qSFP info, return directly */
3369 	if (!hdev->support_sfp_query)
3370 		return 0;
3371 
3372 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3373 		ret = hclge_get_sfp_info(hdev, mac);
3374 	else
3375 		ret = hclge_get_sfp_speed(hdev, &speed);
3376 
3377 	if (ret == -EOPNOTSUPP) {
3378 		hdev->support_sfp_query = false;
3379 		return ret;
3380 	} else if (ret) {
3381 		return ret;
3382 	}
3383 
3384 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3385 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3386 			hclge_update_port_capability(hdev, mac);
3387 			return 0;
3388 		}
3389 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3390 					       HCLGE_MAC_FULL);
3391 	} else {
3392 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3393 			return 0; /* do nothing if no SFP */
3394 
		/* must configure full duplex for SFP */
3396 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3397 	}
3398 }
3399 
3400 static int hclge_get_status(struct hnae3_handle *handle)
3401 {
3402 	struct hclge_vport *vport = hclge_get_vport(handle);
3403 	struct hclge_dev *hdev = vport->back;
3404 
3405 	hclge_update_link_status(hdev);
3406 
3407 	return hdev->hw.mac.link;
3408 }
3409 
3410 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3411 {
3412 	if (!pci_num_vf(hdev->pdev)) {
3413 		dev_err(&hdev->pdev->dev,
3414 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3415 		return NULL;
3416 	}
3417 
3418 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3419 		dev_err(&hdev->pdev->dev,
3420 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3421 			vf, pci_num_vf(hdev->pdev));
3422 		return NULL;
3423 	}
3424 
	/* VF vport indices start from 1 */
3426 	vf += HCLGE_VF_VPORT_START_NUM;
3427 	return &hdev->vport[vf];
3428 }
3429 
3430 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3431 			       struct ifla_vf_info *ivf)
3432 {
3433 	struct hclge_vport *vport = hclge_get_vport(handle);
3434 	struct hclge_dev *hdev = vport->back;
3435 
3436 	vport = hclge_get_vf_vport(hdev, vf);
3437 	if (!vport)
3438 		return -EINVAL;
3439 
3440 	ivf->vf = vf;
3441 	ivf->linkstate = vport->vf_info.link_state;
3442 	ivf->spoofchk = vport->vf_info.spoofchk;
3443 	ivf->trusted = vport->vf_info.trusted;
3444 	ivf->min_tx_rate = 0;
3445 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3446 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3447 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3448 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3449 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3450 
3451 	return 0;
3452 }
3453 
3454 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3455 				   int link_state)
3456 {
3457 	struct hclge_vport *vport = hclge_get_vport(handle);
3458 	struct hclge_dev *hdev = vport->back;
3459 	int link_state_old;
3460 	int ret;
3461 
3462 	vport = hclge_get_vf_vport(hdev, vf);
3463 	if (!vport)
3464 		return -EINVAL;
3465 
3466 	link_state_old = vport->vf_info.link_state;
3467 	vport->vf_info.link_state = link_state;
3468 
3469 	ret = hclge_push_vf_link_status(vport);
3470 	if (ret) {
3471 		vport->vf_info.link_state = link_state_old;
3472 		dev_err(&hdev->pdev->dev,
3473 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3474 	}
3475 
3476 	return ret;
3477 }
3478 
3479 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3480 {
3481 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3482 
3483 	/* fetch the events from their corresponding regs */
3484 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3485 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3486 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3487 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3488 
	/* Assumption: if reset and mailbox events happen to be reported
	 * together, only the reset event is processed in this pass and the
	 * mailbox events are deferred. Since the RX CMDQ event is not
	 * cleared this time, the hardware will raise another interrupt
	 * just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3497 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3498 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3499 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3500 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3501 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3502 		hdev->rst_stats.imp_rst_cnt++;
3503 		return HCLGE_VECTOR0_EVENT_RST;
3504 	}
3505 
3506 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3507 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3508 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3509 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3510 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3511 		hdev->rst_stats.global_rst_cnt++;
3512 		return HCLGE_VECTOR0_EVENT_RST;
3513 	}
3514 
3515 	/* check for vector0 msix event and hardware error event source */
3516 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3517 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3518 		return HCLGE_VECTOR0_EVENT_ERR;
3519 
3520 	/* check for vector0 ptp event source */
3521 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3522 		*clearval = msix_src_reg;
3523 		return HCLGE_VECTOR0_EVENT_PTP;
3524 	}
3525 
3526 	/* check for vector0 mailbox(=CMDQ RX) event source */
3527 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3528 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3529 		*clearval = cmdq_src_reg;
3530 		return HCLGE_VECTOR0_EVENT_MBX;
3531 	}
3532 
3533 	/* print other vector0 event source */
3534 	dev_info(&hdev->pdev->dev,
3535 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3536 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3537 
3538 	return HCLGE_VECTOR0_EVENT_OTHER;
3539 }
3540 
3541 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3542 				    u32 regclr)
3543 {
3544 	switch (event_type) {
3545 	case HCLGE_VECTOR0_EVENT_PTP:
3546 	case HCLGE_VECTOR0_EVENT_RST:
3547 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3548 		break;
3549 	case HCLGE_VECTOR0_EVENT_MBX:
3550 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3551 		break;
3552 	default:
3553 		break;
3554 	}
3555 }
3556 
3557 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3558 {
3559 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3560 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3561 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3562 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3563 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3564 }
3565 
3566 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3567 {
3568 	writel(enable ? 1 : 0, vector->addr);
3569 }
3570 
3571 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3572 {
3573 	struct hclge_dev *hdev = data;
3574 	unsigned long flags;
3575 	u32 clearval = 0;
3576 	u32 event_cause;
3577 
3578 	hclge_enable_vector(&hdev->misc_vector, false);
3579 	event_cause = hclge_check_event_cause(hdev, &clearval);
3580 
3581 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3582 	switch (event_cause) {
3583 	case HCLGE_VECTOR0_EVENT_ERR:
3584 		hclge_errhand_task_schedule(hdev);
3585 		break;
3586 	case HCLGE_VECTOR0_EVENT_RST:
3587 		hclge_reset_task_schedule(hdev);
3588 		break;
3589 	case HCLGE_VECTOR0_EVENT_PTP:
3590 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3591 		hclge_ptp_clean_tx_hwts(hdev);
3592 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3593 		break;
3594 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here, then either:
		 * 1. no mailbox task is currently being handled and none is
		 *    scheduled, or
		 * 2. a mailbox task is being handled but nothing more is
		 *    scheduled.
		 * In both cases schedule the mailbox task, since this
		 * interrupt indicates more mailbox messages to process.
		 */
3604 		hclge_mbx_task_schedule(hdev);
3605 		break;
3606 	default:
3607 		dev_warn(&hdev->pdev->dev,
3608 			 "received unknown or unhandled event of vector0\n");
3609 		break;
3610 	}
3611 
3612 	hclge_clear_event_cause(hdev, event_cause, clearval);
3613 
3614 	/* Enable interrupt if it is not caused by reset event or error event */
3615 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3616 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3617 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3618 		hclge_enable_vector(&hdev->misc_vector, true);
3619 
3620 	return IRQ_HANDLED;
3621 }
3622 
3623 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3624 {
3625 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3626 		dev_warn(&hdev->pdev->dev,
3627 			 "vector(vector_id %d) has been freed.\n", vector_id);
3628 		return;
3629 	}
3630 
3631 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3632 	hdev->num_msi_left += 1;
3633 	hdev->num_msi_used -= 1;
3634 }
3635 
3636 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3637 {
3638 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3639 
3640 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3641 
3642 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3643 	hdev->vector_status[0] = 0;
3644 
3645 	hdev->num_msi_left -= 1;
3646 	hdev->num_msi_used += 1;
3647 }
3648 
3649 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3650 {
3651 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3652 			      &hdev->affinity_mask);
3653 }
3654 
3655 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3656 {
3657 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3658 }
3659 
3660 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3661 {
3662 	int ret;
3663 
3664 	hclge_get_misc_vector(hdev);
3665 
	/* the misc IRQ is freed explicitly in hclge_misc_irq_uninit() */
3667 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3668 		 HCLGE_NAME, pci_name(hdev->pdev));
3669 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3670 			  0, hdev->misc_vector.name, hdev);
3671 	if (ret) {
3672 		hclge_free_vector(hdev, 0);
3673 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3674 			hdev->misc_vector.vector_irq);
3675 	}
3676 
3677 	return ret;
3678 }
3679 
3680 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3681 {
3682 	free_irq(hdev->misc_vector.vector_irq, hdev);
3683 	hclge_free_vector(hdev, 0);
3684 }
3685 
3686 int hclge_notify_client(struct hclge_dev *hdev,
3687 			enum hnae3_reset_notify_type type)
3688 {
3689 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3690 	struct hnae3_client *client = hdev->nic_client;
3691 	int ret;
3692 
3693 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3694 		return 0;
3695 
3696 	if (!client->ops->reset_notify)
3697 		return -EOPNOTSUPP;
3698 
3699 	ret = client->ops->reset_notify(handle, type);
3700 	if (ret)
3701 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3702 			type, ret);
3703 
3704 	return ret;
3705 }
3706 
3707 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3708 				    enum hnae3_reset_notify_type type)
3709 {
3710 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3711 	struct hnae3_client *client = hdev->roce_client;
3712 	int ret;
3713 
3714 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3715 		return 0;
3716 
3717 	if (!client->ops->reset_notify)
3718 		return -EOPNOTSUPP;
3719 
3720 	ret = client->ops->reset_notify(handle, type);
3721 	if (ret)
3722 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3723 			type, ret);
3724 
3725 	return ret;
3726 }
3727 
3728 static int hclge_reset_wait(struct hclge_dev *hdev)
3729 {
#define HCLGE_RESET_WAIT_MS	100
3731 #define HCLGE_RESET_WAIT_CNT	350
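/* the reset status is polled every 100 ms, for at most 35 seconds in total */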
3732 
3733 	u32 val, reg, reg_bit;
3734 	u32 cnt = 0;
3735 
3736 	switch (hdev->reset_type) {
3737 	case HNAE3_IMP_RESET:
3738 		reg = HCLGE_GLOBAL_RESET_REG;
3739 		reg_bit = HCLGE_IMP_RESET_BIT;
3740 		break;
3741 	case HNAE3_GLOBAL_RESET:
3742 		reg = HCLGE_GLOBAL_RESET_REG;
3743 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3744 		break;
3745 	case HNAE3_FUNC_RESET:
3746 		reg = HCLGE_FUN_RST_ING;
3747 		reg_bit = HCLGE_FUN_RST_ING_B;
3748 		break;
3749 	default:
3750 		dev_err(&hdev->pdev->dev,
3751 			"Wait for unsupported reset type: %d\n",
3752 			hdev->reset_type);
3753 		return -EINVAL;
3754 	}
3755 
3756 	val = hclge_read_dev(&hdev->hw, reg);
3757 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3759 		val = hclge_read_dev(&hdev->hw, reg);
3760 		cnt++;
3761 	}
3762 
3763 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3764 		dev_warn(&hdev->pdev->dev,
3765 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3766 		return -EBUSY;
3767 	}
3768 
3769 	return 0;
3770 }
3771 
3772 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3773 {
3774 	struct hclge_vf_rst_cmd *req;
3775 	struct hclge_desc desc;
3776 
3777 	req = (struct hclge_vf_rst_cmd *)desc.data;
3778 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3779 	req->dest_vfid = func_id;
3780 
3781 	if (reset)
3782 		req->vf_rst = 0x1;
3783 
3784 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3785 }
3786 
3787 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3788 {
3789 	int i;
3790 
3791 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3792 		struct hclge_vport *vport = &hdev->vport[i];
3793 		int ret;
3794 
3795 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3796 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3797 		if (ret) {
3798 			dev_err(&hdev->pdev->dev,
3799 				"set vf(%u) rst failed %d!\n",
3800 				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3801 				ret);
3802 			return ret;
3803 		}
3804 
3805 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3806 			continue;
3807 
3808 		/* Inform VF to process the reset.
3809 		 * hclge_inform_reset_assert_to_vf may fail if VF
3810 		 * driver is not loaded.
3811 		 */
3812 		ret = hclge_inform_reset_assert_to_vf(vport);
3813 		if (ret)
3814 			dev_warn(&hdev->pdev->dev,
3815 				 "inform reset to vf(%u) failed %d!\n",
3816 				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3817 				 ret);
3818 	}
3819 
3820 	return 0;
3821 }
3822 
3823 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3824 {
3825 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3826 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3827 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3828 		return;
3829 
3830 	hclge_mbx_handler(hdev);
3831 
3832 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3833 }
3834 
3835 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3836 {
3837 	struct hclge_pf_rst_sync_cmd *req;
3838 	struct hclge_desc desc;
3839 	int cnt = 0;
3840 	int ret;
3841 
3842 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3843 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
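	/* poll up to HCLGE_PF_RESET_SYNC_CNT times, sleeping
	 * HCLGE_PF_RESET_SYNC_TIME ms between queries, until all VFs
	 * report that they are ready for the reset
	 */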
3844 
3845 	do {
		/* VFs need to down their netdev via mailbox during PF or FLR reset */
3847 		hclge_mailbox_service_task(hdev);
3848 
3849 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms (HCLGE_RESET_SYNC_TIME) for the VFs to stop I/O
		 */
3853 		if (ret == -EOPNOTSUPP) {
3854 			msleep(HCLGE_RESET_SYNC_TIME);
3855 			return;
3856 		} else if (ret) {
3857 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3858 				 ret);
3859 			return;
3860 		} else if (req->all_vf_ready) {
3861 			return;
3862 		}
3863 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3864 		hclge_cmd_reuse_desc(&desc, true);
3865 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3866 
3867 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3868 }
3869 
3870 void hclge_report_hw_error(struct hclge_dev *hdev,
3871 			   enum hnae3_hw_error_type type)
3872 {
3873 	struct hnae3_client *client = hdev->nic_client;
3874 
3875 	if (!client || !client->ops->process_hw_error ||
3876 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3877 		return;
3878 
3879 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3880 }
3881 
3882 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3883 {
3884 	u32 reg_val;
3885 
3886 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3887 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3888 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3889 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3890 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3891 	}
3892 
3893 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3894 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3895 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3896 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3897 	}
3898 }
3899 
3900 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3901 {
3902 	struct hclge_desc desc;
3903 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3904 	int ret;
3905 
3906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3907 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3908 	req->fun_reset_vfid = func_id;
3909 
3910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3911 	if (ret)
3912 		dev_err(&hdev->pdev->dev,
3913 			"send function reset cmd fail, status =%d\n", ret);
3914 
3915 	return ret;
3916 }
3917 
3918 static void hclge_do_reset(struct hclge_dev *hdev)
3919 {
3920 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3921 	struct pci_dev *pdev = hdev->pdev;
3922 	u32 val;
3923 
3924 	if (hclge_get_hw_reset_stat(handle)) {
3925 		dev_info(&pdev->dev, "hardware reset not finish\n");
3926 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3927 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3928 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3929 		return;
3930 	}
3931 
3932 	switch (hdev->reset_type) {
3933 	case HNAE3_IMP_RESET:
3934 		dev_info(&pdev->dev, "IMP reset requested\n");
3935 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3936 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3937 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3938 		break;
3939 	case HNAE3_GLOBAL_RESET:
3940 		dev_info(&pdev->dev, "global reset requested\n");
3941 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3942 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3943 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3944 		break;
3945 	case HNAE3_FUNC_RESET:
3946 		dev_info(&pdev->dev, "PF reset requested\n");
3947 		/* schedule again to check later */
3948 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3949 		hclge_reset_task_schedule(hdev);
3950 		break;
3951 	default:
3952 		dev_warn(&pdev->dev,
3953 			 "unsupported reset type: %d\n", hdev->reset_type);
3954 		break;
3955 	}
3956 }
3957 
3958 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3959 						   unsigned long *addr)
3960 {
3961 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3962 	struct hclge_dev *hdev = ae_dev->priv;
3963 
3964 	/* return the highest priority reset level amongst all */
3965 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3966 		rst_level = HNAE3_IMP_RESET;
3967 		clear_bit(HNAE3_IMP_RESET, addr);
3968 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3969 		clear_bit(HNAE3_FUNC_RESET, addr);
3970 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3971 		rst_level = HNAE3_GLOBAL_RESET;
3972 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3973 		clear_bit(HNAE3_FUNC_RESET, addr);
3974 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3975 		rst_level = HNAE3_FUNC_RESET;
3976 		clear_bit(HNAE3_FUNC_RESET, addr);
3977 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3978 		rst_level = HNAE3_FLR_RESET;
3979 		clear_bit(HNAE3_FLR_RESET, addr);
3980 	}
3981 
3982 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3983 	    rst_level < hdev->reset_type)
3984 		return HNAE3_NONE_RESET;
3985 
3986 	return rst_level;
3987 }
3988 
3989 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3990 {
3991 	u32 clearval = 0;
3992 
3993 	switch (hdev->reset_type) {
3994 	case HNAE3_IMP_RESET:
3995 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3996 		break;
3997 	case HNAE3_GLOBAL_RESET:
3998 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3999 		break;
4000 	default:
4001 		break;
4002 	}
4003 
4004 	if (!clearval)
4005 		return;
4006 
	/* For revision 0x20, the reset interrupt source can only be
	 * cleared after the hardware reset is done
	 */
4010 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4011 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4012 				clearval);
4013 
4014 	hclge_enable_vector(&hdev->misc_vector, true);
4015 }
4016 
4017 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4018 {
4019 	u32 reg_val;
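	/* HCLGE_NIC_SW_RST_RDY in the CSQ depth register is a handshake bit:
	 * it is set once driver-side reset preparation is done (see
	 * hclge_reset_prepare_wait()) and cleared again after the stack has
	 * been re-initialized (see hclge_reset_prepare_up()).
	 */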
4020 
4021 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
4022 	if (enable)
4023 		reg_val |= HCLGE_NIC_SW_RST_RDY;
4024 	else
4025 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
4026 
4027 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
4028 }
4029 
4030 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4031 {
4032 	int ret;
4033 
4034 	ret = hclge_set_all_vf_rst(hdev, true);
4035 	if (ret)
4036 		return ret;
4037 
4038 	hclge_func_reset_sync_vf(hdev);
4039 
4040 	return 0;
4041 }
4042 
4043 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4044 {
4045 	u32 reg_val;
4046 	int ret = 0;
4047 
4048 	switch (hdev->reset_type) {
4049 	case HNAE3_FUNC_RESET:
4050 		ret = hclge_func_reset_notify_vf(hdev);
4051 		if (ret)
4052 			return ret;
4053 
4054 		ret = hclge_func_reset_cmd(hdev, 0);
4055 		if (ret) {
4056 			dev_err(&hdev->pdev->dev,
4057 				"asserting function reset fail %d!\n", ret);
4058 			return ret;
4059 		}
4060 
		/* After performing PF reset, it is not necessary to do the
4062 		 * mailbox handling or send any command to firmware, because
4063 		 * any mailbox handling or command to firmware is only valid
4064 		 * after hclge_cmd_init is called.
4065 		 */
4066 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
4067 		hdev->rst_stats.pf_rst_cnt++;
4068 		break;
4069 	case HNAE3_FLR_RESET:
4070 		ret = hclge_func_reset_notify_vf(hdev);
4071 		if (ret)
4072 			return ret;
4073 		break;
4074 	case HNAE3_IMP_RESET:
4075 		hclge_handle_imp_error(hdev);
4076 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4077 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4078 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4079 		break;
4080 	default:
4081 		break;
4082 	}
4083 
4084 	/* inform hardware that preparatory work is done */
4085 	msleep(HCLGE_RESET_SYNC_TIME);
4086 	hclge_reset_handshake(hdev, true);
4087 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4088 
4089 	return ret;
4090 }
4091 
4092 static void hclge_show_rst_info(struct hclge_dev *hdev)
4093 {
4094 	char *buf;
4095 
4096 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4097 	if (!buf)
4098 		return;
4099 
4100 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4101 
4102 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4103 
4104 	kfree(buf);
4105 }
4106 
4107 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4108 {
4109 #define MAX_RESET_FAIL_CNT 5
4110 
4111 	if (hdev->reset_pending) {
4112 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4113 			 hdev->reset_pending);
4114 		return true;
4115 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4116 		   HCLGE_RESET_INT_M) {
4117 		dev_info(&hdev->pdev->dev,
4118 			 "reset failed because new reset interrupt\n");
4119 		hclge_clear_reset_cause(hdev);
4120 		return false;
4121 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4122 		hdev->rst_stats.reset_fail_cnt++;
4123 		set_bit(hdev->reset_type, &hdev->reset_pending);
4124 		dev_info(&hdev->pdev->dev,
4125 			 "re-schedule reset task(%u)\n",
4126 			 hdev->rst_stats.reset_fail_cnt);
4127 		return true;
4128 	}
4129 
4130 	hclge_clear_reset_cause(hdev);
4131 
	/* recover the handshake status when the reset fails */
4133 	hclge_reset_handshake(hdev, true);
4134 
4135 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
4136 
4137 	hclge_show_rst_info(hdev);
4138 
4139 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4140 
4141 	return false;
4142 }
4143 
4144 static void hclge_update_reset_level(struct hclge_dev *hdev)
4145 {
4146 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4147 	enum hnae3_reset_type reset_level;
4148 
	/* a reset request will not be set during reset, so clear any
	 * pending reset request to avoid an unnecessary reset being
	 * triggered for the same reason.
	 */
4153 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4154 
	/* if default_reset_request holds a higher level reset request, it
	 * should be handled as soon as possible, since some errors need
	 * this kind of reset to be fixed.
	 */
4159 	reset_level = hclge_get_reset_level(ae_dev,
4160 					    &hdev->default_reset_request);
4161 	if (reset_level != HNAE3_NONE_RESET)
4162 		set_bit(reset_level, &hdev->reset_request);
4163 }
4164 
4165 static int hclge_set_rst_done(struct hclge_dev *hdev)
4166 {
4167 	struct hclge_pf_rst_done_cmd *req;
4168 	struct hclge_desc desc;
4169 	int ret;
4170 
4171 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4172 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4173 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4174 
4175 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4176 	/* To be compatible with the old firmware, which does not support
4177 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4178 	 * return success
4179 	 */
4180 	if (ret == -EOPNOTSUPP) {
4181 		dev_warn(&hdev->pdev->dev,
4182 			 "current firmware does not support command(0x%x)!\n",
4183 			 HCLGE_OPC_PF_RST_DONE);
4184 		return 0;
4185 	} else if (ret) {
4186 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4187 			ret);
4188 	}
4189 
4190 	return ret;
4191 }
4192 
4193 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4194 {
4195 	int ret = 0;
4196 
4197 	switch (hdev->reset_type) {
4198 	case HNAE3_FUNC_RESET:
4199 	case HNAE3_FLR_RESET:
4200 		ret = hclge_set_all_vf_rst(hdev, false);
4201 		break;
4202 	case HNAE3_GLOBAL_RESET:
4203 	case HNAE3_IMP_RESET:
4204 		ret = hclge_set_rst_done(hdev);
4205 		break;
4206 	default:
4207 		break;
4208 	}
4209 
	/* clear the handshake status after re-initialization is done */
4211 	hclge_reset_handshake(hdev, false);
4212 
4213 	return ret;
4214 }
4215 
4216 static int hclge_reset_stack(struct hclge_dev *hdev)
4217 {
4218 	int ret;
4219 
4220 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4221 	if (ret)
4222 		return ret;
4223 
4224 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4225 	if (ret)
4226 		return ret;
4227 
4228 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4229 }
4230 
4231 static int hclge_reset_prepare(struct hclge_dev *hdev)
4232 {
4233 	int ret;
4234 
4235 	hdev->rst_stats.reset_cnt++;
4236 	/* perform reset of the stack & ae device for a client */
4237 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4238 	if (ret)
4239 		return ret;
4240 
4241 	rtnl_lock();
4242 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4243 	rtnl_unlock();
4244 	if (ret)
4245 		return ret;
4246 
4247 	return hclge_reset_prepare_wait(hdev);
4248 }
4249 
4250 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4251 {
4252 	int ret;
4253 
4254 	hdev->rst_stats.hw_reset_done_cnt++;
4255 
4256 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4257 	if (ret)
4258 		return ret;
4259 
4260 	rtnl_lock();
4261 	ret = hclge_reset_stack(hdev);
4262 	rtnl_unlock();
4263 	if (ret)
4264 		return ret;
4265 
4266 	hclge_clear_reset_cause(hdev);
4267 
4268 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4269 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4270 	 * times
4271 	 */
4272 	if (ret &&
4273 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4274 		return ret;
4275 
4276 	ret = hclge_reset_prepare_up(hdev);
4277 	if (ret)
4278 		return ret;
4279 
4280 	rtnl_lock();
4281 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4282 	rtnl_unlock();
4283 	if (ret)
4284 		return ret;
4285 
4286 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4287 	if (ret)
4288 		return ret;
4289 
4290 	hdev->last_reset_time = jiffies;
4291 	hdev->rst_stats.reset_fail_cnt = 0;
4292 	hdev->rst_stats.reset_done_cnt++;
4293 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4294 
4295 	hclge_update_reset_level(hdev);
4296 
4297 	return 0;
4298 }
4299 
4300 static void hclge_reset(struct hclge_dev *hdev)
4301 {
4302 	if (hclge_reset_prepare(hdev))
4303 		goto err_reset;
4304 
4305 	if (hclge_reset_wait(hdev))
4306 		goto err_reset;
4307 
4308 	if (hclge_reset_rebuild(hdev))
4309 		goto err_reset;
4310 
4311 	return;
4312 
4313 err_reset:
4314 	if (hclge_reset_err_handle(hdev))
4315 		hclge_reset_task_schedule(hdev);
4316 }
4317 
4318 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4319 {
4320 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4321 	struct hclge_dev *hdev = ae_dev->priv;
4322 
	/* We might end up getting called broadly because of 2 cases:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to restore normalcy is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * Check whether this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We know it is a new request if the last reset request
	 * did not occur very recently (watchdog timer = 5*HZ, so check after
	 * a sufficiently large time, say 4*5*HZ). For a new request, the
	 * "reset level" is reset to PF reset. If it is a repeat of the most
	 * recent request, it must be throttled: another reset is not allowed
	 * within HCLGE_RESET_INTERVAL.
	 */
4337 
4338 	if (time_before(jiffies, (hdev->last_reset_time +
4339 				  HCLGE_RESET_INTERVAL))) {
4340 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4341 		return;
4342 	}
4343 
4344 	if (hdev->default_reset_request) {
4345 		hdev->reset_level =
4346 			hclge_get_reset_level(ae_dev,
4347 					      &hdev->default_reset_request);
4348 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4349 		hdev->reset_level = HNAE3_FUNC_RESET;
4350 	}
4351 
4352 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4353 		 hdev->reset_level);
4354 
4355 	/* request reset & schedule reset task */
4356 	set_bit(hdev->reset_level, &hdev->reset_request);
4357 	hclge_reset_task_schedule(hdev);
4358 
4359 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4360 		hdev->reset_level++;
4361 }
4362 
4363 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4364 					enum hnae3_reset_type rst_type)
4365 {
4366 	struct hclge_dev *hdev = ae_dev->priv;
4367 
4368 	set_bit(rst_type, &hdev->default_reset_request);
4369 }
4370 
4371 static void hclge_reset_timer(struct timer_list *t)
4372 {
4373 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4374 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4378 	if (!hdev->default_reset_request)
4379 		return;
4380 
4381 	dev_info(&hdev->pdev->dev,
4382 		 "triggering reset in reset timer\n");
4383 	hclge_reset_event(hdev->pdev, NULL);
4384 }
4385 
4386 static void hclge_reset_subtask(struct hclge_dev *hdev)
4387 {
4388 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4389 
	/* Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset:
	 *    a. If we are able to figure out in a reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. Otherwise, we can come back later to check this status, so
	 *       reschedule now.
	 */
4399 	hdev->last_reset_time = jiffies;
4400 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4401 	if (hdev->reset_type != HNAE3_NONE_RESET)
4402 		hclge_reset(hdev);
4403 
4404 	/* check if we got any *new* reset requests to be honored */
4405 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4406 	if (hdev->reset_type != HNAE3_NONE_RESET)
4407 		hclge_do_reset(hdev);
4408 
4409 	hdev->reset_type = HNAE3_NONE_RESET;
4410 }
4411 
4412 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4413 {
4414 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4415 	enum hnae3_reset_type reset_type;
4416 
4417 	if (ae_dev->hw_err_reset_req) {
4418 		reset_type = hclge_get_reset_level(ae_dev,
4419 						   &ae_dev->hw_err_reset_req);
4420 		hclge_set_def_reset_request(ae_dev, reset_type);
4421 	}
4422 
4423 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4424 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4425 
4426 	/* enable interrupt after error handling complete */
4427 	hclge_enable_vector(&hdev->misc_vector, true);
4428 }
4429 
4430 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4431 {
4432 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4433 
4434 	ae_dev->hw_err_reset_req = 0;
4435 
4436 	if (hclge_find_error_source(hdev)) {
4437 		hclge_handle_error_info_log(ae_dev);
4438 		hclge_handle_mac_tnl(hdev);
4439 	}
4440 
4441 	hclge_handle_err_reset_request(hdev);
4442 }
4443 
4444 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4445 {
4446 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4447 	struct device *dev = &hdev->pdev->dev;
4448 	u32 msix_sts_reg;
4449 
4450 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4451 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4452 		if (hclge_handle_hw_msix_error
4453 				(hdev, &hdev->default_reset_request))
4454 			dev_info(dev, "received msix interrupt 0x%x\n",
4455 				 msix_sts_reg);
4456 	}
4457 
4458 	hclge_handle_hw_ras_error(ae_dev);
4459 
4460 	hclge_handle_err_reset_request(hdev);
4461 }
4462 
4463 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4464 {
4465 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4466 		return;
4467 
4468 	if (hnae3_dev_ras_imp_supported(hdev))
4469 		hclge_handle_err_recovery(hdev);
4470 	else
4471 		hclge_misc_err_recovery(hdev);
4472 }
4473 
4474 static void hclge_reset_service_task(struct hclge_dev *hdev)
4475 {
4476 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4477 		return;
4478 
4479 	down(&hdev->reset_sem);
4480 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4481 
4482 	hclge_reset_subtask(hdev);
4483 
4484 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4485 	up(&hdev->reset_sem);
4486 }
4487 
4488 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4489 {
4490 	int i;
4491 
	/* start from vport 1 since the PF (vport 0) is always alive */
4493 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4494 		struct hclge_vport *vport = &hdev->vport[i];
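		/* a VF is treated as inactive if it has not refreshed
		 * last_active_jiffies within the last 8 seconds
		 */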
4495 
4496 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4497 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4498 
		/* if the VF is not alive, reset its mps to the default value */
4500 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4501 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4502 	}
4503 }
4504 
4505 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4506 {
4507 	unsigned long delta = round_jiffies_relative(HZ);
4508 
4509 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4510 		return;
4511 
4512 	/* Always handle the link updating to make sure link state is
4513 	 * updated when it is triggered by mbx.
4514 	 */
4515 	hclge_update_link_status(hdev);
4516 	hclge_sync_mac_table(hdev);
4517 	hclge_sync_promisc_mode(hdev);
4518 	hclge_sync_fd_table(hdev);
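	/* rate-limit the heavier work below to roughly once per second: if
	 * the last full pass ran less than HZ ago, reschedule for the
	 * remaining time instead
	 */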
4519 
4520 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4521 		delta = jiffies - hdev->last_serv_processed;
4522 
4523 		if (delta < round_jiffies_relative(HZ)) {
4524 			delta = round_jiffies_relative(HZ) - delta;
4525 			goto out;
4526 		}
4527 	}
4528 
4529 	hdev->serv_processed_cnt++;
4530 	hclge_update_vport_alive(hdev);
4531 
4532 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4533 		hdev->last_serv_processed = jiffies;
4534 		goto out;
4535 	}
4536 
4537 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4538 		hclge_update_stats_for_all(hdev);
4539 
4540 	hclge_update_port_info(hdev);
4541 	hclge_sync_vlan_filter(hdev);
4542 
4543 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4544 		hclge_rfs_filter_expire(hdev);
4545 
4546 	hdev->last_serv_processed = jiffies;
4547 
4548 out:
4549 	hclge_task_schedule(hdev, delta);
4550 }
4551 
4552 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4553 {
4554 	unsigned long flags;
4555 
4556 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4557 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4558 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4559 		return;
4560 
	/* to prevent racing with the irq handler */
4562 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4563 
4564 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4565 	 * handler may handle it just before spin_lock_irqsave().
4566 	 */
4567 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4568 		hclge_ptp_clean_tx_hwts(hdev);
4569 
4570 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4571 }
4572 
4573 static void hclge_service_task(struct work_struct *work)
4574 {
4575 	struct hclge_dev *hdev =
4576 		container_of(work, struct hclge_dev, service_task.work);
4577 
4578 	hclge_errhand_service_task(hdev);
4579 	hclge_reset_service_task(hdev);
4580 	hclge_ptp_service_task(hdev);
4581 	hclge_mailbox_service_task(hdev);
4582 	hclge_periodic_service_task(hdev);
4583 
4584 	/* Handle error recovery, reset and mbx again in case periodical task
4585 	 * delays the handling by calling hclge_task_schedule() in
4586 	 * hclge_periodic_service_task().
4587 	 */
4588 	hclge_errhand_service_task(hdev);
4589 	hclge_reset_service_task(hdev);
4590 	hclge_mailbox_service_task(hdev);
4591 }
4592 
4593 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4594 {
4595 	/* VF handle has no client */
4596 	if (!handle->client)
4597 		return container_of(handle, struct hclge_vport, nic);
4598 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4599 		return container_of(handle, struct hclge_vport, roce);
4600 	else
4601 		return container_of(handle, struct hclge_vport, nic);
4602 }
4603 
4604 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4605 				  struct hnae3_vector_info *vector_info)
4606 {
4607 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4608 
4609 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4610 
	/* vectors with index >= HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 need an
	 * extended register offset
	 */
4612 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4613 		vector_info->io_addr = hdev->hw.io_base +
4614 				HCLGE_VECTOR_REG_BASE +
4615 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4616 	else
4617 		vector_info->io_addr = hdev->hw.io_base +
4618 				HCLGE_VECTOR_EXT_REG_BASE +
4619 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4620 				HCLGE_VECTOR_REG_OFFSET_H +
4621 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4622 				HCLGE_VECTOR_REG_OFFSET;
4623 
4624 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4625 	hdev->vector_irq[idx] = vector_info->vector;
4626 }
4627 
4628 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4629 			    struct hnae3_vector_info *vector_info)
4630 {
4631 	struct hclge_vport *vport = hclge_get_vport(handle);
4632 	struct hnae3_vector_info *vector = vector_info;
4633 	struct hclge_dev *hdev = vport->back;
4634 	int alloc = 0;
4635 	u16 i = 0;
4636 	u16 j;
4637 
4638 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4639 	vector_num = min(hdev->num_msi_left, vector_num);
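	/* vector 0 is reserved for the misc interrupt, so the scan below
	 * starts from index 1 (the loop pre-increments i from 0)
	 */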
4640 
4641 	for (j = 0; j < vector_num; j++) {
4642 		while (++i < hdev->num_nic_msi) {
4643 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4644 				hclge_get_vector_info(hdev, i, vector);
4645 				vector++;
4646 				alloc++;
4647 
4648 				break;
4649 			}
4650 		}
4651 	}
4652 	hdev->num_msi_left -= alloc;
4653 	hdev->num_msi_used += alloc;
4654 
4655 	return alloc;
4656 }
4657 
4658 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4659 {
4660 	int i;
4661 
4662 	for (i = 0; i < hdev->num_msi; i++)
4663 		if (vector == hdev->vector_irq[i])
4664 			return i;
4665 
4666 	return -EINVAL;
4667 }
4668 
4669 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4670 {
4671 	struct hclge_vport *vport = hclge_get_vport(handle);
4672 	struct hclge_dev *hdev = vport->back;
4673 	int vector_id;
4674 
4675 	vector_id = hclge_get_vector_index(hdev, vector);
4676 	if (vector_id < 0) {
4677 		dev_err(&hdev->pdev->dev,
4678 			"Get vector index fail. vector = %d\n", vector);
4679 		return vector_id;
4680 	}
4681 
4682 	hclge_free_vector(hdev, vector_id);
4683 
4684 	return 0;
4685 }
4686 
4687 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4688 {
4689 	return HCLGE_RSS_KEY_SIZE;
4690 }
4691 
4692 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4693 				  const u8 hfunc, const u8 *key)
4694 {
4695 	struct hclge_rss_config_cmd *req;
4696 	unsigned int key_offset = 0;
4697 	struct hclge_desc desc;
4698 	int key_counts;
4699 	int key_size;
4700 	int ret;
4701 
4702 	key_counts = HCLGE_RSS_KEY_SIZE;
4703 	req = (struct hclge_rss_config_cmd *)desc.data;
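	/* the RSS key is programmed in chunks of at most
	 * HCLGE_RSS_HASH_KEY_NUM bytes, one descriptor per chunk;
	 * hash_config carries the hash algorithm and the chunk offset
	 */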
4704 
4705 	while (key_counts) {
4706 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4707 					   false);
4708 
4709 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4710 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4711 
4712 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4713 		memcpy(req->hash_key,
4714 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4715 
4716 		key_counts -= key_size;
4717 		key_offset++;
4718 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4719 		if (ret) {
4720 			dev_err(&hdev->pdev->dev,
4721 				"Configure RSS config fail, status = %d\n",
4722 				ret);
4723 			return ret;
4724 		}
4725 	}
4726 	return 0;
4727 }
4728 
4729 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4730 {
4731 	struct hclge_rss_indirection_table_cmd *req;
4732 	struct hclge_desc desc;
4733 	int rss_cfg_tbl_num;
4734 	u8 rss_msb_oft;
4735 	u8 rss_msb_val;
4736 	int ret;
4737 	u16 qid;
4738 	int i;
4739 	u32 j;
4740 
4741 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4742 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4743 			  HCLGE_RSS_CFG_TBL_SIZE;
4744 
4745 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4746 		hclge_cmd_setup_basic_desc
4747 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4748 
4749 		req->start_table_index =
4750 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4751 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4752 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4753 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
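			/* the low 8 bits of the queue id go into rss_qid_l[j];
			 * the bit above them is packed into the shared
			 * rss_qid_h byte array, HCLGE_RSS_CFG_TBL_BW_H bits
			 * per entry
			 */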
4754 			req->rss_qid_l[j] = qid & 0xff;
4755 			rss_msb_oft =
4756 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4757 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4758 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4759 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4760 		}
4761 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4762 		if (ret) {
4763 			dev_err(&hdev->pdev->dev,
4764 				"Configure rss indir table fail,status = %d\n",
4765 				ret);
4766 			return ret;
4767 		}
4768 	}
4769 	return 0;
4770 }
4771 
4772 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4773 				 u16 *tc_size, u16 *tc_offset)
4774 {
4775 	struct hclge_rss_tc_mode_cmd *req;
4776 	struct hclge_desc desc;
4777 	int ret;
4778 	int i;
4779 
4780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4781 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4782 
4783 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4784 		u16 mode = 0;
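		/* each TC mode word packs a valid bit, the tc_size field
		 * (whose most-significant bit is stored in a separate bit)
		 * and the tc_offset field
		 */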
4785 
4786 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4787 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4788 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4789 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4790 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4791 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4792 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4793 
4794 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4795 	}
4796 
4797 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4798 	if (ret)
4799 		dev_err(&hdev->pdev->dev,
4800 			"Configure rss tc mode fail, status = %d\n", ret);
4801 
4802 	return ret;
4803 }
4804 
4805 static void hclge_get_rss_type(struct hclge_vport *vport)
4806 {
4807 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4808 	    vport->rss_tuple_sets.ipv4_udp_en ||
4809 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4810 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4811 	    vport->rss_tuple_sets.ipv6_udp_en ||
4812 	    vport->rss_tuple_sets.ipv6_sctp_en)
4813 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4814 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4815 		 vport->rss_tuple_sets.ipv6_fragment_en)
4816 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4817 	else
4818 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4819 }
4820 
4821 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4822 {
4823 	struct hclge_rss_input_tuple_cmd *req;
4824 	struct hclge_desc desc;
4825 	int ret;
4826 
4827 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4828 
4829 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4830 
4831 	/* Get the tuple cfg from pf */
4832 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4833 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4834 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4835 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4836 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4837 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4838 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4839 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4840 	hclge_get_rss_type(&hdev->vport[0]);
4841 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4842 	if (ret)
4843 		dev_err(&hdev->pdev->dev,
4844 			"Configure rss input fail, status = %d\n", ret);
4845 	return ret;
4846 }
4847 
4848 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4849 			 u8 *key, u8 *hfunc)
4850 {
4851 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4852 	struct hclge_vport *vport = hclge_get_vport(handle);
4853 	int i;
4854 
4855 	/* Get hash algorithm */
4856 	if (hfunc) {
4857 		switch (vport->rss_algo) {
4858 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4859 			*hfunc = ETH_RSS_HASH_TOP;
4860 			break;
4861 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4862 			*hfunc = ETH_RSS_HASH_XOR;
4863 			break;
4864 		default:
4865 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4866 			break;
4867 		}
4868 	}
4869 
4870 	/* Get the RSS Key required by the user */
4871 	if (key)
4872 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4873 
4874 	/* Get indirect table */
4875 	if (indir)
4876 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4877 			indir[i] =  vport->rss_indirection_tbl[i];
4878 
4879 	return 0;
4880 }
4881 
4882 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4883 				 u8 *hash_algo)
4884 {
4885 	switch (hfunc) {
4886 	case ETH_RSS_HASH_TOP:
4887 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4888 		return 0;
4889 	case ETH_RSS_HASH_XOR:
4890 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4891 		return 0;
4892 	case ETH_RSS_HASH_NO_CHANGE:
4893 		*hash_algo = vport->rss_algo;
4894 		return 0;
4895 	default:
4896 		return -EINVAL;
4897 	}
4898 }
4899 
4900 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4901 			 const  u8 *key, const  u8 hfunc)
4902 {
4903 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4904 	struct hclge_vport *vport = hclge_get_vport(handle);
4905 	struct hclge_dev *hdev = vport->back;
4906 	u8 hash_algo;
4907 	int ret, i;
4908 
4909 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4910 	if (ret) {
4911 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4912 		return ret;
4913 	}
4914 
	/* Set the RSS Hash Key if specified by the user */
4916 	if (key) {
4917 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4918 		if (ret)
4919 			return ret;
4920 
		/* Update the shadow RSS key with the user specified key */
4922 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4923 	} else {
4924 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4925 					     vport->rss_hash_key);
4926 		if (ret)
4927 			return ret;
4928 	}
4929 	vport->rss_algo = hash_algo;
4930 
4931 	/* Update the shadow RSS table with user specified qids */
4932 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4933 		vport->rss_indirection_tbl[i] = indir[i];
4934 
4935 	/* Update the hardware */
4936 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4937 }
4938 
4939 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4940 {
4941 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4942 
4943 	if (nfc->data & RXH_L4_B_2_3)
4944 		hash_sets |= HCLGE_D_PORT_BIT;
4945 	else
4946 		hash_sets &= ~HCLGE_D_PORT_BIT;
4947 
4948 	if (nfc->data & RXH_IP_SRC)
4949 		hash_sets |= HCLGE_S_IP_BIT;
4950 	else
4951 		hash_sets &= ~HCLGE_S_IP_BIT;
4952 
4953 	if (nfc->data & RXH_IP_DST)
4954 		hash_sets |= HCLGE_D_IP_BIT;
4955 	else
4956 		hash_sets &= ~HCLGE_D_IP_BIT;
4957 
4958 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4959 		hash_sets |= HCLGE_V_TAG_BIT;
4960 
4961 	return hash_sets;
4962 }
4963 
4964 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4965 				    struct ethtool_rxnfc *nfc,
4966 				    struct hclge_rss_input_tuple_cmd *req)
4967 {
4968 	struct hclge_dev *hdev = vport->back;
4969 	u8 tuple_sets;
4970 
4971 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4972 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4973 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4974 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4975 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4976 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4977 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4978 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4979 
4980 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4981 	switch (nfc->flow_type) {
4982 	case TCP_V4_FLOW:
4983 		req->ipv4_tcp_en = tuple_sets;
4984 		break;
4985 	case TCP_V6_FLOW:
4986 		req->ipv6_tcp_en = tuple_sets;
4987 		break;
4988 	case UDP_V4_FLOW:
4989 		req->ipv4_udp_en = tuple_sets;
4990 		break;
4991 	case UDP_V6_FLOW:
4992 		req->ipv6_udp_en = tuple_sets;
4993 		break;
4994 	case SCTP_V4_FLOW:
4995 		req->ipv4_sctp_en = tuple_sets;
4996 		break;
4997 	case SCTP_V6_FLOW:
4998 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4999 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
5000 			return -EINVAL;
5001 
5002 		req->ipv6_sctp_en = tuple_sets;
5003 		break;
5004 	case IPV4_FLOW:
5005 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5006 		break;
5007 	case IPV6_FLOW:
5008 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5009 		break;
5010 	default:
5011 		return -EINVAL;
5012 	}
5013 
5014 	return 0;
5015 }
5016 
5017 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
5018 			       struct ethtool_rxnfc *nfc)
5019 {
5020 	struct hclge_vport *vport = hclge_get_vport(handle);
5021 	struct hclge_dev *hdev = vport->back;
5022 	struct hclge_rss_input_tuple_cmd *req;
5023 	struct hclge_desc desc;
5024 	int ret;
5025 
5026 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
5027 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
5028 		return -EINVAL;
5029 
5030 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
5031 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
5032 
5033 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
5034 	if (ret) {
5035 		dev_err(&hdev->pdev->dev,
5036 			"failed to init rss tuple cmd, ret = %d\n", ret);
5037 		return ret;
5038 	}
5039 
5040 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5041 	if (ret) {
5042 		dev_err(&hdev->pdev->dev,
5043 			"Set rss tuple fail, status = %d\n", ret);
5044 		return ret;
5045 	}
5046 
5047 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
5048 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
5049 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
5050 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
5051 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
5052 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
5053 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
5054 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
5055 	hclge_get_rss_type(vport);
5056 	return 0;
5057 }
5058 
5059 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
5060 				     u8 *tuple_sets)
5061 {
5062 	switch (flow_type) {
5063 	case TCP_V4_FLOW:
5064 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
5065 		break;
5066 	case UDP_V4_FLOW:
5067 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
5068 		break;
5069 	case TCP_V6_FLOW:
5070 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
5071 		break;
5072 	case UDP_V6_FLOW:
5073 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
5074 		break;
5075 	case SCTP_V4_FLOW:
5076 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
5077 		break;
5078 	case SCTP_V6_FLOW:
5079 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
5080 		break;
5081 	case IPV4_FLOW:
5082 	case IPV6_FLOW:
5083 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
5084 		break;
5085 	default:
5086 		return -EINVAL;
5087 	}
5088 
5089 	return 0;
5090 }
5091 
5092 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
5093 {
5094 	u64 tuple_data = 0;
5095 
5096 	if (tuple_sets & HCLGE_D_PORT_BIT)
5097 		tuple_data |= RXH_L4_B_2_3;
5098 	if (tuple_sets & HCLGE_S_PORT_BIT)
5099 		tuple_data |= RXH_L4_B_0_1;
5100 	if (tuple_sets & HCLGE_D_IP_BIT)
5101 		tuple_data |= RXH_IP_DST;
5102 	if (tuple_sets & HCLGE_S_IP_BIT)
5103 		tuple_data |= RXH_IP_SRC;
5104 
5105 	return tuple_data;
5106 }
5107 
5108 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
5109 			       struct ethtool_rxnfc *nfc)
5110 {
5111 	struct hclge_vport *vport = hclge_get_vport(handle);
5112 	u8 tuple_sets;
5113 	int ret;
5114 
5115 	nfc->data = 0;
5116 
5117 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
5118 	if (ret || !tuple_sets)
5119 		return ret;
5120 
5121 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
5122 
5123 	return 0;
5124 }
5125 
5126 static int hclge_get_tc_size(struct hnae3_handle *handle)
5127 {
5128 	struct hclge_vport *vport = hclge_get_vport(handle);
5129 	struct hclge_dev *hdev = vport->back;
5130 
5131 	return hdev->pf_rss_size_max;
5132 }
5133 
5134 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5135 {
5136 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5137 	struct hclge_vport *vport = hdev->vport;
5138 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5139 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5140 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5141 	struct hnae3_tc_info *tc_info;
5142 	u16 roundup_size;
5143 	u16 rss_size;
5144 	int i;
5145 
5146 	tc_info = &vport->nic.kinfo.tc_info;
5147 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5148 		rss_size = tc_info->tqp_count[i];
5149 		tc_valid[i] = 0;
5150 
5151 		if (!(hdev->hw_tc_map & BIT(i)))
5152 			continue;
5153 
5154 		/* tc_size set to hardware is the log2 of the roundup power of
5155 		 * two of rss_size; the actual queue size is limited by the
5156 		 * indirection table.
5157 		 */
5158 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5159 		    rss_size == 0) {
5160 			dev_err(&hdev->pdev->dev,
5161 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
5162 				rss_size);
5163 			return -EINVAL;
5164 		}
5165 
5166 		roundup_size = roundup_pow_of_two(rss_size);
5167 		roundup_size = ilog2(roundup_size);
5168 
5169 		tc_valid[i] = 1;
5170 		tc_size[i] = roundup_size;
5171 		tc_offset[i] = tc_info->tqp_offset[i];
5172 	}
5173 
5174 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5175 }
5176 
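/* Program the RSS configuration cached in vport 0 (indirection table, hash
 * algorithm and key, input tuples) into hardware, then configure the per-TC
 * RSS mode.
 */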
5177 int hclge_rss_init_hw(struct hclge_dev *hdev)
5178 {
5179 	struct hclge_vport *vport = hdev->vport;
5180 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5181 	u8 *key = vport[0].rss_hash_key;
5182 	u8 hfunc = vport[0].rss_algo;
5183 	int ret;
5184 
5185 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5186 	if (ret)
5187 		return ret;
5188 
5189 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5190 	if (ret)
5191 		return ret;
5192 
5193 	ret = hclge_set_rss_input_tuple(hdev);
5194 	if (ret)
5195 		return ret;
5196 
5197 	return hclge_init_rss_tc_mode(hdev);
5198 }
5199 
5200 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5201 {
5202 	struct hclge_vport *vport = &hdev->vport[0];
5203 	int i;
5204 
5205 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5206 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5207 }
5208 
5209 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5210 {
5211 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5212 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5213 	struct hclge_vport *vport = &hdev->vport[0];
5214 	u16 *rss_ind_tbl;
5215 
5216 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5217 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5218 
5219 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5220 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5221 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5222 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5223 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5224 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5225 	vport->rss_tuple_sets.ipv6_sctp_en =
5226 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5227 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5228 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5229 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5230 
5231 	vport->rss_algo = rss_algo;
5232 
5233 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5234 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5235 	if (!rss_ind_tbl)
5236 		return -ENOMEM;
5237 
5238 	vport->rss_indirection_tbl = rss_ind_tbl;
5239 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5240 
5241 	hclge_rss_indir_init_cfg(hdev);
5242 
5243 	return 0;
5244 }
5245 
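/* Map (en == true) or unmap (en == false) the TQP rings in ring_chain to the
 * interrupt vector 'vector_id'. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries are packed into one command descriptor; a descriptor is sent
 * whenever it becomes full, and a final one is sent for any remainder.
 */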
5246 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5247 				int vector_id, bool en,
5248 				struct hnae3_ring_chain_node *ring_chain)
5249 {
5250 	struct hclge_dev *hdev = vport->back;
5251 	struct hnae3_ring_chain_node *node;
5252 	struct hclge_desc desc;
5253 	struct hclge_ctrl_vector_chain_cmd *req =
5254 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5255 	enum hclge_cmd_status status;
5256 	enum hclge_opcode_type op;
5257 	u16 tqp_type_and_id;
5258 	int i;
5259 
5260 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5261 	hclge_cmd_setup_basic_desc(&desc, op, false);
5262 	req->int_vector_id_l = hnae3_get_field(vector_id,
5263 					       HCLGE_VECTOR_ID_L_M,
5264 					       HCLGE_VECTOR_ID_L_S);
5265 	req->int_vector_id_h = hnae3_get_field(vector_id,
5266 					       HCLGE_VECTOR_ID_H_M,
5267 					       HCLGE_VECTOR_ID_H_S);
5268 
5269 	i = 0;
5270 	for (node = ring_chain; node; node = node->next) {
5271 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5272 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5273 				HCLGE_INT_TYPE_S,
5274 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5275 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5276 				HCLGE_TQP_ID_S, node->tqp_index);
5277 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5278 				HCLGE_INT_GL_IDX_S,
5279 				hnae3_get_field(node->int_gl_idx,
5280 						HNAE3_RING_GL_IDX_M,
5281 						HNAE3_RING_GL_IDX_S));
5282 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5283 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5284 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5285 			req->vfid = vport->vport_id;
5286 
5287 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5288 			if (status) {
5289 				dev_err(&hdev->pdev->dev,
5290 					"Map TQP fail, status is %d.\n",
5291 					status);
5292 				return -EIO;
5293 			}
5294 			i = 0;
5295 
5296 			hclge_cmd_setup_basic_desc(&desc,
5297 						   op,
5298 						   false);
5299 			req->int_vector_id_l =
5300 				hnae3_get_field(vector_id,
5301 						HCLGE_VECTOR_ID_L_M,
5302 						HCLGE_VECTOR_ID_L_S);
5303 			req->int_vector_id_h =
5304 				hnae3_get_field(vector_id,
5305 						HCLGE_VECTOR_ID_H_M,
5306 						HCLGE_VECTOR_ID_H_S);
5307 		}
5308 	}
5309 
5310 	if (i > 0) {
5311 		req->int_cause_num = i;
5312 		req->vfid = vport->vport_id;
5313 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5314 		if (status) {
5315 			dev_err(&hdev->pdev->dev,
5316 				"Map TQP fail, status is %d.\n", status);
5317 			return -EIO;
5318 		}
5319 	}
5320 
5321 	return 0;
5322 }
5323 
5324 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5325 				    struct hnae3_ring_chain_node *ring_chain)
5326 {
5327 	struct hclge_vport *vport = hclge_get_vport(handle);
5328 	struct hclge_dev *hdev = vport->back;
5329 	int vector_id;
5330 
5331 	vector_id = hclge_get_vector_index(hdev, vector);
5332 	if (vector_id < 0) {
5333 		dev_err(&hdev->pdev->dev,
5334 			"failed to get vector index. vector=%d\n", vector);
5335 		return vector_id;
5336 	}
5337 
5338 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5339 }
5340 
5341 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5342 				       struct hnae3_ring_chain_node *ring_chain)
5343 {
5344 	struct hclge_vport *vport = hclge_get_vport(handle);
5345 	struct hclge_dev *hdev = vport->back;
5346 	int vector_id, ret;
5347 
5348 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5349 		return 0;
5350 
5351 	vector_id = hclge_get_vector_index(hdev, vector);
5352 	if (vector_id < 0) {
5353 		dev_err(&handle->pdev->dev,
5354 			"Get vector index fail. ret =%d\n", vector_id);
5355 		return vector_id;
5356 	}
5357 
5358 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5359 	if (ret)
5360 		dev_err(&handle->pdev->dev,
5361 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5362 			vector_id, ret);
5363 
5364 	return ret;
5365 }
5366 
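/* Configure promiscuous mode for one function. The extend_promisc field
 * carries separate RX/TX enables for unicast, multicast and broadcast, while
 * the legacy promisc field is also filled for DEVICE_VERSION_V1/2
 * compatibility. When the limit-promisc private flag is set, unicast TX
 * promisc is kept disabled.
 */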
5367 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5368 				      bool en_uc, bool en_mc, bool en_bc)
5369 {
5370 	struct hclge_vport *vport = &hdev->vport[vf_id];
5371 	struct hnae3_handle *handle = &vport->nic;
5372 	struct hclge_promisc_cfg_cmd *req;
5373 	struct hclge_desc desc;
5374 	bool uc_tx_en = en_uc;
5375 	u8 promisc_cfg = 0;
5376 	int ret;
5377 
5378 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5379 
5380 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5381 	req->vf_id = vf_id;
5382 
5383 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5384 		uc_tx_en = false;
5385 
5386 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5387 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5388 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5389 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5390 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5391 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5392 	req->extend_promisc = promisc_cfg;
5393 
5394 	/* to be compatible with DEVICE_VERSION_V1/2 */
5395 	promisc_cfg = 0;
5396 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5397 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5398 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5399 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5400 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5401 	req->promisc = promisc_cfg;
5402 
5403 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5404 	if (ret)
5405 		dev_err(&hdev->pdev->dev,
5406 			"failed to set vport %u promisc mode, ret = %d.\n",
5407 			vf_id, ret);
5408 
5409 	return ret;
5410 }
5411 
5412 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5413 				 bool en_mc_pmc, bool en_bc_pmc)
5414 {
5415 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5416 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5417 }
5418 
5419 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5420 				  bool en_mc_pmc)
5421 {
5422 	struct hclge_vport *vport = hclge_get_vport(handle);
5423 	struct hclge_dev *hdev = vport->back;
5424 	bool en_bc_pmc = true;
5425 
5426 	/* For devices whose version is below V2, the vlan filter is always
5427 	 * bypassed when broadcast promisc is enabled. So broadcast promisc
5428 	 * should be disabled until the user enables promisc mode.
5429 	 */
5430 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5431 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5432 
5433 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5434 					    en_bc_pmc);
5435 }
5436 
5437 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5438 {
5439 	struct hclge_vport *vport = hclge_get_vport(handle);
5440 
5441 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5442 }
5443 
5444 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5445 {
5446 	if (hlist_empty(&hdev->fd_rule_list))
5447 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5448 }
5449 
5450 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5451 {
5452 	if (!test_bit(location, hdev->fd_bmap)) {
5453 		set_bit(location, hdev->fd_bmap);
5454 		hdev->hclge_fd_rule_num++;
5455 	}
5456 }
5457 
5458 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5459 {
5460 	if (test_bit(location, hdev->fd_bmap)) {
5461 		clear_bit(location, hdev->fd_bmap);
5462 		hdev->hclge_fd_rule_num--;
5463 	}
5464 }
5465 
5466 static void hclge_fd_free_node(struct hclge_dev *hdev,
5467 			       struct hclge_fd_rule *rule)
5468 {
5469 	hlist_del(&rule->rule_node);
5470 	kfree(rule);
5471 	hclge_sync_fd_state(hdev);
5472 }
5473 
5474 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5475 				      struct hclge_fd_rule *old_rule,
5476 				      struct hclge_fd_rule *new_rule,
5477 				      enum HCLGE_FD_NODE_STATE state)
5478 {
5479 	switch (state) {
5480 	case HCLGE_FD_TO_ADD:
5481 	case HCLGE_FD_ACTIVE:
5482 		/* 1) if the new state is TO_ADD, just replace the old rule
5483 		 * with the same location, no matter its state, because the
5484 		 * new rule will be configured to the hardware.
5485 		 * 2) if the new state is ACTIVE, it means the new rule
5486 		 * has already been configured to the hardware, so just
5487 		 * replace the old rule node with the same location.
5488 		 * 3) since no new node is added to the list, there is no
5489 		 * need to update the rule number and fd_bmap.
5490 		 */
5491 		new_rule->rule_node.next = old_rule->rule_node.next;
5492 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5493 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5494 		kfree(new_rule);
5495 		break;
5496 	case HCLGE_FD_DELETED:
5497 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5498 		hclge_fd_free_node(hdev, old_rule);
5499 		break;
5500 	case HCLGE_FD_TO_DEL:
5501 		/* if the new request is TO_DEL and the old rule exists:
5502 		 * 1) the state of the old rule is TO_DEL: nothing to do,
5503 		 * because rules are deleted by location and the other rule
5504 		 * content is unnecessary.
5505 		 * 2) the state of the old rule is ACTIVE: change its state
5506 		 * to TO_DEL, so the rule will be deleted when the periodic
5507 		 * task is scheduled.
5508 		 * 3) the state of the old rule is TO_ADD: the rule has not
5509 		 * been added to the hardware yet, so just delete the rule
5510 		 * node from fd_rule_list directly.
5511 		 */
5512 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5513 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5514 			hclge_fd_free_node(hdev, old_rule);
5515 			return;
5516 		}
5517 		old_rule->state = HCLGE_FD_TO_DEL;
5518 		break;
5519 	}
5520 }
5521 
5522 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5523 						u16 location,
5524 						struct hclge_fd_rule **parent)
5525 {
5526 	struct hclge_fd_rule *rule;
5527 	struct hlist_node *node;
5528 
5529 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5530 		if (rule->location == location)
5531 			return rule;
5532 		else if (rule->location > location)
5533 			return NULL;
5534 		/* record the parent node, used to keep the nodes in
5535 		 * fd_rule_list in ascending order.
5536 		 */
5537 		*parent = rule;
5538 	}
5539 
5540 	return NULL;
5541 }
5542 
5543 /* insert fd rule node in ascending order according to rule->location */
5544 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5545 				      struct hclge_fd_rule *rule,
5546 				      struct hclge_fd_rule *parent)
5547 {
5548 	INIT_HLIST_NODE(&rule->rule_node);
5549 
5550 	if (parent)
5551 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5552 	else
5553 		hlist_add_head(&rule->rule_node, hlist);
5554 }
5555 
5556 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5557 				     struct hclge_fd_user_def_cfg *cfg)
5558 {
5559 	struct hclge_fd_user_def_cfg_cmd *req;
5560 	struct hclge_desc desc;
5561 	u16 data = 0;
5562 	int ret;
5563 
5564 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5565 
5566 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5567 
5568 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5569 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5570 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5571 	req->ol2_cfg = cpu_to_le16(data);
5572 
5573 	data = 0;
5574 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5575 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5576 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5577 	req->ol3_cfg = cpu_to_le16(data);
5578 
5579 	data = 0;
5580 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5581 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5582 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5583 	req->ol4_cfg = cpu_to_le16(data);
5584 
5585 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5586 	if (ret)
5587 		dev_err(&hdev->pdev->dev,
5588 			"failed to set fd user def data, ret = %d\n", ret);
5589 	return ret;
5590 }
5591 
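/* Write the user-def offset configuration to hardware if it has been marked
 * as changed. 'locked' indicates that the caller already holds fd_rule_lock.
 * On failure the changed flag is set again so that a later call can retry.
 */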
5592 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5593 {
5594 	int ret;
5595 
5596 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5597 		return;
5598 
5599 	if (!locked)
5600 		spin_lock_bh(&hdev->fd_rule_lock);
5601 
5602 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5603 	if (ret)
5604 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5605 
5606 	if (!locked)
5607 		spin_unlock_bh(&hdev->fd_rule_lock);
5608 }
5609 
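/* Each layer (L2/L3/L4) supports only one user-def offset. Reject a rule
 * whose user-def offset conflicts with the one already in use for its layer,
 * unless the rule being replaced at the same location is the only user of
 * that layer.
 */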
5610 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5611 					  struct hclge_fd_rule *rule)
5612 {
5613 	struct hlist_head *hlist = &hdev->fd_rule_list;
5614 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5615 	struct hclge_fd_user_def_info *info, *old_info;
5616 	struct hclge_fd_user_def_cfg *cfg;
5617 
5618 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5619 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5620 		return 0;
5621 
5622 	/* valid layers start from 1, so minus 1 to get the cfg index */
5623 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5624 	info = &rule->ep.user_def;
5625 
5626 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5627 		return 0;
5628 
5629 	if (cfg->ref_cnt > 1)
5630 		goto error;
5631 
5632 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5633 	if (fd_rule) {
5634 		old_info = &fd_rule->ep.user_def;
5635 		if (info->layer == old_info->layer)
5636 			return 0;
5637 	}
5638 
5639 error:
5640 	dev_err(&hdev->pdev->dev,
5641 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5642 		info->layer + 1);
5643 	return -ENOSPC;
5644 }
5645 
5646 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5647 					 struct hclge_fd_rule *rule)
5648 {
5649 	struct hclge_fd_user_def_cfg *cfg;
5650 
5651 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5652 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5653 		return;
5654 
5655 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5656 	if (!cfg->ref_cnt) {
5657 		cfg->offset = rule->ep.user_def.offset;
5658 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5659 	}
5660 	cfg->ref_cnt++;
5661 }
5662 
5663 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5664 					 struct hclge_fd_rule *rule)
5665 {
5666 	struct hclge_fd_user_def_cfg *cfg;
5667 
5668 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5669 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5670 		return;
5671 
5672 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5673 	if (!cfg->ref_cnt)
5674 		return;
5675 
5676 	cfg->ref_cnt--;
5677 	if (!cfg->ref_cnt) {
5678 		cfg->offset = 0;
5679 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5680 	}
5681 }
5682 
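/* Single entry point for updating the software fd rule list. It keeps the
 * user-def refcounts, the rule bitmap and counter, and the node state
 * consistent for add, replace and delete operations, and schedules the
 * periodic task when a new rule still needs to be written to hardware.
 */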
5683 static void hclge_update_fd_list(struct hclge_dev *hdev,
5684 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5685 				 struct hclge_fd_rule *new_rule)
5686 {
5687 	struct hlist_head *hlist = &hdev->fd_rule_list;
5688 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5689 
5690 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5691 	if (fd_rule) {
5692 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5693 		if (state == HCLGE_FD_ACTIVE)
5694 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5695 		hclge_sync_fd_user_def_cfg(hdev, true);
5696 
5697 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5698 		return;
5699 	}
5700 
5701 	/* it's unlikely to fail here, because we have checked that the
5702 	 * rule exists before.
5703 	 */
5704 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5705 		dev_warn(&hdev->pdev->dev,
5706 			 "failed to delete fd rule %u, it's inexistent\n",
5707 			 location);
5708 		return;
5709 	}
5710 
5711 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5712 	hclge_sync_fd_user_def_cfg(hdev, true);
5713 
5714 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5715 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5716 
5717 	if (state == HCLGE_FD_TO_ADD) {
5718 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5719 		hclge_task_schedule(hdev, 0);
5720 	}
5721 }
5722 
5723 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5724 {
5725 	struct hclge_get_fd_mode_cmd *req;
5726 	struct hclge_desc desc;
5727 	int ret;
5728 
5729 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5730 
5731 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5732 
5733 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5734 	if (ret) {
5735 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5736 		return ret;
5737 	}
5738 
5739 	*fd_mode = req->mode;
5740 
5741 	return ret;
5742 }
5743 
5744 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5745 				   u32 *stage1_entry_num,
5746 				   u32 *stage2_entry_num,
5747 				   u16 *stage1_counter_num,
5748 				   u16 *stage2_counter_num)
5749 {
5750 	struct hclge_get_fd_allocation_cmd *req;
5751 	struct hclge_desc desc;
5752 	int ret;
5753 
5754 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5755 
5756 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5757 
5758 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5759 	if (ret) {
5760 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5761 			ret);
5762 		return ret;
5763 	}
5764 
5765 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5766 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5767 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5768 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5769 
5770 	return ret;
5771 }
5772 
5773 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5774 				   enum HCLGE_FD_STAGE stage_num)
5775 {
5776 	struct hclge_set_fd_key_config_cmd *req;
5777 	struct hclge_fd_key_cfg *stage;
5778 	struct hclge_desc desc;
5779 	int ret;
5780 
5781 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5782 
5783 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5784 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5785 	req->stage = stage_num;
5786 	req->key_select = stage->key_sel;
5787 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5788 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5789 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5790 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5791 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5792 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5793 
5794 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5795 	if (ret)
5796 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5797 
5798 	return ret;
5799 }
5800 
5801 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5802 {
5803 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5804 
5805 	spin_lock_bh(&hdev->fd_rule_lock);
5806 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5807 	spin_unlock_bh(&hdev->fd_rule_lock);
5808 
5809 	hclge_fd_set_user_def_cmd(hdev, cfg);
5810 }
5811 
5812 static int hclge_init_fd_config(struct hclge_dev *hdev)
5813 {
5814 #define LOW_2_WORDS		0x03
5815 	struct hclge_fd_key_cfg *key_cfg;
5816 	int ret;
5817 
5818 	if (!hnae3_dev_fd_supported(hdev))
5819 		return 0;
5820 
5821 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5822 	if (ret)
5823 		return ret;
5824 
5825 	switch (hdev->fd_cfg.fd_mode) {
5826 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5827 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5828 		break;
5829 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5830 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5831 		break;
5832 	default:
5833 		dev_err(&hdev->pdev->dev,
5834 			"Unsupported flow director mode %u\n",
5835 			hdev->fd_cfg.fd_mode);
5836 		return -EOPNOTSUPP;
5837 	}
5838 
5839 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5840 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5841 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5842 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5843 	key_cfg->outer_sipv6_word_en = 0;
5844 	key_cfg->outer_dipv6_word_en = 0;
5845 
5846 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5847 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5848 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5849 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5850 
5851 	/* If using the max 400-bit key, we can support tuples for ether type */
5852 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5853 		key_cfg->tuple_active |=
5854 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5855 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5856 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5857 	}
5858 
5859 	/* roce_type is used to filter roce frames;
5860 	 * dst_vport is used to specify the rule.
5861 	 */
5862 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5863 
5864 	ret = hclge_get_fd_allocation(hdev,
5865 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5866 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5867 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5868 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5869 	if (ret)
5870 		return ret;
5871 
5872 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5873 }
5874 
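/* Write one TCAM entry. The key is split across three chained command
 * descriptors; sel_x selects whether the x or y half of the key is written.
 * 'key' may be NULL (e.g. when only invalidating the entry), and the entry
 * valid bit is set only on the x half when adding.
 */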
5875 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5876 				int loc, u8 *key, bool is_add)
5877 {
5878 	struct hclge_fd_tcam_config_1_cmd *req1;
5879 	struct hclge_fd_tcam_config_2_cmd *req2;
5880 	struct hclge_fd_tcam_config_3_cmd *req3;
5881 	struct hclge_desc desc[3];
5882 	int ret;
5883 
5884 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5885 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5886 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5887 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5888 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5889 
5890 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5891 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5892 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5893 
5894 	req1->stage = stage;
5895 	req1->xy_sel = sel_x ? 1 : 0;
5896 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5897 	req1->index = cpu_to_le32(loc);
5898 	req1->entry_vld = sel_x ? is_add : 0;
5899 
5900 	if (key) {
5901 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5902 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5903 		       sizeof(req2->tcam_data));
5904 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5905 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5906 	}
5907 
5908 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5909 	if (ret)
5910 		dev_err(&hdev->pdev->dev,
5911 			"config tcam key fail, ret=%d\n",
5912 			ret);
5913 
5914 	return ret;
5915 }
5916 
5917 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5918 			      struct hclge_fd_ad_data *action)
5919 {
5920 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5921 	struct hclge_fd_ad_config_cmd *req;
5922 	struct hclge_desc desc;
5923 	u64 ad_data = 0;
5924 	int ret;
5925 
5926 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5927 
5928 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5929 	req->index = cpu_to_le32(loc);
5930 	req->stage = stage;
5931 
5932 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5933 		      action->write_rule_id_to_bd);
5934 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5935 			action->rule_id);
5936 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5937 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5938 			      action->override_tc);
5939 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5940 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5941 	}
5942 	ad_data <<= 32;
5943 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5944 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5945 		      action->forward_to_direct_queue);
5946 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5947 			action->queue_id);
5948 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5949 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5950 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5951 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5952 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5953 			action->counter_id);
5954 
5955 	req->ad_data = cpu_to_le64(ad_data);
5956 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5957 	if (ret)
5958 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5959 
5960 	return ret;
5961 }
5962 
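/* Convert one tuple of the rule (value and mask) into the TCAM x/y key
 * format using calc_x()/calc_y(). Returns true when the tuple was written
 * (or is unused and can be skipped), false for an unknown key option.
 */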
5963 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5964 				   struct hclge_fd_rule *rule)
5965 {
5966 	int offset, moffset, ip_offset;
5967 	enum HCLGE_FD_KEY_OPT key_opt;
5968 	u16 tmp_x_s, tmp_y_s;
5969 	u32 tmp_x_l, tmp_y_l;
5970 	u8 *p = (u8 *)rule;
5971 	int i;
5972 
5973 	if (rule->unused_tuple & BIT(tuple_bit))
5974 		return true;
5975 
5976 	key_opt = tuple_key_info[tuple_bit].key_opt;
5977 	offset = tuple_key_info[tuple_bit].offset;
5978 	moffset = tuple_key_info[tuple_bit].moffset;
5979 
5980 	switch (key_opt) {
5981 	case KEY_OPT_U8:
5982 		calc_x(*key_x, p[offset], p[moffset]);
5983 		calc_y(*key_y, p[offset], p[moffset]);
5984 
5985 		return true;
5986 	case KEY_OPT_LE16:
5987 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5988 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5989 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5990 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5991 
5992 		return true;
5993 	case KEY_OPT_LE32:
5994 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5995 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5996 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5997 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5998 
5999 		return true;
6000 	case KEY_OPT_MAC:
6001 		for (i = 0; i < ETH_ALEN; i++) {
6002 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
6003 			       p[moffset + i]);
6004 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
6005 			       p[moffset + i]);
6006 		}
6007 
6008 		return true;
6009 	case KEY_OPT_IP:
6010 		ip_offset = IPV4_INDEX * sizeof(u32);
6011 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
6012 		       *(u32 *)(&p[moffset + ip_offset]));
6013 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
6014 		       *(u32 *)(&p[moffset + ip_offset]));
6015 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
6016 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
6017 
6018 		return true;
6019 	default:
6020 		return false;
6021 	}
6022 }
6023 
6024 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
6025 				 u8 vf_id, u8 network_port_id)
6026 {
6027 	u32 port_number = 0;
6028 
6029 	if (port_type == HOST_PORT) {
6030 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
6031 				pf_id);
6032 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
6033 				vf_id);
6034 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
6035 	} else {
6036 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
6037 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
6038 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
6039 	}
6040 
6041 	return port_number;
6042 }
6043 
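/* Pack the active meta data fields (packet type and destination vport) into
 * the meta data word, then shift the result so that it sits at the MSB end
 * of the 32-bit meta data region of the key.
 */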
6044 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
6045 				       __le32 *key_x, __le32 *key_y,
6046 				       struct hclge_fd_rule *rule)
6047 {
6048 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
6049 	u8 cur_pos = 0, tuple_size, shift_bits;
6050 	unsigned int i;
6051 
6052 	for (i = 0; i < MAX_META_DATA; i++) {
6053 		tuple_size = meta_data_key_info[i].key_length;
6054 		tuple_bit = key_cfg->meta_data_active & BIT(i);
6055 
6056 		switch (tuple_bit) {
6057 		case BIT(ROCE_TYPE):
6058 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
6059 			cur_pos += tuple_size;
6060 			break;
6061 		case BIT(DST_VPORT):
6062 			port_number = hclge_get_port_number(HOST_PORT, 0,
6063 							    rule->vf_id, 0);
6064 			hnae3_set_field(meta_data,
6065 					GENMASK(cur_pos + tuple_size, cur_pos),
6066 					cur_pos, port_number);
6067 			cur_pos += tuple_size;
6068 			break;
6069 		default:
6070 			break;
6071 		}
6072 	}
6073 
6074 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
6075 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
6076 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
6077 
6078 	*key_x = cpu_to_le32(tmp_x << shift_bits);
6079 	*key_y = cpu_to_le32(tmp_y << shift_bits);
6080 }
6081 
6082 /* A complete key consists of a meta data key and a tuple key.
6083  * The meta data key is stored in the MSB region, the tuple key is stored
6084  * in the LSB region, and unused bits are filled with 0.
6085  */
6086 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
6087 			    struct hclge_fd_rule *rule)
6088 {
6089 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
6090 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
6091 	u8 *cur_key_x, *cur_key_y;
6092 	u8 meta_data_region;
6093 	u8 tuple_size;
6094 	int ret;
6095 	u32 i;
6096 
6097 	memset(key_x, 0, sizeof(key_x));
6098 	memset(key_y, 0, sizeof(key_y));
6099 	cur_key_x = key_x;
6100 	cur_key_y = key_y;
6101 
6102 	for (i = 0; i < MAX_TUPLE; i++) {
6103 		bool tuple_valid;
6104 
6105 		tuple_size = tuple_key_info[i].key_length / 8;
6106 		if (!(key_cfg->tuple_active & BIT(i)))
6107 			continue;
6108 
6109 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
6110 						     cur_key_y, rule);
6111 		if (tuple_valid) {
6112 			cur_key_x += tuple_size;
6113 			cur_key_y += tuple_size;
6114 		}
6115 	}
6116 
6117 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
6118 			MAX_META_DATA_LENGTH / 8;
6119 
6120 	hclge_fd_convert_meta_data(key_cfg,
6121 				   (__le32 *)(key_x + meta_data_region),
6122 				   (__le32 *)(key_y + meta_data_region),
6123 				   rule);
6124 
6125 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
6126 				   true);
6127 	if (ret) {
6128 		dev_err(&hdev->pdev->dev,
6129 			"fd key_y config fail, loc=%u, ret=%d\n",
6130 			rule->location, ret);
6131 		return ret;
6132 	}
6133 
6134 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6135 				   true);
6136 	if (ret)
6137 		dev_err(&hdev->pdev->dev,
6138 			"fd key_x config fail, loc=%u, ret=%d\n",
6139 			rule->location, ret);
6140 	return ret;
6141 }
6142 
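/* Build the action data (AD) for a rule: drop the packet, redirect to the
 * queue range of a TC (for tc flower rules), or forward to a specific queue.
 * A counter is attached when stage 1 counters are available, and the rule id
 * is written back to the RX BD so the matched rule can be identified.
 */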
6143 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6144 			       struct hclge_fd_rule *rule)
6145 {
6146 	struct hclge_vport *vport = hdev->vport;
6147 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6148 	struct hclge_fd_ad_data ad_data;
6149 
6150 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6151 	ad_data.ad_id = rule->location;
6152 
6153 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6154 		ad_data.drop_packet = true;
6155 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6156 		ad_data.override_tc = true;
6157 		ad_data.queue_id =
6158 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6159 		ad_data.tc_size =
6160 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6161 	} else {
6162 		ad_data.forward_to_direct_queue = true;
6163 		ad_data.queue_id = rule->queue_id;
6164 	}
6165 
6166 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6167 		ad_data.use_counter = true;
6168 		ad_data.counter_id = rule->vf_id %
6169 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6170 	} else {
6171 		ad_data.use_counter = false;
6172 		ad_data.counter_id = 0;
6173 	}
6174 
6175 	ad_data.use_next_stage = false;
6176 	ad_data.next_input_key = 0;
6177 
6178 	ad_data.write_rule_id_to_bd = true;
6179 	ad_data.rule_id = rule->location;
6180 
6181 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6182 }
6183 
6184 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6185 				       u32 *unused_tuple)
6186 {
6187 	if (!spec || !unused_tuple)
6188 		return -EINVAL;
6189 
6190 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6191 
6192 	if (!spec->ip4src)
6193 		*unused_tuple |= BIT(INNER_SRC_IP);
6194 
6195 	if (!spec->ip4dst)
6196 		*unused_tuple |= BIT(INNER_DST_IP);
6197 
6198 	if (!spec->psrc)
6199 		*unused_tuple |= BIT(INNER_SRC_PORT);
6200 
6201 	if (!spec->pdst)
6202 		*unused_tuple |= BIT(INNER_DST_PORT);
6203 
6204 	if (!spec->tos)
6205 		*unused_tuple |= BIT(INNER_IP_TOS);
6206 
6207 	return 0;
6208 }
6209 
6210 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6211 				    u32 *unused_tuple)
6212 {
6213 	if (!spec || !unused_tuple)
6214 		return -EINVAL;
6215 
6216 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6217 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6218 
6219 	if (!spec->ip4src)
6220 		*unused_tuple |= BIT(INNER_SRC_IP);
6221 
6222 	if (!spec->ip4dst)
6223 		*unused_tuple |= BIT(INNER_DST_IP);
6224 
6225 	if (!spec->tos)
6226 		*unused_tuple |= BIT(INNER_IP_TOS);
6227 
6228 	if (!spec->proto)
6229 		*unused_tuple |= BIT(INNER_IP_PROTO);
6230 
6231 	if (spec->l4_4_bytes)
6232 		return -EOPNOTSUPP;
6233 
6234 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6235 		return -EOPNOTSUPP;
6236 
6237 	return 0;
6238 }
6239 
6240 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6241 				       u32 *unused_tuple)
6242 {
6243 	if (!spec || !unused_tuple)
6244 		return -EINVAL;
6245 
6246 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6247 
6248 	/* check whether src/dst ip addresses are used */
6249 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6250 		*unused_tuple |= BIT(INNER_SRC_IP);
6251 
6252 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6253 		*unused_tuple |= BIT(INNER_DST_IP);
6254 
6255 	if (!spec->psrc)
6256 		*unused_tuple |= BIT(INNER_SRC_PORT);
6257 
6258 	if (!spec->pdst)
6259 		*unused_tuple |= BIT(INNER_DST_PORT);
6260 
6261 	if (!spec->tclass)
6262 		*unused_tuple |= BIT(INNER_IP_TOS);
6263 
6264 	return 0;
6265 }
6266 
6267 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6268 				    u32 *unused_tuple)
6269 {
6270 	if (!spec || !unused_tuple)
6271 		return -EINVAL;
6272 
6273 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6274 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6275 
6276 	/* check whether src/dst ip addresses are used */
6277 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6278 		*unused_tuple |= BIT(INNER_SRC_IP);
6279 
6280 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6281 		*unused_tuple |= BIT(INNER_DST_IP);
6282 
6283 	if (!spec->l4_proto)
6284 		*unused_tuple |= BIT(INNER_IP_PROTO);
6285 
6286 	if (!spec->tclass)
6287 		*unused_tuple |= BIT(INNER_IP_TOS);
6288 
6289 	if (spec->l4_4_bytes)
6290 		return -EOPNOTSUPP;
6291 
6292 	return 0;
6293 }
6294 
6295 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6296 {
6297 	if (!spec || !unused_tuple)
6298 		return -EINVAL;
6299 
6300 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6301 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6302 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6303 
6304 	if (is_zero_ether_addr(spec->h_source))
6305 		*unused_tuple |= BIT(INNER_SRC_MAC);
6306 
6307 	if (is_zero_ether_addr(spec->h_dest))
6308 		*unused_tuple |= BIT(INNER_DST_MAC);
6309 
6310 	if (!spec->h_proto)
6311 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6312 
6313 	return 0;
6314 }
6315 
6316 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6317 				    struct ethtool_rx_flow_spec *fs,
6318 				    u32 *unused_tuple)
6319 {
6320 	if (fs->flow_type & FLOW_EXT) {
6321 		if (fs->h_ext.vlan_etype) {
6322 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6323 			return -EOPNOTSUPP;
6324 		}
6325 
6326 		if (!fs->h_ext.vlan_tci)
6327 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6328 
6329 		if (fs->m_ext.vlan_tci &&
6330 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6331 			dev_err(&hdev->pdev->dev,
6332 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6333 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6334 			return -EINVAL;
6335 		}
6336 	} else {
6337 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6338 	}
6339 
6340 	if (fs->flow_type & FLOW_MAC_EXT) {
6341 		if (hdev->fd_cfg.fd_mode !=
6342 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6343 			dev_err(&hdev->pdev->dev,
6344 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6345 			return -EOPNOTSUPP;
6346 		}
6347 
6348 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6349 			*unused_tuple |= BIT(INNER_DST_MAC);
6350 		else
6351 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6352 	}
6353 
6354 	return 0;
6355 }
6356 
6357 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6358 				       struct hclge_fd_user_def_info *info)
6359 {
6360 	switch (flow_type) {
6361 	case ETHER_FLOW:
6362 		info->layer = HCLGE_FD_USER_DEF_L2;
6363 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6364 		break;
6365 	case IP_USER_FLOW:
6366 	case IPV6_USER_FLOW:
6367 		info->layer = HCLGE_FD_USER_DEF_L3;
6368 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6369 		break;
6370 	case TCP_V4_FLOW:
6371 	case UDP_V4_FLOW:
6372 	case TCP_V6_FLOW:
6373 	case UDP_V6_FLOW:
6374 		info->layer = HCLGE_FD_USER_DEF_L4;
6375 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6376 		break;
6377 	default:
6378 		return -EOPNOTSUPP;
6379 	}
6380 
6381 	return 0;
6382 }
6383 
6384 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6385 {
6386 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6387 }
6388 
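/* Extract the user-def data and offset from the ethtool FLOW_EXT fields and
 * validate them: user-def tuples must be enabled in the key config, the
 * offset must be in range and unmasked, and the flow type determines which
 * layer (L2/L3/L4) the user-def bytes are matched in.
 */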
6389 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6390 					 struct ethtool_rx_flow_spec *fs,
6391 					 u32 *unused_tuple,
6392 					 struct hclge_fd_user_def_info *info)
6393 {
6394 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6395 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6396 	u16 data, offset, data_mask, offset_mask;
6397 	int ret;
6398 
6399 	info->layer = HCLGE_FD_USER_DEF_NONE;
6400 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6401 
6402 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6403 		return 0;
6404 
6405 	/* user-def data from ethtool is a 64-bit value; bits 0~15 are used
6406 	 * for data, and bits 32~47 are used for offset.
6407 	 */
6408 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6409 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6410 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6411 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6412 
6413 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6414 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6415 		return -EOPNOTSUPP;
6416 	}
6417 
6418 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6419 		dev_err(&hdev->pdev->dev,
6420 			"user-def offset[%u] should be no more than %u\n",
6421 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6422 		return -EINVAL;
6423 	}
6424 
6425 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6426 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6427 		return -EINVAL;
6428 	}
6429 
6430 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6431 	if (ret) {
6432 		dev_err(&hdev->pdev->dev,
6433 			"unsupported flow type for user-def bytes, ret = %d\n",
6434 			ret);
6435 		return ret;
6436 	}
6437 
6438 	info->data = data;
6439 	info->data_mask = data_mask;
6440 	info->offset = offset;
6441 
6442 	return 0;
6443 }
6444 
6445 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6446 			       struct ethtool_rx_flow_spec *fs,
6447 			       u32 *unused_tuple,
6448 			       struct hclge_fd_user_def_info *info)
6449 {
6450 	u32 flow_type;
6451 	int ret;
6452 
6453 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6454 		dev_err(&hdev->pdev->dev,
6455 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6456 			fs->location,
6457 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6458 		return -EINVAL;
6459 	}
6460 
6461 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6462 	if (ret)
6463 		return ret;
6464 
6465 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6466 	switch (flow_type) {
6467 	case SCTP_V4_FLOW:
6468 	case TCP_V4_FLOW:
6469 	case UDP_V4_FLOW:
6470 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6471 						  unused_tuple);
6472 		break;
6473 	case IP_USER_FLOW:
6474 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6475 					       unused_tuple);
6476 		break;
6477 	case SCTP_V6_FLOW:
6478 	case TCP_V6_FLOW:
6479 	case UDP_V6_FLOW:
6480 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6481 						  unused_tuple);
6482 		break;
6483 	case IPV6_USER_FLOW:
6484 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6485 					       unused_tuple);
6486 		break;
6487 	case ETHER_FLOW:
6488 		if (hdev->fd_cfg.fd_mode !=
6489 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6490 			dev_err(&hdev->pdev->dev,
6491 				"ETHER_FLOW is not supported in current fd mode!\n");
6492 			return -EOPNOTSUPP;
6493 		}
6494 
6495 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6496 						 unused_tuple);
6497 		break;
6498 	default:
6499 		dev_err(&hdev->pdev->dev,
6500 			"unsupported protocol type, protocol type = %#x\n",
6501 			flow_type);
6502 		return -EOPNOTSUPP;
6503 	}
6504 
6505 	if (ret) {
6506 		dev_err(&hdev->pdev->dev,
6507 			"failed to check flow union tuple, ret = %d\n",
6508 			ret);
6509 		return ret;
6510 	}
6511 
6512 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6513 }
6514 
6515 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6516 				      struct ethtool_rx_flow_spec *fs,
6517 				      struct hclge_fd_rule *rule, u8 ip_proto)
6518 {
6519 	rule->tuples.src_ip[IPV4_INDEX] =
6520 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6521 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6522 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6523 
6524 	rule->tuples.dst_ip[IPV4_INDEX] =
6525 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6526 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6527 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6528 
6529 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6530 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6531 
6532 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6533 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6534 
6535 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6536 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6537 
6538 	rule->tuples.ether_proto = ETH_P_IP;
6539 	rule->tuples_mask.ether_proto = 0xFFFF;
6540 
6541 	rule->tuples.ip_proto = ip_proto;
6542 	rule->tuples_mask.ip_proto = 0xFF;
6543 }
6544 
6545 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6546 				   struct ethtool_rx_flow_spec *fs,
6547 				   struct hclge_fd_rule *rule)
6548 {
6549 	rule->tuples.src_ip[IPV4_INDEX] =
6550 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6551 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6552 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6553 
6554 	rule->tuples.dst_ip[IPV4_INDEX] =
6555 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6556 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6557 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6558 
6559 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6560 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6561 
6562 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6563 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6564 
6565 	rule->tuples.ether_proto = ETH_P_IP;
6566 	rule->tuples_mask.ether_proto = 0xFFFF;
6567 }
6568 
6569 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6570 				      struct ethtool_rx_flow_spec *fs,
6571 				      struct hclge_fd_rule *rule, u8 ip_proto)
6572 {
6573 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6574 			  IPV6_SIZE);
6575 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6576 			  IPV6_SIZE);
6577 
6578 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6579 			  IPV6_SIZE);
6580 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6581 			  IPV6_SIZE);
6582 
6583 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6584 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6585 
6586 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6587 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6588 
6589 	rule->tuples.ether_proto = ETH_P_IPV6;
6590 	rule->tuples_mask.ether_proto = 0xFFFF;
6591 
6592 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6593 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6594 
6595 	rule->tuples.ip_proto = ip_proto;
6596 	rule->tuples_mask.ip_proto = 0xFF;
6597 }
6598 
6599 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6600 				   struct ethtool_rx_flow_spec *fs,
6601 				   struct hclge_fd_rule *rule)
6602 {
6603 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6604 			  IPV6_SIZE);
6605 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6606 			  IPV6_SIZE);
6607 
6608 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6609 			  IPV6_SIZE);
6610 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6611 			  IPV6_SIZE);
6612 
6613 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6614 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6615 
6616 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6617 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6618 
6619 	rule->tuples.ether_proto = ETH_P_IPV6;
6620 	rule->tuples_mask.ether_proto = 0xFFFF;
6621 }
6622 
6623 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6624 				     struct ethtool_rx_flow_spec *fs,
6625 				     struct hclge_fd_rule *rule)
6626 {
6627 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6628 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6629 
6630 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6631 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6632 
6633 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6634 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6635 }
6636 
6637 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6638 					struct hclge_fd_rule *rule)
6639 {
6640 	switch (info->layer) {
6641 	case HCLGE_FD_USER_DEF_L2:
6642 		rule->tuples.l2_user_def = info->data;
6643 		rule->tuples_mask.l2_user_def = info->data_mask;
6644 		break;
6645 	case HCLGE_FD_USER_DEF_L3:
6646 		rule->tuples.l3_user_def = info->data;
6647 		rule->tuples_mask.l3_user_def = info->data_mask;
6648 		break;
6649 	case HCLGE_FD_USER_DEF_L4:
6650 		rule->tuples.l4_user_def = (u32)info->data << 16;
6651 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6652 		break;
6653 	default:
6654 		break;
6655 	}
6656 
6657 	rule->ep.user_def = *info;
6658 }
6659 
6660 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6661 			      struct ethtool_rx_flow_spec *fs,
6662 			      struct hclge_fd_rule *rule,
6663 			      struct hclge_fd_user_def_info *info)
6664 {
6665 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6666 
6667 	switch (flow_type) {
6668 	case SCTP_V4_FLOW:
6669 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6670 		break;
6671 	case TCP_V4_FLOW:
6672 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6673 		break;
6674 	case UDP_V4_FLOW:
6675 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6676 		break;
6677 	case IP_USER_FLOW:
6678 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6679 		break;
6680 	case SCTP_V6_FLOW:
6681 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6682 		break;
6683 	case TCP_V6_FLOW:
6684 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6685 		break;
6686 	case UDP_V6_FLOW:
6687 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6688 		break;
6689 	case IPV6_USER_FLOW:
6690 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6691 		break;
6692 	case ETHER_FLOW:
6693 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6694 		break;
6695 	default:
6696 		return -EOPNOTSUPP;
6697 	}
6698 
6699 	if (fs->flow_type & FLOW_EXT) {
6700 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6701 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6702 		hclge_fd_get_user_def_tuple(info, rule);
6703 	}
6704 
6705 	if (fs->flow_type & FLOW_MAC_EXT) {
6706 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6707 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6708 	}
6709 
6710 	return 0;
6711 }
6712 
6713 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6714 				struct hclge_fd_rule *rule)
6715 {
6716 	int ret;
6717 
6718 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6719 	if (ret)
6720 		return ret;
6721 
6722 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6723 }
6724 
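/* Common path for adding a flow director rule (from ethtool or tc flower):
 * reject rules that conflict with the currently active rule type, check the
 * user-def offset refcount, clear any aRFS rules, write the rule to
 * hardware, and finally record it in the software rule list.
 */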
6725 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6726 				     struct hclge_fd_rule *rule)
6727 {
6728 	int ret;
6729 
6730 	spin_lock_bh(&hdev->fd_rule_lock);
6731 
6732 	if (hdev->fd_active_type != rule->rule_type &&
6733 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6734 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6735 		dev_err(&hdev->pdev->dev,
6736 			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
6737 			rule->rule_type, hdev->fd_active_type);
6738 		spin_unlock_bh(&hdev->fd_rule_lock);
6739 		return -EINVAL;
6740 	}
6741 
6742 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6743 	if (ret)
6744 		goto out;
6745 
6746 	ret = hclge_clear_arfs_rules(hdev);
6747 	if (ret)
6748 		goto out;
6749 
6750 	ret = hclge_fd_config_rule(hdev, rule);
6751 	if (ret)
6752 		goto out;
6753 
6754 	rule->state = HCLGE_FD_ACTIVE;
6755 	hdev->fd_active_type = rule->rule_type;
6756 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6757 
6758 out:
6759 	spin_unlock_bh(&hdev->fd_rule_lock);
6760 	return ret;
6761 }
6762 
6763 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6764 {
6765 	struct hclge_vport *vport = hclge_get_vport(handle);
6766 	struct hclge_dev *hdev = vport->back;
6767 
6768 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6769 }
6770 
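/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie encodes a VF id (0 for the PF) and a queue index,
 * which are translated into the destination vport id and queue id.
 */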
6771 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6772 				      u16 *vport_id, u8 *action, u16 *queue_id)
6773 {
6774 	struct hclge_vport *vport = hdev->vport;
6775 
6776 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6777 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6778 	} else {
6779 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6780 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6781 		u16 tqps;
6782 
6783 		/* To keep consistent with the user's configuration, subtract 1
6784 		 * when printing 'vf': ethtool's vf id is the actual vf id plus 1.
6785 		 */
6786 		if (vf > hdev->num_req_vfs) {
6787 			dev_err(&hdev->pdev->dev,
6788 				"Error: vf id (%u) should be less than %u\n",
6789 				vf - 1, hdev->num_req_vfs);
6790 			return -EINVAL;
6791 		}
6792 
6793 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6794 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6795 
6796 		if (ring >= tqps) {
6797 			dev_err(&hdev->pdev->dev,
6798 				"Error: queue id (%u) > max tqp num (%u)\n",
6799 				ring, tqps - 1);
6800 			return -EINVAL;
6801 		}
6802 
6803 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6804 		*queue_id = ring;
6805 	}
6806 
6807 	return 0;
6808 }
6809 
6810 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6811 			      struct ethtool_rxnfc *cmd)
6812 {
6813 	struct hclge_vport *vport = hclge_get_vport(handle);
6814 	struct hclge_dev *hdev = vport->back;
6815 	struct hclge_fd_user_def_info info;
6816 	u16 dst_vport_id = 0, q_index = 0;
6817 	struct ethtool_rx_flow_spec *fs;
6818 	struct hclge_fd_rule *rule;
6819 	u32 unused = 0;
6820 	u8 action;
6821 	int ret;
6822 
6823 	if (!hnae3_dev_fd_supported(hdev)) {
6824 		dev_err(&hdev->pdev->dev,
6825 			"flow director is not supported\n");
6826 		return -EOPNOTSUPP;
6827 	}
6828 
6829 	if (!hdev->fd_en) {
6830 		dev_err(&hdev->pdev->dev,
6831 			"please enable flow director first\n");
6832 		return -EOPNOTSUPP;
6833 	}
6834 
6835 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6836 
6837 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6838 	if (ret)
6839 		return ret;
6840 
6841 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6842 					 &action, &q_index);
6843 	if (ret)
6844 		return ret;
6845 
6846 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6847 	if (!rule)
6848 		return -ENOMEM;
6849 
6850 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6851 	if (ret) {
6852 		kfree(rule);
6853 		return ret;
6854 	}
6855 
6856 	rule->flow_type = fs->flow_type;
6857 	rule->location = fs->location;
6858 	rule->unused_tuple = unused;
6859 	rule->vf_id = dst_vport_id;
6860 	rule->queue_id = q_index;
6861 	rule->action = action;
6862 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6863 
6864 	ret = hclge_add_fd_entry_common(hdev, rule);
6865 	if (ret)
6866 		kfree(rule);
6867 
6868 	return ret;
6869 }
6870 
6871 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6872 			      struct ethtool_rxnfc *cmd)
6873 {
6874 	struct hclge_vport *vport = hclge_get_vport(handle);
6875 	struct hclge_dev *hdev = vport->back;
6876 	struct ethtool_rx_flow_spec *fs;
6877 	int ret;
6878 
6879 	if (!hnae3_dev_fd_supported(hdev))
6880 		return -EOPNOTSUPP;
6881 
6882 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6883 
6884 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6885 		return -EINVAL;
6886 
6887 	spin_lock_bh(&hdev->fd_rule_lock);
6888 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6889 	    !test_bit(fs->location, hdev->fd_bmap)) {
6890 		dev_err(&hdev->pdev->dev,
6891 			"Delete failed, rule %u does not exist\n", fs->location);
6892 		spin_unlock_bh(&hdev->fd_rule_lock);
6893 		return -ENOENT;
6894 	}
6895 
6896 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6897 				   NULL, false);
6898 	if (ret)
6899 		goto out;
6900 
6901 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6902 
6903 out:
6904 	spin_unlock_bh(&hdev->fd_rule_lock);
6905 	return ret;
6906 }
6907 
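/* Remove all stage-1 flow director entries from hardware; when clear_list is
 * true, also free the software rule list and reset the rule counters and the
 * active rule type.
 */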
6908 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6909 					 bool clear_list)
6910 {
6911 	struct hclge_fd_rule *rule;
6912 	struct hlist_node *node;
6913 	u16 location;
6914 
6915 	if (!hnae3_dev_fd_supported(hdev))
6916 		return;
6917 
6918 	spin_lock_bh(&hdev->fd_rule_lock);
6919 
6920 	for_each_set_bit(location, hdev->fd_bmap,
6921 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6922 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6923 				     NULL, false);
6924 
6925 	if (clear_list) {
6926 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6927 					  rule_node) {
6928 			hlist_del(&rule->rule_node);
6929 			kfree(rule);
6930 		}
6931 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6932 		hdev->hclge_fd_rule_num = 0;
6933 		bitmap_zero(hdev->fd_bmap,
6934 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6935 	}
6936 
6937 	spin_unlock_bh(&hdev->fd_rule_lock);
6938 }
6939 
6940 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6941 {
6942 	hclge_clear_fd_rules_in_list(hdev, true);
6943 	hclge_fd_disable_user_def(hdev);
6944 }
6945 
6946 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6947 {
6948 	struct hclge_vport *vport = hclge_get_vport(handle);
6949 	struct hclge_dev *hdev = vport->back;
6950 	struct hclge_fd_rule *rule;
6951 	struct hlist_node *node;
6952 
6953 	/* Return 0 here, because the reset error handling checks this return
6954 	 * value. If an error is returned here, the reset process will
6955 	 * fail.
6956 	 */
6957 	if (!hnae3_dev_fd_supported(hdev))
6958 		return 0;
6959 
6960 	/* if fd is disabled, the rules should not be restored during reset */
6961 	if (!hdev->fd_en)
6962 		return 0;
6963 
6964 	spin_lock_bh(&hdev->fd_rule_lock);
6965 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6966 		if (rule->state == HCLGE_FD_ACTIVE)
6967 			rule->state = HCLGE_FD_TO_ADD;
6968 	}
6969 	spin_unlock_bh(&hdev->fd_rule_lock);
6970 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6971 
6972 	return 0;
6973 }
6974 
6975 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6976 				 struct ethtool_rxnfc *cmd)
6977 {
6978 	struct hclge_vport *vport = hclge_get_vport(handle);
6979 	struct hclge_dev *hdev = vport->back;
6980 
6981 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6982 		return -EOPNOTSUPP;
6983 
6984 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6985 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6986 
6987 	return 0;
6988 }
6989 
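/* The hclge_fd_get_*_info() helpers below convert the tuples of a stored fd
 * rule back into the corresponding ethtool flow spec; tuples marked as
 * unused are reported with a zero mask.
 */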
6990 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6991 				     struct ethtool_tcpip4_spec *spec,
6992 				     struct ethtool_tcpip4_spec *spec_mask)
6993 {
6994 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6995 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6996 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6997 
6998 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6999 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
7000 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
7001 
7002 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
7003 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7004 			0 : cpu_to_be16(rule->tuples_mask.src_port);
7005 
7006 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7007 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7008 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
7009 
7010 	spec->tos = rule->tuples.ip_tos;
7011 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7012 			0 : rule->tuples_mask.ip_tos;
7013 }
7014 
7015 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
7016 				  struct ethtool_usrip4_spec *spec,
7017 				  struct ethtool_usrip4_spec *spec_mask)
7018 {
7019 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
7020 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
7021 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
7022 
7023 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
7024 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
7025 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
7026 
7027 	spec->tos = rule->tuples.ip_tos;
7028 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7029 			0 : rule->tuples_mask.ip_tos;
7030 
7031 	spec->proto = rule->tuples.ip_proto;
7032 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7033 			0 : rule->tuples_mask.ip_proto;
7034 
7035 	spec->ip_ver = ETH_RX_NFC_IP4;
7036 }
7037 
7038 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
7039 				     struct ethtool_tcpip6_spec *spec,
7040 				     struct ethtool_tcpip6_spec *spec_mask)
7041 {
7042 	cpu_to_be32_array(spec->ip6src,
7043 			  rule->tuples.src_ip, IPV6_SIZE);
7044 	cpu_to_be32_array(spec->ip6dst,
7045 			  rule->tuples.dst_ip, IPV6_SIZE);
7046 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
7047 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7048 	else
7049 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
7050 				  IPV6_SIZE);
7051 
7052 	if (rule->unused_tuple & BIT(INNER_DST_IP))
7053 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7054 	else
7055 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
7056 				  IPV6_SIZE);
7057 
7058 	spec->tclass = rule->tuples.ip_tos;
7059 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7060 			0 : rule->tuples_mask.ip_tos;
7061 
7062 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
7063 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7064 			0 : cpu_to_be16(rule->tuples_mask.src_port);
7065 
7066 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7067 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7068 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
7069 }
7070 
7071 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
7072 				  struct ethtool_usrip6_spec *spec,
7073 				  struct ethtool_usrip6_spec *spec_mask)
7074 {
7075 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
7076 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
7077 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
7078 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7079 	else
7080 		cpu_to_be32_array(spec_mask->ip6src,
7081 				  rule->tuples_mask.src_ip, IPV6_SIZE);
7082 
7083 	if (rule->unused_tuple & BIT(INNER_DST_IP))
7084 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7085 	else
7086 		cpu_to_be32_array(spec_mask->ip6dst,
7087 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
7088 
7089 	spec->tclass = rule->tuples.ip_tos;
7090 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7091 			0 : rule->tuples_mask.ip_tos;
7092 
7093 	spec->l4_proto = rule->tuples.ip_proto;
7094 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7095 			0 : rule->tuples_mask.ip_proto;
7096 }
7097 
7098 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
7099 				    struct ethhdr *spec,
7100 				    struct ethhdr *spec_mask)
7101 {
7102 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
7103 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
7104 
7105 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
7106 		eth_zero_addr(spec_mask->h_source);
7107 	else
7108 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
7109 
7110 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
7111 		eth_zero_addr(spec_mask->h_dest);
7112 	else
7113 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
7114 
7115 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
7116 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
7117 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
7118 }
7119 
7120 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
7121 				       struct hclge_fd_rule *rule)
7122 {
7123 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
7124 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
7125 		fs->h_ext.data[0] = 0;
7126 		fs->h_ext.data[1] = 0;
7127 		fs->m_ext.data[0] = 0;
7128 		fs->m_ext.data[1] = 0;
7129 	} else {
7130 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7131 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7132 		fs->m_ext.data[0] =
7133 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7134 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7135 	}
7136 }
7137 
7138 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7139 				  struct hclge_fd_rule *rule)
7140 {
7141 	if (fs->flow_type & FLOW_EXT) {
7142 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7143 		fs->m_ext.vlan_tci =
7144 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7145 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7146 
7147 		hclge_fd_get_user_def_info(fs, rule);
7148 	}
7149 
7150 	if (fs->flow_type & FLOW_MAC_EXT) {
7151 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7152 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
7153 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
7154 		else
7155 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
7156 					rule->tuples_mask.dst_mac);
7157 	}
7158 }
7159 
7160 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7161 				  struct ethtool_rxnfc *cmd)
7162 {
7163 	struct hclge_vport *vport = hclge_get_vport(handle);
7164 	struct hclge_fd_rule *rule = NULL;
7165 	struct hclge_dev *hdev = vport->back;
7166 	struct ethtool_rx_flow_spec *fs;
7167 	struct hlist_node *node2;
7168 
7169 	if (!hnae3_dev_fd_supported(hdev))
7170 		return -EOPNOTSUPP;
7171 
7172 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7173 
7174 	spin_lock_bh(&hdev->fd_rule_lock);
7175 
7176 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7177 		if (rule->location >= fs->location)
7178 			break;
7179 	}
7180 
7181 	if (!rule || fs->location != rule->location) {
7182 		spin_unlock_bh(&hdev->fd_rule_lock);
7183 
7184 		return -ENOENT;
7185 	}
7186 
7187 	fs->flow_type = rule->flow_type;
7188 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7189 	case SCTP_V4_FLOW:
7190 	case TCP_V4_FLOW:
7191 	case UDP_V4_FLOW:
7192 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7193 					 &fs->m_u.tcp_ip4_spec);
7194 		break;
7195 	case IP_USER_FLOW:
7196 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7197 				      &fs->m_u.usr_ip4_spec);
7198 		break;
7199 	case SCTP_V6_FLOW:
7200 	case TCP_V6_FLOW:
7201 	case UDP_V6_FLOW:
7202 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7203 					 &fs->m_u.tcp_ip6_spec);
7204 		break;
7205 	case IPV6_USER_FLOW:
7206 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7207 				      &fs->m_u.usr_ip6_spec);
7208 		break;
7209 	/* The flow type of the fd rule has been checked before it was added
7210 	 * to the rule list. As all other flow types have been handled above,
7211 	 * the default case must be ETHER_FLOW.
7212 	 */
7213 	default:
7214 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7215 					&fs->m_u.ether_spec);
7216 		break;
7217 	}
7218 
7219 	hclge_fd_get_ext_info(fs, rule);
7220 
7221 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7222 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7223 	} else {
7224 		u64 vf_id;
7225 
7226 		fs->ring_cookie = rule->queue_id;
7227 		vf_id = rule->vf_id;
7228 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7229 		fs->ring_cookie |= vf_id;
7230 	}
7231 
7232 	spin_unlock_bh(&hdev->fd_rule_lock);
7233 
7234 	return 0;
7235 }
7236 
7237 static int hclge_get_all_rules(struct hnae3_handle *handle,
7238 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7239 {
7240 	struct hclge_vport *vport = hclge_get_vport(handle);
7241 	struct hclge_dev *hdev = vport->back;
7242 	struct hclge_fd_rule *rule;
7243 	struct hlist_node *node2;
7244 	int cnt = 0;
7245 
7246 	if (!hnae3_dev_fd_supported(hdev))
7247 		return -EOPNOTSUPP;
7248 
7249 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7250 
7251 	spin_lock_bh(&hdev->fd_rule_lock);
7252 	hlist_for_each_entry_safe(rule, node2,
7253 				  &hdev->fd_rule_list, rule_node) {
7254 		if (cnt == cmd->rule_cnt) {
7255 			spin_unlock_bh(&hdev->fd_rule_lock);
7256 			return -EMSGSIZE;
7257 		}
7258 
7259 		if (rule->state == HCLGE_FD_TO_DEL)
7260 			continue;
7261 
7262 		rule_locs[cnt] = rule->location;
7263 		cnt++;
7264 	}
7265 
7266 	spin_unlock_bh(&hdev->fd_rule_lock);
7267 
7268 	cmd->rule_cnt = cnt;
7269 
7270 	return 0;
7271 }
7272 
7273 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7274 				     struct hclge_fd_rule_tuples *tuples)
7275 {
7276 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7277 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7278 
7279 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7280 	tuples->ip_proto = fkeys->basic.ip_proto;
7281 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7282 
7283 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7284 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7285 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7286 	} else {
7287 		int i;
7288 
7289 		for (i = 0; i < IPV6_SIZE; i++) {
7290 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7291 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7292 		}
7293 	}
7294 }
7295 
7296 /* traverse all rules, check whether an existing rule has the same tuples */
7297 static struct hclge_fd_rule *
7298 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7299 			  const struct hclge_fd_rule_tuples *tuples)
7300 {
7301 	struct hclge_fd_rule *rule = NULL;
7302 	struct hlist_node *node;
7303 
7304 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7305 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7306 			return rule;
7307 	}
7308 
7309 	return NULL;
7310 }
7311 
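/* Build an aRFS rule from the parsed flow tuples: MAC, VLAN, TOS and source
 * port are marked as unused, and the flow type is derived from the L3/L4
 * protocol of the flow.
 */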
7312 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7313 				     struct hclge_fd_rule *rule)
7314 {
7315 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7316 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7317 			     BIT(INNER_SRC_PORT);
7318 	rule->action = 0;
7319 	rule->vf_id = 0;
7320 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7321 	rule->state = HCLGE_FD_TO_ADD;
7322 	if (tuples->ether_proto == ETH_P_IP) {
7323 		if (tuples->ip_proto == IPPROTO_TCP)
7324 			rule->flow_type = TCP_V4_FLOW;
7325 		else
7326 			rule->flow_type = UDP_V4_FLOW;
7327 	} else {
7328 		if (tuples->ip_proto == IPPROTO_TCP)
7329 			rule->flow_type = TCP_V6_FLOW;
7330 		else
7331 			rule->flow_type = UDP_V6_FLOW;
7332 	}
7333 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7334 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7335 }
7336 
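/* Add or update an aRFS-managed fd rule for the given flow. New rules and
 * queue changes are marked HCLGE_FD_TO_ADD and written to hardware later by
 * hclge_sync_fd_table().
 */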
7337 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7338 				      u16 flow_id, struct flow_keys *fkeys)
7339 {
7340 	struct hclge_vport *vport = hclge_get_vport(handle);
7341 	struct hclge_fd_rule_tuples new_tuples = {};
7342 	struct hclge_dev *hdev = vport->back;
7343 	struct hclge_fd_rule *rule;
7344 	u16 bit_id;
7345 
7346 	if (!hnae3_dev_fd_supported(hdev))
7347 		return -EOPNOTSUPP;
7348 
7349 	/* when fd rules added by the user already exist,
7350 	 * arfs should not work
7351 	 */
7352 	spin_lock_bh(&hdev->fd_rule_lock);
7353 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7354 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7355 		spin_unlock_bh(&hdev->fd_rule_lock);
7356 		return -EOPNOTSUPP;
7357 	}
7358 
7359 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7360 
7361 	/* check whether a flow director filter exists for this flow:
7362 	 * if not, create a new filter for it;
7363 	 * if a filter exists with a different queue id, modify the filter;
7364 	 * if a filter exists with the same queue id, do nothing
7365 	 */
7366 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7367 	if (!rule) {
7368 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7369 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7370 			spin_unlock_bh(&hdev->fd_rule_lock);
7371 			return -ENOSPC;
7372 		}
7373 
7374 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7375 		if (!rule) {
7376 			spin_unlock_bh(&hdev->fd_rule_lock);
7377 			return -ENOMEM;
7378 		}
7379 
7380 		rule->location = bit_id;
7381 		rule->arfs.flow_id = flow_id;
7382 		rule->queue_id = queue_id;
7383 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7384 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7385 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7386 	} else if (rule->queue_id != queue_id) {
7387 		rule->queue_id = queue_id;
7388 		rule->state = HCLGE_FD_TO_ADD;
7389 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7390 		hclge_task_schedule(hdev, 0);
7391 	}
7392 	spin_unlock_bh(&hdev->fd_rule_lock);
7393 	return rule->location;
7394 }
7395 
7396 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7397 {
7398 #ifdef CONFIG_RFS_ACCEL
7399 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7400 	struct hclge_fd_rule *rule;
7401 	struct hlist_node *node;
7402 
7403 	spin_lock_bh(&hdev->fd_rule_lock);
7404 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7405 		spin_unlock_bh(&hdev->fd_rule_lock);
7406 		return;
7407 	}
7408 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7409 		if (rule->state != HCLGE_FD_ACTIVE)
7410 			continue;
7411 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7412 					rule->arfs.flow_id, rule->location)) {
7413 			rule->state = HCLGE_FD_TO_DEL;
7414 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7415 		}
7416 	}
7417 	spin_unlock_bh(&hdev->fd_rule_lock);
7418 #endif
7419 }
7420 
7421 /* must be called with fd_rule_lock held */
7422 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7423 {
7424 #ifdef CONFIG_RFS_ACCEL
7425 	struct hclge_fd_rule *rule;
7426 	struct hlist_node *node;
7427 	int ret;
7428 
7429 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7430 		return 0;
7431 
7432 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7433 		switch (rule->state) {
7434 		case HCLGE_FD_TO_DEL:
7435 		case HCLGE_FD_ACTIVE:
7436 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7437 						   rule->location, NULL, false);
7438 			if (ret)
7439 				return ret;
7440 			fallthrough;
7441 		case HCLGE_FD_TO_ADD:
7442 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7443 			hlist_del(&rule->rule_node);
7444 			kfree(rule);
7445 			break;
7446 		default:
7447 			break;
7448 		}
7449 	}
7450 	hclge_sync_fd_state(hdev);
7451 
7452 #endif
7453 	return 0;
7454 }
7455 
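/* The hclge_get_cls_key_*() helpers translate the matched keys of a tc
 * flower rule into fd rule tuples; keys that are not present in the match
 * are marked as unused tuples.
 */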
7456 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7457 				    struct hclge_fd_rule *rule)
7458 {
7459 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7460 		struct flow_match_basic match;
7461 		u16 ethtype_key, ethtype_mask;
7462 
7463 		flow_rule_match_basic(flow, &match);
7464 		ethtype_key = ntohs(match.key->n_proto);
7465 		ethtype_mask = ntohs(match.mask->n_proto);
7466 
7467 		if (ethtype_key == ETH_P_ALL) {
7468 			ethtype_key = 0;
7469 			ethtype_mask = 0;
7470 		}
7471 		rule->tuples.ether_proto = ethtype_key;
7472 		rule->tuples_mask.ether_proto = ethtype_mask;
7473 		rule->tuples.ip_proto = match.key->ip_proto;
7474 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7475 	} else {
7476 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7477 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7478 	}
7479 }
7480 
7481 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7482 				  struct hclge_fd_rule *rule)
7483 {
7484 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7485 		struct flow_match_eth_addrs match;
7486 
7487 		flow_rule_match_eth_addrs(flow, &match);
7488 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7489 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7490 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7491 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7492 	} else {
7493 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7494 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7495 	}
7496 }
7497 
7498 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7499 				   struct hclge_fd_rule *rule)
7500 {
7501 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7502 		struct flow_match_vlan match;
7503 
7504 		flow_rule_match_vlan(flow, &match);
7505 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7506 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7507 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7508 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7509 	} else {
7510 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7511 	}
7512 }
7513 
7514 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7515 				 struct hclge_fd_rule *rule)
7516 {
7517 	u16 addr_type = 0;
7518 
7519 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7520 		struct flow_match_control match;
7521 
7522 		flow_rule_match_control(flow, &match);
7523 		addr_type = match.key->addr_type;
7524 	}
7525 
7526 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7527 		struct flow_match_ipv4_addrs match;
7528 
7529 		flow_rule_match_ipv4_addrs(flow, &match);
7530 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7531 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7532 						be32_to_cpu(match.mask->src);
7533 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7534 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7535 						be32_to_cpu(match.mask->dst);
7536 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7537 		struct flow_match_ipv6_addrs match;
7538 
7539 		flow_rule_match_ipv6_addrs(flow, &match);
7540 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7541 				  IPV6_SIZE);
7542 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7543 				  match.mask->src.s6_addr32, IPV6_SIZE);
7544 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7545 				  IPV6_SIZE);
7546 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7547 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7548 	} else {
7549 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7550 		rule->unused_tuple |= BIT(INNER_DST_IP);
7551 	}
7552 }
7553 
7554 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7555 				   struct hclge_fd_rule *rule)
7556 {
7557 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7558 		struct flow_match_ports match;
7559 
7560 		flow_rule_match_ports(flow, &match);
7561 
7562 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7563 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7564 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7565 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7566 	} else {
7567 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7568 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7569 	}
7570 }
7571 
7572 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7573 				  struct flow_cls_offload *cls_flower,
7574 				  struct hclge_fd_rule *rule)
7575 {
7576 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7577 	struct flow_dissector *dissector = flow->match.dissector;
7578 
7579 	if (dissector->used_keys &
7580 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7581 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7582 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7583 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7584 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7585 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7586 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7587 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7588 			dissector->used_keys);
7589 		return -EOPNOTSUPP;
7590 	}
7591 
7592 	hclge_get_cls_key_basic(flow, rule);
7593 	hclge_get_cls_key_mac(flow, rule);
7594 	hclge_get_cls_key_vlan(flow, rule);
7595 	hclge_get_cls_key_ip(flow, rule);
7596 	hclge_get_cls_key_port(flow, rule);
7597 
7598 	return 0;
7599 }
7600 
7601 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7602 				  struct flow_cls_offload *cls_flower, int tc)
7603 {
7604 	u32 prio = cls_flower->common.prio;
7605 
7606 	if (tc < 0 || tc > hdev->tc_max) {
7607 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7608 		return -EINVAL;
7609 	}
7610 
7611 	if (prio == 0 ||
7612 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7613 		dev_err(&hdev->pdev->dev,
7614 			"prio %u should be in range[1, %u]\n",
7615 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7616 		return -EINVAL;
7617 	}
7618 
7619 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7620 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7621 		return -EINVAL;
7622 	}
7623 	return 0;
7624 }
7625 
7626 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7627 				struct flow_cls_offload *cls_flower,
7628 				int tc)
7629 {
7630 	struct hclge_vport *vport = hclge_get_vport(handle);
7631 	struct hclge_dev *hdev = vport->back;
7632 	struct hclge_fd_rule *rule;
7633 	int ret;
7634 
7635 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7636 	if (ret) {
7637 		dev_err(&hdev->pdev->dev,
7638 			"failed to check cls flower params, ret = %d\n", ret);
7639 		return ret;
7640 	}
7641 
7642 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7643 	if (!rule)
7644 		return -ENOMEM;
7645 
7646 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7647 	if (ret) {
7648 		kfree(rule);
7649 		return ret;
7650 	}
7651 
7652 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7653 	rule->cls_flower.tc = tc;
7654 	rule->location = cls_flower->common.prio - 1;
7655 	rule->vf_id = 0;
7656 	rule->cls_flower.cookie = cls_flower->cookie;
7657 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7658 
7659 	ret = hclge_add_fd_entry_common(hdev, rule);
7660 	if (ret)
7661 		kfree(rule);
7662 
7663 	return ret;
7664 }
7665 
7666 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7667 						   unsigned long cookie)
7668 {
7669 	struct hclge_fd_rule *rule;
7670 	struct hlist_node *node;
7671 
7672 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7673 		if (rule->cls_flower.cookie == cookie)
7674 			return rule;
7675 	}
7676 
7677 	return NULL;
7678 }
7679 
7680 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7681 				struct flow_cls_offload *cls_flower)
7682 {
7683 	struct hclge_vport *vport = hclge_get_vport(handle);
7684 	struct hclge_dev *hdev = vport->back;
7685 	struct hclge_fd_rule *rule;
7686 	int ret;
7687 
7688 	spin_lock_bh(&hdev->fd_rule_lock);
7689 
7690 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7691 	if (!rule) {
7692 		spin_unlock_bh(&hdev->fd_rule_lock);
7693 		return -EINVAL;
7694 	}
7695 
7696 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7697 				   NULL, false);
7698 	if (ret) {
7699 		spin_unlock_bh(&hdev->fd_rule_lock);
7700 		return ret;
7701 	}
7702 
7703 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7704 	spin_unlock_bh(&hdev->fd_rule_lock);
7705 
7706 	return 0;
7707 }
7708 
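/* Flush pending fd rule changes to hardware: program rules in TO_ADD state
 * and remove rules in TO_DEL state. On failure the FD_TBL_CHANGED flag is
 * set again so the remaining work is retried later.
 */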
7709 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7710 {
7711 	struct hclge_fd_rule *rule;
7712 	struct hlist_node *node;
7713 	int ret = 0;
7714 
7715 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7716 		return;
7717 
7718 	spin_lock_bh(&hdev->fd_rule_lock);
7719 
7720 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7721 		switch (rule->state) {
7722 		case HCLGE_FD_TO_ADD:
7723 			ret = hclge_fd_config_rule(hdev, rule);
7724 			if (ret)
7725 				goto out;
7726 			rule->state = HCLGE_FD_ACTIVE;
7727 			break;
7728 		case HCLGE_FD_TO_DEL:
7729 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7730 						   rule->location, NULL, false);
7731 			if (ret)
7732 				goto out;
7733 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7734 			hclge_fd_free_node(hdev, rule);
7735 			break;
7736 		default:
7737 			break;
7738 		}
7739 	}
7740 
7741 out:
7742 	if (ret)
7743 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7744 
7745 	spin_unlock_bh(&hdev->fd_rule_lock);
7746 }
7747 
7748 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7749 {
7750 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7751 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7752 
7753 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7754 	}
7755 
7756 	hclge_sync_fd_user_def_cfg(hdev, false);
7757 
7758 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7759 }
7760 
7761 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7762 {
7763 	struct hclge_vport *vport = hclge_get_vport(handle);
7764 	struct hclge_dev *hdev = vport->back;
7765 
7766 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7767 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7768 }
7769 
7770 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7771 {
7772 	struct hclge_vport *vport = hclge_get_vport(handle);
7773 	struct hclge_dev *hdev = vport->back;
7774 
7775 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7776 }
7777 
7778 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7779 {
7780 	struct hclge_vport *vport = hclge_get_vport(handle);
7781 	struct hclge_dev *hdev = vport->back;
7782 
7783 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7784 }
7785 
7786 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7787 {
7788 	struct hclge_vport *vport = hclge_get_vport(handle);
7789 	struct hclge_dev *hdev = vport->back;
7790 
7791 	return hdev->rst_stats.hw_reset_done_cnt;
7792 }
7793 
7794 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7795 {
7796 	struct hclge_vport *vport = hclge_get_vport(handle);
7797 	struct hclge_dev *hdev = vport->back;
7798 
7799 	hdev->fd_en = enable;
7800 
7801 	if (!enable)
7802 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7803 	else
7804 		hclge_restore_fd_entries(handle);
7805 
7806 	hclge_task_schedule(hdev, 0);
7807 }
7808 
7809 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7810 {
7811 	struct hclge_desc desc;
7812 	struct hclge_config_mac_mode_cmd *req =
7813 		(struct hclge_config_mac_mode_cmd *)desc.data;
7814 	u32 loop_en = 0;
7815 	int ret;
7816 
7817 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7818 
7819 	if (enable) {
7820 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7821 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7822 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7823 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7824 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7825 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7826 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7827 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7828 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7829 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7830 	}
7831 
7832 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7833 
7834 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7835 	if (ret)
7836 		dev_err(&hdev->pdev->dev,
7837 			"mac enable fail, ret = %d.\n", ret);
7838 }
7839 
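/* Update the mac vlan switch parameter of the given function with a
 * read-modify-write sequence: the current value is read first, then written
 * back with switch_param applied under param_mask.
 */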
7840 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7841 				     u8 switch_param, u8 param_mask)
7842 {
7843 	struct hclge_mac_vlan_switch_cmd *req;
7844 	struct hclge_desc desc;
7845 	u32 func_id;
7846 	int ret;
7847 
7848 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7849 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7850 
7851 	/* read current config parameter */
7852 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7853 				   true);
7854 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7855 	req->func_id = cpu_to_le32(func_id);
7856 
7857 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7858 	if (ret) {
7859 		dev_err(&hdev->pdev->dev,
7860 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7861 		return ret;
7862 	}
7863 
7864 	/* modify and write new config parameter */
7865 	hclge_cmd_reuse_desc(&desc, false);
7866 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7867 	req->param_mask = param_mask;
7868 
7869 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7870 	if (ret)
7871 		dev_err(&hdev->pdev->dev,
7872 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7873 	return ret;
7874 }
7875 
7876 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7877 				       int link_ret)
7878 {
7879 #define HCLGE_PHY_LINK_STATUS_NUM  200
7880 
7881 	struct phy_device *phydev = hdev->hw.mac.phydev;
7882 	int i = 0;
7883 	int ret;
7884 
7885 	do {
7886 		ret = phy_read_status(phydev);
7887 		if (ret) {
7888 			dev_err(&hdev->pdev->dev,
7889 				"phy update link status fail, ret = %d\n", ret);
7890 			return;
7891 		}
7892 
7893 		if (phydev->link == link_ret)
7894 			break;
7895 
7896 		msleep(HCLGE_LINK_STATUS_MS);
7897 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7898 }
7899 
7900 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7901 {
7902 #define HCLGE_MAC_LINK_STATUS_NUM  100
7903 
7904 	int link_status;
7905 	int i = 0;
7906 	int ret;
7907 
7908 	do {
7909 		ret = hclge_get_mac_link_status(hdev, &link_status);
7910 		if (ret)
7911 			return ret;
7912 		if (link_status == link_ret)
7913 			return 0;
7914 
7915 		msleep(HCLGE_LINK_STATUS_MS);
7916 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7917 	return -EBUSY;
7918 }
7919 
7920 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7921 					  bool is_phy)
7922 {
7923 	int link_ret;
7924 
7925 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7926 
7927 	if (is_phy)
7928 		hclge_phy_link_status_wait(hdev, link_ret);
7929 
7930 	return hclge_mac_link_status_wait(hdev, link_ret);
7931 }
7932 
7933 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7934 {
7935 	struct hclge_config_mac_mode_cmd *req;
7936 	struct hclge_desc desc;
7937 	u32 loop_en;
7938 	int ret;
7939 
7940 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7941 	/* 1. Read out the MAC mode config first */
7942 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7943 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7944 	if (ret) {
7945 		dev_err(&hdev->pdev->dev,
7946 			"mac loopback get fail, ret = %d.\n", ret);
7947 		return ret;
7948 	}
7949 
7950 	/* 2. Then set up the loopback flag */
7951 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7952 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7953 
7954 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7955 
7956 	/* 3. Configure the mac work mode with the loopback flag
7957 	 * and its original configuration parameters
7958 	 */
7959 	hclge_cmd_reuse_desc(&desc, false);
7960 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7961 	if (ret)
7962 		dev_err(&hdev->pdev->dev,
7963 			"mac loopback set fail, ret = %d.\n", ret);
7964 	return ret;
7965 }
7966 
7967 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7968 				     enum hnae3_loop loop_mode)
7969 {
7970 #define HCLGE_COMMON_LB_RETRY_MS	10
7971 #define HCLGE_COMMON_LB_RETRY_NUM	100
7972 
7973 	struct hclge_common_lb_cmd *req;
7974 	struct hclge_desc desc;
7975 	int ret, i = 0;
7976 	u8 loop_mode_b;
7977 
7978 	req = (struct hclge_common_lb_cmd *)desc.data;
7979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7980 
7981 	switch (loop_mode) {
7982 	case HNAE3_LOOP_SERIAL_SERDES:
7983 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7984 		break;
7985 	case HNAE3_LOOP_PARALLEL_SERDES:
7986 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7987 		break;
7988 	case HNAE3_LOOP_PHY:
7989 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7990 		break;
7991 	default:
7992 		dev_err(&hdev->pdev->dev,
7993 			"unsupported common loopback mode %d\n", loop_mode);
7994 		return -ENOTSUPP;
7995 	}
7996 
7997 	if (en) {
7998 		req->enable = loop_mode_b;
7999 		req->mask = loop_mode_b;
8000 	} else {
8001 		req->mask = loop_mode_b;
8002 	}
8003 
8004 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8005 	if (ret) {
8006 		dev_err(&hdev->pdev->dev,
8007 			"common loopback set fail, ret = %d\n", ret);
8008 		return ret;
8009 	}
8010 
8011 	do {
8012 		msleep(HCLGE_COMMON_LB_RETRY_MS);
8013 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
8014 					   true);
8015 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8016 		if (ret) {
8017 			dev_err(&hdev->pdev->dev,
8018 				"common loopback get fail, ret = %d\n", ret);
8019 			return ret;
8020 		}
8021 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
8022 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
8023 
8024 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
8025 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
8026 		return -EBUSY;
8027 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
8028 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
8029 		return -EIO;
8030 	}
8031 	return ret;
8032 }
8033 
8034 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
8035 				     enum hnae3_loop loop_mode)
8036 {
8037 	int ret;
8038 
8039 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
8040 	if (ret)
8041 		return ret;
8042 
8043 	hclge_cfg_mac_mode(hdev, en);
8044 
8045 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
8046 	if (ret)
8047 		dev_err(&hdev->pdev->dev,
8048 			"serdes loopback config mac mode timeout\n");
8049 
8050 	return ret;
8051 }
8052 
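/* Suspend the phy if it is not already suspended, then resume it so it is
 * in a known state before enabling phy loopback.
 */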
8053 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
8054 				     struct phy_device *phydev)
8055 {
8056 	int ret;
8057 
8058 	if (!phydev->suspended) {
8059 		ret = phy_suspend(phydev);
8060 		if (ret)
8061 			return ret;
8062 	}
8063 
8064 	ret = phy_resume(phydev);
8065 	if (ret)
8066 		return ret;
8067 
8068 	return phy_loopback(phydev, true);
8069 }
8070 
8071 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
8072 				      struct phy_device *phydev)
8073 {
8074 	int ret;
8075 
8076 	ret = phy_loopback(phydev, false);
8077 	if (ret)
8078 		return ret;
8079 
8080 	return phy_suspend(phydev);
8081 }
8082 
8083 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
8084 {
8085 	struct phy_device *phydev = hdev->hw.mac.phydev;
8086 	int ret;
8087 
8088 	if (!phydev) {
8089 		if (hnae3_dev_phy_imp_supported(hdev))
8090 			return hclge_set_common_loopback(hdev, en,
8091 							 HNAE3_LOOP_PHY);
8092 		return -ENOTSUPP;
8093 	}
8094 
8095 	if (en)
8096 		ret = hclge_enable_phy_loopback(hdev, phydev);
8097 	else
8098 		ret = hclge_disable_phy_loopback(hdev, phydev);
8099 	if (ret) {
8100 		dev_err(&hdev->pdev->dev,
8101 			"set phy loopback fail, ret = %d\n", ret);
8102 		return ret;
8103 	}
8104 
8105 	hclge_cfg_mac_mode(hdev, en);
8106 
8107 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
8108 	if (ret)
8109 		dev_err(&hdev->pdev->dev,
8110 			"phy loopback config mac mode timeout\n");
8111 
8112 	return ret;
8113 }
8114 
8115 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
8116 				     u16 stream_id, bool enable)
8117 {
8118 	struct hclge_desc desc;
8119 	struct hclge_cfg_com_tqp_queue_cmd *req =
8120 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
8121 
8122 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
8123 	req->tqp_id = cpu_to_le16(tqp_id);
8124 	req->stream_id = cpu_to_le16(stream_id);
8125 	if (enable)
8126 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
8127 
8128 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8129 }
8130 
8131 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8132 {
8133 	struct hclge_vport *vport = hclge_get_vport(handle);
8134 	struct hclge_dev *hdev = vport->back;
8135 	int ret;
8136 	u16 i;
8137 
8138 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
8139 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8140 		if (ret)
8141 			return ret;
8142 	}
8143 	return 0;
8144 }
8145 
8146 static int hclge_set_loopback(struct hnae3_handle *handle,
8147 			      enum hnae3_loop loop_mode, bool en)
8148 {
8149 	struct hclge_vport *vport = hclge_get_vport(handle);
8150 	struct hclge_dev *hdev = vport->back;
8151 	int ret;
8152 
8153 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8154 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8155 	 * the same, the packets are looped back in the SSU. If SSU loopback
8156 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8157 	 */
8158 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8159 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8160 
8161 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8162 						HCLGE_SWITCH_ALW_LPBK_MASK);
8163 		if (ret)
8164 			return ret;
8165 	}
8166 
8167 	switch (loop_mode) {
8168 	case HNAE3_LOOP_APP:
8169 		ret = hclge_set_app_loopback(hdev, en);
8170 		break;
8171 	case HNAE3_LOOP_SERIAL_SERDES:
8172 	case HNAE3_LOOP_PARALLEL_SERDES:
8173 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8174 		break;
8175 	case HNAE3_LOOP_PHY:
8176 		ret = hclge_set_phy_loopback(hdev, en);
8177 		break;
8178 	default:
8179 		ret = -ENOTSUPP;
8180 		dev_err(&hdev->pdev->dev,
8181 			"loop_mode %d is not supported\n", loop_mode);
8182 		break;
8183 	}
8184 
8185 	if (ret)
8186 		return ret;
8187 
8188 	ret = hclge_tqp_enable(handle, en);
8189 	if (ret)
8190 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8191 			en ? "enable" : "disable", ret);
8192 
8193 	return ret;
8194 }
8195 
8196 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8197 {
8198 	int ret;
8199 
8200 	ret = hclge_set_app_loopback(hdev, false);
8201 	if (ret)
8202 		return ret;
8203 
8204 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8205 	if (ret)
8206 		return ret;
8207 
8208 	return hclge_cfg_common_loopback(hdev, false,
8209 					 HNAE3_LOOP_PARALLEL_SERDES);
8210 }
8211 
8212 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8213 {
8214 	struct hclge_vport *vport = hclge_get_vport(handle);
8215 	struct hnae3_knic_private_info *kinfo;
8216 	struct hnae3_queue *queue;
8217 	struct hclge_tqp *tqp;
8218 	int i;
8219 
8220 	kinfo = &vport->nic.kinfo;
8221 	for (i = 0; i < kinfo->num_tqps; i++) {
8222 		queue = handle->kinfo.tqp[i];
8223 		tqp = container_of(queue, struct hclge_tqp, q);
8224 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8225 	}
8226 }
8227 
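/* Busy-wait until any in-flight link status update in the service task has
 * finished, or until the timeout expires.
 */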
8228 static void hclge_flush_link_update(struct hclge_dev *hdev)
8229 {
8230 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8231 
8232 	unsigned long last = hdev->serv_processed_cnt;
8233 	int i = 0;
8234 
8235 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8236 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8237 	       last == hdev->serv_processed_cnt)
8238 		usleep_range(1, 1);
8239 }
8240 
8241 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8242 {
8243 	struct hclge_vport *vport = hclge_get_vport(handle);
8244 	struct hclge_dev *hdev = vport->back;
8245 
8246 	if (enable) {
8247 		hclge_task_schedule(hdev, 0);
8248 	} else {
8249 		/* Set the DOWN flag here to disable link updating */
8250 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8251 
8252 		/* flush memory to make sure DOWN is seen by service task */
8253 		smp_mb__before_atomic();
8254 		hclge_flush_link_update(hdev);
8255 	}
8256 }
8257 
8258 static int hclge_ae_start(struct hnae3_handle *handle)
8259 {
8260 	struct hclge_vport *vport = hclge_get_vport(handle);
8261 	struct hclge_dev *hdev = vport->back;
8262 
8263 	/* mac enable */
8264 	hclge_cfg_mac_mode(hdev, true);
8265 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8266 	hdev->hw.mac.link = 0;
8267 
8268 	/* reset tqp stats */
8269 	hclge_reset_tqp_stats(handle);
8270 
8271 	hclge_mac_start_phy(hdev);
8272 
8273 	return 0;
8274 }
8275 
8276 static void hclge_ae_stop(struct hnae3_handle *handle)
8277 {
8278 	struct hclge_vport *vport = hclge_get_vport(handle);
8279 	struct hclge_dev *hdev = vport->back;
8280 
8281 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8282 	spin_lock_bh(&hdev->fd_rule_lock);
8283 	hclge_clear_arfs_rules(hdev);
8284 	spin_unlock_bh(&hdev->fd_rule_lock);
8285 
8286 	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
8287 	 * so we only need to stop the phy here.
8288 	 */
8289 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8290 	    hdev->reset_type != HNAE3_FUNC_RESET &&
8291 	    hdev->reset_type != HNAE3_FLR_RESET) {
8292 		hclge_mac_stop_phy(hdev);
8293 		hclge_update_link_status(hdev);
8294 		return;
8295 	}
8296 
8297 	hclge_reset_tqp(handle);
8298 
8299 	hclge_config_mac_tnl_int(hdev, false);
8300 
8301 	/* Mac disable */
8302 	hclge_cfg_mac_mode(hdev, false);
8303 
8304 	hclge_mac_stop_phy(hdev);
8305 
8306 	/* reset tqp stats */
8307 	hclge_reset_tqp_stats(handle);
8308 	hclge_update_link_status(hdev);
8309 }
8310 
8311 int hclge_vport_start(struct hclge_vport *vport)
8312 {
8313 	struct hclge_dev *hdev = vport->back;
8314 
8315 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8316 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8317 	vport->last_active_jiffies = jiffies;
8318 
8319 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8320 		if (vport->vport_id) {
8321 			hclge_restore_mac_table_common(vport);
8322 			hclge_restore_vport_vlan_table(vport);
8323 		} else {
8324 			hclge_restore_hw_table(hdev);
8325 		}
8326 	}
8327 
8328 	clear_bit(vport->vport_id, hdev->vport_config_block);
8329 
8330 	return 0;
8331 }
8332 
8333 void hclge_vport_stop(struct hclge_vport *vport)
8334 {
8335 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8336 }
8337 
8338 static int hclge_client_start(struct hnae3_handle *handle)
8339 {
8340 	struct hclge_vport *vport = hclge_get_vport(handle);
8341 
8342 	return hclge_vport_start(vport);
8343 }
8344 
8345 static void hclge_client_stop(struct hnae3_handle *handle)
8346 {
8347 	struct hclge_vport *vport = hclge_get_vport(handle);
8348 
8349 	hclge_vport_stop(vport);
8350 }
8351 
8352 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8353 					 u16 cmdq_resp, u8  resp_code,
8354 					 enum hclge_mac_vlan_tbl_opcode op)
8355 {
8356 	struct hclge_dev *hdev = vport->back;
8357 
8358 	if (cmdq_resp) {
8359 		dev_err(&hdev->pdev->dev,
8360 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
8361 			cmdq_resp);
8362 		return -EIO;
8363 	}
8364 
8365 	if (op == HCLGE_MAC_VLAN_ADD) {
8366 		if (!resp_code || resp_code == 1)
8367 			return 0;
8368 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8369 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8370 			return -ENOSPC;
8371 
8372 		dev_err(&hdev->pdev->dev,
8373 			"add mac addr failed for undefined reason, code=%u.\n",
8374 			resp_code);
8375 		return -EIO;
8376 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8377 		if (!resp_code) {
8378 			return 0;
8379 		} else if (resp_code == 1) {
8380 			dev_dbg(&hdev->pdev->dev,
8381 				"remove mac addr failed for miss.\n");
8382 			return -ENOENT;
8383 		}
8384 
8385 		dev_err(&hdev->pdev->dev,
8386 			"remove mac addr failed for undefined reason, code=%u.\n",
8387 			resp_code);
8388 		return -EIO;
8389 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8390 		if (!resp_code) {
8391 			return 0;
8392 		} else if (resp_code == 1) {
8393 			dev_dbg(&hdev->pdev->dev,
8394 				"lookup mac addr failed for miss.\n");
8395 			return -ENOENT;
8396 		}
8397 
8398 		dev_err(&hdev->pdev->dev,
8399 			"lookup mac addr failed for undefined reason, code=%u.\n",
8400 			resp_code);
8401 		return -EIO;
8402 	}
8403 
8404 	dev_err(&hdev->pdev->dev,
8405 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8406 
8407 	return -EINVAL;
8408 }
8409 
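/* Set or clear the bit of the given function id in the multicast mac vlan
 * table descriptors: function ids 0-191 live in desc[1], the rest in
 * desc[2].
 */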
8410 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8411 {
8412 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8413 
8414 	unsigned int word_num;
8415 	unsigned int bit_num;
8416 
8417 	if (vfid > 255 || vfid < 0)
8418 		return -EIO;
8419 
8420 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8421 		word_num = vfid / 32;
8422 		bit_num  = vfid % 32;
8423 		if (clr)
8424 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8425 		else
8426 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8427 	} else {
8428 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8429 		bit_num  = vfid % 32;
8430 		if (clr)
8431 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8432 		else
8433 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8434 	}
8435 
8436 	return 0;
8437 }
8438 
8439 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8440 {
8441 #define HCLGE_DESC_NUMBER 3
8442 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8443 	int i, j;
8444 
8445 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8446 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8447 			if (desc[i].data[j])
8448 				return false;
8449 
8450 	return true;
8451 }
8452 
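/* Pack a mac address into a mac vlan table entry command: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; multicast entries also
 * get the multicast entry type bits set.
 */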
8453 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8454 				   const u8 *addr, bool is_mc)
8455 {
8456 	const unsigned char *mac_addr = addr;
8457 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8458 		       (mac_addr[0]) | (mac_addr[1] << 8);
8459 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8460 
8461 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8462 	if (is_mc) {
8463 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8464 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8465 	}
8466 
8467 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8468 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8469 }
8470 
8471 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8472 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8473 {
8474 	struct hclge_dev *hdev = vport->back;
8475 	struct hclge_desc desc;
8476 	u8 resp_code;
8477 	u16 retval;
8478 	int ret;
8479 
8480 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8481 
8482 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8483 
8484 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8485 	if (ret) {
8486 		dev_err(&hdev->pdev->dev,
8487 			"del mac addr failed for cmd_send, ret = %d.\n",
8488 			ret);
8489 		return ret;
8490 	}
8491 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8492 	retval = le16_to_cpu(desc.retval);
8493 
8494 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8495 					     HCLGE_MAC_VLAN_REMOVE);
8496 }
8497 
8498 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8499 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8500 				     struct hclge_desc *desc,
8501 				     bool is_mc)
8502 {
8503 	struct hclge_dev *hdev = vport->back;
8504 	u8 resp_code;
8505 	u16 retval;
8506 	int ret;
8507 
8508 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8509 	if (is_mc) {
8510 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8511 		memcpy(desc[0].data,
8512 		       req,
8513 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8514 		hclge_cmd_setup_basic_desc(&desc[1],
8515 					   HCLGE_OPC_MAC_VLAN_ADD,
8516 					   true);
8517 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8518 		hclge_cmd_setup_basic_desc(&desc[2],
8519 					   HCLGE_OPC_MAC_VLAN_ADD,
8520 					   true);
8521 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8522 	} else {
8523 		memcpy(desc[0].data,
8524 		       req,
8525 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8526 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8527 	}
8528 	if (ret) {
8529 		dev_err(&hdev->pdev->dev,
8530 			"lookup mac addr failed for cmd_send, ret = %d.\n",
8531 			ret);
8532 		return ret;
8533 	}
8534 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8535 	retval = le16_to_cpu(desc[0].retval);
8536 
8537 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8538 					     HCLGE_MAC_VLAN_LKUP);
8539 }
8540 
8541 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8542 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8543 				  struct hclge_desc *mc_desc)
8544 {
8545 	struct hclge_dev *hdev = vport->back;
8546 	int cfg_status;
8547 	u8 resp_code;
8548 	u16 retval;
8549 	int ret;
8550 
8551 	if (!mc_desc) {
8552 		struct hclge_desc desc;
8553 
8554 		hclge_cmd_setup_basic_desc(&desc,
8555 					   HCLGE_OPC_MAC_VLAN_ADD,
8556 					   false);
8557 		memcpy(desc.data, req,
8558 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8559 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8560 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8561 		retval = le16_to_cpu(desc.retval);
8562 
8563 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8564 							   resp_code,
8565 							   HCLGE_MAC_VLAN_ADD);
8566 	} else {
8567 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8568 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8569 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8570 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8571 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8572 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8573 		memcpy(mc_desc[0].data, req,
8574 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8575 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8576 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8577 		retval = le16_to_cpu(mc_desc[0].retval);
8578 
8579 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8580 							   resp_code,
8581 							   HCLGE_MAC_VLAN_ADD);
8582 	}
8583 
8584 	if (ret) {
8585 		dev_err(&hdev->pdev->dev,
8586 			"add mac addr failed for cmd_send, ret = %d.\n",
8587 			ret);
8588 		return ret;
8589 	}
8590 
8591 	return cfg_status;
8592 }
8593 
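/* Ask the firmware to allocate space_size unicast mac table entries and
 * report back how many entries were actually granted.
 */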
8594 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8595 			       u16 *allocated_size)
8596 {
8597 	struct hclge_umv_spc_alc_cmd *req;
8598 	struct hclge_desc desc;
8599 	int ret;
8600 
8601 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8602 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8603 
8604 	req->space_size = cpu_to_le32(space_size);
8605 
8606 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8607 	if (ret) {
8608 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8609 			ret);
8610 		return ret;
8611 	}
8612 
8613 	*allocated_size = le32_to_cpu(desc.data[1]);
8614 
8615 	return 0;
8616 }
8617 
8618 static int hclge_init_umv_space(struct hclge_dev *hdev)
8619 {
8620 	u16 allocated_size = 0;
8621 	int ret;
8622 
8623 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8624 	if (ret)
8625 		return ret;
8626 
8627 	if (allocated_size < hdev->wanted_umv_size)
8628 		dev_warn(&hdev->pdev->dev,
8629 			 "failed to alloc umv space, want %u, get %u\n",
8630 			 hdev->wanted_umv_size, allocated_size);
8631 
8632 	hdev->max_umv_size = allocated_size;
8633 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8634 	hdev->share_umv_size = hdev->priv_umv_size +
8635 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8636 
8637 	if (hdev->ae_dev->dev_specs.mc_mac_size)
8638 		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8639 
8640 	return 0;
8641 }
8642 
8643 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8644 {
8645 	struct hclge_vport *vport;
8646 	int i;
8647 
8648 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8649 		vport = &hdev->vport[i];
8650 		vport->used_umv_num = 0;
8651 	}
8652 
8653 	mutex_lock(&hdev->vport_lock);
8654 	hdev->share_umv_size = hdev->priv_umv_size +
8655 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8656 	mutex_unlock(&hdev->vport_lock);
8657 
8658 	hdev->used_mc_mac_num = 0;
8659 }
8660 
8661 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8662 {
8663 	struct hclge_dev *hdev = vport->back;
8664 	bool is_full;
8665 
8666 	if (need_lock)
8667 		mutex_lock(&hdev->vport_lock);
8668 
8669 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8670 		   hdev->share_umv_size == 0);
8671 
8672 	if (need_lock)
8673 		mutex_unlock(&hdev->vport_lock);
8674 
8675 	return is_full;
8676 }
8677 
8678 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8679 {
8680 	struct hclge_dev *hdev = vport->back;
8681 
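	/* Accounting mirrors the allocation above: entries within the
	 * vport's private quota are charged to the vport only, while
	 * entries beyond the private quota are also charged to (and freed
	 * back to) the shared pool.
	 */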
8682 	if (is_free) {
8683 		if (vport->used_umv_num > hdev->priv_umv_size)
8684 			hdev->share_umv_size++;
8685 
8686 		if (vport->used_umv_num > 0)
8687 			vport->used_umv_num--;
8688 	} else {
8689 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8690 		    hdev->share_umv_size > 0)
8691 			hdev->share_umv_size--;
8692 		vport->used_umv_num++;
8693 	}
8694 }
8695 
8696 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8697 						  const u8 *mac_addr)
8698 {
8699 	struct hclge_mac_node *mac_node, *tmp;
8700 
8701 	list_for_each_entry_safe(mac_node, tmp, list, node)
8702 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8703 			return mac_node;
8704 
8705 	return NULL;
8706 }
8707 
8708 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8709 				  enum HCLGE_MAC_NODE_STATE state)
8710 {
8711 	switch (state) {
8712 	/* from set_rx_mode or tmp_add_list */
8713 	case HCLGE_MAC_TO_ADD:
8714 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8715 			mac_node->state = HCLGE_MAC_ACTIVE;
8716 		break;
8717 	/* only from set_rx_mode */
8718 	case HCLGE_MAC_TO_DEL:
8719 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8720 			list_del(&mac_node->node);
8721 			kfree(mac_node);
8722 		} else {
8723 			mac_node->state = HCLGE_MAC_TO_DEL;
8724 		}
8725 		break;
	/* only from tmp_add_list, where the existing mac_node->state
	 * won't be ACTIVE.
	 */
8729 	case HCLGE_MAC_ACTIVE:
8730 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8731 			mac_node->state = HCLGE_MAC_ACTIVE;
8732 
8733 		break;
8734 	}
8735 }
8736 
8737 int hclge_update_mac_list(struct hclge_vport *vport,
8738 			  enum HCLGE_MAC_NODE_STATE state,
8739 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8740 			  const unsigned char *addr)
8741 {
8742 	struct hclge_dev *hdev = vport->back;
8743 	struct hclge_mac_node *mac_node;
8744 	struct list_head *list;
8745 
8746 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8747 		&vport->uc_mac_list : &vport->mc_mac_list;
8748 
8749 	spin_lock_bh(&vport->mac_list_lock);
8750 
	/* If the mac addr is already in the mac list, there is no need to add
	 * a new node; just update the existing node's state: convert it to a
	 * new state, remove it, or leave it unchanged.
	 */
8755 	mac_node = hclge_find_mac_node(list, addr);
8756 	if (mac_node) {
8757 		hclge_update_mac_node(mac_node, state);
8758 		spin_unlock_bh(&vport->mac_list_lock);
8759 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8760 		return 0;
8761 	}
8762 
	/* if this address was never added, there is nothing to delete */
8764 	if (state == HCLGE_MAC_TO_DEL) {
8765 		spin_unlock_bh(&vport->mac_list_lock);
8766 		dev_err(&hdev->pdev->dev,
8767 			"failed to delete address %pM from mac list\n",
8768 			addr);
8769 		return -ENOENT;
8770 	}
8771 
8772 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8773 	if (!mac_node) {
8774 		spin_unlock_bh(&vport->mac_list_lock);
8775 		return -ENOMEM;
8776 	}
8777 
8778 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8779 
8780 	mac_node->state = state;
8781 	ether_addr_copy(mac_node->mac_addr, addr);
8782 	list_add_tail(&mac_node->node, list);
8783 
8784 	spin_unlock_bh(&vport->mac_list_lock);
8785 
8786 	return 0;
8787 }
8788 
8789 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8790 			     const unsigned char *addr)
8791 {
8792 	struct hclge_vport *vport = hclge_get_vport(handle);
8793 
8794 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8795 				     addr);
8796 }
8797 
8798 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8799 			     const unsigned char *addr)
8800 {
8801 	struct hclge_dev *hdev = vport->back;
8802 	struct hclge_mac_vlan_tbl_entry_cmd req;
8803 	struct hclge_desc desc;
8804 	u16 egress_port = 0;
8805 	int ret;
8806 
8807 	/* mac addr check */
8808 	if (is_zero_ether_addr(addr) ||
8809 	    is_broadcast_ether_addr(addr) ||
8810 	    is_multicast_ether_addr(addr)) {
8811 		dev_err(&hdev->pdev->dev,
8812 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8813 			 addr, is_zero_ether_addr(addr),
8814 			 is_broadcast_ether_addr(addr),
8815 			 is_multicast_ether_addr(addr));
8816 		return -EINVAL;
8817 	}
8818 
8819 	memset(&req, 0, sizeof(req));
8820 
8821 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8822 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8823 
8824 	req.egress_port = cpu_to_le16(egress_port);
8825 
8826 	hclge_prepare_mac_addr(&req, addr, false);
8827 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
8832 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8833 	if (ret == -ENOENT) {
8834 		mutex_lock(&hdev->vport_lock);
8835 		if (!hclge_is_umv_space_full(vport, false)) {
8836 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8837 			if (!ret)
8838 				hclge_update_umv_space(vport, false);
8839 			mutex_unlock(&hdev->vport_lock);
8840 			return ret;
8841 		}
8842 		mutex_unlock(&hdev->vport_lock);
8843 
8844 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8845 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8846 				hdev->priv_umv_size);
8847 
8848 		return -ENOSPC;
8849 	}
8850 
	/* check if we just hit a duplicate entry */
8852 	if (!ret)
8853 		return -EEXIST;
8854 
8855 	return ret;
8856 }
8857 
8858 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8859 			    const unsigned char *addr)
8860 {
8861 	struct hclge_vport *vport = hclge_get_vport(handle);
8862 
8863 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8864 				     addr);
8865 }
8866 
8867 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8868 			    const unsigned char *addr)
8869 {
8870 	struct hclge_dev *hdev = vport->back;
8871 	struct hclge_mac_vlan_tbl_entry_cmd req;
8872 	int ret;
8873 
8874 	/* mac addr check */
8875 	if (is_zero_ether_addr(addr) ||
8876 	    is_broadcast_ether_addr(addr) ||
8877 	    is_multicast_ether_addr(addr)) {
8878 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8879 			addr);
8880 		return -EINVAL;
8881 	}
8882 
8883 	memset(&req, 0, sizeof(req));
8884 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8885 	hclge_prepare_mac_addr(&req, addr, false);
8886 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8887 	if (!ret) {
8888 		mutex_lock(&hdev->vport_lock);
8889 		hclge_update_umv_space(vport, true);
8890 		mutex_unlock(&hdev->vport_lock);
8891 	} else if (ret == -ENOENT) {
8892 		ret = 0;
8893 	}
8894 
8895 	return ret;
8896 }
8897 
8898 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8899 			     const unsigned char *addr)
8900 {
8901 	struct hclge_vport *vport = hclge_get_vport(handle);
8902 
8903 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8904 				     addr);
8905 }
8906 
8907 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8908 			     const unsigned char *addr)
8909 {
8910 	struct hclge_dev *hdev = vport->back;
8911 	struct hclge_mac_vlan_tbl_entry_cmd req;
8912 	struct hclge_desc desc[3];
8913 	bool is_new_addr = false;
8914 	int status;
8915 
8916 	/* mac addr check */
8917 	if (!is_multicast_ether_addr(addr)) {
8918 		dev_err(&hdev->pdev->dev,
8919 			"Add mc mac err! invalid mac:%pM.\n",
8920 			 addr);
8921 		return -EINVAL;
8922 	}
8923 	memset(&req, 0, sizeof(req));
8924 	hclge_prepare_mac_addr(&req, addr, true);
8925 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8926 	if (status) {
8927 		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8928 		    hdev->used_mc_mac_num >=
8929 		    hdev->ae_dev->dev_specs.mc_mac_size)
8930 			goto err_no_space;
8931 
8932 		is_new_addr = true;
8933 
		/* This mac addr does not exist, add a new entry for it */
8935 		memset(desc[0].data, 0, sizeof(desc[0].data));
8936 		memset(desc[1].data, 0, sizeof(desc[0].data));
8937 		memset(desc[2].data, 0, sizeof(desc[0].data));
8938 	}
8939 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8940 	if (status)
8941 		return status;
8942 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8943 	if (status == -ENOSPC)
8944 		goto err_no_space;
8945 	else if (!status && is_new_addr)
8946 		hdev->used_mc_mac_num++;
8947 
8948 	return status;
8949 
8950 err_no_space:
	/* if the table has already overflowed, do not print the error each time */
8952 	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8953 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8954 	return -ENOSPC;
8955 }
8956 
8957 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8958 			    const unsigned char *addr)
8959 {
8960 	struct hclge_vport *vport = hclge_get_vport(handle);
8961 
8962 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8963 				     addr);
8964 }
8965 
8966 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8967 			    const unsigned char *addr)
8968 {
8969 	struct hclge_dev *hdev = vport->back;
8970 	struct hclge_mac_vlan_tbl_entry_cmd req;
8971 	enum hclge_cmd_status status;
8972 	struct hclge_desc desc[3];
8973 
8974 	/* mac addr check */
8975 	if (!is_multicast_ether_addr(addr)) {
8976 		dev_dbg(&hdev->pdev->dev,
8977 			"Remove mc mac err! invalid mac:%pM.\n",
8978 			 addr);
8979 		return -EINVAL;
8980 	}
8981 
8982 	memset(&req, 0, sizeof(req));
8983 	hclge_prepare_mac_addr(&req, addr, true);
8984 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8985 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8987 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8988 		if (status)
8989 			return status;
8990 
8991 		if (hclge_is_all_function_id_zero(desc)) {
			/* All the vfids are zero, so delete this entry */
8993 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8994 			if (!status)
8995 				hdev->used_mc_mac_num--;
8996 		} else {
			/* Not all the vfids are zero, just update the vfid bitmap */
8998 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8999 		}
9000 	} else if (status == -ENOENT) {
9001 		status = 0;
9002 	}
9003 
9004 	return status;
9005 }
9006 
9007 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
9008 				      struct list_head *list,
9009 				      int (*sync)(struct hclge_vport *,
9010 						  const unsigned char *))
9011 {
9012 	struct hclge_mac_node *mac_node, *tmp;
9013 	int ret;
9014 
9015 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9016 		ret = sync(vport, mac_node->mac_addr);
9017 		if (!ret) {
9018 			mac_node->state = HCLGE_MAC_ACTIVE;
9019 		} else {
9020 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
9021 				&vport->state);
9022 
			/* If one unicast mac address already exists in
			 * hardware, keep going: other unicast mac addresses
			 * may be new ones that can still be added.
			 */
9027 			if (ret != -EEXIST)
9028 				break;
9029 		}
9030 	}
9031 }
9032 
9033 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
9034 					struct list_head *list,
9035 					int (*unsync)(struct hclge_vport *,
9036 						      const unsigned char *))
9037 {
9038 	struct hclge_mac_node *mac_node, *tmp;
9039 	int ret;
9040 
9041 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9042 		ret = unsync(vport, mac_node->mac_addr);
9043 		if (!ret || ret == -ENOENT) {
9044 			list_del(&mac_node->node);
9045 			kfree(mac_node);
9046 		} else {
9047 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
9048 				&vport->state);
9049 			break;
9050 		}
9051 	}
9052 }
9053 
9054 static bool hclge_sync_from_add_list(struct list_head *add_list,
9055 				     struct list_head *mac_list)
9056 {
9057 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9058 	bool all_added = true;
9059 
9060 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
9061 		if (mac_node->state == HCLGE_MAC_TO_ADD)
9062 			all_added = false;
9063 
		/* If the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received while the
		 * address was being added into the mac table. If the
		 * mac_node state is ACTIVE, change it to TO_DEL so that it
		 * gets removed next time. Otherwise it must be TO_ADD, which
		 * means the address was never added into the mac table, so
		 * just remove the mac node.
		 */
9072 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9073 		if (new_node) {
9074 			hclge_update_mac_node(new_node, mac_node->state);
9075 			list_del(&mac_node->node);
9076 			kfree(mac_node);
9077 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
9078 			mac_node->state = HCLGE_MAC_TO_DEL;
9079 			list_move_tail(&mac_node->node, mac_list);
9080 		} else {
9081 			list_del(&mac_node->node);
9082 			kfree(mac_node);
9083 		}
9084 	}
9085 
9086 	return all_added;
9087 }
9088 
9089 static void hclge_sync_from_del_list(struct list_head *del_list,
9090 				     struct list_head *mac_list)
9091 {
9092 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9093 
9094 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
9095 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9096 		if (new_node) {
			/* If the mac addr exists in the mac list, a new TO_ADD
			 * request was received while the mac address was being
			 * configured. The mac node state is TO_ADD, but the
			 * address is still in the hardware (the delete failed),
			 * so we just need to change the mac node state to
			 * ACTIVE.
			 */
9104 			new_node->state = HCLGE_MAC_ACTIVE;
9105 			list_del(&mac_node->node);
9106 			kfree(mac_node);
9107 		} else {
9108 			list_move_tail(&mac_node->node, mac_list);
9109 		}
9110 	}
9111 }
9112 
9113 static void hclge_update_overflow_flags(struct hclge_vport *vport,
9114 					enum HCLGE_MAC_ADDR_TYPE mac_type,
9115 					bool is_all_added)
9116 {
9117 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9118 		if (is_all_added)
9119 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
9120 		else
9121 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
9122 	} else {
9123 		if (is_all_added)
9124 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
9125 		else
9126 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
9127 	}
9128 }
9129 
9130 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9131 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
9132 {
9133 	struct hclge_mac_node *mac_node, *tmp, *new_node;
9134 	struct list_head tmp_add_list, tmp_del_list;
9135 	struct list_head *list;
9136 	bool all_added;
9137 
9138 	INIT_LIST_HEAD(&tmp_add_list);
9139 	INIT_LIST_HEAD(&tmp_del_list);
9140 
	/* move the mac addrs to the tmp_add_list and tmp_del_list, so that
	 * we can add/delete them outside the spin lock
	 */
9144 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9145 		&vport->uc_mac_list : &vport->mc_mac_list;
9146 
9147 	spin_lock_bh(&vport->mac_list_lock);
9148 
9149 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9150 		switch (mac_node->state) {
9151 		case HCLGE_MAC_TO_DEL:
9152 			list_move_tail(&mac_node->node, &tmp_del_list);
9153 			break;
9154 		case HCLGE_MAC_TO_ADD:
9155 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9156 			if (!new_node)
9157 				goto stop_traverse;
9158 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9159 			new_node->state = mac_node->state;
9160 			list_add_tail(&new_node->node, &tmp_add_list);
9161 			break;
9162 		default:
9163 			break;
9164 		}
9165 	}
9166 
9167 stop_traverse:
9168 	spin_unlock_bh(&vport->mac_list_lock);
9169 
9170 	/* delete first, in order to get max mac table space for adding */
9171 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9172 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9173 					    hclge_rm_uc_addr_common);
9174 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9175 					  hclge_add_uc_addr_common);
9176 	} else {
9177 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9178 					    hclge_rm_mc_addr_common);
9179 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9180 					  hclge_add_mc_addr_common);
9181 	}
9182 
	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry next time.
	 */
9186 	spin_lock_bh(&vport->mac_list_lock);
9187 
9188 	hclge_sync_from_del_list(&tmp_del_list, list);
9189 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9190 
9191 	spin_unlock_bh(&vport->mac_list_lock);
9192 
9193 	hclge_update_overflow_flags(vport, mac_type, all_added);
9194 }
9195 
9196 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9197 {
9198 	struct hclge_dev *hdev = vport->back;
9199 
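	/* skip vports whose configuration is currently blocked (e.g. while
	 * a VF reset is in progress); their tables are restored afterwards
	 */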
9200 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9201 		return false;
9202 
9203 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9204 		return true;
9205 
9206 	return false;
9207 }
9208 
9209 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9210 {
9211 	int i;
9212 
9213 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9214 		struct hclge_vport *vport = &hdev->vport[i];
9215 
9216 		if (!hclge_need_sync_mac_table(vport))
9217 			continue;
9218 
9219 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9220 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9221 	}
9222 }
9223 
9224 static void hclge_build_del_list(struct list_head *list,
9225 				 bool is_del_list,
9226 				 struct list_head *tmp_del_list)
9227 {
9228 	struct hclge_mac_node *mac_cfg, *tmp;
9229 
9230 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9231 		switch (mac_cfg->state) {
9232 		case HCLGE_MAC_TO_DEL:
9233 		case HCLGE_MAC_ACTIVE:
9234 			list_move_tail(&mac_cfg->node, tmp_del_list);
9235 			break;
9236 		case HCLGE_MAC_TO_ADD:
9237 			if (is_del_list) {
9238 				list_del(&mac_cfg->node);
9239 				kfree(mac_cfg);
9240 			}
9241 			break;
9242 		}
9243 	}
9244 }
9245 
9246 static void hclge_unsync_del_list(struct hclge_vport *vport,
9247 				  int (*unsync)(struct hclge_vport *vport,
9248 						const unsigned char *addr),
9249 				  bool is_del_list,
9250 				  struct list_head *tmp_del_list)
9251 {
9252 	struct hclge_mac_node *mac_cfg, *tmp;
9253 	int ret;
9254 
9255 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9256 		ret = unsync(vport, mac_cfg->mac_addr);
9257 		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep them in
			 * the mac list so that they can be restored after the
			 * vf reset finishes.
			 */
9262 			if (!is_del_list &&
9263 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9264 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9265 			} else {
9266 				list_del(&mac_cfg->node);
9267 				kfree(mac_cfg);
9268 			}
9269 		} else if (is_del_list) {
9270 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9271 		}
9272 	}
9273 }
9274 
9275 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9276 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9277 {
9278 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9279 	struct hclge_dev *hdev = vport->back;
9280 	struct list_head tmp_del_list, *list;
9281 
9282 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9283 		list = &vport->uc_mac_list;
9284 		unsync = hclge_rm_uc_addr_common;
9285 	} else {
9286 		list = &vport->mc_mac_list;
9287 		unsync = hclge_rm_mc_addr_common;
9288 	}
9289 
9290 	INIT_LIST_HEAD(&tmp_del_list);
9291 
9292 	if (!is_del_list)
9293 		set_bit(vport->vport_id, hdev->vport_config_block);
9294 
9295 	spin_lock_bh(&vport->mac_list_lock);
9296 
9297 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9298 
9299 	spin_unlock_bh(&vport->mac_list_lock);
9300 
9301 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9302 
9303 	spin_lock_bh(&vport->mac_list_lock);
9304 
9305 	hclge_sync_from_del_list(&tmp_del_list, list);
9306 
9307 	spin_unlock_bh(&vport->mac_list_lock);
9308 }
9309 
/* remove all mac addresses when uninitializing */
9311 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9312 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9313 {
9314 	struct hclge_mac_node *mac_node, *tmp;
9315 	struct hclge_dev *hdev = vport->back;
9316 	struct list_head tmp_del_list, *list;
9317 
9318 	INIT_LIST_HEAD(&tmp_del_list);
9319 
9320 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9321 		&vport->uc_mac_list : &vport->mc_mac_list;
9322 
9323 	spin_lock_bh(&vport->mac_list_lock);
9324 
9325 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9326 		switch (mac_node->state) {
9327 		case HCLGE_MAC_TO_DEL:
9328 		case HCLGE_MAC_ACTIVE:
9329 			list_move_tail(&mac_node->node, &tmp_del_list);
9330 			break;
9331 		case HCLGE_MAC_TO_ADD:
9332 			list_del(&mac_node->node);
9333 			kfree(mac_node);
9334 			break;
9335 		}
9336 	}
9337 
9338 	spin_unlock_bh(&vport->mac_list_lock);
9339 
9340 	if (mac_type == HCLGE_MAC_ADDR_UC)
9341 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9342 					    hclge_rm_uc_addr_common);
9343 	else
9344 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9345 					    hclge_rm_mc_addr_common);
9346 
9347 	if (!list_empty(&tmp_del_list))
9348 		dev_warn(&hdev->pdev->dev,
9349 			 "uninit %s mac list for vport %u not completely.\n",
9350 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9351 			 vport->vport_id);
9352 
9353 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9354 		list_del(&mac_node->node);
9355 		kfree(mac_node);
9356 	}
9357 }
9358 
9359 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9360 {
9361 	struct hclge_vport *vport;
9362 	int i;
9363 
9364 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9365 		vport = &hdev->vport[i];
9366 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9367 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9368 	}
9369 }
9370 
9371 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9372 					      u16 cmdq_resp, u8 resp_code)
9373 {
9374 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9375 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9376 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9377 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9378 
9379 	int return_status;
9380 
9381 	if (cmdq_resp) {
9382 		dev_err(&hdev->pdev->dev,
9383 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9384 			cmdq_resp);
9385 		return -EIO;
9386 	}
9387 
9388 	switch (resp_code) {
9389 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9390 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9391 		return_status = 0;
9392 		break;
9393 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9394 		dev_err(&hdev->pdev->dev,
9395 			"add mac ethertype failed for manager table overflow.\n");
9396 		return_status = -EIO;
9397 		break;
9398 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9399 		dev_err(&hdev->pdev->dev,
9400 			"add mac ethertype failed for key conflict.\n");
9401 		return_status = -EIO;
9402 		break;
9403 	default:
9404 		dev_err(&hdev->pdev->dev,
9405 			"add mac ethertype failed for undefined, code=%u.\n",
9406 			resp_code);
9407 		return_status = -EIO;
9408 	}
9409 
9410 	return return_status;
9411 }
9412 
9413 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9414 				     u8 *mac_addr)
9415 {
9416 	struct hclge_mac_vlan_tbl_entry_cmd req;
9417 	struct hclge_dev *hdev = vport->back;
9418 	struct hclge_desc desc;
9419 	u16 egress_port = 0;
9420 	int i;
9421 
9422 	if (is_zero_ether_addr(mac_addr))
9423 		return false;
9424 
9425 	memset(&req, 0, sizeof(req));
9426 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9427 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9428 	req.egress_port = cpu_to_le16(egress_port);
9429 	hclge_prepare_mac_addr(&req, mac_addr, false);
9430 
9431 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9432 		return true;
9433 
9434 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9435 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9436 		if (i != vf_idx &&
9437 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9438 			return true;
9439 
9440 	return false;
9441 }
9442 
9443 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9444 			    u8 *mac_addr)
9445 {
9446 	struct hclge_vport *vport = hclge_get_vport(handle);
9447 	struct hclge_dev *hdev = vport->back;
9448 
9449 	vport = hclge_get_vf_vport(hdev, vf);
9450 	if (!vport)
9451 		return -EINVAL;
9452 
9453 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9454 		dev_info(&hdev->pdev->dev,
9455 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9456 			 mac_addr);
9457 		return 0;
9458 	}
9459 
9460 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9461 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9462 			mac_addr);
9463 		return -EEXIST;
9464 	}
9465 
9466 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9467 
9468 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9469 		dev_info(&hdev->pdev->dev,
9470 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9471 			 vf, mac_addr);
9472 		return hclge_inform_reset_assert_to_vf(vport);
9473 	}
9474 
9475 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9476 		 vf, mac_addr);
9477 	return 0;
9478 }
9479 
9480 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9481 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9482 {
9483 	struct hclge_desc desc;
9484 	u8 resp_code;
9485 	u16 retval;
9486 	int ret;
9487 
9488 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9489 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9490 
9491 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9492 	if (ret) {
9493 		dev_err(&hdev->pdev->dev,
9494 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9495 			ret);
9496 		return ret;
9497 	}
9498 
9499 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9500 	retval = le16_to_cpu(desc.retval);
9501 
9502 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9503 }
9504 
9505 static int init_mgr_tbl(struct hclge_dev *hdev)
9506 {
9507 	int ret;
9508 	int i;
9509 
9510 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9511 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9512 		if (ret) {
9513 			dev_err(&hdev->pdev->dev,
9514 				"add mac ethertype failed, ret =%d.\n",
9515 				ret);
9516 			return ret;
9517 		}
9518 	}
9519 
9520 	return 0;
9521 }
9522 
9523 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9524 {
9525 	struct hclge_vport *vport = hclge_get_vport(handle);
9526 	struct hclge_dev *hdev = vport->back;
9527 
9528 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9529 }
9530 
9531 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9532 				       const u8 *old_addr, const u8 *new_addr)
9533 {
9534 	struct list_head *list = &vport->uc_mac_list;
9535 	struct hclge_mac_node *old_node, *new_node;
9536 
9537 	new_node = hclge_find_mac_node(list, new_addr);
9538 	if (!new_node) {
9539 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9540 		if (!new_node)
9541 			return -ENOMEM;
9542 
9543 		new_node->state = HCLGE_MAC_TO_ADD;
9544 		ether_addr_copy(new_node->mac_addr, new_addr);
9545 		list_add(&new_node->node, list);
9546 	} else {
9547 		if (new_node->state == HCLGE_MAC_TO_DEL)
9548 			new_node->state = HCLGE_MAC_ACTIVE;
9549 
		/* Make sure the new addr is at the head of the list, so that
		 * the dev addr is not skipped when re-adding entries into the
		 * mac table due to the umv space limitation after a global/imp
		 * reset, which clears the mac table in hardware.
		 */
9555 		list_move(&new_node->node, list);
9556 	}
9557 
9558 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9559 		old_node = hclge_find_mac_node(list, old_addr);
9560 		if (old_node) {
9561 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9562 				list_del(&old_node->node);
9563 				kfree(old_node);
9564 			} else {
9565 				old_node->state = HCLGE_MAC_TO_DEL;
9566 			}
9567 		}
9568 	}
9569 
9570 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9571 
9572 	return 0;
9573 }
9574 
9575 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9576 			      bool is_first)
9577 {
9578 	const unsigned char *new_addr = (const unsigned char *)p;
9579 	struct hclge_vport *vport = hclge_get_vport(handle);
9580 	struct hclge_dev *hdev = vport->back;
9581 	unsigned char *old_addr = NULL;
9582 	int ret;
9583 
9584 	/* mac addr check */
9585 	if (is_zero_ether_addr(new_addr) ||
9586 	    is_broadcast_ether_addr(new_addr) ||
9587 	    is_multicast_ether_addr(new_addr)) {
9588 		dev_err(&hdev->pdev->dev,
9589 			"change uc mac err! invalid mac: %pM.\n",
9590 			 new_addr);
9591 		return -EINVAL;
9592 	}
9593 
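	/* update the MAC used as the pause-frame source address first; if
	 * this fails, the old dev addr is left untouched
	 */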
9594 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9595 	if (ret) {
9596 		dev_err(&hdev->pdev->dev,
9597 			"failed to configure mac pause address, ret = %d\n",
9598 			ret);
9599 		return ret;
9600 	}
9601 
9602 	if (!is_first)
9603 		old_addr = hdev->hw.mac.mac_addr;
9604 
9605 	spin_lock_bh(&vport->mac_list_lock);
9606 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9607 	if (ret) {
9608 		dev_err(&hdev->pdev->dev,
9609 			"failed to change the mac addr:%pM, ret = %d\n",
9610 			new_addr, ret);
9611 		spin_unlock_bh(&vport->mac_list_lock);
9612 
9613 		if (!is_first)
9614 			hclge_pause_addr_cfg(hdev, old_addr);
9615 
9616 		return ret;
9617 	}
	/* we must update the dev addr under the spin lock to prevent it from
	 * being removed by the set_rx_mode path.
	 */
9621 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9622 	spin_unlock_bh(&vport->mac_list_lock);
9623 
9624 	hclge_task_schedule(hdev, 0);
9625 
9626 	return 0;
9627 }
9628 
9629 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9630 {
9631 	struct mii_ioctl_data *data = if_mii(ifr);
9632 
9633 	if (!hnae3_dev_phy_imp_supported(hdev))
9634 		return -EOPNOTSUPP;
9635 
9636 	switch (cmd) {
9637 	case SIOCGMIIPHY:
9638 		data->phy_id = hdev->hw.mac.phy_addr;
9639 		/* this command reads phy id and register at the same time */
9640 		fallthrough;
9641 	case SIOCGMIIREG:
9642 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9643 		return 0;
9644 
9645 	case SIOCSMIIREG:
9646 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9647 	default:
9648 		return -EOPNOTSUPP;
9649 	}
9650 }
9651 
9652 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9653 			  int cmd)
9654 {
9655 	struct hclge_vport *vport = hclge_get_vport(handle);
9656 	struct hclge_dev *hdev = vport->back;
9657 
9658 	switch (cmd) {
9659 	case SIOCGHWTSTAMP:
9660 		return hclge_ptp_get_cfg(hdev, ifr);
9661 	case SIOCSHWTSTAMP:
9662 		return hclge_ptp_set_cfg(hdev, ifr);
9663 	default:
9664 		if (!hdev->hw.mac.phydev)
9665 			return hclge_mii_ioctl(hdev, ifr, cmd);
9666 	}
9667 
9668 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9669 }
9670 
9671 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9672 					     bool bypass_en)
9673 {
9674 	struct hclge_port_vlan_filter_bypass_cmd *req;
9675 	struct hclge_desc desc;
9676 	int ret;
9677 
9678 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9679 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9680 	req->vf_id = vf_id;
9681 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9682 		      bypass_en ? 1 : 0);
9683 
9684 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9685 	if (ret)
9686 		dev_err(&hdev->pdev->dev,
9687 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9688 			vf_id, ret);
9689 
9690 	return ret;
9691 }
9692 
9693 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9694 				      u8 fe_type, bool filter_en, u8 vf_id)
9695 {
9696 	struct hclge_vlan_filter_ctrl_cmd *req;
9697 	struct hclge_desc desc;
9698 	int ret;
9699 
9700 	/* read current vlan filter parameter */
9701 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9702 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9703 	req->vlan_type = vlan_type;
9704 	req->vf_id = vf_id;
9705 
9706 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9707 	if (ret) {
9708 		dev_err(&hdev->pdev->dev,
9709 			"failed to get vlan filter config, ret = %d.\n", ret);
9710 		return ret;
9711 	}
9712 
9713 	/* modify and write new config parameter */
9714 	hclge_cmd_reuse_desc(&desc, false);
9715 	req->vlan_fe = filter_en ?
9716 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9717 
9718 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9719 	if (ret)
9720 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9721 			ret);
9722 
9723 	return ret;
9724 }
9725 
9726 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9727 {
9728 	struct hclge_dev *hdev = vport->back;
9729 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9730 	int ret;
9731 
9732 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9733 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9734 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9735 						  enable, vport->vport_id);
9736 
9737 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9738 					 HCLGE_FILTER_FE_EGRESS, enable,
9739 					 vport->vport_id);
9740 	if (ret)
9741 		return ret;
9742 
9743 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9744 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9745 							!enable);
9746 	} else if (!vport->vport_id) {
9747 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9748 			enable = false;
9749 
9750 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9751 						 HCLGE_FILTER_FE_INGRESS,
9752 						 enable, 0);
9753 	}
9754 
9755 	return ret;
9756 }
9757 
9758 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9759 {
9760 	struct hnae3_handle *handle = &vport->nic;
9761 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9762 	struct hclge_dev *hdev = vport->back;
9763 
9764 	if (vport->vport_id) {
9765 		if (vport->port_base_vlan_cfg.state !=
9766 			HNAE3_PORT_BASE_VLAN_DISABLE)
9767 			return true;
9768 
9769 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9770 			return false;
9771 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9772 		return false;
9773 	}
9774 
9775 	if (!vport->req_vlan_fltr_en)
9776 		return false;
9777 
	/* for compatibility with older devices, always enable the vlan filter */
9779 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9780 		return true;
9781 
9782 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9783 		if (vlan->vlan_id != 0)
9784 			return true;
9785 
9786 	return false;
9787 }
9788 
9789 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9790 {
9791 	struct hclge_dev *hdev = vport->back;
9792 	bool need_en;
9793 	int ret;
9794 
9795 	mutex_lock(&hdev->vport_lock);
9796 
9797 	vport->req_vlan_fltr_en = request_en;
9798 
9799 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9800 	if (need_en == vport->cur_vlan_fltr_en) {
9801 		mutex_unlock(&hdev->vport_lock);
9802 		return 0;
9803 	}
9804 
9805 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9806 	if (ret) {
9807 		mutex_unlock(&hdev->vport_lock);
9808 		return ret;
9809 	}
9810 
9811 	vport->cur_vlan_fltr_en = need_en;
9812 
9813 	mutex_unlock(&hdev->vport_lock);
9814 
9815 	return 0;
9816 }
9817 
9818 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9819 {
9820 	struct hclge_vport *vport = hclge_get_vport(handle);
9821 
9822 	return hclge_enable_vport_vlan_filter(vport, enable);
9823 }
9824 
9825 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9826 					bool is_kill, u16 vlan,
9827 					struct hclge_desc *desc)
9828 {
9829 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9830 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9831 	u8 vf_byte_val;
9832 	u8 vf_byte_off;
9833 	int ret;
9834 
9835 	hclge_cmd_setup_basic_desc(&desc[0],
9836 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9837 	hclge_cmd_setup_basic_desc(&desc[1],
9838 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9839 
9840 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9841 
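	/* the VF bitmap spans two descriptors: vfid / 8 selects the byte
	 * and vfid % 8 the bit; byte offsets at or beyond HCLGE_MAX_VF_BYTES
	 * land in the second descriptor
	 */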
9842 	vf_byte_off = vfid / 8;
9843 	vf_byte_val = 1 << (vfid % 8);
9844 
9845 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9846 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9847 
9848 	req0->vlan_id  = cpu_to_le16(vlan);
9849 	req0->vlan_cfg = is_kill;
9850 
9851 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9852 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9853 	else
9854 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9855 
9856 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9857 	if (ret) {
9858 		dev_err(&hdev->pdev->dev,
9859 			"Send vf vlan command fail, ret =%d.\n",
9860 			ret);
9861 		return ret;
9862 	}
9863 
9864 	return 0;
9865 }
9866 
9867 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9868 					  bool is_kill, struct hclge_desc *desc)
9869 {
9870 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9871 
9872 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9873 
9874 	if (!is_kill) {
9875 #define HCLGE_VF_VLAN_NO_ENTRY	2
9876 		if (!req->resp_code || req->resp_code == 1)
9877 			return 0;
9878 
9879 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9880 			set_bit(vfid, hdev->vf_vlan_full);
9881 			dev_warn(&hdev->pdev->dev,
9882 				 "vf vlan table is full, vf vlan filter is disabled\n");
9883 			return 0;
9884 		}
9885 
9886 		dev_err(&hdev->pdev->dev,
9887 			"Add vf vlan filter fail, ret =%u.\n",
9888 			req->resp_code);
9889 	} else {
9890 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9891 		if (!req->resp_code)
9892 			return 0;
9893 
		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are not added into the vf vlan table.
		 * Just return 0 without a warning to avoid flooding the log
		 * on unload.
		 */
9899 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9900 			return 0;
9901 
9902 		dev_err(&hdev->pdev->dev,
9903 			"Kill vf vlan filter fail, ret =%u.\n",
9904 			req->resp_code);
9905 	}
9906 
9907 	return -EIO;
9908 }
9909 
9910 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9911 				    bool is_kill, u16 vlan)
9912 {
9913 	struct hclge_vport *vport = &hdev->vport[vfid];
9914 	struct hclge_desc desc[2];
9915 	int ret;
9916 
	/* If the vf vlan table is full, the firmware disables the vf vlan
	 * filter, so adding a new vlan id is neither possible nor necessary.
	 * However, if spoof check is enabled while the vf vlan table is full,
	 * a new vlan must not be added, because tx packets with that vlan id
	 * would be dropped.
	 */
9922 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9923 		if (vport->vf_info.spoofchk && vlan) {
9924 			dev_err(&hdev->pdev->dev,
9925 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9926 			return -EPERM;
9927 		}
9928 		return 0;
9929 	}
9930 
9931 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9932 	if (ret)
9933 		return ret;
9934 
9935 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9936 }
9937 
9938 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9939 				      u16 vlan_id, bool is_kill)
9940 {
9941 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9942 	struct hclge_desc desc;
9943 	u8 vlan_offset_byte_val;
9944 	u8 vlan_offset_byte;
9945 	u8 vlan_offset_160;
9946 	int ret;
9947 
9948 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9949 
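	/* the VLAN ID space is addressed as groups of
	 * HCLGE_VLAN_ID_OFFSET_STEP IDs: vlan_offset selects the group and
	 * one bit of the bitmap marks the VLAN within that group
	 */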
9950 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9951 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9952 			   HCLGE_VLAN_BYTE_SIZE;
9953 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9954 
9955 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9956 	req->vlan_offset = vlan_offset_160;
9957 	req->vlan_cfg = is_kill;
9958 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9959 
9960 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9961 	if (ret)
9962 		dev_err(&hdev->pdev->dev,
9963 			"port vlan command, send fail, ret =%d.\n", ret);
9964 	return ret;
9965 }
9966 
9967 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9968 				    u16 vport_id, u16 vlan_id,
9969 				    bool is_kill)
9970 {
9971 	u16 vport_idx, vport_num = 0;
9972 	int ret;
9973 
9974 	if (is_kill && !vlan_id)
9975 		return 0;
9976 
9977 	if (vlan_id >= VLAN_N_VID)
9978 		return -EINVAL;
9979 
9980 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9981 	if (ret) {
9982 		dev_err(&hdev->pdev->dev,
9983 			"Set %u vport vlan filter config fail, ret =%d.\n",
9984 			vport_id, ret);
9985 		return ret;
9986 	}
9987 
9988 	/* vlan 0 may be added twice when 8021q module is enabled */
9989 	if (!is_kill && !vlan_id &&
9990 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9991 		return 0;
9992 
9993 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9994 		dev_err(&hdev->pdev->dev,
9995 			"Add port vlan failed, vport %u is already in vlan %u\n",
9996 			vport_id, vlan_id);
9997 		return -EINVAL;
9998 	}
9999 
10000 	if (is_kill &&
10001 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
10002 		dev_err(&hdev->pdev->dev,
10003 			"Delete port vlan failed, vport %u is not in vlan %u\n",
10004 			vport_id, vlan_id);
10005 		return -EINVAL;
10006 	}
10007 
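	/* the port-level filter only needs updating when the first vport
	 * joins the vlan or the last vport leaves it; otherwise only the
	 * per-vport (vf) vlan table changes
	 */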
10008 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
10009 		vport_num++;
10010 
10011 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
10012 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
10013 						 is_kill);
10014 
10015 	return ret;
10016 }
10017 
10018 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
10019 {
10020 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
10021 	struct hclge_vport_vtag_tx_cfg_cmd *req;
10022 	struct hclge_dev *hdev = vport->back;
10023 	struct hclge_desc desc;
10024 	u16 bmap_index;
10025 	int status;
10026 
10027 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
10028 
10029 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
10030 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
10031 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
10032 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
10033 		      vcfg->accept_tag1 ? 1 : 0);
10034 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
10035 		      vcfg->accept_untag1 ? 1 : 0);
10036 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
10037 		      vcfg->accept_tag2 ? 1 : 0);
10038 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
10039 		      vcfg->accept_untag2 ? 1 : 0);
10040 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
10041 		      vcfg->insert_tag1_en ? 1 : 0);
10042 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
10043 		      vcfg->insert_tag2_en ? 1 : 0);
10044 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
10045 		      vcfg->tag_shift_mode_en ? 1 : 0);
10046 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
10047 
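	/* select the target vport in the command: vf_offset picks a group
	 * of HCLGE_VF_NUM_PER_CMD vports and the bitmap bit below
	 * identifies the vport within that group
	 */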
10048 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10049 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10050 			HCLGE_VF_NUM_PER_BYTE;
10051 	req->vf_bitmap[bmap_index] =
10052 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10053 
10054 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10055 	if (status)
10056 		dev_err(&hdev->pdev->dev,
10057 			"Send port txvlan cfg command fail, ret =%d\n",
10058 			status);
10059 
10060 	return status;
10061 }
10062 
10063 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
10064 {
10065 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
10066 	struct hclge_vport_vtag_rx_cfg_cmd *req;
10067 	struct hclge_dev *hdev = vport->back;
10068 	struct hclge_desc desc;
10069 	u16 bmap_index;
10070 	int status;
10071 
10072 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
10073 
10074 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
10075 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
10076 		      vcfg->strip_tag1_en ? 1 : 0);
10077 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
10078 		      vcfg->strip_tag2_en ? 1 : 0);
10079 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
10080 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
10081 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
10082 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
10083 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
10084 		      vcfg->strip_tag1_discard_en ? 1 : 0);
10085 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
10086 		      vcfg->strip_tag2_discard_en ? 1 : 0);
10087 
10088 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10089 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10090 			HCLGE_VF_NUM_PER_BYTE;
10091 	req->vf_bitmap[bmap_index] =
10092 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10093 
10094 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10095 	if (status)
10096 		dev_err(&hdev->pdev->dev,
10097 			"Send port rxvlan cfg command fail, ret =%d\n",
10098 			status);
10099 
10100 	return status;
10101 }
10102 
10103 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
10104 				  u16 port_base_vlan_state,
10105 				  u16 vlan_tag, u8 qos)
10106 {
10107 	int ret;
10108 
10109 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10110 		vport->txvlan_cfg.accept_tag1 = true;
10111 		vport->txvlan_cfg.insert_tag1_en = false;
10112 		vport->txvlan_cfg.default_tag1 = 0;
10113 	} else {
10114 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
10115 
10116 		vport->txvlan_cfg.accept_tag1 =
10117 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
10118 		vport->txvlan_cfg.insert_tag1_en = true;
10119 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
10120 						 vlan_tag;
10121 	}
10122 
10123 	vport->txvlan_cfg.accept_untag1 = true;
10124 
	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but these two
	 * fields cannot be configured by the user.
	 */
10129 	vport->txvlan_cfg.accept_tag2 = true;
10130 	vport->txvlan_cfg.accept_untag2 = true;
10131 	vport->txvlan_cfg.insert_tag2_en = false;
10132 	vport->txvlan_cfg.default_tag2 = 0;
10133 	vport->txvlan_cfg.tag_shift_mode_en = true;
10134 
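	/* rx stripping: with a port-based VLAN enabled, tag2 is always
	 * stripped and discarded while tag1 stripping follows the rx vlan
	 * offload setting; otherwise only tag2 stripping follows the
	 * offload setting
	 */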
10135 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10136 		vport->rxvlan_cfg.strip_tag1_en = false;
10137 		vport->rxvlan_cfg.strip_tag2_en =
10138 				vport->rxvlan_cfg.rx_vlan_offload_en;
10139 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10140 	} else {
10141 		vport->rxvlan_cfg.strip_tag1_en =
10142 				vport->rxvlan_cfg.rx_vlan_offload_en;
10143 		vport->rxvlan_cfg.strip_tag2_en = true;
10144 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10145 	}
10146 
10147 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10148 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10149 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10150 
10151 	ret = hclge_set_vlan_tx_offload_cfg(vport);
10152 	if (ret)
10153 		return ret;
10154 
10155 	return hclge_set_vlan_rx_offload_cfg(vport);
10156 }
10157 
10158 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10159 {
10160 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10161 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10162 	struct hclge_desc desc;
10163 	int status;
10164 
10165 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10166 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10167 	rx_req->ot_fst_vlan_type =
10168 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10169 	rx_req->ot_sec_vlan_type =
10170 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10171 	rx_req->in_fst_vlan_type =
10172 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10173 	rx_req->in_sec_vlan_type =
10174 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10175 
10176 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10177 	if (status) {
10178 		dev_err(&hdev->pdev->dev,
10179 			"Send rxvlan protocol type command fail, ret =%d\n",
10180 			status);
10181 		return status;
10182 	}
10183 
10184 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10185 
10186 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10187 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10188 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10189 
10190 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10191 	if (status)
10192 		dev_err(&hdev->pdev->dev,
10193 			"Send txvlan protocol type command fail, ret =%d\n",
10194 			status);
10195 
10196 	return status;
10197 }
10198 
10199 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10200 {
10201 #define HCLGE_DEF_VLAN_TYPE		0x8100
10202 
10203 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10204 	struct hclge_vport *vport;
10205 	int ret;
10206 	int i;
10207 
10208 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10209 		/* for revision 0x21, vf vlan filter is per function */
10210 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10211 			vport = &hdev->vport[i];
10212 			ret = hclge_set_vlan_filter_ctrl(hdev,
10213 							 HCLGE_FILTER_TYPE_VF,
10214 							 HCLGE_FILTER_FE_EGRESS,
10215 							 true,
10216 							 vport->vport_id);
10217 			if (ret)
10218 				return ret;
10219 			vport->cur_vlan_fltr_en = true;
10220 		}
10221 
10222 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10223 						 HCLGE_FILTER_FE_INGRESS, true,
10224 						 0);
10225 		if (ret)
10226 			return ret;
10227 	} else {
10228 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10229 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10230 						 true, 0);
10231 		if (ret)
10232 			return ret;
10233 	}
10234 
10235 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10236 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10237 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10238 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10239 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10240 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10241 
10242 	ret = hclge_set_vlan_protocol_type(hdev);
10243 	if (ret)
10244 		return ret;
10245 
10246 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10247 		u16 vlan_tag;
10248 		u8 qos;
10249 
10250 		vport = &hdev->vport[i];
10251 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10252 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10253 
10254 		ret = hclge_vlan_offload_cfg(vport,
10255 					     vport->port_base_vlan_cfg.state,
10256 					     vlan_tag, qos);
10257 		if (ret)
10258 			return ret;
10259 	}
10260 
10261 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10262 }
10263 
10264 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool written_to_tbl)
10266 {
10267 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10268 
10269 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10270 		if (vlan->vlan_id == vlan_id)
10271 			return;
10272 
10273 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10274 	if (!vlan)
10275 		return;
10276 
	vlan->hd_tbl_status = written_to_tbl;
10278 	vlan->vlan_id = vlan_id;
10279 
10280 	list_add_tail(&vlan->node, &vport->vlan_list);
10281 }
10282 
10283 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10284 {
10285 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10286 	struct hclge_dev *hdev = vport->back;
10287 	int ret;
10288 
10289 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10290 		if (!vlan->hd_tbl_status) {
10291 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10292 						       vport->vport_id,
10293 						       vlan->vlan_id, false);
10294 			if (ret) {
10295 				dev_err(&hdev->pdev->dev,
10296 					"restore vport vlan list failed, ret=%d\n",
10297 					ret);
10298 				return ret;
10299 			}
10300 		}
10301 		vlan->hd_tbl_status = true;
10302 	}
10303 
10304 	return 0;
10305 }
10306 
10307 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10308 				      bool is_write_tbl)
10309 {
10310 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10311 	struct hclge_dev *hdev = vport->back;
10312 
10313 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10314 		if (vlan->vlan_id == vlan_id) {
10315 			if (is_write_tbl && vlan->hd_tbl_status)
10316 				hclge_set_vlan_filter_hw(hdev,
10317 							 htons(ETH_P_8021Q),
10318 							 vport->vport_id,
10319 							 vlan_id,
10320 							 true);
10321 
10322 			list_del(&vlan->node);
10323 			kfree(vlan);
10324 			break;
10325 		}
10326 	}
10327 }
10328 
10329 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10330 {
10331 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10332 	struct hclge_dev *hdev = vport->back;
10333 
10334 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10335 		if (vlan->hd_tbl_status)
10336 			hclge_set_vlan_filter_hw(hdev,
10337 						 htons(ETH_P_8021Q),
10338 						 vport->vport_id,
10339 						 vlan->vlan_id,
10340 						 true);
10341 
10342 		vlan->hd_tbl_status = false;
10343 		if (is_del_list) {
10344 			list_del(&vlan->node);
10345 			kfree(vlan);
10346 		}
10347 	}
10348 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10349 }
10350 
10351 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10352 {
10353 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10354 	struct hclge_vport *vport;
10355 	int i;
10356 
10357 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10358 		vport = &hdev->vport[i];
10359 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10360 			list_del(&vlan->node);
10361 			kfree(vlan);
10362 		}
10363 	}
10364 }
10365 
10366 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10367 {
10368 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10369 	struct hclge_dev *hdev = vport->back;
10370 	u16 vlan_proto;
10371 	u16 vlan_id;
10372 	u16 state;
10373 	int ret;
10374 
10375 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10376 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10377 	state = vport->port_base_vlan_cfg.state;
10378 
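	/* with a port-based VLAN active, only that single VLAN needs to be
	 * re-programmed in hardware; otherwise replay the whole per-vport
	 * vlan list
	 */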
10379 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10380 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10381 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10382 					 vport->vport_id, vlan_id,
10383 					 false);
10384 		return;
10385 	}
10386 
10387 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10388 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10389 					       vport->vport_id,
10390 					       vlan->vlan_id, false);
10391 		if (ret)
10392 			break;
10393 		vlan->hd_tbl_status = true;
10394 	}
10395 }
10396 
/* For global reset and imp reset, hardware clears the mac table, so we
 * change the mac address state from ACTIVE to TO_ADD; the addresses can
 * then be restored in the service task after the reset completes.
 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
 * to be restored after reset, so just remove these mac nodes from mac_list.
 */
10403 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10404 {
10405 	struct hclge_mac_node *mac_node, *tmp;
10406 
10407 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10408 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10409 			mac_node->state = HCLGE_MAC_TO_ADD;
10410 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10411 			list_del(&mac_node->node);
10412 			kfree(mac_node);
10413 		}
10414 	}
10415 }
10416 
10417 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10418 {
10419 	spin_lock_bh(&vport->mac_list_lock);
10420 
10421 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10422 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10423 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10424 
10425 	spin_unlock_bh(&vport->mac_list_lock);
10426 }
10427 
10428 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10429 {
10430 	struct hclge_vport *vport = &hdev->vport[0];
10431 	struct hnae3_handle *handle = &vport->nic;
10432 
10433 	hclge_restore_mac_table_common(vport);
10434 	hclge_restore_vport_vlan_table(vport);
10435 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10436 	hclge_restore_fd_entries(handle);
10437 }
10438 
10439 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10440 {
10441 	struct hclge_vport *vport = hclge_get_vport(handle);
10442 
10443 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10444 		vport->rxvlan_cfg.strip_tag1_en = false;
10445 		vport->rxvlan_cfg.strip_tag2_en = enable;
10446 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10447 	} else {
10448 		vport->rxvlan_cfg.strip_tag1_en = enable;
10449 		vport->rxvlan_cfg.strip_tag2_en = true;
10450 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10451 	}
10452 
10453 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10454 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10455 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10456 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10457 
10458 	return hclge_set_vlan_rx_offload_cfg(vport);
10459 }
10460 
10461 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10462 {
10463 	struct hclge_dev *hdev = vport->back;
10464 
10465 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10466 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10467 }
10468 
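/* When enabling the port-based VLAN, remove the vport's VLAN list entries
 * and VLAN 0 from hardware and install the new port-based VLAN as the only
 * filter entry; when disabling it, restore VLAN 0 and the VLAN list entries
 * and remove the old port-based VLAN entry.
 */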
10469 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10470 					    u16 port_base_vlan_state,
10471 					    struct hclge_vlan_info *new_info,
10472 					    struct hclge_vlan_info *old_info)
10473 {
10474 	struct hclge_dev *hdev = vport->back;
10475 	int ret;
10476 
10477 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10478 		hclge_rm_vport_all_vlan_table(vport, false);
10479 		/* force clear VLAN 0 */
10480 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10481 		if (ret)
10482 			return ret;
10483 		return hclge_set_vlan_filter_hw(hdev,
10484 						 htons(new_info->vlan_proto),
10485 						 vport->vport_id,
10486 						 new_info->vlan_tag,
10487 						 false);
10488 	}
10489 
10490 	/* force add VLAN 0 */
10491 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10492 	if (ret)
10493 		return ret;
10494 
10495 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10496 				       vport->vport_id, old_info->vlan_tag,
10497 				       true);
10498 	if (ret)
10499 		return ret;
10500 
10501 	return hclge_add_vport_all_vlan_table(vport);
10502 }
10503 
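/* The hardware filter entry only needs updating when the VLAN tag changes,
 * or when VLAN 0 toggles between "no port-based VLAN" (qos 0) and
 * priority-only tagging; a qos change between two non-zero values keeps the
 * same VLAN 0 filter entry.
 */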
10504 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10505 					  const struct hclge_vlan_info *old_cfg)
10506 {
10507 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10508 		return true;
10509 
10510 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10511 		return true;
10512 
10513 	return false;
10514 }
10515 
10516 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10517 				    struct hclge_vlan_info *vlan_info)
10518 {
10519 	struct hnae3_handle *nic = &vport->nic;
10520 	struct hclge_vlan_info *old_vlan_info;
10521 	struct hclge_dev *hdev = vport->back;
10522 	int ret;
10523 
10524 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10525 
10526 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10527 				     vlan_info->qos);
10528 	if (ret)
10529 		return ret;
10530 
10531 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10532 		goto out;
10533 
10534 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10535 		/* add new VLAN tag */
10536 		ret = hclge_set_vlan_filter_hw(hdev,
10537 					       htons(vlan_info->vlan_proto),
10538 					       vport->vport_id,
10539 					       vlan_info->vlan_tag,
10540 					       false);
10541 		if (ret)
10542 			return ret;
10543 
10544 		/* remove old VLAN tag */
10545 		if (old_vlan_info->vlan_tag == 0)
10546 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10547 						       true, 0);
10548 		else
10549 			ret = hclge_set_vlan_filter_hw(hdev,
10550 						       htons(ETH_P_8021Q),
10551 						       vport->vport_id,
10552 						       old_vlan_info->vlan_tag,
10553 						       true);
10554 		if (ret) {
10555 			dev_err(&hdev->pdev->dev,
10556 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10557 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10558 			return ret;
10559 		}
10560 
10561 		goto out;
10562 	}
10563 
10564 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10565 					       old_vlan_info);
10566 	if (ret)
10567 		return ret;
10568 
10569 out:
10570 	vport->port_base_vlan_cfg.state = state;
10571 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10572 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10573 	else
10574 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10575 
10576 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10577 	hclge_set_vport_vlan_fltr_change(vport);
10578 
10579 	return 0;
10580 }
10581 
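/* Map the requested vlan/qos pair onto a port-based VLAN state transition:
 * enable, disable, modify, or no change.
 */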
10582 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10583 					  enum hnae3_port_base_vlan_state state,
10584 					  u16 vlan, u8 qos)
10585 {
10586 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10587 		if (!vlan && !qos)
10588 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10589 
10590 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10591 	}
10592 
10593 	if (!vlan && !qos)
10594 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10595 
10596 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10597 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10598 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10599 
10600 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10601 }
10602 
10603 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10604 				    u16 vlan, u8 qos, __be16 proto)
10605 {
10606 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10607 	struct hclge_vport *vport = hclge_get_vport(handle);
10608 	struct hclge_dev *hdev = vport->back;
10609 	struct hclge_vlan_info vlan_info;
10610 	u16 state;
10611 	int ret;
10612 
10613 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10614 		return -EOPNOTSUPP;
10615 
10616 	vport = hclge_get_vf_vport(hdev, vfid);
10617 	if (!vport)
10618 		return -EINVAL;
10619 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10621 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10622 		return -EINVAL;
10623 	if (proto != htons(ETH_P_8021Q))
10624 		return -EPROTONOSUPPORT;
10625 
10626 	state = hclge_get_port_base_vlan_state(vport,
10627 					       vport->port_base_vlan_cfg.state,
10628 					       vlan, qos);
10629 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10630 		return 0;
10631 
10632 	vlan_info.vlan_tag = vlan;
10633 	vlan_info.qos = qos;
10634 	vlan_info.vlan_proto = ntohs(proto);
10635 
10636 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10637 	if (ret) {
10638 		dev_err(&hdev->pdev->dev,
10639 			"failed to update port base vlan for vf %d, ret = %d\n",
10640 			vfid, ret);
10641 		return ret;
10642 	}
10643 
	/* for DEVICE_VERSION_V3, the VF doesn't need to know about the
	 * port-based VLAN state.
10646 	 */
10647 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10648 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10649 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10650 						  vport->vport_id, state,
10651 						  &vlan_info);
10652 
10653 	return 0;
10654 }
10655 
10656 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10657 {
10658 	struct hclge_vlan_info *vlan_info;
10659 	struct hclge_vport *vport;
10660 	int ret;
10661 	int vf;
10662 
	/* clear the port-based vlan for all VFs */
10664 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10665 		vport = &hdev->vport[vf];
10666 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10667 
10668 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10669 					       vport->vport_id,
10670 					       vlan_info->vlan_tag, true);
10671 		if (ret)
10672 			dev_err(&hdev->pdev->dev,
10673 				"failed to clear vf vlan for vf%d, ret = %d\n",
10674 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10675 	}
10676 }
10677 
10678 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10679 			  u16 vlan_id, bool is_kill)
10680 {
10681 	struct hclge_vport *vport = hclge_get_vport(handle);
10682 	struct hclge_dev *hdev = vport->back;
10683 	bool writen_to_tbl = false;
10684 	int ret = 0;
10685 
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the vlan id, and
	 * remove it after the reset finishes.
10689 	 */
10690 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10691 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10692 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10693 		return -EBUSY;
10694 	}
10695 
	/* when port-based vlan is enabled, we use the port-based vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one; we
	 * just update the vport vlan list. The vlan ids in the vlan list are
	 * not written to the vlan filter table until port-based vlan is
	 * disabled.
10701 	 */
10702 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10703 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10704 					       vlan_id, is_kill);
10705 		writen_to_tbl = true;
10706 	}
10707 
10708 	if (!ret) {
10709 		if (is_kill)
10710 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10711 		else
10712 			hclge_add_vport_vlan_table(vport, vlan_id,
10713 						   writen_to_tbl);
10714 	} else if (is_kill) {
		/* when removing the hw vlan filter fails, record the vlan id
		 * and try to remove it from hw later, to stay consistent
		 * with the stack
10718 		 */
10719 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10720 	}
10721 
10722 	hclge_set_vport_vlan_fltr_change(vport);
10723 
10724 	return ret;
10725 }
10726 
10727 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10728 {
10729 	struct hclge_vport *vport;
10730 	int ret;
10731 	u16 i;
10732 
10733 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10734 		vport = &hdev->vport[i];
10735 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10736 					&vport->state))
10737 			continue;
10738 
10739 		ret = hclge_enable_vport_vlan_filter(vport,
10740 						     vport->req_vlan_fltr_en);
10741 		if (ret) {
10742 			dev_err(&hdev->pdev->dev,
10743 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10744 				vport->vport_id, ret);
10745 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10746 				&vport->state);
10747 			return;
10748 		}
10749 	}
10750 }
10751 
10752 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10753 {
10754 #define HCLGE_MAX_SYNC_COUNT	60
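/* bounds the number of hardware deletions handled per service-task pass;
 * entries left in vlan_del_fail_bmap are retried on a later pass
 */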
10755 
10756 	int i, ret, sync_cnt = 0;
10757 	u16 vlan_id;
10758 
	/* retry failed vlan deletions for all vports, including the PF */
10760 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10761 		struct hclge_vport *vport = &hdev->vport[i];
10762 
10763 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10764 					 VLAN_N_VID);
10765 		while (vlan_id != VLAN_N_VID) {
10766 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10767 						       vport->vport_id, vlan_id,
10768 						       true);
10769 			if (ret && ret != -EINVAL)
10770 				return;
10771 
10772 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10773 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10774 			hclge_set_vport_vlan_fltr_change(vport);
10775 
10776 			sync_cnt++;
10777 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10778 				return;
10779 
10780 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10781 						 VLAN_N_VID);
10782 		}
10783 	}
10784 
10785 	hclge_sync_vlan_fltr_state(hdev);
10786 }
10787 
10788 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10789 {
10790 	struct hclge_config_max_frm_size_cmd *req;
10791 	struct hclge_desc desc;
10792 
10793 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10794 
10795 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10796 	req->max_frm_size = cpu_to_le16(new_mps);
10797 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10798 
10799 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10800 }
10801 
10802 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10803 {
10804 	struct hclge_vport *vport = hclge_get_vport(handle);
10805 
10806 	return hclge_set_vport_mtu(vport, new_mtu);
10807 }
10808 
10809 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10810 {
10811 	struct hclge_dev *hdev = vport->back;
10812 	int i, max_frm_size, ret;
10813 
	/* HW supports two layers of vlan tags */
10815 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
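	/* e.g. a 1500 byte MTU gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
	 * 2 * 4 (VLAN_HLEN) = 1526 bytes
	 */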
10816 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10817 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10818 		return -EINVAL;
10819 
10820 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10821 	mutex_lock(&hdev->vport_lock);
10822 	/* VF's mps must fit within hdev->mps */
10823 	if (vport->vport_id && max_frm_size > hdev->mps) {
10824 		mutex_unlock(&hdev->vport_lock);
10825 		return -EINVAL;
10826 	} else if (vport->vport_id) {
10827 		vport->mps = max_frm_size;
10828 		mutex_unlock(&hdev->vport_lock);
10829 		return 0;
10830 	}
10831 
	/* PF's mps must not be less than any VF's mps */
10833 	for (i = 1; i < hdev->num_alloc_vport; i++)
10834 		if (max_frm_size < hdev->vport[i].mps) {
10835 			mutex_unlock(&hdev->vport_lock);
10836 			return -EINVAL;
10837 		}
10838 
10839 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10840 
10841 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10842 	if (ret) {
10843 		dev_err(&hdev->pdev->dev,
10844 			"Change mtu fail, ret =%d\n", ret);
10845 		goto out;
10846 	}
10847 
10848 	hdev->mps = max_frm_size;
10849 	vport->mps = max_frm_size;
10850 
10851 	ret = hclge_buffer_alloc(hdev);
10852 	if (ret)
10853 		dev_err(&hdev->pdev->dev,
10854 			"Allocate buffer fail, ret =%d\n", ret);
10855 
10856 out:
10857 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10858 	mutex_unlock(&hdev->vport_lock);
10859 	return ret;
10860 }
10861 
10862 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10863 				    bool enable)
10864 {
10865 	struct hclge_reset_tqp_queue_cmd *req;
10866 	struct hclge_desc desc;
10867 	int ret;
10868 
10869 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10870 
10871 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10872 	req->tqp_id = cpu_to_le16(queue_id);
10873 	if (enable)
10874 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10875 
10876 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10877 	if (ret) {
10878 		dev_err(&hdev->pdev->dev,
10879 			"Send tqp reset cmd error, status =%d\n", ret);
10880 		return ret;
10881 	}
10882 
10883 	return 0;
10884 }
10885 
10886 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10887 				  u8 *reset_status)
10888 {
10889 	struct hclge_reset_tqp_queue_cmd *req;
10890 	struct hclge_desc desc;
10891 	int ret;
10892 
10893 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10894 
10895 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10896 	req->tqp_id = cpu_to_le16(queue_id);
10897 
10898 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10899 	if (ret) {
10900 		dev_err(&hdev->pdev->dev,
10901 			"Get reset status error, status =%d\n", ret);
10902 		return ret;
10903 	}
10904 
10905 	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10906 
10907 	return 0;
10908 }
10909 
10910 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10911 {
10912 	struct hnae3_queue *queue;
10913 	struct hclge_tqp *tqp;
10914 
10915 	queue = handle->kinfo.tqp[queue_id];
10916 	tqp = container_of(queue, struct hclge_tqp, q);
10917 
10918 	return tqp->index;
10919 }
10920 
10921 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10922 {
10923 	struct hclge_vport *vport = hclge_get_vport(handle);
10924 	struct hclge_dev *hdev = vport->back;
10925 	u16 reset_try_times = 0;
10926 	u8 reset_status;
10927 	u16 queue_gid;
10928 	int ret;
10929 	u16 i;
10930 
10931 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10932 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10933 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10934 		if (ret) {
10935 			dev_err(&hdev->pdev->dev,
10936 				"failed to send reset tqp cmd, ret = %d\n",
10937 				ret);
10938 			return ret;
10939 		}
10940 
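		/* poll HCLGE_OPC_RESET_TQP_QUEUE until hardware reports the
		 * reset status bit, or give up after
		 * HCLGE_TQP_RESET_TRY_TIMES attempts
		 */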
10941 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10942 			ret = hclge_get_reset_status(hdev, queue_gid,
10943 						     &reset_status);
10944 			if (ret)
10945 				return ret;
10946 
10947 			if (reset_status)
10948 				break;
10949 
10950 			/* Wait for tqp hw reset */
10951 			usleep_range(1000, 1200);
10952 		}
10953 
10954 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10955 			dev_err(&hdev->pdev->dev,
10956 				"wait for tqp hw reset timeout\n");
10957 			return -ETIME;
10958 		}
10959 
10960 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10961 		if (ret) {
10962 			dev_err(&hdev->pdev->dev,
10963 				"failed to deassert soft reset, ret = %d\n",
10964 				ret);
10965 			return ret;
10966 		}
10967 		reset_try_times = 0;
10968 	}
10969 	return 0;
10970 }
10971 
10972 static int hclge_reset_rcb(struct hnae3_handle *handle)
10973 {
10974 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10975 #define HCLGE_RESET_RCB_SUCCESS		1U
10976 
10977 	struct hclge_vport *vport = hclge_get_vport(handle);
10978 	struct hclge_dev *hdev = vport->back;
10979 	struct hclge_reset_cmd *req;
10980 	struct hclge_desc desc;
10981 	u8 return_status;
10982 	u16 queue_gid;
10983 	int ret;
10984 
10985 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10986 
10987 	req = (struct hclge_reset_cmd *)desc.data;
10988 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10989 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10990 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10991 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10992 
10993 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10994 	if (ret) {
10995 		dev_err(&hdev->pdev->dev,
10996 			"failed to send rcb reset cmd, ret = %d\n", ret);
10997 		return ret;
10998 	}
10999 
11000 	return_status = req->fun_reset_rcb_return_status;
11001 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
11002 		return 0;
11003 
11004 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
11005 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
11006 			return_status);
11007 		return -EIO;
11008 	}
11009 
11010 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
11011 	 * again to reset all tqps
11012 	 */
11013 	return hclge_reset_tqp_cmd(handle);
11014 }
11015 
11016 int hclge_reset_tqp(struct hnae3_handle *handle)
11017 {
11018 	struct hclge_vport *vport = hclge_get_vport(handle);
11019 	struct hclge_dev *hdev = vport->back;
11020 	int ret;
11021 
11022 	/* only need to disable PF's tqp */
11023 	if (!vport->vport_id) {
11024 		ret = hclge_tqp_enable(handle, false);
11025 		if (ret) {
11026 			dev_err(&hdev->pdev->dev,
11027 				"failed to disable tqp, ret = %d\n", ret);
11028 			return ret;
11029 		}
11030 	}
11031 
11032 	return hclge_reset_rcb(handle);
11033 }
11034 
11035 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
11036 {
11037 	struct hclge_vport *vport = hclge_get_vport(handle);
11038 	struct hclge_dev *hdev = vport->back;
11039 
11040 	return hdev->fw_version;
11041 }
11042 
11043 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11044 {
11045 	struct phy_device *phydev = hdev->hw.mac.phydev;
11046 
11047 	if (!phydev)
11048 		return;
11049 
11050 	phy_set_asym_pause(phydev, rx_en, tx_en);
11051 }
11052 
11053 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11054 {
11055 	int ret;
11056 
11057 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
11058 		return 0;
11059 
11060 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
11061 	if (ret)
11062 		dev_err(&hdev->pdev->dev,
11063 			"configure pauseparam error, ret = %d.\n", ret);
11064 
11065 	return ret;
11066 }
11067 
11068 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
11069 {
11070 	struct phy_device *phydev = hdev->hw.mac.phydev;
11071 	u16 remote_advertising = 0;
11072 	u16 local_advertising;
11073 	u32 rx_pause, tx_pause;
11074 	u8 flowctl;
11075 
11076 	if (!phydev->link || !phydev->autoneg)
11077 		return 0;
11078 
11079 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
11080 
11081 	if (phydev->pause)
11082 		remote_advertising = LPA_PAUSE_CAP;
11083 
11084 	if (phydev->asym_pause)
11085 		remote_advertising |= LPA_PAUSE_ASYM;
11086 
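	/* resolve the pause mode from the local and link-partner
	 * advertisements, as in standard flow-control autoneg resolution
	 */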
11087 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
11088 					   remote_advertising);
11089 	tx_pause = flowctl & FLOW_CTRL_TX;
11090 	rx_pause = flowctl & FLOW_CTRL_RX;
11091 
11092 	if (phydev->duplex == HCLGE_MAC_HALF) {
11093 		tx_pause = 0;
11094 		rx_pause = 0;
11095 	}
11096 
11097 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
11098 }
11099 
11100 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
11101 				 u32 *rx_en, u32 *tx_en)
11102 {
11103 	struct hclge_vport *vport = hclge_get_vport(handle);
11104 	struct hclge_dev *hdev = vport->back;
11105 	u8 media_type = hdev->hw.mac.media_type;
11106 
11107 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11108 		    hclge_get_autoneg(handle) : 0;
11109 
11110 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11111 		*rx_en = 0;
11112 		*tx_en = 0;
11113 		return;
11114 	}
11115 
11116 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11117 		*rx_en = 1;
11118 		*tx_en = 0;
11119 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11120 		*tx_en = 1;
11121 		*rx_en = 0;
11122 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11123 		*rx_en = 1;
11124 		*tx_en = 1;
11125 	} else {
11126 		*rx_en = 0;
11127 		*tx_en = 0;
11128 	}
11129 }
11130 
11131 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11132 					 u32 rx_en, u32 tx_en)
11133 {
11134 	if (rx_en && tx_en)
11135 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
11136 	else if (rx_en && !tx_en)
11137 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11138 	else if (!rx_en && tx_en)
11139 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11140 	else
11141 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
11142 
11143 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11144 }
11145 
11146 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11147 				u32 rx_en, u32 tx_en)
11148 {
11149 	struct hclge_vport *vport = hclge_get_vport(handle);
11150 	struct hclge_dev *hdev = vport->back;
11151 	struct phy_device *phydev = hdev->hw.mac.phydev;
11152 	u32 fc_autoneg;
11153 
11154 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11155 		fc_autoneg = hclge_get_autoneg(handle);
11156 		if (auto_neg != fc_autoneg) {
11157 			dev_info(&hdev->pdev->dev,
11158 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11159 			return -EOPNOTSUPP;
11160 		}
11161 	}
11162 
11163 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11164 		dev_info(&hdev->pdev->dev,
11165 			 "Priority flow control enabled. Cannot set link flow control.\n");
11166 		return -EOPNOTSUPP;
11167 	}
11168 
11169 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11170 
11171 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11172 
11173 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11174 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11175 
11176 	if (phydev)
11177 		return phy_start_aneg(phydev);
11178 
11179 	return -EOPNOTSUPP;
11180 }
11181 
11182 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11183 					  u8 *auto_neg, u32 *speed, u8 *duplex)
11184 {
11185 	struct hclge_vport *vport = hclge_get_vport(handle);
11186 	struct hclge_dev *hdev = vport->back;
11187 
11188 	if (speed)
11189 		*speed = hdev->hw.mac.speed;
11190 	if (duplex)
11191 		*duplex = hdev->hw.mac.duplex;
11192 	if (auto_neg)
11193 		*auto_neg = hdev->hw.mac.autoneg;
11194 }
11195 
11196 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11197 				 u8 *module_type)
11198 {
11199 	struct hclge_vport *vport = hclge_get_vport(handle);
11200 	struct hclge_dev *hdev = vport->back;
11201 
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type, to ensure the media
	 * information is correct.
11205 	 */
11206 	hclge_update_port_info(hdev);
11207 
11208 	if (media_type)
11209 		*media_type = hdev->hw.mac.media_type;
11210 
11211 	if (module_type)
11212 		*module_type = hdev->hw.mac.module_type;
11213 }
11214 
11215 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11216 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11217 {
11218 	struct hclge_vport *vport = hclge_get_vport(handle);
11219 	struct hclge_dev *hdev = vport->back;
11220 	struct phy_device *phydev = hdev->hw.mac.phydev;
11221 	int mdix_ctrl, mdix, is_resolved;
11222 	unsigned int retval;
11223 
11224 	if (!phydev) {
11225 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11226 		*tp_mdix = ETH_TP_MDI_INVALID;
11227 		return;
11228 	}
11229 
11230 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11231 
11232 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11233 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11234 				    HCLGE_PHY_MDIX_CTRL_S);
11235 
11236 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11237 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11238 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11239 
11240 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11241 
11242 	switch (mdix_ctrl) {
11243 	case 0x0:
11244 		*tp_mdix_ctrl = ETH_TP_MDI;
11245 		break;
11246 	case 0x1:
11247 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11248 		break;
11249 	case 0x3:
11250 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11251 		break;
11252 	default:
11253 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11254 		break;
11255 	}
11256 
11257 	if (!is_resolved)
11258 		*tp_mdix = ETH_TP_MDI_INVALID;
11259 	else if (mdix)
11260 		*tp_mdix = ETH_TP_MDI_X;
11261 	else
11262 		*tp_mdix = ETH_TP_MDI;
11263 }
11264 
11265 static void hclge_info_show(struct hclge_dev *hdev)
11266 {
11267 	struct device *dev = &hdev->pdev->dev;
11268 
11269 	dev_info(dev, "PF info begin:\n");
11270 
11271 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11272 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11273 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11274 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11275 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11276 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11277 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11278 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11279 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11280 	dev_info(dev, "This is %s PF\n",
11281 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11282 	dev_info(dev, "DCB %s\n",
11283 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11284 	dev_info(dev, "MQPRIO %s\n",
11285 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11286 	dev_info(dev, "Default tx spare buffer size: %u\n",
11287 		 hdev->tx_spare_buf_size);
11288 
11289 	dev_info(dev, "PF info end.\n");
11290 }
11291 
11292 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11293 					  struct hclge_vport *vport)
11294 {
11295 	struct hnae3_client *client = vport->nic.client;
11296 	struct hclge_dev *hdev = ae_dev->priv;
11297 	int rst_cnt = hdev->rst_stats.reset_cnt;
11298 	int ret;
11299 
11300 	ret = client->ops->init_instance(&vport->nic);
11301 	if (ret)
11302 		return ret;
11303 
11304 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
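	/* if a reset started while the nic instance was being initialized,
	 * the instance is stale; roll it back and return -EBUSY
	 */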
11305 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11306 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11307 		ret = -EBUSY;
11308 		goto init_nic_err;
11309 	}
11310 
11311 	/* Enable nic hw error interrupts */
11312 	ret = hclge_config_nic_hw_error(hdev, true);
11313 	if (ret) {
11314 		dev_err(&ae_dev->pdev->dev,
11315 			"fail(%d) to enable hw error interrupts\n", ret);
11316 		goto init_nic_err;
11317 	}
11318 
11319 	hnae3_set_client_init_flag(client, ae_dev, 1);
11320 
11321 	if (netif_msg_drv(&hdev->vport->nic))
11322 		hclge_info_show(hdev);
11323 
11324 	return ret;
11325 
11326 init_nic_err:
11327 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11328 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11329 		msleep(HCLGE_WAIT_RESET_DONE);
11330 
11331 	client->ops->uninit_instance(&vport->nic, 0);
11332 
11333 	return ret;
11334 }
11335 
11336 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11337 					   struct hclge_vport *vport)
11338 {
11339 	struct hclge_dev *hdev = ae_dev->priv;
11340 	struct hnae3_client *client;
11341 	int rst_cnt;
11342 	int ret;
11343 
11344 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11345 	    !hdev->nic_client)
11346 		return 0;
11347 
11348 	client = hdev->roce_client;
11349 	ret = hclge_init_roce_base_info(vport);
11350 	if (ret)
11351 		return ret;
11352 
11353 	rst_cnt = hdev->rst_stats.reset_cnt;
11354 	ret = client->ops->init_instance(&vport->roce);
11355 	if (ret)
11356 		return ret;
11357 
11358 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11359 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11360 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11361 		ret = -EBUSY;
11362 		goto init_roce_err;
11363 	}
11364 
11365 	/* Enable roce ras interrupts */
11366 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11367 	if (ret) {
11368 		dev_err(&ae_dev->pdev->dev,
11369 			"fail(%d) to enable roce ras interrupts\n", ret);
11370 		goto init_roce_err;
11371 	}
11372 
11373 	hnae3_set_client_init_flag(client, ae_dev, 1);
11374 
11375 	return 0;
11376 
11377 init_roce_err:
11378 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11379 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11380 		msleep(HCLGE_WAIT_RESET_DONE);
11381 
11382 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11383 
11384 	return ret;
11385 }
11386 
11387 static int hclge_init_client_instance(struct hnae3_client *client,
11388 				      struct hnae3_ae_dev *ae_dev)
11389 {
11390 	struct hclge_dev *hdev = ae_dev->priv;
11391 	struct hclge_vport *vport = &hdev->vport[0];
11392 	int ret;
11393 
11394 	switch (client->type) {
11395 	case HNAE3_CLIENT_KNIC:
11396 		hdev->nic_client = client;
11397 		vport->nic.client = client;
11398 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11399 		if (ret)
11400 			goto clear_nic;
11401 
11402 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11403 		if (ret)
11404 			goto clear_roce;
11405 
11406 		break;
11407 	case HNAE3_CLIENT_ROCE:
11408 		if (hnae3_dev_roce_supported(hdev)) {
11409 			hdev->roce_client = client;
11410 			vport->roce.client = client;
11411 		}
11412 
11413 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11414 		if (ret)
11415 			goto clear_roce;
11416 
11417 		break;
11418 	default:
11419 		return -EINVAL;
11420 	}
11421 
11422 	return 0;
11423 
11424 clear_nic:
11425 	hdev->nic_client = NULL;
11426 	vport->nic.client = NULL;
11427 	return ret;
11428 clear_roce:
11429 	hdev->roce_client = NULL;
11430 	vport->roce.client = NULL;
11431 	return ret;
11432 }
11433 
11434 static void hclge_uninit_client_instance(struct hnae3_client *client,
11435 					 struct hnae3_ae_dev *ae_dev)
11436 {
11437 	struct hclge_dev *hdev = ae_dev->priv;
11438 	struct hclge_vport *vport = &hdev->vport[0];
11439 
11440 	if (hdev->roce_client) {
11441 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11442 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11443 			msleep(HCLGE_WAIT_RESET_DONE);
11444 
11445 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11446 		hdev->roce_client = NULL;
11447 		vport->roce.client = NULL;
11448 	}
11449 	if (client->type == HNAE3_CLIENT_ROCE)
11450 		return;
11451 	if (hdev->nic_client && client->ops->uninit_instance) {
11452 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11453 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11454 			msleep(HCLGE_WAIT_RESET_DONE);
11455 
11456 		client->ops->uninit_instance(&vport->nic, 0);
11457 		hdev->nic_client = NULL;
11458 		vport->nic.client = NULL;
11459 	}
11460 }
11461 
11462 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11463 {
11464 #define HCLGE_MEM_BAR		4
11465 
11466 	struct pci_dev *pdev = hdev->pdev;
11467 	struct hclge_hw *hw = &hdev->hw;
11468 
	/* if the device does not have device memory, return directly */
11470 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11471 		return 0;
11472 
11473 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11474 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11475 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11476 	if (!hw->mem_base) {
11477 		dev_err(&pdev->dev, "failed to map device memory\n");
11478 		return -EFAULT;
11479 	}
11480 
11481 	return 0;
11482 }
11483 
11484 static int hclge_pci_init(struct hclge_dev *hdev)
11485 {
11486 	struct pci_dev *pdev = hdev->pdev;
11487 	struct hclge_hw *hw;
11488 	int ret;
11489 
11490 	ret = pci_enable_device(pdev);
11491 	if (ret) {
11492 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11493 		return ret;
11494 	}
11495 
11496 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11497 	if (ret) {
11498 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11499 		if (ret) {
11500 			dev_err(&pdev->dev,
11501 				"can't set consistent PCI DMA");
11502 			goto err_disable_device;
11503 		}
11504 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11505 	}
11506 
11507 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11508 	if (ret) {
11509 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11510 		goto err_disable_device;
11511 	}
11512 
11513 	pci_set_master(pdev);
11514 	hw = &hdev->hw;
11515 	hw->io_base = pcim_iomap(pdev, 2, 0);
11516 	if (!hw->io_base) {
11517 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11518 		ret = -ENOMEM;
11519 		goto err_clr_master;
11520 	}
11521 
11522 	ret = hclge_dev_mem_map(hdev);
11523 	if (ret)
11524 		goto err_unmap_io_base;
11525 
11526 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11527 
11528 	return 0;
11529 
11530 err_unmap_io_base:
11531 	pcim_iounmap(pdev, hdev->hw.io_base);
11532 err_clr_master:
11533 	pci_clear_master(pdev);
11534 	pci_release_regions(pdev);
11535 err_disable_device:
11536 	pci_disable_device(pdev);
11537 
11538 	return ret;
11539 }
11540 
11541 static void hclge_pci_uninit(struct hclge_dev *hdev)
11542 {
11543 	struct pci_dev *pdev = hdev->pdev;
11544 
11545 	if (hdev->hw.mem_base)
11546 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11547 
11548 	pcim_iounmap(pdev, hdev->hw.io_base);
11549 	pci_free_irq_vectors(pdev);
11550 	pci_clear_master(pdev);
11551 	pci_release_mem_regions(pdev);
11552 	pci_disable_device(pdev);
11553 }
11554 
11555 static void hclge_state_init(struct hclge_dev *hdev)
11556 {
11557 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11558 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11559 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11560 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11561 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11562 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11563 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11564 }
11565 
11566 static void hclge_state_uninit(struct hclge_dev *hdev)
11567 {
11568 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11569 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11570 
11571 	if (hdev->reset_timer.function)
11572 		del_timer_sync(&hdev->reset_timer);
11573 	if (hdev->service_task.work.func)
11574 		cancel_delayed_work_sync(&hdev->service_task);
11575 }
11576 
11577 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11578 					enum hnae3_reset_type rst_type)
11579 {
11580 #define HCLGE_RESET_RETRY_WAIT_MS	500
11581 #define HCLGE_RESET_RETRY_CNT	5
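/* retry the prepare step while a reset is still pending, or up to
 * HCLGE_RESET_RETRY_CNT times when it fails for another reason, waiting
 * HCLGE_RESET_RETRY_WAIT_MS between attempts
 */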
11582 
11583 	struct hclge_dev *hdev = ae_dev->priv;
11584 	int retry_cnt = 0;
11585 	int ret;
11586 
11587 retry:
11588 	down(&hdev->reset_sem);
11589 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11590 	hdev->reset_type = rst_type;
11591 	ret = hclge_reset_prepare(hdev);
11592 	if (ret || hdev->reset_pending) {
11593 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11594 			ret);
11595 		if (hdev->reset_pending ||
11596 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11597 			dev_err(&hdev->pdev->dev,
11598 				"reset_pending:0x%lx, retry_cnt:%d\n",
11599 				hdev->reset_pending, retry_cnt);
11600 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11601 			up(&hdev->reset_sem);
11602 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11603 			goto retry;
11604 		}
11605 	}
11606 
11607 	/* disable misc vector before reset done */
11608 	hclge_enable_vector(&hdev->misc_vector, false);
11609 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11610 
11611 	if (hdev->reset_type == HNAE3_FLR_RESET)
11612 		hdev->rst_stats.flr_rst_cnt++;
11613 }
11614 
11615 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11616 {
11617 	struct hclge_dev *hdev = ae_dev->priv;
11618 	int ret;
11619 
11620 	hclge_enable_vector(&hdev->misc_vector, true);
11621 
11622 	ret = hclge_reset_rebuild(hdev);
11623 	if (ret)
11624 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11625 
11626 	hdev->reset_type = HNAE3_NONE_RESET;
11627 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11628 	up(&hdev->reset_sem);
11629 }
11630 
11631 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11632 {
11633 	u16 i;
11634 
11635 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11636 		struct hclge_vport *vport = &hdev->vport[i];
11637 		int ret;
11638 
11639 		 /* Send cmd to clear vport's FUNC_RST_ING */
11640 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11641 		if (ret)
11642 			dev_warn(&hdev->pdev->dev,
11643 				 "clear vport(%u) rst failed %d!\n",
11644 				 vport->vport_id, ret);
11645 	}
11646 }
11647 
11648 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11649 {
11650 	struct hclge_desc desc;
11651 	int ret;
11652 
11653 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11654 
11655 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will
	 * fail with older firmware. The error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, so to keep the
	 * code backward compatible we override this value and return
	 * success.
11661 	 */
11662 	if (ret && ret != -EOPNOTSUPP) {
11663 		dev_err(&hdev->pdev->dev,
11664 			"failed to clear hw resource, ret = %d\n", ret);
11665 		return ret;
11666 	}
11667 	return 0;
11668 }
11669 
11670 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11671 {
11672 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11673 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11674 }
11675 
11676 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11677 {
11678 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11679 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11680 }
11681 
11682 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11683 {
11684 	struct pci_dev *pdev = ae_dev->pdev;
11685 	struct hclge_dev *hdev;
11686 	int ret;
11687 
11688 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11689 	if (!hdev)
11690 		return -ENOMEM;
11691 
11692 	hdev->pdev = pdev;
11693 	hdev->ae_dev = ae_dev;
11694 	hdev->reset_type = HNAE3_NONE_RESET;
11695 	hdev->reset_level = HNAE3_FUNC_RESET;
11696 	ae_dev->priv = hdev;
11697 
	/* HW supports two layers of vlan tags */
11699 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11700 
11701 	mutex_init(&hdev->vport_lock);
11702 	spin_lock_init(&hdev->fd_rule_lock);
11703 	sema_init(&hdev->reset_sem, 1);
11704 
11705 	ret = hclge_pci_init(hdev);
11706 	if (ret)
11707 		goto out;
11708 
11709 	ret = hclge_devlink_init(hdev);
11710 	if (ret)
11711 		goto err_pci_uninit;
11712 
11713 	/* Firmware command queue initialize */
11714 	ret = hclge_cmd_queue_init(hdev);
11715 	if (ret)
11716 		goto err_devlink_uninit;
11717 
11718 	/* Firmware command initialize */
11719 	ret = hclge_cmd_init(hdev);
11720 	if (ret)
11721 		goto err_cmd_uninit;
11722 
11723 	ret  = hclge_clear_hw_resource(hdev);
11724 	if (ret)
11725 		goto err_cmd_uninit;
11726 
11727 	ret = hclge_get_cap(hdev);
11728 	if (ret)
11729 		goto err_cmd_uninit;
11730 
11731 	ret = hclge_query_dev_specs(hdev);
11732 	if (ret) {
11733 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11734 			ret);
11735 		goto err_cmd_uninit;
11736 	}
11737 
11738 	ret = hclge_configure(hdev);
11739 	if (ret) {
11740 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11741 		goto err_cmd_uninit;
11742 	}
11743 
11744 	ret = hclge_init_msi(hdev);
11745 	if (ret) {
11746 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11747 		goto err_cmd_uninit;
11748 	}
11749 
11750 	ret = hclge_misc_irq_init(hdev);
11751 	if (ret)
11752 		goto err_msi_uninit;
11753 
11754 	ret = hclge_alloc_tqps(hdev);
11755 	if (ret) {
11756 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11757 		goto err_msi_irq_uninit;
11758 	}
11759 
11760 	ret = hclge_alloc_vport(hdev);
11761 	if (ret)
11762 		goto err_msi_irq_uninit;
11763 
11764 	ret = hclge_map_tqp(hdev);
11765 	if (ret)
11766 		goto err_msi_irq_uninit;
11767 
11768 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11769 	    !hnae3_dev_phy_imp_supported(hdev)) {
11770 		ret = hclge_mac_mdio_config(hdev);
11771 		if (ret)
11772 			goto err_msi_irq_uninit;
11773 	}
11774 
11775 	ret = hclge_init_umv_space(hdev);
11776 	if (ret)
11777 		goto err_mdiobus_unreg;
11778 
11779 	ret = hclge_mac_init(hdev);
11780 	if (ret) {
11781 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11782 		goto err_mdiobus_unreg;
11783 	}
11784 
11785 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11786 	if (ret) {
11787 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11788 		goto err_mdiobus_unreg;
11789 	}
11790 
11791 	ret = hclge_config_gro(hdev);
11792 	if (ret)
11793 		goto err_mdiobus_unreg;
11794 
11795 	ret = hclge_init_vlan_config(hdev);
11796 	if (ret) {
11797 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11798 		goto err_mdiobus_unreg;
11799 	}
11800 
11801 	ret = hclge_tm_schd_init(hdev);
11802 	if (ret) {
11803 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11804 		goto err_mdiobus_unreg;
11805 	}
11806 
11807 	ret = hclge_rss_init_cfg(hdev);
11808 	if (ret) {
11809 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11810 		goto err_mdiobus_unreg;
11811 	}
11812 
11813 	ret = hclge_rss_init_hw(hdev);
11814 	if (ret) {
11815 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11816 		goto err_mdiobus_unreg;
11817 	}
11818 
11819 	ret = init_mgr_tbl(hdev);
11820 	if (ret) {
11821 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11822 		goto err_mdiobus_unreg;
11823 	}
11824 
11825 	ret = hclge_init_fd_config(hdev);
11826 	if (ret) {
11827 		dev_err(&pdev->dev,
11828 			"fd table init fail, ret=%d\n", ret);
11829 		goto err_mdiobus_unreg;
11830 	}
11831 
11832 	ret = hclge_ptp_init(hdev);
11833 	if (ret)
11834 		goto err_mdiobus_unreg;
11835 
11836 	INIT_KFIFO(hdev->mac_tnl_log);
11837 
11838 	hclge_dcb_ops_set(hdev);
11839 
11840 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11841 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11842 
	/* Set up affinity after the service timer setup, because add_timer_on
	 * is called in the affinity notifier.
11845 	 */
11846 	hclge_misc_affinity_setup(hdev);
11847 
11848 	hclge_clear_all_event_cause(hdev);
11849 	hclge_clear_resetting_state(hdev);
11850 
	/* Log and clear the hw errors that have already occurred */
11852 	if (hnae3_dev_ras_imp_supported(hdev))
11853 		hclge_handle_occurred_error(hdev);
11854 	else
11855 		hclge_handle_all_hns_hw_errors(ae_dev);
11856 
	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on a PF may affect the pending
	 * initialization of other PFs
11859 	 */
11860 	if (ae_dev->hw_err_reset_req) {
11861 		enum hnae3_reset_type reset_level;
11862 
11863 		reset_level = hclge_get_reset_level(ae_dev,
11864 						    &ae_dev->hw_err_reset_req);
11865 		hclge_set_def_reset_request(ae_dev, reset_level);
11866 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11867 	}
11868 
11869 	hclge_init_rxd_adv_layout(hdev);
11870 
11871 	/* Enable MISC vector(vector0) */
11872 	hclge_enable_vector(&hdev->misc_vector, true);
11873 
11874 	hclge_state_init(hdev);
11875 	hdev->last_reset_time = jiffies;
11876 
11877 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11878 		 HCLGE_DRIVER_NAME);
11879 
11880 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11881 
11882 	return 0;
11883 
11884 err_mdiobus_unreg:
11885 	if (hdev->hw.mac.phydev)
11886 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11887 err_msi_irq_uninit:
11888 	hclge_misc_irq_uninit(hdev);
11889 err_msi_uninit:
11890 	pci_free_irq_vectors(pdev);
11891 err_cmd_uninit:
11892 	hclge_cmd_uninit(hdev);
11893 err_devlink_uninit:
11894 	hclge_devlink_uninit(hdev);
11895 err_pci_uninit:
11896 	pcim_iounmap(pdev, hdev->hw.io_base);
11897 	pci_clear_master(pdev);
11898 	pci_release_regions(pdev);
11899 	pci_disable_device(pdev);
11900 out:
11901 	mutex_destroy(&hdev->vport_lock);
11902 	return ret;
11903 }
11904 
11905 static void hclge_stats_clear(struct hclge_dev *hdev)
11906 {
11907 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11908 }
11909 
11910 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11911 {
11912 	return hclge_config_switch_param(hdev, vf, enable,
11913 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11914 }
11915 
11916 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11917 {
11918 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11919 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11920 					  enable, vf);
11921 }
11922 
11923 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11924 {
11925 	int ret;
11926 
11927 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11928 	if (ret) {
11929 		dev_err(&hdev->pdev->dev,
11930 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11931 			vf, enable ? "on" : "off", ret);
11932 		return ret;
11933 	}
11934 
11935 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11936 	if (ret)
11937 		dev_err(&hdev->pdev->dev,
11938 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11939 			vf, enable ? "on" : "off", ret);
11940 
11941 	return ret;
11942 }
11943 
11944 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11945 				 bool enable)
11946 {
11947 	struct hclge_vport *vport = hclge_get_vport(handle);
11948 	struct hclge_dev *hdev = vport->back;
11949 	u32 new_spoofchk = enable ? 1 : 0;
11950 	int ret;
11951 
11952 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11953 		return -EOPNOTSUPP;
11954 
11955 	vport = hclge_get_vf_vport(hdev, vf);
11956 	if (!vport)
11957 		return -EINVAL;
11958 
11959 	if (vport->vf_info.spoofchk == new_spoofchk)
11960 		return 0;
11961 
11962 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11963 		dev_warn(&hdev->pdev->dev,
11964 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11965 			 vf);
11966 	else if (enable && hclge_is_umv_space_full(vport, true))
11967 		dev_warn(&hdev->pdev->dev,
11968 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11969 			 vf);
11970 
11971 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11972 	if (ret)
11973 		return ret;
11974 
11975 	vport->vf_info.spoofchk = new_spoofchk;
11976 	return 0;
11977 }
11978 
11979 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11980 {
11981 	struct hclge_vport *vport = hdev->vport;
11982 	int ret;
11983 	int i;
11984 
11985 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11986 		return 0;
11987 
11988 	/* resume the vf spoof check state after reset */
11989 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11990 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11991 					       vport->vf_info.spoofchk);
11992 		if (ret)
11993 			return ret;
11994 
11995 		vport++;
11996 	}
11997 
11998 	return 0;
11999 }
12000 
12001 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
12002 {
12003 	struct hclge_vport *vport = hclge_get_vport(handle);
12004 	struct hclge_dev *hdev = vport->back;
12005 	u32 new_trusted = enable ? 1 : 0;
12006 
12007 	vport = hclge_get_vf_vport(hdev, vf);
12008 	if (!vport)
12009 		return -EINVAL;
12010 
12011 	if (vport->vf_info.trusted == new_trusted)
12012 		return 0;
12013 
12014 	vport->vf_info.trusted = new_trusted;
12015 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12016 	hclge_task_schedule(hdev, 0);
12017 
12018 	return 0;
12019 }
12020 
12021 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
12022 {
12023 	int ret;
12024 	int vf;
12025 
12026 	/* reset vf rate to default value */
12027 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12028 		struct hclge_vport *vport = &hdev->vport[vf];
12029 
12030 		vport->vf_info.max_tx_rate = 0;
12031 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
12032 		if (ret)
12033 			dev_err(&hdev->pdev->dev,
12034 				"vf%d failed to reset to default, ret=%d\n",
12035 				vf - HCLGE_VF_VPORT_START_NUM, ret);
12036 	}
12037 }
12038 
12039 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12040 				     int min_tx_rate, int max_tx_rate)
12041 {
12042 	if (min_tx_rate != 0 ||
12043 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12044 		dev_err(&hdev->pdev->dev,
12045 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12046 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12047 		return -EINVAL;
12048 	}
12049 
12050 	return 0;
12051 }
12052 
12053 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12054 			     int min_tx_rate, int max_tx_rate, bool force)
12055 {
12056 	struct hclge_vport *vport = hclge_get_vport(handle);
12057 	struct hclge_dev *hdev = vport->back;
12058 	int ret;
12059 
12060 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12061 	if (ret)
12062 		return ret;
12063 
12064 	vport = hclge_get_vf_vport(hdev, vf);
12065 	if (!vport)
12066 		return -EINVAL;
12067 
12068 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12069 		return 0;
12070 
12071 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12072 	if (ret)
12073 		return ret;
12074 
12075 	vport->vf_info.max_tx_rate = max_tx_rate;
12076 
12077 	return 0;
12078 }
12079 
12080 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12081 {
12082 	struct hnae3_handle *handle = &hdev->vport->nic;
12083 	struct hclge_vport *vport;
12084 	int ret;
12085 	int vf;
12086 
12087 	/* resume the vf max_tx_rate after reset */
12088 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12089 		vport = hclge_get_vf_vport(hdev, vf);
12090 		if (!vport)
12091 			return -EINVAL;
12092 
		/* zero means max rate; after reset, the firmware has already
		 * set it to max rate, so just continue.
12095 		 */
12096 		if (!vport->vf_info.max_tx_rate)
12097 			continue;
12098 
12099 		ret = hclge_set_vf_rate(handle, vf, 0,
12100 					vport->vf_info.max_tx_rate, true);
12101 		if (ret) {
12102 			dev_err(&hdev->pdev->dev,
12103 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
12104 				vf, vport->vf_info.max_tx_rate, ret);
12105 			return ret;
12106 		}
12107 	}
12108 
12109 	return 0;
12110 }
12111 
12112 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12113 {
12114 	struct hclge_vport *vport = hdev->vport;
12115 	int i;
12116 
12117 	for (i = 0; i < hdev->num_alloc_vport; i++) {
12118 		hclge_vport_stop(vport);
12119 		vport++;
12120 	}
12121 }
12122 
12123 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12124 {
12125 	struct hclge_dev *hdev = ae_dev->priv;
12126 	struct pci_dev *pdev = ae_dev->pdev;
12127 	int ret;
12128 
12129 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
12130 
12131 	hclge_stats_clear(hdev);
	/* NOTE: a pf reset doesn't need to clear or restore the pf and vf
	 * table entries, so the tables in memory should not be cleaned here.
12134 	 */
12135 	if (hdev->reset_type == HNAE3_IMP_RESET ||
12136 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
12137 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12138 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12139 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12140 		hclge_reset_umv_space(hdev);
12141 	}
12142 
12143 	ret = hclge_cmd_init(hdev);
12144 	if (ret) {
12145 		dev_err(&pdev->dev, "Cmd queue init failed\n");
12146 		return ret;
12147 	}
12148 
12149 	ret = hclge_map_tqp(hdev);
12150 	if (ret) {
12151 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12152 		return ret;
12153 	}
12154 
12155 	ret = hclge_mac_init(hdev);
12156 	if (ret) {
12157 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12158 		return ret;
12159 	}
12160 
12161 	ret = hclge_tp_port_init(hdev);
12162 	if (ret) {
12163 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12164 			ret);
12165 		return ret;
12166 	}
12167 
12168 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12169 	if (ret) {
12170 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12171 		return ret;
12172 	}
12173 
12174 	ret = hclge_config_gro(hdev);
12175 	if (ret)
12176 		return ret;
12177 
12178 	ret = hclge_init_vlan_config(hdev);
12179 	if (ret) {
12180 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12181 		return ret;
12182 	}
12183 
12184 	ret = hclge_tm_init_hw(hdev, true);
12185 	if (ret) {
12186 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12187 		return ret;
12188 	}
12189 
12190 	ret = hclge_rss_init_hw(hdev);
12191 	if (ret) {
12192 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12193 		return ret;
12194 	}
12195 
12196 	ret = init_mgr_tbl(hdev);
12197 	if (ret) {
12198 		dev_err(&pdev->dev,
12199 			"failed to reinit manager table, ret = %d\n", ret);
12200 		return ret;
12201 	}
12202 
12203 	ret = hclge_init_fd_config(hdev);
12204 	if (ret) {
12205 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12206 		return ret;
12207 	}
12208 
12209 	ret = hclge_ptp_init(hdev);
12210 	if (ret)
12211 		return ret;
12212 
	/* Log and clear the hw errors that have already occurred */
12214 	if (hnae3_dev_ras_imp_supported(hdev))
12215 		hclge_handle_occurred_error(hdev);
12216 	else
12217 		hclge_handle_all_hns_hw_errors(ae_dev);
12218 
12219 	/* Re-enable the hw error interrupts because
12220 	 * the interrupts get disabled on global reset.
12221 	 */
12222 	ret = hclge_config_nic_hw_error(hdev, true);
12223 	if (ret) {
12224 		dev_err(&pdev->dev,
12225 			"fail(%d) to re-enable NIC hw error interrupts\n",
12226 			ret);
12227 		return ret;
12228 	}
12229 
12230 	if (hdev->roce_client) {
12231 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12232 		if (ret) {
12233 			dev_err(&pdev->dev,
12234 				"fail(%d) to re-enable roce ras interrupts\n",
12235 				ret);
12236 			return ret;
12237 		}
12238 	}
12239 
12240 	hclge_reset_vport_state(hdev);
12241 	ret = hclge_reset_vport_spoofchk(hdev);
12242 	if (ret)
12243 		return ret;
12244 
12245 	ret = hclge_resume_vf_rate(hdev);
12246 	if (ret)
12247 		return ret;
12248 
12249 	hclge_init_rxd_adv_layout(hdev);
12250 
12251 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12252 		 HCLGE_DRIVER_NAME);
12253 
12254 	return 0;
12255 }
12256 
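/* Tear down the PF in roughly the reverse order of initialization: restore
 * VF rate and VLAN settings, stop the driver state machinery and PTP, free
 * the software tables, then quiesce the MISC vector and hw error interrupts
 * before the command queue, misc IRQ, devlink and PCI resources are released.
 */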
12257 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12258 {
12259 	struct hclge_dev *hdev = ae_dev->priv;
12260 	struct hclge_mac *mac = &hdev->hw.mac;
12261 
12262 	hclge_reset_vf_rate(hdev);
12263 	hclge_clear_vf_vlan(hdev);
12264 	hclge_misc_affinity_teardown(hdev);
12265 	hclge_state_uninit(hdev);
12266 	hclge_ptp_uninit(hdev);
12267 	hclge_uninit_rxd_adv_layout(hdev);
12268 	hclge_uninit_mac_table(hdev);
12269 	hclge_del_all_fd_entries(hdev);
12270 
12271 	if (mac->phydev)
12272 		mdiobus_unregister(mac->mdio_bus);
12273 
	/* Disable the MISC vector (vector0) */
12275 	hclge_enable_vector(&hdev->misc_vector, false);
12276 	synchronize_irq(hdev->misc_vector.vector_irq);
12277 
12278 	/* Disable all hw interrupts */
12279 	hclge_config_mac_tnl_int(hdev, false);
12280 	hclge_config_nic_hw_error(hdev, false);
12281 	hclge_config_rocee_ras_interrupt(hdev, false);
12282 
12283 	hclge_cmd_uninit(hdev);
12284 	hclge_misc_irq_uninit(hdev);
12285 	hclge_devlink_uninit(hdev);
12286 	hclge_pci_uninit(hdev);
12287 	mutex_destroy(&hdev->vport_lock);
12288 	hclge_uninit_vport_vlan_table(hdev);
12289 	ae_dev->priv = NULL;
12290 }
12291 
12292 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12293 {
12294 	struct hclge_vport *vport = hclge_get_vport(handle);
12295 	struct hclge_dev *hdev = vport->back;
12296 
12297 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12298 }
12299 
12300 static void hclge_get_channels(struct hnae3_handle *handle,
12301 			       struct ethtool_channels *ch)
12302 {
12303 	ch->max_combined = hclge_get_max_channels(handle);
12304 	ch->other_count = 1;
12305 	ch->max_other = 1;
12306 	ch->combined_count = handle->kinfo.rss_size;
12307 }
12308 
12309 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12310 					u16 *alloc_tqps, u16 *max_rss_size)
12311 {
12312 	struct hclge_vport *vport = hclge_get_vport(handle);
12313 	struct hclge_dev *hdev = vport->back;
12314 
12315 	*alloc_tqps = vport->alloc_tqps;
12316 	*max_rss_size = hdev->pf_rss_size_max;
12317 }
12318 
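/* Change the number of enabled queue pairs: record the requested RSS size,
 * let the TM module remap the vport, reprogram the RSS TC mode for the new
 * size and, unless the user has already configured the RSS indirection
 * table (rxfh_configured), rebuild the table with a default mapping.
 */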
12319 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12320 			      bool rxfh_configured)
12321 {
12322 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12323 	struct hclge_vport *vport = hclge_get_vport(handle);
12324 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12325 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12326 	struct hclge_dev *hdev = vport->back;
12327 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12328 	u16 cur_rss_size = kinfo->rss_size;
12329 	u16 cur_tqps = kinfo->num_tqps;
12330 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12331 	u16 roundup_size;
12332 	u32 *rss_indir;
12333 	unsigned int i;
12334 	int ret;
12335 
12336 	kinfo->req_rss_size = new_tqps_num;
12337 
12338 	ret = hclge_tm_vport_map_update(hdev);
12339 	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
12341 		return ret;
12342 	}
12343 
12344 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12345 	roundup_size = ilog2(roundup_size);
12346 	/* Set the RSS TC mode according to the new RSS size */
12347 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12348 		tc_valid[i] = 0;
12349 
12350 		if (!(hdev->hw_tc_map & BIT(i)))
12351 			continue;
12352 
12353 		tc_valid[i] = 1;
12354 		tc_size[i] = roundup_size;
12355 		tc_offset[i] = kinfo->rss_size * i;
12356 	}
12357 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12358 	if (ret)
12359 		return ret;
12360 
12361 	/* RSS indirection table has been configured by user */
12362 	if (rxfh_configured)
12363 		goto out;
12364 
	/* Reinitialize the RSS indirection table according to the new RSS size */
12366 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12367 			    GFP_KERNEL);
12368 	if (!rss_indir)
12369 		return -ENOMEM;
12370 
12371 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12372 		rss_indir[i] = i % kinfo->rss_size;
12373 
12374 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12375 	if (ret)
12376 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12377 			ret);
12378 
12379 	kfree(rss_indir);
12380 
12381 out:
12382 	if (!ret)
12383 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12385 			 cur_rss_size, kinfo->rss_size,
12386 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12387 
12388 	return ret;
12389 }
12390 
12391 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12392 			      u32 *regs_num_64_bit)
12393 {
12394 	struct hclge_desc desc;
12395 	u32 total_num;
12396 	int ret;
12397 
12398 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12399 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12400 	if (ret) {
12401 		dev_err(&hdev->pdev->dev,
12402 			"Query register number cmd failed, ret = %d.\n", ret);
12403 		return ret;
12404 	}
12405 
12406 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12407 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12408 
12409 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12410 	if (!total_num)
12411 		return -EINVAL;
12412 
12413 	return 0;
12414 }
12415 
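/* Read the 32 bit registers reported by the firmware. Every command BD
 * carries HCLGE_32_BIT_REG_RTN_DATANUM u32 entries, but in the first BD the
 * command header takes up HCLGE_32_BIT_DESC_NODATA_LEN of them, hence the
 * BD count is rounded up over (regs_num + nodata_num).
 */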
12416 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12417 				 void *data)
12418 {
12419 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12420 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12421 
12422 	struct hclge_desc *desc;
12423 	u32 *reg_val = data;
12424 	__le32 *desc_data;
12425 	int nodata_num;
12426 	int cmd_num;
12427 	int i, k, n;
12428 	int ret;
12429 
12430 	if (regs_num == 0)
12431 		return 0;
12432 
12433 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12434 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12435 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12436 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12437 	if (!desc)
12438 		return -ENOMEM;
12439 
12440 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12441 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12442 	if (ret) {
12443 		dev_err(&hdev->pdev->dev,
12444 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12445 		kfree(desc);
12446 		return ret;
12447 	}
12448 
12449 	for (i = 0; i < cmd_num; i++) {
12450 		if (i == 0) {
12451 			desc_data = (__le32 *)(&desc[i].data[0]);
12452 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12453 		} else {
12454 			desc_data = (__le32 *)(&desc[i]);
12455 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12456 		}
12457 		for (k = 0; k < n; k++) {
12458 			*reg_val++ = le32_to_cpu(*desc_data++);
12459 
12460 			regs_num--;
12461 			if (!regs_num)
12462 				break;
12463 		}
12464 	}
12465 
12466 	kfree(desc);
12467 	return 0;
12468 }
12469 
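/* 64 bit counterpart of hclge_get_32_bit_regs(): each BD carries
 * HCLGE_64_BIT_REG_RTN_DATANUM u64 entries and the first BD reserves
 * HCLGE_64_BIT_DESC_NODATA_LEN of them for the command header.
 */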
12470 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12471 				 void *data)
12472 {
12473 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12474 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12475 
12476 	struct hclge_desc *desc;
12477 	u64 *reg_val = data;
12478 	__le64 *desc_data;
12479 	int nodata_len;
12480 	int cmd_num;
12481 	int i, k, n;
12482 	int ret;
12483 
12484 	if (regs_num == 0)
12485 		return 0;
12486 
12487 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12488 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12489 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12490 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12491 	if (!desc)
12492 		return -ENOMEM;
12493 
12494 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12495 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12496 	if (ret) {
12497 		dev_err(&hdev->pdev->dev,
12498 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12499 		kfree(desc);
12500 		return ret;
12501 	}
12502 
12503 	for (i = 0; i < cmd_num; i++) {
12504 		if (i == 0) {
12505 			desc_data = (__le64 *)(&desc[i].data[0]);
12506 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12507 		} else {
12508 			desc_data = (__le64 *)(&desc[i]);
12509 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12510 		}
12511 		for (k = 0; k < n; k++) {
12512 			*reg_val++ = le64_to_cpu(*desc_data++);
12513 
12514 			regs_num--;
12515 			if (!regs_num)
12516 				break;
12517 		}
12518 	}
12519 
12520 	kfree(desc);
12521 	return 0;
12522 }
12523 
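/* The register dump is organized in lines of REG_NUM_PER_LINE u32 words.
 * Every register block is padded out to the next line boundary with
 * SEPARATOR_VALUE words (a full separator line when the block is already
 * aligned), so the individual blocks can be told apart in the dump.
 */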
12524 #define MAX_SEPARATE_NUM	4
12525 #define SEPARATOR_VALUE		0xFDFCFBFA
12526 #define REG_NUM_PER_LINE	4
12527 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12528 #define REG_SEPARATOR_LINE	1
12529 #define REG_NUM_REMAIN_MASK	3
12530 
12531 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12532 {
12533 	int i;
12534 
	/* initialize all command BDs except the last one */
12536 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12537 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12538 					   true);
12539 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12540 	}
12541 
12542 	/* initialize the last command BD */
12543 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12544 
12545 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12546 }
12547 
12548 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12549 				    int *bd_num_list,
12550 				    u32 type_num)
12551 {
12552 	u32 entries_per_desc, desc_index, index, offset, i;
12553 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12554 	int ret;
12555 
12556 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12557 	if (ret) {
12558 		dev_err(&hdev->pdev->dev,
12559 			"Get dfx bd num fail, status is %d.\n", ret);
12560 		return ret;
12561 	}
12562 
12563 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12564 	for (i = 0; i < type_num; i++) {
12565 		offset = hclge_dfx_bd_offset_list[i];
12566 		index = offset % entries_per_desc;
12567 		desc_index = offset / entries_per_desc;
12568 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12569 	}
12570 
12571 	return ret;
12572 }
12573 
12574 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12575 				  struct hclge_desc *desc_src, int bd_num,
12576 				  enum hclge_opcode_type cmd)
12577 {
12578 	struct hclge_desc *desc = desc_src;
12579 	int i, ret;
12580 
12581 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12582 	for (i = 0; i < bd_num - 1; i++) {
12583 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12584 		desc++;
12585 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12586 	}
12587 
12588 	desc = desc_src;
12589 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12590 	if (ret)
12591 		dev_err(&hdev->pdev->dev,
12592 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12593 			cmd, ret);
12594 
12595 	return ret;
12596 }
12597 
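/* Copy the DFX register values from the command descriptors into the dump
 * buffer and append the separator padding. Returns the number of u32 words
 * written, register values plus separators.
 */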
12598 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12599 				    void *data)
12600 {
12601 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12602 	struct hclge_desc *desc = desc_src;
12603 	u32 *reg = data;
12604 
12605 	entries_per_desc = ARRAY_SIZE(desc->data);
12606 	reg_num = entries_per_desc * bd_num;
12607 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12608 	for (i = 0; i < reg_num; i++) {
12609 		index = i % entries_per_desc;
12610 		desc_index = i / entries_per_desc;
12611 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12612 	}
12613 	for (i = 0; i < separator_num; i++)
12614 		*reg++ = SEPARATOR_VALUE;
12615 
12616 	return reg_num + separator_num;
12617 }
12618 
12619 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12620 {
12621 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12622 	int data_len_per_desc, bd_num, i;
12623 	int *bd_num_list;
12624 	u32 data_len;
12625 	int ret;
12626 
12627 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12628 	if (!bd_num_list)
12629 		return -ENOMEM;
12630 
12631 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12632 	if (ret) {
12633 		dev_err(&hdev->pdev->dev,
12634 			"Get dfx reg bd num fail, status is %d.\n", ret);
12635 		goto out;
12636 	}
12637 
12638 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12639 	*len = 0;
12640 	for (i = 0; i < dfx_reg_type_num; i++) {
12641 		bd_num = bd_num_list[i];
12642 		data_len = data_len_per_desc * bd_num;
12643 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12644 	}
12645 
12646 out:
12647 	kfree(bd_num_list);
12648 	return ret;
12649 }
12650 
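/* Dump all DFX register types: query the BD count of every type, allocate
 * a descriptor buffer sized for the largest type, then read and copy the
 * registers type by type into the dump buffer.
 */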
12651 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12652 {
12653 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12654 	int bd_num, bd_num_max, buf_len, i;
12655 	struct hclge_desc *desc_src;
12656 	int *bd_num_list;
12657 	u32 *reg = data;
12658 	int ret;
12659 
12660 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12661 	if (!bd_num_list)
12662 		return -ENOMEM;
12663 
12664 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12665 	if (ret) {
12666 		dev_err(&hdev->pdev->dev,
12667 			"Get dfx reg bd num fail, status is %d.\n", ret);
12668 		goto out;
12669 	}
12670 
12671 	bd_num_max = bd_num_list[0];
12672 	for (i = 1; i < dfx_reg_type_num; i++)
12673 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12674 
12675 	buf_len = sizeof(*desc_src) * bd_num_max;
12676 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12677 	if (!desc_src) {
12678 		ret = -ENOMEM;
12679 		goto out;
12680 	}
12681 
12682 	for (i = 0; i < dfx_reg_type_num; i++) {
12683 		bd_num = bd_num_list[i];
12684 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12685 					     hclge_dfx_reg_opcode_list[i]);
12686 		if (ret) {
12687 			dev_err(&hdev->pdev->dev,
12688 				"Get dfx reg fail, status is %d.\n", ret);
12689 			break;
12690 		}
12691 
12692 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12693 	}
12694 
12695 	kfree(desc_src);
12696 out:
12697 	kfree(bd_num_list);
12698 	return ret;
12699 }
12700 
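/* Read the directly accessible PF registers (command queue, common, per-ring
 * and per-vector interrupt registers) from the PCIe register space into the
 * dump buffer, padding each block with separator words. Returns the number
 * of u32 words written.
 */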
12701 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12702 			      struct hnae3_knic_private_info *kinfo)
12703 {
12704 #define HCLGE_RING_REG_OFFSET		0x200
12705 #define HCLGE_RING_INT_REG_OFFSET	0x4
12706 
12707 	int i, j, reg_num, separator_num;
12708 	int data_num_sum;
12709 	u32 *reg = data;
12710 
	/* fetch per-PF register values from PF PCIe register space */
12712 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12713 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12714 	for (i = 0; i < reg_num; i++)
12715 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12716 	for (i = 0; i < separator_num; i++)
12717 		*reg++ = SEPARATOR_VALUE;
12718 	data_num_sum = reg_num + separator_num;
12719 
12720 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12721 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12722 	for (i = 0; i < reg_num; i++)
12723 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12724 	for (i = 0; i < separator_num; i++)
12725 		*reg++ = SEPARATOR_VALUE;
12726 	data_num_sum += reg_num + separator_num;
12727 
12728 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12729 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12730 	for (j = 0; j < kinfo->num_tqps; j++) {
12731 		for (i = 0; i < reg_num; i++)
12732 			*reg++ = hclge_read_dev(&hdev->hw,
12733 						ring_reg_addr_list[i] +
12734 						HCLGE_RING_REG_OFFSET * j);
12735 		for (i = 0; i < separator_num; i++)
12736 			*reg++ = SEPARATOR_VALUE;
12737 	}
12738 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12739 
12740 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12741 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12742 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12743 		for (i = 0; i < reg_num; i++)
12744 			*reg++ = hclge_read_dev(&hdev->hw,
12745 						tqp_intr_reg_addr_list[i] +
12746 						HCLGE_RING_INT_REG_OFFSET * j);
12747 		for (i = 0; i < separator_num; i++)
12748 			*reg++ = SEPARATOR_VALUE;
12749 	}
12750 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12751 
12752 	return data_num_sum;
12753 }
12754 
12755 static int hclge_get_regs_len(struct hnae3_handle *handle)
12756 {
12757 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12758 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12759 	struct hclge_vport *vport = hclge_get_vport(handle);
12760 	struct hclge_dev *hdev = vport->back;
12761 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12762 	int regs_lines_32_bit, regs_lines_64_bit;
12763 	int ret;
12764 
12765 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12766 	if (ret) {
12767 		dev_err(&hdev->pdev->dev,
12768 			"Get register number failed, ret = %d.\n", ret);
12769 		return ret;
12770 	}
12771 
12772 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12773 	if (ret) {
12774 		dev_err(&hdev->pdev->dev,
12775 			"Get dfx reg len failed, ret = %d.\n", ret);
12776 		return ret;
12777 	}
12778 
12779 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12780 		REG_SEPARATOR_LINE;
12781 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12782 		REG_SEPARATOR_LINE;
12783 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12784 		REG_SEPARATOR_LINE;
12785 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12786 		REG_SEPARATOR_LINE;
12787 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12788 		REG_SEPARATOR_LINE;
12789 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12790 		REG_SEPARATOR_LINE;
12791 
12792 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12793 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12794 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12795 }
12796 
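/* Fill the ethtool register dump: directly read PF registers first, then
 * the firmware reported 32 bit and 64 bit registers, and finally the DFX
 * registers, with each block padded by separator words.
 */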
12797 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12798 			   void *data)
12799 {
12800 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12801 	struct hclge_vport *vport = hclge_get_vport(handle);
12802 	struct hclge_dev *hdev = vport->back;
12803 	u32 regs_num_32_bit, regs_num_64_bit;
12804 	int i, reg_num, separator_num, ret;
12805 	u32 *reg = data;
12806 
12807 	*version = hdev->fw_version;
12808 
12809 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12810 	if (ret) {
12811 		dev_err(&hdev->pdev->dev,
12812 			"Get register number failed, ret = %d.\n", ret);
12813 		return;
12814 	}
12815 
12816 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12817 
12818 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12819 	if (ret) {
12820 		dev_err(&hdev->pdev->dev,
12821 			"Get 32 bit register failed, ret = %d.\n", ret);
12822 		return;
12823 	}
12824 	reg_num = regs_num_32_bit;
12825 	reg += reg_num;
12826 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12827 	for (i = 0; i < separator_num; i++)
12828 		*reg++ = SEPARATOR_VALUE;
12829 
12830 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12831 	if (ret) {
12832 		dev_err(&hdev->pdev->dev,
12833 			"Get 64 bit register failed, ret = %d.\n", ret);
12834 		return;
12835 	}
12836 	reg_num = regs_num_64_bit * 2;
12837 	reg += reg_num;
12838 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12839 	for (i = 0; i < separator_num; i++)
12840 		*reg++ = SEPARATOR_VALUE;
12841 
12842 	ret = hclge_get_dfx_reg(hdev, reg);
12843 	if (ret)
12844 		dev_err(&hdev->pdev->dev,
12845 			"Get dfx register failed, ret = %d.\n", ret);
12846 }
12847 
12848 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12849 {
12850 	struct hclge_set_led_state_cmd *req;
12851 	struct hclge_desc desc;
12852 	int ret;
12853 
12854 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12855 
12856 	req = (struct hclge_set_led_state_cmd *)desc.data;
12857 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12858 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12859 
12860 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12861 	if (ret)
12862 		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);
12864 
12865 	return ret;
12866 }
12867 
12868 enum hclge_led_status {
12869 	HCLGE_LED_OFF,
12870 	HCLGE_LED_ON,
12871 	HCLGE_LED_NO_CHANGE = 0xFF,
12872 };
12873 
12874 static int hclge_set_led_id(struct hnae3_handle *handle,
12875 			    enum ethtool_phys_id_state status)
12876 {
12877 	struct hclge_vport *vport = hclge_get_vport(handle);
12878 	struct hclge_dev *hdev = vport->back;
12879 
12880 	switch (status) {
12881 	case ETHTOOL_ID_ACTIVE:
12882 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12883 	case ETHTOOL_ID_INACTIVE:
12884 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12885 	default:
12886 		return -EINVAL;
12887 	}
12888 }
12889 
12890 static void hclge_get_link_mode(struct hnae3_handle *handle,
12891 				unsigned long *supported,
12892 				unsigned long *advertising)
12893 {
12894 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12895 	struct hclge_vport *vport = hclge_get_vport(handle);
12896 	struct hclge_dev *hdev = vport->back;
12897 	unsigned int idx = 0;
12898 
12899 	for (; idx < size; idx++) {
12900 		supported[idx] = hdev->hw.mac.supported[idx];
12901 		advertising[idx] = hdev->hw.mac.advertising[idx];
12902 	}
12903 }
12904 
12905 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12906 {
12907 	struct hclge_vport *vport = hclge_get_vport(handle);
12908 	struct hclge_dev *hdev = vport->back;
12909 	bool gro_en_old = hdev->gro_en;
12910 	int ret;
12911 
12912 	hdev->gro_en = enable;
12913 	ret = hclge_config_gro(hdev);
12914 	if (ret)
12915 		hdev->gro_en = gro_en_old;
12916 
12917 	return ret;
12918 }
12919 
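/* Apply pending promiscuous mode changes: vport 0 (the PF) combines the
 * netdev flags with the overflow promiscuous flags, while for VF vports
 * unicast/multicast promiscuity is only honoured when the VF is trusted.
 */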
12920 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12921 {
12922 	struct hclge_vport *vport = &hdev->vport[0];
12923 	struct hnae3_handle *handle = &vport->nic;
12924 	u8 tmp_flags;
12925 	int ret;
12926 	u16 i;
12927 
12928 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12929 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12930 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12931 	}
12932 
12933 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12934 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12935 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12936 					     tmp_flags & HNAE3_MPE);
12937 		if (!ret) {
12938 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12939 				  &vport->state);
12940 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12941 				&vport->state);
12942 		}
12943 	}
12944 
12945 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12946 		bool uc_en = false;
12947 		bool mc_en = false;
12948 		bool bc_en;
12949 
12950 		vport = &hdev->vport[i];
12951 
12952 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12953 					&vport->state))
12954 			continue;
12955 
12956 		if (vport->vf_info.trusted) {
12957 			uc_en = vport->vf_info.request_uc_en > 0 ||
12958 				vport->overflow_promisc_flags &
12959 				HNAE3_OVERFLOW_UPE;
12960 			mc_en = vport->vf_info.request_mc_en > 0 ||
12961 				vport->overflow_promisc_flags &
12962 				HNAE3_OVERFLOW_MPE;
12963 		}
12964 		bc_en = vport->vf_info.request_bc_en > 0;
12965 
12966 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12967 						 mc_en, bc_en);
12968 		if (ret) {
12969 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12970 				&vport->state);
12971 			return;
12972 		}
12973 		hclge_set_vport_vlan_fltr_change(vport);
12974 	}
12975 }
12976 
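/* Check with the firmware whether an SFP module is currently present. */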
12977 static bool hclge_module_existed(struct hclge_dev *hdev)
12978 {
12979 	struct hclge_desc desc;
12980 	u32 existed;
12981 	int ret;
12982 
12983 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12984 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12985 	if (ret) {
12986 		dev_err(&hdev->pdev->dev,
12987 			"failed to get SFP exist state, ret = %d\n", ret);
12988 		return false;
12989 	}
12990 
12991 	existed = le32_to_cpu(desc.data[0]);
12992 
12993 	return existed != 0;
12994 }
12995 
/* One read needs 6 BDs (140 bytes in total).
 * Return the number of bytes actually read; 0 means the read failed.
 */
12999 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
13000 				     u32 len, u8 *data)
13001 {
13002 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
13003 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
13004 	u16 read_len;
13005 	u16 copy_len;
13006 	int ret;
13007 	int i;
13008 
13009 	/* setup all 6 bds to read module eeprom info. */
13010 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
13011 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
13012 					   true);
13013 
13014 		/* bd0~bd4 need next flag */
13015 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
13016 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
13017 	}
13018 
	/* set up bd0; this bd carries the offset and read length. */
13020 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
13021 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
13022 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
13023 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
13024 
13025 	ret = hclge_cmd_send(&hdev->hw, desc, i);
13026 	if (ret) {
13027 		dev_err(&hdev->pdev->dev,
13028 			"failed to get SFP eeprom info, ret = %d\n", ret);
13029 		return 0;
13030 	}
13031 
13032 	/* copy sfp info from bd0 to out buffer. */
13033 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
13034 	memcpy(data, sfp_info_bd0->data, copy_len);
13035 	read_len = copy_len;
13036 
13037 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
13038 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
13039 		if (read_len >= len)
13040 			return read_len;
13041 
13042 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
13043 		memcpy(data + read_len, desc[i].data, copy_len);
13044 		read_len += copy_len;
13045 	}
13046 
13047 	return read_len;
13048 }
13049 
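/* ethtool module EEPROM read: only supported on fiber media and only when a
 * module is present; reads in chunks via hclge_get_sfp_eeprom_info() until
 * the requested length has been copied.
 */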
13050 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
13051 				   u32 len, u8 *data)
13052 {
13053 	struct hclge_vport *vport = hclge_get_vport(handle);
13054 	struct hclge_dev *hdev = vport->back;
13055 	u32 read_len = 0;
13056 	u16 data_len;
13057 
13058 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
13059 		return -EOPNOTSUPP;
13060 
13061 	if (!hclge_module_existed(hdev))
13062 		return -ENXIO;
13063 
13064 	while (read_len < len) {
13065 		data_len = hclge_get_sfp_eeprom_info(hdev,
13066 						     offset + read_len,
13067 						     len - read_len,
13068 						     data + read_len);
13069 		if (!data_len)
13070 			return -EIO;
13071 
13072 		read_len += data_len;
13073 	}
13074 
13075 	return 0;
13076 }
13077 
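/* Query the link diagnosis status code from the firmware; not supported on
 * device versions V2 and earlier.
 */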
13078 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
13079 					 u32 *status_code)
13080 {
13081 	struct hclge_vport *vport = hclge_get_vport(handle);
13082 	struct hclge_dev *hdev = vport->back;
13083 	struct hclge_desc desc;
13084 	int ret;
13085 
13086 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
13087 		return -EOPNOTSUPP;
13088 
13089 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
13090 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
13091 	if (ret) {
13092 		dev_err(&hdev->pdev->dev,
13093 			"failed to query link diagnosis info, ret = %d\n", ret);
13094 		return ret;
13095 	}
13096 
13097 	*status_code = le32_to_cpu(desc.data[0]);
13098 	return 0;
13099 }
13100 
13101 static const struct hnae3_ae_ops hclge_ops = {
13102 	.init_ae_dev = hclge_init_ae_dev,
13103 	.uninit_ae_dev = hclge_uninit_ae_dev,
13104 	.reset_prepare = hclge_reset_prepare_general,
13105 	.reset_done = hclge_reset_done,
13106 	.init_client_instance = hclge_init_client_instance,
13107 	.uninit_client_instance = hclge_uninit_client_instance,
13108 	.map_ring_to_vector = hclge_map_ring_to_vector,
13109 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
13110 	.get_vector = hclge_get_vector,
13111 	.put_vector = hclge_put_vector,
13112 	.set_promisc_mode = hclge_set_promisc_mode,
13113 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
13114 	.set_loopback = hclge_set_loopback,
13115 	.start = hclge_ae_start,
13116 	.stop = hclge_ae_stop,
13117 	.client_start = hclge_client_start,
13118 	.client_stop = hclge_client_stop,
13119 	.get_status = hclge_get_status,
13120 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
13121 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
13122 	.get_media_type = hclge_get_media_type,
13123 	.check_port_speed = hclge_check_port_speed,
13124 	.get_fec = hclge_get_fec,
13125 	.set_fec = hclge_set_fec,
13126 	.get_rss_key_size = hclge_get_rss_key_size,
13127 	.get_rss = hclge_get_rss,
13128 	.set_rss = hclge_set_rss,
13129 	.set_rss_tuple = hclge_set_rss_tuple,
13130 	.get_rss_tuple = hclge_get_rss_tuple,
13131 	.get_tc_size = hclge_get_tc_size,
13132 	.get_mac_addr = hclge_get_mac_addr,
13133 	.set_mac_addr = hclge_set_mac_addr,
13134 	.do_ioctl = hclge_do_ioctl,
13135 	.add_uc_addr = hclge_add_uc_addr,
13136 	.rm_uc_addr = hclge_rm_uc_addr,
13137 	.add_mc_addr = hclge_add_mc_addr,
13138 	.rm_mc_addr = hclge_rm_mc_addr,
13139 	.set_autoneg = hclge_set_autoneg,
13140 	.get_autoneg = hclge_get_autoneg,
13141 	.restart_autoneg = hclge_restart_autoneg,
13142 	.halt_autoneg = hclge_halt_autoneg,
13143 	.get_pauseparam = hclge_get_pauseparam,
13144 	.set_pauseparam = hclge_set_pauseparam,
13145 	.set_mtu = hclge_set_mtu,
13146 	.reset_queue = hclge_reset_tqp,
13147 	.get_stats = hclge_get_stats,
13148 	.get_mac_stats = hclge_get_mac_stat,
13149 	.update_stats = hclge_update_stats,
13150 	.get_strings = hclge_get_strings,
13151 	.get_sset_count = hclge_get_sset_count,
13152 	.get_fw_version = hclge_get_fw_version,
13153 	.get_mdix_mode = hclge_get_mdix_mode,
13154 	.enable_vlan_filter = hclge_enable_vlan_filter,
13155 	.set_vlan_filter = hclge_set_vlan_filter,
13156 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13157 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13158 	.reset_event = hclge_reset_event,
13159 	.get_reset_level = hclge_get_reset_level,
13160 	.set_default_reset_request = hclge_set_def_reset_request,
13161 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13162 	.set_channels = hclge_set_channels,
13163 	.get_channels = hclge_get_channels,
13164 	.get_regs_len = hclge_get_regs_len,
13165 	.get_regs = hclge_get_regs,
13166 	.set_led_id = hclge_set_led_id,
13167 	.get_link_mode = hclge_get_link_mode,
13168 	.add_fd_entry = hclge_add_fd_entry,
13169 	.del_fd_entry = hclge_del_fd_entry,
13170 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13171 	.get_fd_rule_info = hclge_get_fd_rule_info,
13172 	.get_fd_all_rules = hclge_get_all_rules,
13173 	.enable_fd = hclge_enable_fd,
13174 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
13175 	.dbg_read_cmd = hclge_dbg_read_cmd,
13176 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
13177 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
13178 	.ae_dev_resetting = hclge_ae_dev_resetting,
13179 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13180 	.set_gro_en = hclge_gro_en,
13181 	.get_global_queue_id = hclge_covert_handle_qid_global,
13182 	.set_timer_task = hclge_set_timer_task,
13183 	.mac_connect_phy = hclge_mac_connect_phy,
13184 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
13185 	.get_vf_config = hclge_get_vf_config,
13186 	.set_vf_link_state = hclge_set_vf_link_state,
13187 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
13188 	.set_vf_trust = hclge_set_vf_trust,
13189 	.set_vf_rate = hclge_set_vf_rate,
13190 	.set_vf_mac = hclge_set_vf_mac,
13191 	.get_module_eeprom = hclge_get_module_eeprom,
13192 	.get_cmdq_stat = hclge_get_cmdq_stat,
13193 	.add_cls_flower = hclge_add_cls_flower,
13194 	.del_cls_flower = hclge_del_cls_flower,
13195 	.cls_flower_active = hclge_is_cls_flower_active,
13196 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13197 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13198 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
13199 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
13200 	.get_ts_info = hclge_ptp_get_ts_info,
13201 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13202 };
13203 
13204 static struct hnae3_ae_algo ae_algo = {
13205 	.ops = &hclge_ops,
13206 	.pdev_id_table = ae_algo_pci_tbl,
13207 };
13208 
13209 static int hclge_init(void)
13210 {
13211 	pr_info("%s is initializing\n", HCLGE_NAME);
13212 
13213 	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
13214 	if (!hclge_wq) {
13215 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13216 		return -ENOMEM;
13217 	}
13218 
13219 	hnae3_register_ae_algo(&ae_algo);
13220 
13221 	return 0;
13222 }
13223 
13224 static void hclge_exit(void)
13225 {
13226 	hnae3_unregister_ae_algo_prepare(&ae_algo);
13227 	hnae3_unregister_ae_algo(&ae_algo);
13228 	destroy_workqueue(hclge_wq);
13229 }
13230 module_init(hclge_init);
13231 module_exit(hclge_exit);
13232 
13233 MODULE_LICENSE("GPL");
13234 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13235 MODULE_DESCRIPTION("HCLGE Driver");
13236 MODULE_VERSION(HCLGE_MOD_VERSION);
13237