1 /*
2  * Copyright (c) 2016-2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/acpi.h>
11 #include <linux/device.h>
12 #include <linux/etherdevice.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/netdevice.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20 
21 #include "hclge_cmd.h"
22 #include "hclge_dcb.h"
23 #include "hclge_main.h"
24 #include "hclge_mdio.h"
25 #include "hclge_tm.h"
26 #include "hnae3.h"
27 
28 #define HCLGE_NAME			"hclge"
29 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
32 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
33 
34 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
35 				     enum hclge_mta_dmac_sel_type mta_mac_sel,
36 				     bool enable);
37 static int hclge_init_vlan_config(struct hclge_dev *hdev);
38 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
39 
40 static struct hnae3_ae_algo ae_algo;
41 
42 static const struct pci_device_id ae_algo_pci_tbl[] = {
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
49 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
50 	/* required last entry */
51 	{0, }
52 };
53 
54 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
55 	"Mac    Loopback test",
56 	"Serdes Loopback test",
57 	"Phy    Loopback test"
58 };
59 
60 static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
61 	{"igu_rx_oversize_pkt",
62 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
63 	{"igu_rx_undersize_pkt",
64 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
65 	{"igu_rx_out_all_pkt",
66 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
67 	{"igu_rx_uni_pkt",
68 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
69 	{"igu_rx_multi_pkt",
70 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
71 	{"igu_rx_broad_pkt",
72 		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
73 	{"egu_tx_out_all_pkt",
74 		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
75 	{"egu_tx_uni_pkt",
76 		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
77 	{"egu_tx_multi_pkt",
78 		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
79 	{"egu_tx_broad_pkt",
80 		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
81 	{"ssu_ppp_mac_key_num",
82 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
83 	{"ssu_ppp_host_key_num",
84 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
85 	{"ppp_ssu_mac_rlt_num",
86 		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
87 	{"ppp_ssu_host_rlt_num",
88 		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
89 	{"ssu_tx_in_num",
90 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
91 	{"ssu_tx_out_num",
92 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
93 	{"ssu_rx_in_num",
94 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
95 	{"ssu_rx_out_num",
96 		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
97 };
98 
99 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
100 	{"igu_rx_err_pkt",
101 		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
102 	{"igu_rx_no_eof_pkt",
103 		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
104 	{"igu_rx_no_sof_pkt",
105 		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
106 	{"egu_tx_1588_pkt",
107 		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
108 	{"ssu_full_drop_num",
109 		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
110 	{"ssu_part_drop_num",
111 		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
112 	{"ppp_key_drop_num",
113 		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
114 	{"ppp_rlt_drop_num",
115 		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
116 	{"ssu_key_drop_num",
117 		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
118 	{"pkt_curr_buf_cnt",
119 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
120 	{"qcn_fb_rcv_cnt",
121 		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
122 	{"qcn_fb_drop_cnt",
123 		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
124 	{"qcn_fb_invaild_cnt",
125 		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
126 	{"rx_packet_tc0_in_cnt",
127 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
128 	{"rx_packet_tc1_in_cnt",
129 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
130 	{"rx_packet_tc2_in_cnt",
131 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
132 	{"rx_packet_tc3_in_cnt",
133 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
134 	{"rx_packet_tc4_in_cnt",
135 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
136 	{"rx_packet_tc5_in_cnt",
137 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
138 	{"rx_packet_tc6_in_cnt",
139 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
140 	{"rx_packet_tc7_in_cnt",
141 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
142 	{"rx_packet_tc0_out_cnt",
143 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
144 	{"rx_packet_tc1_out_cnt",
145 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
146 	{"rx_packet_tc2_out_cnt",
147 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
148 	{"rx_packet_tc3_out_cnt",
149 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
150 	{"rx_packet_tc4_out_cnt",
151 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
152 	{"rx_packet_tc5_out_cnt",
153 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
154 	{"rx_packet_tc6_out_cnt",
155 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
156 	{"rx_packet_tc7_out_cnt",
157 		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
158 	{"tx_packet_tc0_in_cnt",
159 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
160 	{"tx_packet_tc1_in_cnt",
161 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
162 	{"tx_packet_tc2_in_cnt",
163 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
164 	{"tx_packet_tc3_in_cnt",
165 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
166 	{"tx_packet_tc4_in_cnt",
167 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
168 	{"tx_packet_tc5_in_cnt",
169 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
170 	{"tx_packet_tc6_in_cnt",
171 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
172 	{"tx_packet_tc7_in_cnt",
173 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
174 	{"tx_packet_tc0_out_cnt",
175 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
176 	{"tx_packet_tc1_out_cnt",
177 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
178 	{"tx_packet_tc2_out_cnt",
179 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
180 	{"tx_packet_tc3_out_cnt",
181 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
182 	{"tx_packet_tc4_out_cnt",
183 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
184 	{"tx_packet_tc5_out_cnt",
185 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
186 	{"tx_packet_tc6_out_cnt",
187 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
188 	{"tx_packet_tc7_out_cnt",
189 		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
190 	{"pkt_curr_buf_tc0_cnt",
191 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
192 	{"pkt_curr_buf_tc1_cnt",
193 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
194 	{"pkt_curr_buf_tc2_cnt",
195 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
196 	{"pkt_curr_buf_tc3_cnt",
197 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
198 	{"pkt_curr_buf_tc4_cnt",
199 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
200 	{"pkt_curr_buf_tc5_cnt",
201 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
202 	{"pkt_curr_buf_tc6_cnt",
203 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
204 	{"pkt_curr_buf_tc7_cnt",
205 		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
206 	{"mb_uncopy_num",
207 		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
208 	{"lo_pri_unicast_rlt_drop_num",
209 		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
210 	{"hi_pri_multicast_rlt_drop_num",
211 		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
212 	{"lo_pri_multicast_rlt_drop_num",
213 		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
214 	{"rx_oq_drop_pkt_cnt",
215 		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
216 	{"tx_oq_drop_pkt_cnt",
217 		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
218 	{"nic_l2_err_drop_pkt_cnt",
219 		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
220 	{"roc_l2_err_drop_pkt_cnt",
221 		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
222 };
223 
224 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
225 	{"mac_tx_mac_pause_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
227 	{"mac_rx_mac_pause_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
229 	{"mac_tx_pfc_pri0_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
231 	{"mac_tx_pfc_pri1_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
233 	{"mac_tx_pfc_pri2_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
235 	{"mac_tx_pfc_pri3_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
237 	{"mac_tx_pfc_pri4_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
239 	{"mac_tx_pfc_pri5_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
241 	{"mac_tx_pfc_pri6_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
243 	{"mac_tx_pfc_pri7_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
245 	{"mac_rx_pfc_pri0_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
247 	{"mac_rx_pfc_pri1_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
249 	{"mac_rx_pfc_pri2_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
251 	{"mac_rx_pfc_pri3_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
253 	{"mac_rx_pfc_pri4_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
255 	{"mac_rx_pfc_pri5_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
257 	{"mac_rx_pfc_pri6_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
259 	{"mac_rx_pfc_pri7_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
261 	{"mac_tx_total_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
263 	{"mac_tx_total_oct_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
265 	{"mac_tx_good_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
267 	{"mac_tx_bad_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
269 	{"mac_tx_good_oct_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
271 	{"mac_tx_bad_oct_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
273 	{"mac_tx_uni_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
275 	{"mac_tx_multi_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
277 	{"mac_tx_broad_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
279 	{"mac_tx_undersize_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
281 	{"mac_tx_overrsize_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
283 	{"mac_tx_64_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
285 	{"mac_tx_65_127_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
287 	{"mac_tx_128_255_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
289 	{"mac_tx_256_511_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
291 	{"mac_tx_512_1023_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
293 	{"mac_tx_1024_1518_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
295 	{"mac_tx_1519_max_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
297 	{"mac_rx_total_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
299 	{"mac_rx_total_oct_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
301 	{"mac_rx_good_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
303 	{"mac_rx_bad_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
305 	{"mac_rx_good_oct_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
307 	{"mac_rx_bad_oct_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
309 	{"mac_rx_uni_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
311 	{"mac_rx_multi_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
313 	{"mac_rx_broad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
315 	{"mac_rx_undersize_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
317 	{"mac_rx_overrsize_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
319 	{"mac_rx_64_oct_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
321 	{"mac_rx_65_127_oct_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
323 	{"mac_rx_128_255_oct_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
325 	{"mac_rx_256_511_oct_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
327 	{"mac_rx_512_1023_oct_pkt_num",
328 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
329 	{"mac_rx_1024_1518_oct_pkt_num",
330 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
331 	{"mac_rx_1519_max_oct_pkt_num",
332 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
333 
334 	{"mac_trans_fragment_pkt_num",
335 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
336 	{"mac_trans_undermin_pkt_num",
337 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
338 	{"mac_trans_jabber_pkt_num",
339 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
340 	{"mac_trans_err_all_pkt_num",
341 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
342 	{"mac_trans_from_app_good_pkt_num",
343 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
344 	{"mac_trans_from_app_bad_pkt_num",
345 		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
346 	{"mac_rcv_fragment_pkt_num",
347 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
348 	{"mac_rcv_undermin_pkt_num",
349 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
350 	{"mac_rcv_jabber_pkt_num",
351 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
352 	{"mac_rcv_fcs_err_pkt_num",
353 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
354 	{"mac_rcv_send_app_good_pkt_num",
355 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
356 	{"mac_rcv_send_app_bad_pkt_num",
357 		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
358 };
359 
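/* hclge_64_bit_update_stats: accumulate the 64-bit packet statistics
 * returned by the HCLGE_OPC_STATS_64_BIT command into
 * hdev->hw_stats.all_64_bit_stats. Only the data area of the first
 * descriptor carries statistics; the follow-up descriptors are
 * consumed in full.
 */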
360 static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
361 {
362 #define HCLGE_64_BIT_CMD_NUM 5
363 #define HCLGE_64_BIT_RTN_DATANUM 4
364 	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
365 	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
366 	__le64 *desc_data;
367 	int i, k, n;
368 	int ret;
369 
370 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
371 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
372 	if (ret) {
373 		dev_err(&hdev->pdev->dev,
374 			"Get 64 bit pkt stats fail, status = %d.\n", ret);
375 		return ret;
376 	}
377 
378 	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
379 		if (unlikely(i == 0)) {
380 			desc_data = (__le64 *)(&desc[i].data[0]);
381 			n = HCLGE_64_BIT_RTN_DATANUM - 1;
382 		} else {
383 			desc_data = (__le64 *)(&desc[i]);
384 			n = HCLGE_64_BIT_RTN_DATANUM;
385 		}
386 		for (k = 0; k < n; k++) {
387 			*data++ += le64_to_cpu(*desc_data);
388 			desc_data++;
389 		}
390 	}
391 
392 	return 0;
393 }
394 
395 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
396 {
397 	stats->pkt_curr_buf_cnt     = 0;
398 	stats->pkt_curr_buf_tc0_cnt = 0;
399 	stats->pkt_curr_buf_tc1_cnt = 0;
400 	stats->pkt_curr_buf_tc2_cnt = 0;
401 	stats->pkt_curr_buf_tc3_cnt = 0;
402 	stats->pkt_curr_buf_tc4_cnt = 0;
403 	stats->pkt_curr_buf_tc5_cnt = 0;
404 	stats->pkt_curr_buf_tc6_cnt = 0;
405 	stats->pkt_curr_buf_tc7_cnt = 0;
406 }
407 
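/* hclge_32_bit_update_stats: accumulate the 32-bit statistics returned
 * by the HCLGE_OPC_STATS_32_BIT command. The pkt_curr_buf_* fields are
 * cleared first (via hclge_reset_partial_32bit_counter) so that they
 * hold the latest reading instead of a running sum.
 */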
408 static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
409 {
410 #define HCLGE_32_BIT_CMD_NUM 8
411 #define HCLGE_32_BIT_RTN_DATANUM 8
412 
413 	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
414 	struct hclge_32_bit_stats *all_32_bit_stats;
415 	__le32 *desc_data;
416 	int i, k, n;
417 	u64 *data;
418 	int ret;
419 
420 	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
421 	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);
422 
423 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
424 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
425 	if (ret) {
426 		dev_err(&hdev->pdev->dev,
427 			"Get 32 bit pkt stats fail, status = %d.\n", ret);
428 
429 		return ret;
430 	}
431 
432 	hclge_reset_partial_32bit_counter(all_32_bit_stats);
433 	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
434 		if (unlikely(i == 0)) {
435 			__le16 *desc_data_16bit;
436 
437 			all_32_bit_stats->igu_rx_err_pkt +=
438 				le32_to_cpu(desc[i].data[0]);
439 
440 			desc_data_16bit = (__le16 *)&desc[i].data[1];
441 			all_32_bit_stats->igu_rx_no_eof_pkt +=
442 				le16_to_cpu(*desc_data_16bit);
443 
444 			desc_data_16bit++;
445 			all_32_bit_stats->igu_rx_no_sof_pkt +=
446 				le16_to_cpu(*desc_data_16bit);
447 
448 			desc_data = &desc[i].data[2];
449 			n = HCLGE_32_BIT_RTN_DATANUM - 4;
450 		} else {
451 			desc_data = (__le32 *)&desc[i];
452 			n = HCLGE_32_BIT_RTN_DATANUM;
453 		}
454 		for (k = 0; k < n; k++) {
455 			*data++ += le32_to_cpu(*desc_data);
456 			desc_data++;
457 		}
458 	}
459 
460 	return 0;
461 }
462 
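/* hclge_mac_update_stats: accumulate the MAC statistics returned by the
 * HCLGE_OPC_STATS_MAC command into hdev->hw_stats.mac_stats. Only two
 * 64-bit words of the first descriptor carry statistics; the remaining
 * descriptors are consumed in full.
 */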
463 static int hclge_mac_update_stats(struct hclge_dev *hdev)
464 {
465 #define HCLGE_MAC_CMD_NUM 17
466 #define HCLGE_RTN_DATA_NUM 4
467 
468 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
469 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
470 	__le64 *desc_data;
471 	int i, k, n;
472 	int ret;
473 
474 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
475 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
476 	if (ret) {
477 		dev_err(&hdev->pdev->dev,
478 			"Get MAC pkt stats fail, status = %d.\n", ret);
479 
480 		return ret;
481 	}
482 
483 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
484 		if (unlikely(i == 0)) {
485 			desc_data = (__le64 *)(&desc[i].data[0]);
486 			n = HCLGE_RTN_DATA_NUM - 2;
487 		} else {
488 			desc_data = (__le64 *)(&desc[i]);
489 			n = HCLGE_RTN_DATA_NUM;
490 		}
491 		for (k = 0; k < n; k++) {
492 			*data++ += le64_to_cpu(*desc_data);
493 			desc_data++;
494 		}
495 	}
496 
497 	return 0;
498 }
499 
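/* hclge_tqps_update_stats: read the RX and then TX packet counters of
 * every TQP, one queue per command, and accumulate them into each
 * queue's tqp_stats.
 */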
500 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
501 {
502 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
503 	struct hclge_vport *vport = hclge_get_vport(handle);
504 	struct hclge_dev *hdev = vport->back;
505 	struct hnae3_queue *queue;
506 	struct hclge_desc desc[1];
507 	struct hclge_tqp *tqp;
508 	int ret, i;
509 
510 	for (i = 0; i < kinfo->num_tqps; i++) {
511 		queue = handle->kinfo.tqp[i];
512 		tqp = container_of(queue, struct hclge_tqp, q);
513 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
514 		hclge_cmd_setup_basic_desc(&desc[0],
515 					   HCLGE_OPC_QUERY_RX_STATUS,
516 					   true);
517 
518 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
519 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
520 		if (ret) {
521 			dev_err(&hdev->pdev->dev,
522 				"Query tqp stat fail, status = %d, queue = %d\n",
523 				ret, i);
524 			return ret;
525 		}
526 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
527 			le32_to_cpu(desc[0].data[4]);
528 	}
529 
530 	for (i = 0; i < kinfo->num_tqps; i++) {
531 		queue = handle->kinfo.tqp[i];
532 		tqp = container_of(queue, struct hclge_tqp, q);
533 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
534 		hclge_cmd_setup_basic_desc(&desc[0],
535 					   HCLGE_OPC_QUERY_TX_STATUS,
536 					   true);
537 
538 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
539 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
540 		if (ret) {
541 			dev_err(&hdev->pdev->dev,
542 				"Query tqp stat fail, status = %d, queue = %d\n",
543 				ret, i);
544 			return ret;
545 		}
546 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
547 			le32_to_cpu(desc[0].data[4]);
548 	}
549 
550 	return 0;
551 }
552 
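/* hclge_tqps_get_stats: copy the accumulated per-queue counters into
 * the ethtool statistics buffer, TX counters first and then RX,
 * matching the string order built by hclge_tqps_get_strings().
 */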
553 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
554 {
555 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
556 	struct hclge_tqp *tqp;
557 	u64 *buff = data;
558 	int i;
559 
560 	for (i = 0; i < kinfo->num_tqps; i++) {
561 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
562 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
563 	}
564 
565 	for (i = 0; i < kinfo->num_tqps; i++) {
566 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
567 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
568 	}
569 
570 	return buff;
571 }
572 
573 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
574 {
575 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
576 
577 	return kinfo->num_tqps * 2;
578 }
579 
580 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
581 {
582 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
583 	u8 *buff = data;
584 	int i = 0;
585 
586 	for (i = 0; i < kinfo->num_tqps; i++) {
587 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
588 			struct hclge_tqp, q);
589 		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
590 			 tqp->index);
591 		buff = buff + ETH_GSTRING_LEN;
592 	}
593 
594 	for (i = 0; i < kinfo->num_tqps; i++) {
595 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
596 			struct hclge_tqp, q);
597 		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
598 			 tqp->index);
599 		buff = buff + ETH_GSTRING_LEN;
600 	}
601 
602 	return buff;
603 }
604 
605 static u64 *hclge_comm_get_stats(void *comm_stats,
606 				 const struct hclge_comm_stats_str strs[],
607 				 int size, u64 *data)
608 {
609 	u64 *buf = data;
610 	u32 i;
611 
612 	for (i = 0; i < size; i++)
613 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
614 
615 	return buf + size;
616 }
617 
618 static u8 *hclge_comm_get_strings(u32 stringset,
619 				  const struct hclge_comm_stats_str strs[],
620 				  int size, u8 *data)
621 {
622 	char *buff = (char *)data;
623 	u32 i;
624 
625 	if (stringset != ETH_SS_STATS)
626 		return buff;
627 
628 	for (i = 0; i < size; i++) {
629 		snprintf(buff, ETH_GSTRING_LEN, "%s",
630 			 strs[i].desc);
631 		buff = buff + ETH_GSTRING_LEN;
632 	}
633 
634 	return (u8 *)buff;
635 }
636 
637 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
638 				 struct net_device_stats *net_stats)
639 {
640 	net_stats->tx_dropped = 0;
641 	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
642 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
643 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
644 
645 	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
646 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
647 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
648 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
649 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
650 	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
651 
652 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
653 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
654 
655 	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
656 	net_stats->rx_length_errors =
657 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
658 	net_stats->rx_length_errors +=
659 		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
660 	net_stats->rx_over_errors =
661 		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
662 }
663 
664 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
665 {
666 	struct hnae3_handle *handle;
667 	int status;
668 
669 	handle = &hdev->vport[0].nic;
670 	if (handle->client) {
671 		status = hclge_tqps_update_stats(handle);
672 		if (status) {
673 			dev_err(&hdev->pdev->dev,
674 				"Update TQPS stats fail, status = %d.\n",
675 				status);
676 		}
677 	}
678 
679 	status = hclge_mac_update_stats(hdev);
680 	if (status)
681 		dev_err(&hdev->pdev->dev,
682 			"Update MAC stats fail, status = %d.\n", status);
683 
684 	status = hclge_32_bit_update_stats(hdev);
685 	if (status)
686 		dev_err(&hdev->pdev->dev,
687 			"Update 32 bit stats fail, status = %d.\n",
688 			status);
689 
690 	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
691 }
692 
693 static void hclge_update_stats(struct hnae3_handle *handle,
694 			       struct net_device_stats *net_stats)
695 {
696 	struct hclge_vport *vport = hclge_get_vport(handle);
697 	struct hclge_dev *hdev = vport->back;
698 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
699 	int status;
700 
701 	status = hclge_mac_update_stats(hdev);
702 	if (status)
703 		dev_err(&hdev->pdev->dev,
704 			"Update MAC stats fail, status = %d.\n",
705 			status);
706 
707 	status = hclge_32_bit_update_stats(hdev);
708 	if (status)
709 		dev_err(&hdev->pdev->dev,
710 			"Update 32 bit stats fail, status = %d.\n",
711 			status);
712 
713 	status = hclge_64_bit_update_stats(hdev);
714 	if (status)
715 		dev_err(&hdev->pdev->dev,
716 			"Update 64 bit stats fail, status = %d.\n",
717 			status);
718 
719 	status = hclge_tqps_update_stats(handle);
720 	if (status)
721 		dev_err(&hdev->pdev->dev,
722 			"Update TQPS stats fail, status = %d.\n",
723 			status);
724 
725 	hclge_update_netstat(hw_stats, net_stats);
726 }
727 
728 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
729 {
730 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7
731 
732 	struct hclge_vport *vport = hclge_get_vport(handle);
733 	struct hclge_dev *hdev = vport->back;
734 	int count = 0;
735 
736 	/* Loopback test support rules:
737 	 * mac: only GE mode is supported
738 	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
739 	 * phy: only supported when a PHY device exists on the board
740 	 */
741 	if (stringset == ETH_SS_TEST) {
742 		/* clear loopback bit flags at first */
743 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
744 		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 			count += 1;
748 			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
749 		} else {
750 			count = -EOPNOTSUPP;
751 		}
752 	} else if (stringset == ETH_SS_STATS) {
753 		count = ARRAY_SIZE(g_mac_stats_string) +
754 			ARRAY_SIZE(g_all_32bit_stats_string) +
755 			ARRAY_SIZE(g_all_64bit_stats_string) +
756 			hclge_tqps_get_sset_count(handle, stringset);
757 	}
758 
759 	return count;
760 }
761 
762 static void hclge_get_strings(struct hnae3_handle *handle,
763 			      u32 stringset,
764 			      u8 *data)
765 {
766 	u8 *p = data;
767 	int size;
768 
769 	if (stringset == ETH_SS_STATS) {
770 		size = ARRAY_SIZE(g_mac_stats_string);
771 		p = hclge_comm_get_strings(stringset,
772 					   g_mac_stats_string,
773 					   size,
774 					   p);
775 		size = ARRAY_SIZE(g_all_32bit_stats_string);
776 		p = hclge_comm_get_strings(stringset,
777 					   g_all_32bit_stats_string,
778 					   size,
779 					   p);
780 		size = ARRAY_SIZE(g_all_64bit_stats_string);
781 		p = hclge_comm_get_strings(stringset,
782 					   g_all_64bit_stats_string,
783 					   size,
784 					   p);
785 		p = hclge_tqps_get_strings(handle, p);
786 	} else if (stringset == ETH_SS_TEST) {
787 		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
788 			memcpy(p,
789 			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
794 			memcpy(p,
795 			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
796 			       ETH_GSTRING_LEN);
797 			p += ETH_GSTRING_LEN;
798 		}
799 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
800 			memcpy(p,
801 			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
802 			       ETH_GSTRING_LEN);
803 			p += ETH_GSTRING_LEN;
804 		}
805 	}
806 }
807 
808 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
809 {
810 	struct hclge_vport *vport = hclge_get_vport(handle);
811 	struct hclge_dev *hdev = vport->back;
812 	u64 *p;
813 
814 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
815 				 g_mac_stats_string,
816 				 ARRAY_SIZE(g_mac_stats_string),
817 				 data);
818 	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
819 				 g_all_32bit_stats_string,
820 				 ARRAY_SIZE(g_all_32bit_stats_string),
821 				 p);
822 	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
823 				 g_all_64bit_stats_string,
824 				 ARRAY_SIZE(g_all_64bit_stats_string),
825 				 p);
826 	p = hclge_tqps_get_stats(handle, p);
827 }
828 
829 static int hclge_parse_func_status(struct hclge_dev *hdev,
830 				   struct hclge_func_status_cmd *status)
831 {
832 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 		return -EINVAL;
834 
835 	/* Record whether this PF is the main PF */
836 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 		hdev->flag |= HCLGE_FLAG_MAIN;
838 	else
839 		hdev->flag &= ~HCLGE_FLAG_MAIN;
840 
841 	return 0;
842 }
843 
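/* hclge_query_function_status: poll the function status from firmware,
 * waiting up to five retries for the PF reset to complete (pf_state set),
 * then parse the result to record whether this PF is the main PF.
 */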
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 	struct hclge_func_status_cmd *req;
847 	struct hclge_desc desc;
848 	int timeout = 0;
849 	int ret;
850 
851 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
852 	req = (struct hclge_func_status_cmd *)desc.data;
853 
854 	do {
855 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
856 		if (ret) {
857 			dev_err(&hdev->pdev->dev,
858 				"query function status failed %d.\n",
859 				ret);
860 
861 			return ret;
862 		}
863 
864 		/* Check if PF reset is done */
865 		if (req->pf_state)
866 			break;
867 		usleep_range(1000, 2000);
868 	} while (timeout++ < 5);
869 
870 	ret = hclge_parse_func_status(hdev, req);
871 
872 	return ret;
873 }
874 
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877 	struct hclge_pf_res_cmd *req;
878 	struct hclge_desc desc;
879 	int ret;
880 
881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883 	if (ret) {
884 		dev_err(&hdev->pdev->dev,
885 			"query pf resource failed %d.\n", ret);
886 		return ret;
887 	}
888 
889 	req = (struct hclge_pf_res_cmd *)desc.data;
890 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
891 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 
893 	if (hnae3_dev_roce_supported(hdev)) {
894 		hdev->num_roce_msi =
895 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
896 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
897 
898 		/* PF should have NIC vectors and RoCE vectors,
899 		 * with the NIC vectors laid out before the RoCE vectors.
900 		 */
901 		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
902 	} else {
903 		hdev->num_msi =
904 		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
905 			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
906 	}
907 
908 	return 0;
909 }
910 
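/* hclge_parse_speed: translate the firmware speed code into an
 * HCLGE_MAC_SPEED_* value (0: 1G, 1: 10G, 2: 25G, 3: 40G, 4: 50G,
 * 5: 100G, 6: 10M, 7: 100M); unknown codes return -EINVAL.
 */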
911 static int hclge_parse_speed(int speed_cmd, int *speed)
912 {
913 	switch (speed_cmd) {
914 	case 6:
915 		*speed = HCLGE_MAC_SPEED_10M;
916 		break;
917 	case 7:
918 		*speed = HCLGE_MAC_SPEED_100M;
919 		break;
920 	case 0:
921 		*speed = HCLGE_MAC_SPEED_1G;
922 		break;
923 	case 1:
924 		*speed = HCLGE_MAC_SPEED_10G;
925 		break;
926 	case 2:
927 		*speed = HCLGE_MAC_SPEED_25G;
928 		break;
929 	case 3:
930 		*speed = HCLGE_MAC_SPEED_40G;
931 		break;
932 	case 4:
933 		*speed = HCLGE_MAC_SPEED_50G;
934 		break;
935 	case 5:
936 		*speed = HCLGE_MAC_SPEED_100G;
937 		break;
938 	default:
939 		return -EINVAL;
940 	}
941 
942 	return 0;
943 }
944 
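/* hclge_parse_cfg: unpack the configuration words returned by the
 * HCLGE_OPC_GET_CFG_PARAM command from the two descriptors filled in
 * by hclge_get_cfg().
 */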
945 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
946 {
947 	struct hclge_cfg_param_cmd *req;
948 	u64 mac_addr_tmp_high;
949 	u64 mac_addr_tmp;
950 	int i;
951 
952 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
953 
954 	/* get the configuration */
955 	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
956 					     HCLGE_CFG_VMDQ_M,
957 					     HCLGE_CFG_VMDQ_S);
958 	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
959 				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
960 	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
961 					   HCLGE_CFG_TQP_DESC_N_M,
962 					   HCLGE_CFG_TQP_DESC_N_S);
963 
964 	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
965 				       HCLGE_CFG_PHY_ADDR_M,
966 				       HCLGE_CFG_PHY_ADDR_S);
967 	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
968 					 HCLGE_CFG_MEDIA_TP_M,
969 					 HCLGE_CFG_MEDIA_TP_S);
970 	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
971 					 HCLGE_CFG_RX_BUF_LEN_M,
972 					 HCLGE_CFG_RX_BUF_LEN_S);
973 	/* get mac_address */
974 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
975 	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
976 					   HCLGE_CFG_MAC_ADDR_H_M,
977 					   HCLGE_CFG_MAC_ADDR_H_S);
978 
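	/* combine the high bits of the MAC address above the low 32 bits */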
979 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
980 
981 	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
982 					    HCLGE_CFG_DEFAULT_SPEED_M,
983 					    HCLGE_CFG_DEFAULT_SPEED_S);
984 	for (i = 0; i < ETH_ALEN; i++)
985 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
986 
987 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
988 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
989 }
990 
991 /* hclge_get_cfg: query the static parameter from flash
992  * @hdev: pointer to struct hclge_dev
993  * @hcfg: the config structure to be filled
994  */
995 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
996 {
997 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
998 	struct hclge_cfg_param_cmd *req;
999 	int i, ret;
1000 
1001 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1002 		u32 offset = 0;
1003 
1004 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1005 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1006 					   true);
1007 		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
1008 			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1009 		/* Length must be given to hardware in units of 4 bytes */
1010 		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1011 			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1012 		req->offset = cpu_to_le32(offset);
1013 	}
1014 
1015 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1016 	if (ret) {
1017 		dev_err(&hdev->pdev->dev,
1018 			"get config failed %d.\n", ret);
1019 		return ret;
1020 	}
1021 
1022 	hclge_parse_cfg(hcfg, desc);
1023 	return 0;
1024 }
1025 
1026 static int hclge_get_cap(struct hclge_dev *hdev)
1027 {
1028 	int ret;
1029 
1030 	ret = hclge_query_function_status(hdev);
1031 	if (ret) {
1032 		dev_err(&hdev->pdev->dev,
1033 			"query function status error %d.\n", ret);
1034 		return ret;
1035 	}
1036 
1037 	/* get pf resource */
1038 	ret = hclge_query_pf_resource(hdev);
1039 	if (ret) {
1040 		dev_err(&hdev->pdev->dev,
1041 			"query pf resource error %d.\n", ret);
1042 		return ret;
1043 	}
1044 
1045 	return 0;
1046 }
1047 
1048 static int hclge_configure(struct hclge_dev *hdev)
1049 {
1050 	struct hclge_cfg cfg;
1051 	int ret, i;
1052 
1053 	ret = hclge_get_cfg(hdev, &cfg);
1054 	if (ret) {
1055 		dev_err(&hdev->pdev->dev, "get config error %d.\n", ret);
1056 		return ret;
1057 	}
1058 
1059 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1060 	hdev->base_tqp_pid = 0;
1061 	hdev->rss_size_max = 1;
1062 	hdev->rx_buf_len = cfg.rx_buf_len;
1063 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1064 	hdev->hw.mac.media_type = cfg.media_type;
1065 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1066 	hdev->num_desc = cfg.tqp_desc_num;
1067 	hdev->tm_info.num_pg = 1;
1068 	hdev->tc_max = cfg.tc_num;
1069 	hdev->tm_info.hw_pfc_map = 0;
1070 
1071 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1072 	if (ret) {
1073 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1074 		return ret;
1075 	}
1076 
1077 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1078 	    (hdev->tc_max < 1)) {
1079 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1080 			 hdev->tc_max);
1081 		hdev->tc_max = 1;
1082 	}
1083 
1084 	/* Dev does not support DCB */
1085 	if (!hnae3_dev_dcb_supported(hdev)) {
1086 		hdev->tc_max = 1;
1087 		hdev->pfc_max = 0;
1088 	} else {
1089 		hdev->pfc_max = hdev->tc_max;
1090 	}
1091 
1092 	hdev->tm_info.num_tc = hdev->tc_max;
1093 
1094 	/* Non-contiguous TCs are currently not supported */
1095 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1096 		hnae_set_bit(hdev->hw_tc_map, i, 1);
1097 
1098 	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
1099 		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1100 	else
1101 		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
1102 
1103 	return ret;
1104 }
1105 
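/* hclge_config_tso: configure the TSO MSS range in hardware
 * @hdev: pointer to struct hclge_dev
 * @tso_mss_min: minimum MSS allowed for TSO
 * @tso_mss_max: maximum MSS allowed for TSO
 */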
1106 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1107 			    int tso_mss_max)
1108 {
1109 	struct hclge_cfg_tso_status_cmd *req;
1110 	struct hclge_desc desc;
1111 	u16 tso_mss;
1112 
1113 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1114 
1115 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1116 
1117 	tso_mss = 0;
1118 	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1119 		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1120 	req->tso_mss_min = cpu_to_le16(tso_mss);
1121 
1122 	tso_mss = 0;
1123 	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1124 		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1125 	req->tso_mss_max = cpu_to_le16(tso_mss);
1126 
1127 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1128 }
1129 
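/* hclge_alloc_tqps: allocate the hclge_tqp array for all hardware queues
 * and point each queue's q.io_base at its register window
 * (HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE).
 */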
1130 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1131 {
1132 	struct hclge_tqp *tqp;
1133 	int i;
1134 
1135 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1136 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1137 	if (!hdev->htqp)
1138 		return -ENOMEM;
1139 
1140 	tqp = hdev->htqp;
1141 
1142 	for (i = 0; i < hdev->num_tqps; i++) {
1143 		tqp->dev = &hdev->pdev->dev;
1144 		tqp->index = i;
1145 
1146 		tqp->q.ae_algo = &ae_algo;
1147 		tqp->q.buf_size = hdev->rx_buf_len;
1148 		tqp->q.desc_num = hdev->num_desc;
1149 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1150 			i * HCLGE_TQP_REG_SIZE;
1151 
1152 		tqp++;
1153 	}
1154 
1155 	return 0;
1156 }
1157 
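/* hclge_map_tqps_to_func: bind physical queue tqp_pid to function func_id
 * (PF or VF) and assign its per-function virtual queue id tqp_vid via the
 * HCLGE_OPC_SET_TQP_MAP command.
 */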
1158 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1159 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1160 {
1161 	struct hclge_tqp_map_cmd *req;
1162 	struct hclge_desc desc;
1163 	int ret;
1164 
1165 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1166 
1167 	req = (struct hclge_tqp_map_cmd *)desc.data;
1168 	req->tqp_id = cpu_to_le16(tqp_pid);
1169 	req->tqp_vf = func_id;
1170 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1171 			1 << HCLGE_TQP_MAP_EN_B;
1172 	req->tqp_vid = cpu_to_le16(tqp_vid);
1173 
1174 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1175 	if (ret) {
1176 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
1177 			ret);
1178 		return ret;
1179 	}
1180 
1181 	return 0;
1182 }
1183 
1184 static int  hclge_assign_tqp(struct hclge_vport *vport,
1185 			     struct hnae3_queue **tqp, u16 num_tqps)
1186 {
1187 	struct hclge_dev *hdev = vport->back;
1188 	int i, alloced;
1189 
1190 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1191 	     alloced < num_tqps; i++) {
1192 		if (!hdev->htqp[i].alloced) {
1193 			hdev->htqp[i].q.handle = &vport->nic;
1194 			hdev->htqp[i].q.tqp_index = alloced;
1195 			tqp[alloced] = &hdev->htqp[i].q;
1196 			hdev->htqp[i].alloced = true;
1197 			alloced++;
1198 		}
1199 	}
1200 	vport->alloc_tqps = num_tqps;
1201 
1202 	return 0;
1203 }
1204 
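/* hclge_knic_setup: derive the vport's knic parameters (descriptor count,
 * rx buffer length, num_tc, rss_size and the per-TC queue layout) from the
 * device configuration, then allocate and assign its TQP array.
 */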
1205 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
1206 {
1207 	struct hnae3_handle *nic = &vport->nic;
1208 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1209 	struct hclge_dev *hdev = vport->back;
1210 	int i, ret;
1211 
1212 	kinfo->num_desc = hdev->num_desc;
1213 	kinfo->rx_buf_len = hdev->rx_buf_len;
1214 	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
1215 	kinfo->rss_size
1216 		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
1217 	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;
1218 
1219 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1220 		if (hdev->hw_tc_map & BIT(i)) {
1221 			kinfo->tc_info[i].enable = true;
1222 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
1223 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
1224 			kinfo->tc_info[i].tc = i;
1225 		} else {
1226 			/* Set to default queue if TC is disabled */
1227 			kinfo->tc_info[i].enable = false;
1228 			kinfo->tc_info[i].tqp_offset = 0;
1229 			kinfo->tc_info[i].tqp_count = 1;
1230 			kinfo->tc_info[i].tc = 0;
1231 		}
1232 	}
1233 
1234 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
1235 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1236 	if (!kinfo->tqp)
1237 		return -ENOMEM;
1238 
1239 	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
1240 	if (ret) {
1241 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1242 		return -EINVAL;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1249 				  struct hclge_vport *vport)
1250 {
1251 	struct hnae3_handle *nic = &vport->nic;
1252 	struct hnae3_knic_private_info *kinfo;
1253 	u16 i;
1254 
1255 	kinfo = &nic->kinfo;
1256 	for (i = 0; i < kinfo->num_tqps; i++) {
1257 		struct hclge_tqp *q =
1258 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1259 		bool is_pf;
1260 		int ret;
1261 
1262 		is_pf = !(vport->vport_id);
1263 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1264 					     i, is_pf);
1265 		if (ret)
1266 			return ret;
1267 	}
1268 
1269 	return 0;
1270 }
1271 
1272 static int hclge_map_tqp(struct hclge_dev *hdev)
1273 {
1274 	struct hclge_vport *vport = hdev->vport;
1275 	u16 i, num_vport;
1276 
1277 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1278 	for (i = 0; i < num_vport; i++)	{
1279 		int ret;
1280 
1281 		ret = hclge_map_tqp_to_vport(hdev, vport);
1282 		if (ret)
1283 			return ret;
1284 
1285 		vport++;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1292 {
1293 	/* this would be initialized later */
1294 }
1295 
1296 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1297 {
1298 	struct hnae3_handle *nic = &vport->nic;
1299 	struct hclge_dev *hdev = vport->back;
1300 	int ret;
1301 
1302 	nic->pdev = hdev->pdev;
1303 	nic->ae_algo = &ae_algo;
1304 	nic->numa_node_mask = hdev->numa_node_mask;
1305 
1306 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1307 		ret = hclge_knic_setup(vport, num_tqps);
1308 		if (ret) {
1309 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1310 				ret);
1311 			return ret;
1312 		}
1313 	} else {
1314 		hclge_unic_setup(vport, num_tqps);
1315 	}
1316 
1317 	return 0;
1318 }
1319 
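/* hclge_alloc_vport: allocate one vport for the PF plus one per VMDq vport
 * and requested VF, enable SR-IOV when VFs are requested, and split the
 * TQPs evenly, with any remainder going to the main (PF) vport.
 */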
1320 static int hclge_alloc_vport(struct hclge_dev *hdev)
1321 {
1322 	struct pci_dev *pdev = hdev->pdev;
1323 	struct hclge_vport *vport;
1324 	u32 tqp_main_vport;
1325 	u32 tqp_per_vport;
1326 	int num_vport, i;
1327 	int ret;
1328 
1329 	/* We need to alloc a vport for the main NIC of the PF */
1330 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1331 
1332 	if (hdev->num_tqps < num_vport)
1333 		num_vport = hdev->num_tqps;
1334 
1335 	/* Alloc the same number of TQPs for every vport */
1336 	tqp_per_vport = hdev->num_tqps / num_vport;
1337 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1338 
1339 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1340 			     GFP_KERNEL);
1341 	if (!vport)
1342 		return -ENOMEM;
1343 
1344 	hdev->vport = vport;
1345 	hdev->num_alloc_vport = num_vport;
1346 
1347 #ifdef CONFIG_PCI_IOV
1348 	/* Enable SRIOV */
1349 	if (hdev->num_req_vfs) {
1350 		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
1351 			 hdev->num_req_vfs);
1352 		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
1353 		if (ret) {
1354 			hdev->num_alloc_vfs = 0;
1355 			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
1356 				ret);
1357 			return ret;
1358 		}
1359 	}
1360 	hdev->num_alloc_vfs = hdev->num_req_vfs;
1361 #endif
1362 
1363 	for (i = 0; i < num_vport; i++) {
1364 		vport->back = hdev;
1365 		vport->vport_id = i;
1366 
1367 		if (i == 0)
1368 			ret = hclge_vport_setup(vport, tqp_main_vport);
1369 		else
1370 			ret = hclge_vport_setup(vport, tqp_per_vport);
1371 		if (ret) {
1372 			dev_err(&pdev->dev,
1373 				"vport setup failed for vport %d, %d\n",
1374 				i, ret);
1375 			return ret;
1376 		}
1377 
1378 		vport++;
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1385 				    struct hclge_pkt_buf_alloc *buf_alloc)
1386 {
1387 /* TX buffer size is in units of 128 bytes */
1388 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1389 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1390 	struct hclge_tx_buff_alloc_cmd *req;
1391 	struct hclge_desc desc;
1392 	int ret;
1393 	u8 i;
1394 
1395 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1396 
1397 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1398 	for (i = 0; i < HCLGE_TC_NUM; i++) {
1399 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1400 
1401 		req->tx_pkt_buff[i] =
1402 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1403 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1404 	}
1405 
1406 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1407 	if (ret) {
1408 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1409 			ret);
1410 		return ret;
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1417 				 struct hclge_pkt_buf_alloc *buf_alloc)
1418 {
1419 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1420 
1421 	if (ret) {
1422 		dev_err(&hdev->pdev->dev,
1423 			"tx buffer alloc failed %d\n", ret);
1424 		return ret;
1425 	}
1426 
1427 	return 0;
1428 }
1429 
1430 static int hclge_get_tc_num(struct hclge_dev *hdev)
1431 {
1432 	int i, cnt = 0;
1433 
1434 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1435 		if (hdev->hw_tc_map & BIT(i))
1436 			cnt++;
1437 	return cnt;
1438 }
1439 
1440 static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
1441 {
1442 	int i, cnt = 0;
1443 
1444 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1445 		if (hdev->hw_tc_map & BIT(i) &&
1446 		    hdev->tm_info.hw_pfc_map & BIT(i))
1447 			cnt++;
1448 	return cnt;
1449 }
1450 
1451 /* Get the number of pfc-enabled TCs that have a private buffer */
1452 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1453 				  struct hclge_pkt_buf_alloc *buf_alloc)
1454 {
1455 	struct hclge_priv_buf *priv;
1456 	int i, cnt = 0;
1457 
1458 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1459 		priv = &buf_alloc->priv_buf[i];
1460 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1461 		    priv->enable)
1462 			cnt++;
1463 	}
1464 
1465 	return cnt;
1466 }
1467 
1468 /* Get the number of pfc-disabled TCs that have a private buffer */
1469 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1470 				     struct hclge_pkt_buf_alloc *buf_alloc)
1471 {
1472 	struct hclge_priv_buf *priv;
1473 	int i, cnt = 0;
1474 
1475 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1476 		priv = &buf_alloc->priv_buf[i];
1477 		if (hdev->hw_tc_map & BIT(i) &&
1478 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1479 		    priv->enable)
1480 			cnt++;
1481 	}
1482 
1483 	return cnt;
1484 }
1485 
1486 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1487 {
1488 	struct hclge_priv_buf *priv;
1489 	u32 rx_priv = 0;
1490 	int i;
1491 
1492 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1493 		priv = &buf_alloc->priv_buf[i];
1494 		if (priv->enable)
1495 			rx_priv += priv->buf_size;
1496 	}
1497 	return rx_priv;
1498 }
1499 
1500 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1501 {
1502 	u32 i, total_tx_size = 0;
1503 
1504 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1505 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1506 
1507 	return total_tx_size;
1508 }
1509 
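/* hclge_is_rx_buf_ok: check whether the rx buffer space left after the
 * private buffers can hold the required shared buffer; if it can, record
 * the shared buffer size and per-TC thresholds in buf_alloc.
 */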
1510 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1511 				struct hclge_pkt_buf_alloc *buf_alloc,
1512 				u32 rx_all)
1513 {
1514 	u32 shared_buf_min, shared_buf_tc, shared_std;
1515 	int tc_num, pfc_enable_num;
1516 	u32 shared_buf;
1517 	u32 rx_priv;
1518 	int i;
1519 
1520 	tc_num = hclge_get_tc_num(hdev);
1521 	pfc_enable_num = hclge_get_pfc_enable_num(hdev);
1522 
1523 	if (hnae3_dev_dcb_supported(hdev))
1524 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1525 	else
1526 		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1527 
1528 	shared_buf_tc = pfc_enable_num * hdev->mps +
1529 			(tc_num - pfc_enable_num) * hdev->mps / 2 +
1530 			hdev->mps;
1531 	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
1532 
1533 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1534 	if (rx_all <= rx_priv + shared_std)
1535 		return false;
1536 
1537 	shared_buf = rx_all - rx_priv;
1538 	buf_alloc->s_buf.buf_size = shared_buf;
1539 	buf_alloc->s_buf.self.high = shared_buf;
1540 	buf_alloc->s_buf.self.low = 2 * hdev->mps;
1541 
1542 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1543 		if ((hdev->hw_tc_map & BIT(i)) &&
1544 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
1545 			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
1546 			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
1547 		} else {
1548 			buf_alloc->s_buf.tc_thrd[i].low = 0;
1549 			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
1550 		}
1551 	}
1552 
1553 	return true;
1554 }
1555 
1556 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1557 				struct hclge_pkt_buf_alloc *buf_alloc)
1558 {
1559 	u32 i, total_size;
1560 
1561 	total_size = hdev->pkt_buf_size;
1562 
1563 	/* alloc tx buffer for all enabled tc */
1564 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1565 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1566 
1567 		if (total_size < HCLGE_DEFAULT_TX_BUF)
1568 			return -ENOMEM;
1569 
1570 		if (hdev->hw_tc_map & BIT(i))
1571 			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
1572 		else
1573 			priv->tx_buf_size = 0;
1574 
1575 		total_size -= priv->tx_buf_size;
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1582  * @hdev: pointer to struct hclge_dev
1583  * @buf_alloc: pointer to buffer calculation data
1584  * @return: 0: calculation successful, negative: failure
1585  */
1586 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1587 				struct hclge_pkt_buf_alloc *buf_alloc)
1588 {
1589 	u32 rx_all = hdev->pkt_buf_size;
1590 	int no_pfc_priv_num, pfc_priv_num;
1591 	struct hclge_priv_buf *priv;
1592 	int i;
1593 
1594 	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1595 
1596 	/* When DCB is not supported, rx private
1597 	 * buffer is not allocated.
1598 	 */
1599 	if (!hnae3_dev_dcb_supported(hdev)) {
1600 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1601 			return -ENOMEM;
1602 
1603 		return 0;
1604 	}
1605 
1606 	/* step 1, try to alloc private buffer for all enabled tc */
1607 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1608 		priv = &buf_alloc->priv_buf[i];
1609 		if (hdev->hw_tc_map & BIT(i)) {
1610 			priv->enable = 1;
1611 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1612 				priv->wl.low = hdev->mps;
1613 				priv->wl.high = priv->wl.low + hdev->mps;
1614 				priv->buf_size = priv->wl.high +
1615 						HCLGE_DEFAULT_DV;
1616 			} else {
1617 				priv->wl.low = 0;
1618 				priv->wl.high = 2 * hdev->mps;
1619 				priv->buf_size = priv->wl.high;
1620 			}
1621 		} else {
1622 			priv->enable = 0;
1623 			priv->wl.low = 0;
1624 			priv->wl.high = 0;
1625 			priv->buf_size = 0;
1626 		}
1627 	}
1628 
1629 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1630 		return 0;
1631 
1632 	/* step 2, try to decrease the private buffer size of
1633 	 * the TCs without pfc enabled
1634 	 */
1635 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1636 		priv = &buf_alloc->priv_buf[i];
1637 
1638 		priv->enable = 0;
1639 		priv->wl.low = 0;
1640 		priv->wl.high = 0;
1641 		priv->buf_size = 0;
1642 
1643 		if (!(hdev->hw_tc_map & BIT(i)))
1644 			continue;
1645 
1646 		priv->enable = 1;
1647 
1648 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1649 			priv->wl.low = 128;
1650 			priv->wl.high = priv->wl.low + hdev->mps;
1651 			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
1652 		} else {
1653 			priv->wl.low = 0;
1654 			priv->wl.high = hdev->mps;
1655 			priv->buf_size = priv->wl.high;
1656 		}
1657 	}
1658 
1659 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1660 		return 0;
1661 
1662 	/* step 3, try to reduce the number of pfc-disabled TCs
1663 	 * which still have a private buffer
1664 	 */
1665 	/* get the number of pfc-disabled TCs that still have a private buffer */
1666 	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1667 
1668 	/* clear starting from the last TC */
1669 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1670 		priv = &buf_alloc->priv_buf[i];
1671 
1672 		if (hdev->hw_tc_map & BIT(i) &&
1673 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1674 			/* Clear the no pfc TC private buffer */
1675 			priv->wl.low = 0;
1676 			priv->wl.high = 0;
1677 			priv->buf_size = 0;
1678 			priv->enable = 0;
1679 			no_pfc_priv_num--;
1680 		}
1681 
1682 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1683 		    no_pfc_priv_num == 0)
1684 			break;
1685 	}
1686 
1687 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1688 		return 0;
1689 
1690 	/* step 4, try to reduce the number of pfc enabled TCs
1691 	 * which have private buffer.
1692 	 */
1693 	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1694 
1695 	/* clear starting from the last TC */
1696 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1697 		priv = &buf_alloc->priv_buf[i];
1698 
1699 		if (hdev->hw_tc_map & BIT(i) &&
1700 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1701 			/* Reduce the number of pfc TC with private buffer */
1702 			priv->wl.low = 0;
1703 			priv->enable = 0;
1704 			priv->wl.high = 0;
1705 			priv->buf_size = 0;
1706 			pfc_priv_num--;
1707 		}
1708 
1709 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1710 		    pfc_priv_num == 0)
1711 			break;
1712 	}
1713 	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1714 		return 0;
1715 
1716 	return -ENOMEM;
1717 }
1718 
1719 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1720 				   struct hclge_pkt_buf_alloc *buf_alloc)
1721 {
1722 	struct hclge_rx_priv_buff_cmd *req;
1723 	struct hclge_desc desc;
1724 	int ret;
1725 	int i;
1726 
1727 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1728 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1729 
1730 	/* Alloc private buffer TCs */
1731 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1732 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1733 
1734 		req->buf_num[i] =
1735 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1736 		req->buf_num[i] |=
1737 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1738 	}
1739 
1740 	req->shared_buf =
1741 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1742 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1743 
1744 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1745 	if (ret) {
1746 		dev_err(&hdev->pdev->dev,
1747 			"rx private buffer alloc cmd failed %d\n", ret);
1748 		return ret;
1749 	}
1750 
1751 	return 0;
1752 }
1753 
1754 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
1755 
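/* hclge_rx_priv_wl_config: program the per-TC private buffer waterlines
 * (high/low), HCLGE_TC_NUM_ONE_DESC TCs per descriptor, sending both
 * descriptors in one command.
 */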
1756 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1757 				   struct hclge_pkt_buf_alloc *buf_alloc)
1758 {
1759 	struct hclge_rx_priv_wl_buf *req;
1760 	struct hclge_priv_buf *priv;
1761 	struct hclge_desc desc[2];
1762 	int i, j;
1763 	int ret;
1764 
1765 	for (i = 0; i < 2; i++) {
1766 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1767 					   false);
1768 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1769 
1770 		/* The first descriptor sets the NEXT flag, the second clears it */
1771 		if (i == 0)
1772 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1773 		else
1774 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1775 
1776 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1777 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1778 
1779 			priv = &buf_alloc->priv_buf[idx];
1780 			req->tc_wl[j].high =
1781 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1782 			req->tc_wl[j].high |=
1783 				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
1784 					    HCLGE_RX_PRIV_EN_B);
1785 			req->tc_wl[j].low =
1786 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1787 			req->tc_wl[j].low |=
1788 				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
1789 					    HCLGE_RX_PRIV_EN_B);
1790 		}
1791 	}
1792 
1793 	/* Send 2 descriptors at one time */
1794 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1795 	if (ret) {
1796 		dev_err(&hdev->pdev->dev,
1797 			"rx private waterline config cmd failed %d\n",
1798 			ret);
1799 		return ret;
1800 	}
1801 	return 0;
1802 }
1803 
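/* hclge_common_thrd_config: program the per-TC high/low thresholds kept in
 * the shared buffer (s_buf->tc_thrd), again split across two linked
 * descriptors sent as one command.
 */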
1804 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1805 				    struct hclge_pkt_buf_alloc *buf_alloc)
1806 {
1807 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1808 	struct hclge_rx_com_thrd *req;
1809 	struct hclge_desc desc[2];
1810 	struct hclge_tc_thrd *tc;
1811 	int i, j;
1812 	int ret;
1813 
1814 	for (i = 0; i < 2; i++) {
1815 		hclge_cmd_setup_basic_desc(&desc[i],
1816 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1817 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1818 
1819 		/* The first descriptor sets the NEXT bit to 1 */
1820 		if (i == 0)
1821 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1822 		else
1823 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1824 
1825 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1826 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1827 
1828 			req->com_thrd[j].high =
1829 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1830 			req->com_thrd[j].high |=
1831 				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
1832 					    HCLGE_RX_PRIV_EN_B);
1833 			req->com_thrd[j].low =
1834 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1835 			req->com_thrd[j].low |=
1836 				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
1837 					    HCLGE_RX_PRIV_EN_B);
1838 		}
1839 	}
1840 
1841 	/* Send 2 descriptors at one time */
1842 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1843 	if (ret) {
1844 		dev_err(&hdev->pdev->dev,
1845 			"common threshold config cmd failed %d\n", ret);
1846 		return ret;
1847 	}
1848 	return 0;
1849 }
1850 
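/* hclge_common_wl_config: program the high/low waterlines of the shared
 * buffer itself (s_buf->self).
 */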
1851 static int hclge_common_wl_config(struct hclge_dev *hdev,
1852 				  struct hclge_pkt_buf_alloc *buf_alloc)
1853 {
1854 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1855 	struct hclge_rx_com_wl *req;
1856 	struct hclge_desc desc;
1857 	int ret;
1858 
1859 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1860 
1861 	req = (struct hclge_rx_com_wl *)desc.data;
1862 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1863 	req->com_wl.high |=
1864 		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
1865 			    HCLGE_RX_PRIV_EN_B);
1866 
1867 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1868 	req->com_wl.low |=
1869 		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
1870 			    HCLGE_RX_PRIV_EN_B);
1871 
1872 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1873 	if (ret) {
1874 		dev_err(&hdev->pdev->dev,
1875 			"common waterline config cmd failed %d\n", ret);
1876 		return ret;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
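/* hclge_buffer_alloc: top-level packet buffer setup. Calculate and commit
 * the TX buffers, calculate and commit the RX private buffers, then, when
 * DCB is supported, the RX private waterlines and the common thresholds,
 * and finally the common waterline. The scratch hclge_pkt_buf_alloc is
 * freed before returning.
 */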
1882 int hclge_buffer_alloc(struct hclge_dev *hdev)
1883 {
1884 	struct hclge_pkt_buf_alloc *pkt_buf;
1885 	int ret;
1886 
1887 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1888 	if (!pkt_buf)
1889 		return -ENOMEM;
1890 
1891 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1892 	if (ret) {
1893 		dev_err(&hdev->pdev->dev,
1894 			"could not calc tx buffer size for all TCs %d\n", ret);
1895 		goto out;
1896 	}
1897 
1898 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1899 	if (ret) {
1900 		dev_err(&hdev->pdev->dev,
1901 			"could not alloc tx buffers %d\n", ret);
1902 		goto out;
1903 	}
1904 
1905 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1906 	if (ret) {
1907 		dev_err(&hdev->pdev->dev,
1908 			"could not calc rx priv buffer size for all TCs %d\n",
1909 			ret);
1910 		goto out;
1911 	}
1912 
1913 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1914 	if (ret) {
1915 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1916 			ret);
1917 		goto out;
1918 	}
1919 
1920 	if (hnae3_dev_dcb_supported(hdev)) {
1921 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1922 		if (ret) {
1923 			dev_err(&hdev->pdev->dev,
1924 				"could not configure rx private waterline %d\n",
1925 				ret);
1926 			goto out;
1927 		}
1928 
1929 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1930 		if (ret) {
1931 			dev_err(&hdev->pdev->dev,
1932 				"could not configure common threshold %d\n",
1933 				ret);
1934 			goto out;
1935 		}
1936 	}
1937 
1938 	ret = hclge_common_wl_config(hdev, pkt_buf);
1939 	if (ret)
1940 		dev_err(&hdev->pdev->dev,
1941 			"could not configure common waterline %d\n", ret);
1942 
1943 out:
1944 	kfree(pkt_buf);
1945 	return ret;
1946 }
1947 
1948 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1949 {
1950 	struct hnae3_handle *roce = &vport->roce;
1951 	struct hnae3_handle *nic = &vport->nic;
1952 
1953 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1954 
1955 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1956 	    vport->back->num_msi_left == 0)
1957 		return -EINVAL;
1958 
1959 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1960 
1961 	roce->rinfo.netdev = nic->kinfo.netdev;
1962 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1963 
1964 	roce->pdev = nic->pdev;
1965 	roce->ae_algo = nic->ae_algo;
1966 	roce->numa_node_mask = nic->numa_node_mask;
1967 
1968 	return 0;
1969 }
1970 
1971 static int hclge_init_msi(struct hclge_dev *hdev)
1972 {
1973 	struct pci_dev *pdev = hdev->pdev;
1974 	int vectors;
1975 	int i;
1976 
1977 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1978 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1979 	if (vectors < 0) {
1980 		dev_err(&pdev->dev,
1981 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1982 			vectors);
1983 		return vectors;
1984 	}
1985 	if (vectors < hdev->num_msi)
1986 		dev_warn(&hdev->pdev->dev,
1987 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1988 			 hdev->num_msi, vectors);
1989 
1990 	hdev->num_msi = vectors;
1991 	hdev->num_msi_left = vectors;
1992 	hdev->base_msi_vector = pdev->irq;
1993 	hdev->roce_base_vector = hdev->base_msi_vector +
1994 				HCLGE_ROCE_VECTOR_OFFSET;
1995 
1996 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1997 					   sizeof(u16), GFP_KERNEL);
1998 	if (!hdev->vector_status) {
1999 		pci_free_irq_vectors(pdev);
2000 		return -ENOMEM;
2001 	}
2002 
2003 	for (i = 0; i < hdev->num_msi; i++)
2004 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2005 
2006 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2007 					sizeof(int), GFP_KERNEL);
2008 	if (!hdev->vector_irq) {
2009 		pci_free_irq_vectors(pdev);
2010 		return -ENOMEM;
2011 	}
2012 
2013 	return 0;
2014 }
2015 
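/* Half duplex is only meaningful at 10M/100M; force full duplex for any
 * other speed before recording the new speed/duplex in the MAC state.
 */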
2016 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
2017 {
2018 	struct hclge_mac *mac = &hdev->hw.mac;
2019 
2020 	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
2021 		mac->duplex = (u8)duplex;
2022 	else
2023 		mac->duplex = HCLGE_MAC_FULL;
2024 
2025 	mac->speed = speed;
2026 }
2027 
2028 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2029 {
2030 	struct hclge_config_mac_speed_dup_cmd *req;
2031 	struct hclge_desc desc;
2032 	int ret;
2033 
2034 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2035 
2036 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2037 
2038 	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2039 
2040 	switch (speed) {
2041 	case HCLGE_MAC_SPEED_10M:
2042 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2043 			       HCLGE_CFG_SPEED_S, 6);
2044 		break;
2045 	case HCLGE_MAC_SPEED_100M:
2046 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2047 			       HCLGE_CFG_SPEED_S, 7);
2048 		break;
2049 	case HCLGE_MAC_SPEED_1G:
2050 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2051 			       HCLGE_CFG_SPEED_S, 0);
2052 		break;
2053 	case HCLGE_MAC_SPEED_10G:
2054 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2055 			       HCLGE_CFG_SPEED_S, 1);
2056 		break;
2057 	case HCLGE_MAC_SPEED_25G:
2058 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2059 			       HCLGE_CFG_SPEED_S, 2);
2060 		break;
2061 	case HCLGE_MAC_SPEED_40G:
2062 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2063 			       HCLGE_CFG_SPEED_S, 3);
2064 		break;
2065 	case HCLGE_MAC_SPEED_50G:
2066 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2067 			       HCLGE_CFG_SPEED_S, 4);
2068 		break;
2069 	case HCLGE_MAC_SPEED_100G:
2070 		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2071 			       HCLGE_CFG_SPEED_S, 5);
2072 		break;
2073 	default:
2074 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2075 		return -EINVAL;
2076 	}
2077 
2078 	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2079 		     1);
2080 
2081 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2082 	if (ret) {
2083 		dev_err(&hdev->pdev->dev,
2084 			"mac speed/duplex config cmd failed %d.\n", ret);
2085 		return ret;
2086 	}
2087 
2088 	hclge_check_speed_dup(hdev, duplex, speed);
2089 
2090 	return 0;
2091 }
2092 
2093 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2094 				     u8 duplex)
2095 {
2096 	struct hclge_vport *vport = hclge_get_vport(handle);
2097 	struct hclge_dev *hdev = vport->back;
2098 
2099 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2100 }
2101 
2102 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
2103 					u8 *duplex)
2104 {
2105 	struct hclge_query_an_speed_dup_cmd *req;
2106 	struct hclge_desc desc;
2107 	int speed_tmp;
2108 	int ret;
2109 
2110 	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2111 
2112 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2113 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2114 	if (ret) {
2115 		dev_err(&hdev->pdev->dev,
2116 			"mac speed/autoneg/duplex query cmd failed %d\n",
2117 			ret);
2118 		return ret;
2119 	}
2120 
2121 	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
2122 	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
2123 				   HCLGE_QUERY_SPEED_S);
2124 
2125 	ret = hclge_parse_speed(speed_tmp, speed);
2126 	if (ret) {
2127 		dev_err(&hdev->pdev->dev,
2128 			"could not parse speed(=%d), %d\n", speed_tmp, ret);
2129 		return -EIO;
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 static int hclge_query_autoneg_result(struct hclge_dev *hdev)
2136 {
2137 	struct hclge_mac *mac = &hdev->hw.mac;
2138 	struct hclge_query_an_speed_dup_cmd *req;
2139 	struct hclge_desc desc;
2140 	int ret;
2141 
2142 	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
2143 
2144 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
2145 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2146 	if (ret) {
2147 		dev_err(&hdev->pdev->dev,
2148 			"autoneg result query cmd failed %d.\n", ret);
2149 		return ret;
2150 	}
2151 
2152 	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
2153 
2154 	return 0;
2155 }
2156 
2157 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2158 {
2159 	struct hclge_config_auto_neg_cmd *req;
2160 	struct hclge_desc desc;
2161 	u32 flag = 0;
2162 	int ret;
2163 
2164 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2165 
2166 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2167 	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2168 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2169 
2170 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2171 	if (ret) {
2172 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2173 			ret);
2174 		return ret;
2175 	}
2176 
2177 	return 0;
2178 }
2179 
2180 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2181 {
2182 	struct hclge_vport *vport = hclge_get_vport(handle);
2183 	struct hclge_dev *hdev = vport->back;
2184 
2185 	return hclge_set_autoneg_en(hdev, enable);
2186 }
2187 
2188 static int hclge_get_autoneg(struct hnae3_handle *handle)
2189 {
2190 	struct hclge_vport *vport = hclge_get_vport(handle);
2191 	struct hclge_dev *hdev = vport->back;
2192 
2193 	hclge_query_autoneg_result(hdev);
2194 
2195 	return hdev->hw.mac.autoneg;
2196 }
2197 
2198 static int hclge_mac_init(struct hclge_dev *hdev)
2199 {
2200 	struct hclge_mac *mac = &hdev->hw.mac;
2201 	int ret;
2202 
2203 	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2204 	if (ret) {
2205 		dev_err(&hdev->pdev->dev,
2206 			"Config mac speed dup fail ret=%d\n", ret);
2207 		return ret;
2208 	}
2209 
2210 	mac->link = 0;
2211 
2212 	/* Initialize the MTA table work mode */
2213 	hdev->accept_mta_mc	= true;
2214 	hdev->enable_mta	= true;
2215 	hdev->mta_mac_sel_type	= HCLGE_MAC_ADDR_47_36;
2216 
2217 	ret = hclge_set_mta_filter_mode(hdev,
2218 					hdev->mta_mac_sel_type,
2219 					hdev->enable_mta);
2220 	if (ret) {
2221 		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2222 			ret);
2223 		return ret;
2224 	}
2225 
2226 	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
2227 }
2228 
2229 static void hclge_task_schedule(struct hclge_dev *hdev)
2230 {
2231 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2232 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2233 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2234 		(void)schedule_work(&hdev->service_task);
2235 }
2236 
2237 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2238 {
2239 	struct hclge_link_status_cmd *req;
2240 	struct hclge_desc desc;
2241 	int link_status;
2242 	int ret;
2243 
2244 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2245 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2246 	if (ret) {
2247 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2248 			ret);
2249 		return ret;
2250 	}
2251 
2252 	req = (struct hclge_link_status_cmd *)desc.data;
2253 	link_status = req->status & HCLGE_LINK_STATUS;
2254 
2255 	return !!link_status;
2256 }
2257 
2258 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2259 {
2260 	int mac_state;
2261 	int link_stat;
2262 
2263 	mac_state = hclge_get_mac_link_status(hdev);
2264 
2265 	if (hdev->hw.mac.phydev) {
2266 		if (!genphy_read_status(hdev->hw.mac.phydev))
2267 			link_stat = mac_state &
2268 				hdev->hw.mac.phydev->link;
2269 		else
2270 			link_stat = 0;
2271 
2272 	} else {
2273 		link_stat = mac_state;
2274 	}
2275 
2276 	return !!link_stat;
2277 }
2278 
2279 static void hclge_update_link_status(struct hclge_dev *hdev)
2280 {
2281 	struct hnae3_client *client = hdev->nic_client;
2282 	struct hnae3_handle *handle;
2283 	int state;
2284 	int i;
2285 
2286 	if (!client)
2287 		return;
2288 	state = hclge_get_mac_phy_link(hdev);
2289 	if (state != hdev->hw.mac.link) {
2290 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2291 			handle = &hdev->vport[i].nic;
2292 			client->ops->link_status_change(handle, state);
2293 		}
2294 		hdev->hw.mac.link = state;
2295 	}
2296 }
2297 
2298 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2299 {
2300 	struct hclge_mac mac = hdev->hw.mac;
2301 	u8 duplex;
2302 	int speed;
2303 	int ret;
2304 
2305 	/* get the speed and duplex from the MAC autoneg result when there is
2306 	 * no PHY attached and autoneg is enabled.
2307 	 */
2308 	if (mac.phydev || !mac.autoneg)
2309 		return 0;
2310 
2311 	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
2312 	if (ret) {
2313 		dev_err(&hdev->pdev->dev,
2314 			"mac autoneg/speed/duplex query failed %d\n", ret);
2315 		return ret;
2316 	}
2317 
2318 	if ((mac.speed != speed) || (mac.duplex != duplex)) {
2319 		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2320 		if (ret) {
2321 			dev_err(&hdev->pdev->dev,
2322 				"mac speed/duplex config failed %d\n", ret);
2323 			return ret;
2324 		}
2325 	}
2326 
2327 	return 0;
2328 }
2329 
2330 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2331 {
2332 	struct hclge_vport *vport = hclge_get_vport(handle);
2333 	struct hclge_dev *hdev = vport->back;
2334 
2335 	return hclge_update_speed_duplex(hdev);
2336 }
2337 
2338 static int hclge_get_status(struct hnae3_handle *handle)
2339 {
2340 	struct hclge_vport *vport = hclge_get_vport(handle);
2341 	struct hclge_dev *hdev = vport->back;
2342 
2343 	hclge_update_link_status(hdev);
2344 
2345 	return hdev->hw.mac.link;
2346 }
2347 
2348 static void hclge_service_timer(struct timer_list *t)
2349 {
2350 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2351 
2352 	mod_timer(&hdev->service_timer, jiffies + HZ);
2353 	hclge_task_schedule(hdev);
2354 }
2355 
2356 static void hclge_service_complete(struct hclge_dev *hdev)
2357 {
2358 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2359 
2360 	/* Flush memory before next watchdog */
2361 	smp_mb__before_atomic();
2362 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2363 }
2364 
2365 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2366 {
2367 	writel(enable ? 1 : 0, vector->addr);
2368 }
2369 
2370 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2371 {
2372 	struct hclge_dev *hdev = data;
2373 
2374 	hclge_enable_vector(&hdev->misc_vector, false);
2375 	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2376 		schedule_work(&hdev->service_task);
2377 
2378 	return IRQ_HANDLED;
2379 }
2380 
2381 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2382 {
2383 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2384 	hdev->num_msi_left += 1;
2385 	hdev->num_msi_used -= 1;
2386 }
2387 
2388 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2389 {
2390 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2391 
2392 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2393 
2394 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2395 	hdev->vector_status[0] = 0;
2396 
2397 	hdev->num_msi_left -= 1;
2398 	hdev->num_msi_used += 1;
2399 }
2400 
2401 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2402 {
2403 	int ret;
2404 
2405 	hclge_get_misc_vector(hdev);
2406 
2407 	ret = devm_request_irq(&hdev->pdev->dev,
2408 			       hdev->misc_vector.vector_irq,
2409 			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
2410 	if (ret) {
2411 		hclge_free_vector(hdev, 0);
2412 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2413 			hdev->misc_vector.vector_irq);
2414 	}
2415 
2416 	return ret;
2417 }
2418 
2419 static int hclge_notify_client(struct hclge_dev *hdev,
2420 			       enum hnae3_reset_notify_type type)
2421 {
2422 	struct hnae3_client *client = hdev->nic_client;
2423 	u16 i;
2424 
2425 	if (!client->ops->reset_notify)
2426 		return -EOPNOTSUPP;
2427 
2428 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2429 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2430 		int ret;
2431 
2432 		ret = client->ops->reset_notify(handle, type);
2433 		if (ret)
2434 			return ret;
2435 	}
2436 
2437 	return 0;
2438 }
2439 
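/* hclge_reset_wait: poll the status bit of the requested reset type,
 * sleeping 100ms between reads for at most HCLGE_RESET_WAIT_CNT tries,
 * then clear the misc reset status register so the same event is not
 * detected again.
 */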
2440 static int hclge_reset_wait(struct hclge_dev *hdev)
2441 {
2442 #define HCLGE_RESET_WAIT_MS	100
2443 #define HCLGE_RESET_WAIT_CNT	5
2444 	u32 val, reg, reg_bit;
2445 	u32 cnt = 0;
2446 
2447 	switch (hdev->reset_type) {
2448 	case HNAE3_GLOBAL_RESET:
2449 		reg = HCLGE_GLOBAL_RESET_REG;
2450 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2451 		break;
2452 	case HNAE3_CORE_RESET:
2453 		reg = HCLGE_GLOBAL_RESET_REG;
2454 		reg_bit = HCLGE_CORE_RESET_BIT;
2455 		break;
2456 	case HNAE3_FUNC_RESET:
2457 		reg = HCLGE_FUN_RST_ING;
2458 		reg_bit = HCLGE_FUN_RST_ING_B;
2459 		break;
2460 	default:
2461 		dev_err(&hdev->pdev->dev,
2462 			"Wait for unsupported reset type: %d\n",
2463 			hdev->reset_type);
2464 		return -EINVAL;
2465 	}
2466 
2467 	val = hclge_read_dev(&hdev->hw, reg);
2468 	while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2469 		msleep(HCLGE_RESET_WAIT_MS);
2470 		val = hclge_read_dev(&hdev->hw, reg);
2471 		cnt++;
2472 	}
2473 
2474 	/* must clear the reset status register to prevent the driver from
2475 	 * detecting the reset interrupt again
2476 	 */
2477 	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
2478 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
2479 
2480 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2481 		dev_warn(&hdev->pdev->dev,
2482 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2483 		return -EBUSY;
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2490 {
2491 	struct hclge_desc desc;
2492 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2493 	int ret;
2494 
2495 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2496 	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
2497 	hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2498 	req->fun_reset_vfid = func_id;
2499 
2500 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2501 	if (ret)
2502 		dev_err(&hdev->pdev->dev,
2503 			"send function reset cmd fail, status =%d\n", ret);
2504 
2505 	return ret;
2506 }
2507 
2508 static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
2509 {
2510 	struct pci_dev *pdev = hdev->pdev;
2511 	u32 val;
2512 
2513 	switch (type) {
2514 	case HNAE3_GLOBAL_RESET:
2515 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2516 		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2517 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2518 		dev_info(&pdev->dev, "Global Reset requested\n");
2519 		break;
2520 	case HNAE3_CORE_RESET:
2521 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2522 		hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2523 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2524 		dev_info(&pdev->dev, "Core Reset requested\n");
2525 		break;
2526 	case HNAE3_FUNC_RESET:
2527 		dev_info(&pdev->dev, "PF Reset requested\n");
2528 		hclge_func_reset_cmd(hdev, 0);
2529 		break;
2530 	default:
2531 		dev_warn(&pdev->dev,
2532 			 "Unsupported reset type: %d\n", type);
2533 		break;
2534 	}
2535 }
2536 
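/* Translate the pending bits in the misc reset status register into the
 * highest-priority reset level (global > core > IMP).
 */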
2537 static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
2538 {
2539 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2540 	u32 rst_reg_val;
2541 
2542 	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
2543 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
2544 		rst_level = HNAE3_GLOBAL_RESET;
2545 	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
2546 		rst_level = HNAE3_CORE_RESET;
2547 	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
2548 		rst_level = HNAE3_IMP_RESET;
2549 
2550 	return rst_level;
2551 }
2552 
2553 static void hclge_reset_event(struct hnae3_handle *handle,
2554 			      enum hnae3_reset_type reset)
2555 {
2556 	struct hclge_vport *vport = hclge_get_vport(handle);
2557 	struct hclge_dev *hdev = vport->back;
2558 
2559 	dev_info(&hdev->pdev->dev,
2560 		 "Received reset event, reset_type is %d\n", reset);
2561 
2562 	switch (reset) {
2563 	case HNAE3_FUNC_RESET:
2564 	case HNAE3_CORE_RESET:
2565 	case HNAE3_GLOBAL_RESET:
2566 		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
2567 			dev_err(&hdev->pdev->dev, "Already in reset state\n");
2568 			return;
2569 		}
2570 		hdev->reset_type = reset;
2571 		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
2572 		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2573 		schedule_work(&hdev->service_task);
2574 		break;
2575 	default:
2576 		dev_warn(&hdev->pdev->dev, "Unsupported reset event: %d\n", reset);
2577 		break;
2578 	}
2579 }
2580 
2581 static void hclge_reset_subtask(struct hclge_dev *hdev)
2582 {
2583 	bool do_reset;
2584 
2585 	do_reset = hdev->reset_type != HNAE3_NONE_RESET;
2586 
2587 	/* if none was requested, check for a reset detected by interrupt */
2588 	if (hdev->reset_type == HNAE3_NONE_RESET)
2589 		hdev->reset_type = hclge_detected_reset_event(hdev);
2590 
2591 	if (hdev->reset_type == HNAE3_NONE_RESET)
2592 		return;
2593 
2594 	switch (hdev->reset_type) {
2595 	case HNAE3_FUNC_RESET:
2596 	case HNAE3_CORE_RESET:
2597 	case HNAE3_GLOBAL_RESET:
2598 	case HNAE3_IMP_RESET:
2599 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2600 
2601 		if (do_reset)
2602 			hclge_do_reset(hdev, hdev->reset_type);
2603 		else
2604 			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
2605 
2606 		if (!hclge_reset_wait(hdev)) {
2607 			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
2608 			hclge_reset_ae_dev(hdev->ae_dev);
2609 			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
2610 			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
2611 		}
2612 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2613 		break;
2614 	default:
2615 		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
2616 			hdev->reset_type);
2617 		break;
2618 	}
2619 	hdev->reset_type = HNAE3_NONE_RESET;
2620 }
2621 
2622 static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
2623 {
2624 	hclge_reset_subtask(hdev);
2625 	hclge_enable_vector(&hdev->misc_vector, true);
2626 }
2627 
2628 static void hclge_service_task(struct work_struct *work)
2629 {
2630 	struct hclge_dev *hdev =
2631 		container_of(work, struct hclge_dev, service_task);
2632 
2633 	hclge_misc_irq_service_task(hdev);
2634 	hclge_update_speed_duplex(hdev);
2635 	hclge_update_link_status(hdev);
2636 	hclge_update_stats_for_all(hdev);
2637 	hclge_service_complete(hdev);
2638 }
2639 
2640 static void hclge_disable_sriov(struct hclge_dev *hdev)
2641 {
2642 	/* If our VFs are assigned we cannot shut down SR-IOV
2643 	 * without causing issues, so just leave the hardware
2644 	 * available but disabled
2645 	 */
2646 	if (pci_vfs_assigned(hdev->pdev)) {
2647 		dev_warn(&hdev->pdev->dev,
2648 			 "disabling driver while VFs are assigned\n");
2649 		return;
2650 	}
2651 
2652 	pci_disable_sriov(hdev->pdev);
2653 }
2654 
2655 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
2656 {
2657 	/* VF handle has no client */
2658 	if (!handle->client)
2659 		return container_of(handle, struct hclge_vport, nic);
2660 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
2661 		return container_of(handle, struct hclge_vport, roce);
2662 	else
2663 		return container_of(handle, struct hclge_vport, nic);
2664 }
2665 
2666 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
2667 			    struct hnae3_vector_info *vector_info)
2668 {
2669 	struct hclge_vport *vport = hclge_get_vport(handle);
2670 	struct hnae3_vector_info *vector = vector_info;
2671 	struct hclge_dev *hdev = vport->back;
2672 	int alloc = 0;
2673 	int i, j;
2674 
2675 	vector_num = min(hdev->num_msi_left, vector_num);
2676 
2677 	for (j = 0; j < vector_num; j++) {
2678 		for (i = 1; i < hdev->num_msi; i++) {
2679 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
2680 				vector->vector = pci_irq_vector(hdev->pdev, i);
2681 				vector->io_addr = hdev->hw.io_base +
2682 					HCLGE_VECTOR_REG_BASE +
2683 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
2684 					vport->vport_id *
2685 					HCLGE_VECTOR_VF_OFFSET;
2686 				hdev->vector_status[i] = vport->vport_id;
2687 				hdev->vector_irq[i] = vector->vector;
2688 
2689 				vector++;
2690 				alloc++;
2691 
2692 				break;
2693 			}
2694 		}
2695 	}
2696 	hdev->num_msi_left -= alloc;
2697 	hdev->num_msi_used += alloc;
2698 
2699 	return alloc;
2700 }
2701 
2702 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
2703 {
2704 	int i;
2705 
2706 	for (i = 0; i < hdev->num_msi; i++)
2707 		if (vector == hdev->vector_irq[i])
2708 			return i;
2709 
2710 	return -EINVAL;
2711 }
2712 
2713 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
2714 {
2715 	return HCLGE_RSS_KEY_SIZE;
2716 }
2717 
2718 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
2719 {
2720 	return HCLGE_RSS_IND_TBL_SIZE;
2721 }
2722 
2723 static int hclge_get_rss_algo(struct hclge_dev *hdev)
2724 {
2725 	struct hclge_rss_config_cmd *req;
2726 	struct hclge_desc desc;
2727 	int rss_hash_algo;
2728 	int ret;
2729 
2730 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
2731 
2732 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2733 	if (ret) {
2734 		dev_err(&hdev->pdev->dev,
2735 			"Get rss algo failed, status = %d\n", ret);
2736 		return ret;
2737 	}
2738 
2739 	req = (struct hclge_rss_config_cmd *)desc.data;
2740 	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
2741 
2742 	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
2743 		return ETH_RSS_HASH_TOP;
2744 
2745 	return -EINVAL;
2746 }
2747 
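/* hclge_set_rss_algo_key: download the hash algorithm and RSS hash key.
 * The key is written in three segments of HCLGE_RSS_HASH_KEY_NUM bytes
 * (the last segment carries the remainder), one command per segment.
 */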
2748 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
2749 				  const u8 hfunc, const u8 *key)
2750 {
2751 	struct hclge_rss_config_cmd *req;
2752 	struct hclge_desc desc;
2753 	int key_offset;
2754 	int key_size;
2755 	int ret;
2756 
2757 	req = (struct hclge_rss_config_cmd *)desc.data;
2758 
2759 	for (key_offset = 0; key_offset < 3; key_offset++) {
2760 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
2761 					   false);
2762 
2763 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
2764 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
2765 
2766 		if (key_offset == 2)
2767 			key_size =
2768 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
2769 		else
2770 			key_size = HCLGE_RSS_HASH_KEY_NUM;
2771 
2772 		memcpy(req->hash_key,
2773 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
2774 
2775 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2776 		if (ret) {
2777 			dev_err(&hdev->pdev->dev,
2778 				"Configure RSS config fail, status = %d\n",
2779 				ret);
2780 			return ret;
2781 		}
2782 	}
2783 	return 0;
2784 }
2785 
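/* hclge_set_rss_indir_table: write the RSS indirection table in chunks of
 * HCLGE_RSS_CFG_TBL_SIZE entries, one command per chunk.
 */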
2786 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
2787 {
2788 	struct hclge_rss_indirection_table_cmd *req;
2789 	struct hclge_desc desc;
2790 	int i, j;
2791 	int ret;
2792 
2793 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
2794 
2795 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
2796 		hclge_cmd_setup_basic_desc
2797 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
2798 
2799 		req->start_table_index =
2800 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
2801 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
2802 
2803 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
2804 			req->rss_result[j] =
2805 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
2806 
2807 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2808 		if (ret) {
2809 			dev_err(&hdev->pdev->dev,
2810 				"Configure rss indir table fail, status = %d\n",
2811 				ret);
2812 			return ret;
2813 		}
2814 	}
2815 	return 0;
2816 }
2817 
2818 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
2819 				 u16 *tc_size, u16 *tc_offset)
2820 {
2821 	struct hclge_rss_tc_mode_cmd *req;
2822 	struct hclge_desc desc;
2823 	int ret;
2824 	int i;
2825 
2826 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
2827 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
2828 
2829 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2830 		u16 mode = 0;
2831 
2832 		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
2833 		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
2834 			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
2835 		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
2836 			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
2837 
2838 		req->rss_tc_mode[i] = cpu_to_le16(mode);
2839 	}
2840 
2841 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2842 	if (ret) {
2843 		dev_err(&hdev->pdev->dev,
2844 			"Configure rss tc mode fail, status = %d\n", ret);
2845 		return ret;
2846 	}
2847 
2848 	return 0;
2849 }
2850 
2851 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
2852 {
2853 	struct hclge_rss_input_tuple_cmd *req;
2854 	struct hclge_desc desc;
2855 	int ret;
2856 
2857 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
2858 
2859 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2860 	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2861 	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2862 	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2863 	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2864 	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2865 	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2866 	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
2867 	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
2868 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2869 	if (ret) {
2870 		dev_err(&hdev->pdev->dev,
2871 			"Configure rss input fail, status = %d\n", ret);
2872 		return ret;
2873 	}
2874 
2875 	return 0;
2876 }
2877 
2878 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
2879 			 u8 *key, u8 *hfunc)
2880 {
2881 	struct hclge_vport *vport = hclge_get_vport(handle);
2882 	struct hclge_dev *hdev = vport->back;
2883 	int i;
2884 
2885 	/* Get hash algorithm */
2886 	if (hfunc)
2887 		*hfunc = hclge_get_rss_algo(hdev);
2888 
2889 	/* Get the RSS Key required by the user */
2890 	if (key)
2891 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
2892 
2893 	/* Get indirect table */
2894 	if (indir)
2895 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2896 			indir[i] =  vport->rss_indirection_tbl[i];
2897 
2898 	return 0;
2899 }
2900 
2901 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
2902 			 const  u8 *key, const  u8 hfunc)
2903 {
2904 	struct hclge_vport *vport = hclge_get_vport(handle);
2905 	struct hclge_dev *hdev = vport->back;
2906 	u8 hash_algo;
2907 	int ret, i;
2908 
2909 	/* Set the RSS Hash Key if specified by the user */
2910 	if (key) {
2911 		/* Update the shadow RSS key with the user specified key */
2912 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
2913 
2914 		if (hfunc == ETH_RSS_HASH_TOP ||
2915 		    hfunc == ETH_RSS_HASH_NO_CHANGE)
2916 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
2917 		else
2918 			return -EINVAL;
2919 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
2920 		if (ret)
2921 			return ret;
2922 	}
2923 
2924 	/* Update the shadow RSS table with user specified qids */
2925 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
2926 		vport->rss_indirection_tbl[i] = indir[i];
2927 
2928 	/* Update the hardware */
2929 	ret = hclge_set_rss_indir_table(hdev, indir);
2930 	return ret;
2931 }
2932 
2933 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
2934 {
2935 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
2936 
2937 	if (nfc->data & RXH_L4_B_2_3)
2938 		hash_sets |= HCLGE_D_PORT_BIT;
2939 	else
2940 		hash_sets &= ~HCLGE_D_PORT_BIT;
2941 
2942 	if (nfc->data & RXH_IP_SRC)
2943 		hash_sets |= HCLGE_S_IP_BIT;
2944 	else
2945 		hash_sets &= ~HCLGE_S_IP_BIT;
2946 
2947 	if (nfc->data & RXH_IP_DST)
2948 		hash_sets |= HCLGE_D_IP_BIT;
2949 	else
2950 		hash_sets &= ~HCLGE_D_IP_BIT;
2951 
2952 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
2953 		hash_sets |= HCLGE_V_TAG_BIT;
2954 
2955 	return hash_sets;
2956 }
2957 
2958 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
2959 			       struct ethtool_rxnfc *nfc)
2960 {
2961 	struct hclge_vport *vport = hclge_get_vport(handle);
2962 	struct hclge_dev *hdev = vport->back;
2963 	struct hclge_rss_input_tuple_cmd *req;
2964 	struct hclge_desc desc;
2965 	u8 tuple_sets;
2966 	int ret;
2967 
2968 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2969 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
2970 		return -EINVAL;
2971 
2972 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
2973 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
2974 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2975 	if (ret) {
2976 		dev_err(&hdev->pdev->dev,
2977 			"Read rss tuple fail, status = %d\n", ret);
2978 		return ret;
2979 	}
2980 
2981 	hclge_cmd_reuse_desc(&desc, false);
2982 
2983 	tuple_sets = hclge_get_rss_hash_bits(nfc);
2984 	switch (nfc->flow_type) {
2985 	case TCP_V4_FLOW:
2986 		req->ipv4_tcp_en = tuple_sets;
2987 		break;
2988 	case TCP_V6_FLOW:
2989 		req->ipv6_tcp_en = tuple_sets;
2990 		break;
2991 	case UDP_V4_FLOW:
2992 		req->ipv4_udp_en = tuple_sets;
2993 		break;
2994 	case UDP_V6_FLOW:
2995 		req->ipv6_udp_en = tuple_sets;
2996 		break;
2997 	case SCTP_V4_FLOW:
2998 		req->ipv4_sctp_en = tuple_sets;
2999 		break;
3000 	case SCTP_V6_FLOW:
3001 		if ((nfc->data & RXH_L4_B_0_1) ||
3002 		    (nfc->data & RXH_L4_B_2_3))
3003 			return -EINVAL;
3004 
3005 		req->ipv6_sctp_en = tuple_sets;
3006 		break;
3007 	case IPV4_FLOW:
3008 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3009 		break;
3010 	case IPV6_FLOW:
3011 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3012 		break;
3013 	default:
3014 		return -EINVAL;
3015 	}
3016 
3017 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3018 	if (ret)
3019 		dev_err(&hdev->pdev->dev,
3020 			"Set rss tuple fail, status = %d\n", ret);
3021 
3022 	return ret;
3023 }
3024 
3025 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3026 			       struct ethtool_rxnfc *nfc)
3027 {
3028 	struct hclge_vport *vport = hclge_get_vport(handle);
3029 	struct hclge_dev *hdev = vport->back;
3030 	struct hclge_rss_input_tuple_cmd *req;
3031 	struct hclge_desc desc;
3032 	u8 tuple_sets;
3033 	int ret;
3034 
3035 	nfc->data = 0;
3036 
3037 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3038 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
3039 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3040 	if (ret) {
3041 		dev_err(&hdev->pdev->dev,
3042 			"Read rss tuple fail, status = %d\n", ret);
3043 		return ret;
3044 	}
3045 
3046 	switch (nfc->flow_type) {
3047 	case TCP_V4_FLOW:
3048 		tuple_sets = req->ipv4_tcp_en;
3049 		break;
3050 	case UDP_V4_FLOW:
3051 		tuple_sets = req->ipv4_udp_en;
3052 		break;
3053 	case TCP_V6_FLOW:
3054 		tuple_sets = req->ipv6_tcp_en;
3055 		break;
3056 	case UDP_V6_FLOW:
3057 		tuple_sets = req->ipv6_udp_en;
3058 		break;
3059 	case SCTP_V4_FLOW:
3060 		tuple_sets = req->ipv4_sctp_en;
3061 		break;
3062 	case SCTP_V6_FLOW:
3063 		tuple_sets = req->ipv6_sctp_en;
3064 		break;
3065 	case IPV4_FLOW:
3066 	case IPV6_FLOW:
3067 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3068 		break;
3069 	default:
3070 		return -EINVAL;
3071 	}
3072 
3073 	if (!tuple_sets)
3074 		return 0;
3075 
3076 	if (tuple_sets & HCLGE_D_PORT_BIT)
3077 		nfc->data |= RXH_L4_B_2_3;
3078 	if (tuple_sets & HCLGE_S_PORT_BIT)
3079 		nfc->data |= RXH_L4_B_0_1;
3080 	if (tuple_sets & HCLGE_D_IP_BIT)
3081 		nfc->data |= RXH_IP_DST;
3082 	if (tuple_sets & HCLGE_S_IP_BIT)
3083 		nfc->data |= RXH_IP_SRC;
3084 
3085 	return 0;
3086 }
3087 
3088 static int hclge_get_tc_size(struct hnae3_handle *handle)
3089 {
3090 	struct hclge_vport *vport = hclge_get_vport(handle);
3091 	struct hclge_dev *hdev = vport->back;
3092 
3093 	return hdev->rss_size_max;
3094 }
3095 
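/* hclge_rss_init_hw: default RSS setup. Fill each vport's shadow
 * indirection table, download the PF's table, a default key from
 * netdev_rss_key_fill() and the default input tuples, then derive the
 * per-TC RSS mode (size/offset) from the PF's rss_size.
 */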
3096 int hclge_rss_init_hw(struct hclge_dev *hdev)
3097 {
3098 	const  u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3099 	struct hclge_vport *vport = hdev->vport;
3100 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3101 	u8 rss_key[HCLGE_RSS_KEY_SIZE];
3102 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3103 	u16 tc_size[HCLGE_MAX_TC_NUM];
3104 	u32 *rss_indir = NULL;
3105 	u16 rss_size = 0, roundup_size;
3106 	const u8 *key;
3107 	int i, ret, j;
3108 
3109 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
3110 	if (!rss_indir)
3111 		return -ENOMEM;
3112 
3113 	/* Get default RSS key */
3114 	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
3115 
3116 	/* Initialize RSS indirect table for each vport */
3117 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3118 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
3119 			vport[j].rss_indirection_tbl[i] =
3120 				i % vport[j].alloc_rss_size;
3121 
3122 			/* vport 0 is for PF */
3123 			if (j != 0)
3124 				continue;
3125 
3126 			rss_size = vport[j].alloc_rss_size;
3127 			rss_indir[i] = vport[j].rss_indirection_tbl[i];
3128 		}
3129 	}
3130 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3131 	if (ret)
3132 		goto err;
3133 
3134 	key = rss_key;
3135 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3136 	if (ret)
3137 		goto err;
3138 
3139 	ret = hclge_set_rss_input_tuple(hdev);
3140 	if (ret)
3141 		goto err;
3142 
3143 	/* Each TC has the same queue size, and the tc_size set to hardware is
3144 	 * the log2 of rss_size rounded up to a power of two; the actual queue
3145 	 * size is limited by the indirection table.
3146 	 */
3147 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3148 		dev_err(&hdev->pdev->dev,
3149 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3150 			rss_size);
3151 		ret = -EINVAL;
3152 		goto err;
3153 	}
3154 
3155 	roundup_size = roundup_pow_of_two(rss_size);
3156 	roundup_size = ilog2(roundup_size);
3157 
3158 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3159 		tc_valid[i] = 0;
3160 
3161 		if (!(hdev->hw_tc_map & BIT(i)))
3162 			continue;
3163 
3164 		tc_valid[i] = 1;
3165 		tc_size[i] = roundup_size;
3166 		tc_offset[i] = rss_size * i;
3167 	}
3168 
3169 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3170 
3171 err:
3172 	kfree(rss_indir);
3173 
3174 	return ret;
3175 }
3176 
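/* hclge_map_vport_ring_to_vector: bind every ring in the chain to the given
 * vector. Rings are packed HCLGE_VECTOR_ELEMENTS_PER_CMD per descriptor; a
 * full descriptor is sent immediately and any remainder is flushed at the
 * end.
 */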
3177 int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
3178 				   struct hnae3_ring_chain_node *ring_chain)
3179 {
3180 	struct hclge_dev *hdev = vport->back;
3181 	struct hclge_ctrl_vector_chain_cmd *req;
3182 	struct hnae3_ring_chain_node *node;
3183 	struct hclge_desc desc;
3184 	int ret;
3185 	int i;
3186 
3187 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
3188 
3189 	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3190 	req->int_vector_id = vector_id;
3191 
3192 	i = 0;
3193 	for (node = ring_chain; node; node = node->next) {
3194 		u16 type_and_id = 0;
3195 
3196 		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
3197 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3198 		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3199 			       node->tqp_index);
3200 		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
3201 			       HCLGE_INT_GL_IDX_S,
3202 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3203 		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
3204 		req->vfid = vport->vport_id;
3205 
3206 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3207 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3208 
3209 			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3210 			if (ret) {
3211 				dev_err(&hdev->pdev->dev,
3212 					"Map TQP fail, status is %d.\n",
3213 					ret);
3214 				return ret;
3215 			}
3216 			i = 0;
3217 
3218 			hclge_cmd_setup_basic_desc(&desc,
3219 						   HCLGE_OPC_ADD_RING_TO_VECTOR,
3220 						   false);
3221 			req->int_vector_id = vector_id;
3222 		}
3223 	}
3224 
3225 	if (i > 0) {
3226 		req->int_cause_num = i;
3227 
3228 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3229 		if (ret) {
3230 			dev_err(&hdev->pdev->dev,
3231 				"Map TQP fail, status is %d.\n", ret);
3232 			return ret;
3233 		}
3234 	}
3235 
3236 	return 0;
3237 }
3238 
3239 static int hclge_map_handle_ring_to_vector(
3240 		struct hnae3_handle *handle, int vector,
3241 		struct hnae3_ring_chain_node *ring_chain)
3242 {
3243 	struct hclge_vport *vport = hclge_get_vport(handle);
3244 	struct hclge_dev *hdev = vport->back;
3245 	int vector_id;
3246 
3247 	vector_id = hclge_get_vector_index(hdev, vector);
3248 	if (vector_id < 0) {
3249 		dev_err(&hdev->pdev->dev,
3250 			"Get vector index fail. ret =%d\n", vector_id);
3251 		return vector_id;
3252 	}
3253 
3254 	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
3255 }
3256 
3257 static int hclge_unmap_ring_from_vector(
3258 	struct hnae3_handle *handle, int vector,
3259 	struct hnae3_ring_chain_node *ring_chain)
3260 {
3261 	struct hclge_vport *vport = hclge_get_vport(handle);
3262 	struct hclge_dev *hdev = vport->back;
3263 	struct hclge_ctrl_vector_chain_cmd *req;
3264 	struct hnae3_ring_chain_node *node;
3265 	struct hclge_desc desc;
3266 	int i, vector_id;
3267 	int ret;
3268 
3269 	vector_id = hclge_get_vector_index(hdev, vector);
3270 	if (vector_id < 0) {
3271 		dev_err(&handle->pdev->dev,
3272 			"Get vector index fail. ret =%d\n", vector_id);
3273 		return vector_id;
3274 	}
3275 
3276 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
3277 
3278 	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3279 	req->int_vector_id = vector_id;
3280 
3281 	i = 0;
3282 	for (node = ring_chain; node; node = node->next) {
3283 		u16 type_and_id = 0;
3284 
3285 		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
3286 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3287 		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
3288 			       node->tqp_index);
3289 		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
3290 			       HCLGE_INT_GL_IDX_S,
3291 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
3292 
3293 		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
3294 		req->vfid = vport->vport_id;
3295 
3296 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3297 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3298 
3299 			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3300 			if (ret) {
3301 				dev_err(&hdev->pdev->dev,
3302 					"Unmap TQP fail, status is %d.\n",
3303 					ret);
3304 				return ret;
3305 			}
3306 			i = 0;
3307 			hclge_cmd_setup_basic_desc(&desc,
3308 						   HCLGE_OPC_DEL_RING_TO_VECTOR,
3309 						   false);
3310 			req->int_vector_id = vector_id;
3311 		}
3312 	}
3313 
3314 	if (i > 0) {
3315 		req->int_cause_num = i;
3316 
3317 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3318 		if (ret) {
3319 			dev_err(&hdev->pdev->dev,
3320 				"Unmap TQP fail, status is %d.\n", ret);
3321 			return ret;
3322 		}
3323 	}
3324 
3325 	return 0;
3326 }
3327 
3328 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3329 			       struct hclge_promisc_param *param)
3330 {
3331 	struct hclge_promisc_cfg_cmd *req;
3332 	struct hclge_desc desc;
3333 	int ret;
3334 
3335 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3336 
3337 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3338 	req->vf_id = param->vf_id;
3339 	req->flag = (param->enable << HCLGE_PROMISC_EN_B);
3340 
3341 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3342 	if (ret) {
3343 		dev_err(&hdev->pdev->dev,
3344 			"Set promisc mode fail, status is %d.\n", ret);
3345 		return ret;
3346 	}
3347 	return 0;
3348 }
3349 
3350 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3351 			      bool en_mc, bool en_bc, int vport_id)
3352 {
3353 	if (!param)
3354 		return;
3355 
3356 	memset(param, 0, sizeof(struct hclge_promisc_param));
3357 	if (en_uc)
3358 		param->enable = HCLGE_PROMISC_EN_UC;
3359 	if (en_mc)
3360 		param->enable |= HCLGE_PROMISC_EN_MC;
3361 	if (en_bc)
3362 		param->enable |= HCLGE_PROMISC_EN_BC;
3363 	param->vf_id = vport_id;
3364 }
3365 
3366 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
3367 {
3368 	struct hclge_vport *vport = hclge_get_vport(handle);
3369 	struct hclge_dev *hdev = vport->back;
3370 	struct hclge_promisc_param param;
3371 
3372 	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
3373 	hclge_cmd_set_promisc_mode(hdev, &param);
3374 }
3375 
3376 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
3377 {
3378 	struct hclge_desc desc;
3379 	struct hclge_config_mac_mode_cmd *req =
3380 		(struct hclge_config_mac_mode_cmd *)desc.data;
3381 	u32 loop_en = 0;
3382 	int ret;
3383 
3384 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
3385 	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
3386 	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
3387 	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
3388 	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
3389 	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
3390 	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
3391 	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3392 	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
3393 	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
3394 	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
3395 	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
3396 	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
3397 	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
3398 	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
3399 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3400 
3401 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3402 	if (ret)
3403 		dev_err(&hdev->pdev->dev,
3404 			"mac enable fail, ret =%d.\n", ret);
3405 }
3406 
3407 static int hclge_set_loopback(struct hnae3_handle *handle,
3408 			      enum hnae3_loop loop_mode, bool en)
3409 {
3410 	struct hclge_vport *vport = hclge_get_vport(handle);
3411 	struct hclge_config_mac_mode_cmd *req;
3412 	struct hclge_dev *hdev = vport->back;
3413 	struct hclge_desc desc;
3414 	u32 loop_en;
3415 	int ret;
3416 
3417 	switch (loop_mode) {
3418 	case HNAE3_MAC_INTER_LOOP_MAC:
3419 		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
3420 		/* 1 Read out the current MAC mode config first */
3421 		hclge_cmd_setup_basic_desc(&desc,
3422 					   HCLGE_OPC_CONFIG_MAC_MODE,
3423 					   true);
3424 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3425 		if (ret) {
3426 			dev_err(&hdev->pdev->dev,
3427 				"mac loopback get fail, ret =%d.\n",
3428 				ret);
3429 			return ret;
3430 		}
3431 
3432 		/* 2 Then setup the loopback flag */
3433 		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
3434 		if (en)
3435 			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
3436 		else
3437 			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
3438 
3439 		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
3440 
3441 		/* 3 Config mac work mode with loopback flag
3442 		 * and its original configuration parameters
3443 		 */
3444 		hclge_cmd_reuse_desc(&desc, false);
3445 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3446 		if (ret)
3447 			dev_err(&hdev->pdev->dev,
3448 				"mac loopback set fail, ret =%d.\n", ret);
3449 		break;
3450 	default:
3451 		ret = -ENOTSUPP;
3452 		dev_err(&hdev->pdev->dev,
3453 			"loop_mode %d is not supported\n", loop_mode);
3454 		break;
3455 	}
3456 
3457 	return ret;
3458 }
3459 
3460 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
3461 			    int stream_id, bool enable)
3462 {
3463 	struct hclge_desc desc;
3464 	struct hclge_cfg_com_tqp_queue_cmd *req =
3465 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
3466 	int ret;
3467 
3468 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
3469 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
3470 	req->stream_id = cpu_to_le16(stream_id);
3471 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
3472 
3473 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3474 	if (ret)
3475 		dev_err(&hdev->pdev->dev,
3476 			"Tqp enable fail, status =%d.\n", ret);
3477 	return ret;
3478 }
3479 
3480 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
3481 {
3482 	struct hclge_vport *vport = hclge_get_vport(handle);
3483 	struct hnae3_queue *queue;
3484 	struct hclge_tqp *tqp;
3485 	int i;
3486 
3487 	for (i = 0; i < vport->alloc_tqps; i++) {
3488 		queue = handle->kinfo.tqp[i];
3489 		tqp = container_of(queue, struct hclge_tqp, q);
3490 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
3491 	}
3492 }
3493 
3494 static int hclge_ae_start(struct hnae3_handle *handle)
3495 {
3496 	struct hclge_vport *vport = hclge_get_vport(handle);
3497 	struct hclge_dev *hdev = vport->back;
3498 	int i, queue_id, ret;
3499 
3500 	for (i = 0; i < vport->alloc_tqps; i++) {
3501 		/* todo clear interrupt */
3502 		/* ring enable */
3503 		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3504 		if (queue_id < 0) {
3505 			dev_warn(&hdev->pdev->dev,
3506 				 "Get invalid queue id, ignore it\n");
3507 			continue;
3508 		}
3509 
3510 		hclge_tqp_enable(hdev, queue_id, 0, true);
3511 	}
3512 	/* mac enable */
3513 	hclge_cfg_mac_mode(hdev, true);
3514 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
3515 	mod_timer(&hdev->service_timer, jiffies + HZ);
3516 
3517 	ret = hclge_mac_start_phy(hdev);
3518 	if (ret)
3519 		return ret;
3520 
3521 	/* reset tqp stats */
3522 	hclge_reset_tqp_stats(handle);
3523 
3524 	return 0;
3525 }
3526 
3527 static void hclge_ae_stop(struct hnae3_handle *handle)
3528 {
3529 	struct hclge_vport *vport = hclge_get_vport(handle);
3530 	struct hclge_dev *hdev = vport->back;
3531 	int i, queue_id;
3532 
3533 	for (i = 0; i < vport->alloc_tqps; i++) {
3534 		/* Ring disable */
3535 		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
3536 		if (queue_id < 0) {
3537 			dev_warn(&hdev->pdev->dev,
3538 				 "Get invalid queue id, ignore it\n");
3539 			continue;
3540 		}
3541 
3542 		hclge_tqp_enable(hdev, queue_id, 0, false);
3543 	}
3544 	/* Mac disable */
3545 	hclge_cfg_mac_mode(hdev, false);
3546 
3547 	hclge_mac_stop_phy(hdev);
3548 
3549 	/* reset tqp stats */
3550 	hclge_reset_tqp_stats(handle);
3551 }
3552 
3553 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
3554 					 u16 cmdq_resp, u8  resp_code,
3555 					 enum hclge_mac_vlan_tbl_opcode op)
3556 {
3557 	struct hclge_dev *hdev = vport->back;
3558 	int return_status = -EIO;
3559 
3560 	if (cmdq_resp) {
3561 		dev_err(&hdev->pdev->dev,
3562 			"cmdq execute failed for get_mac_vlan_cmd_status, status = %d.\n",
3563 			cmdq_resp);
3564 		return -EIO;
3565 	}
3566 
3567 	if (op == HCLGE_MAC_VLAN_ADD) {
3568 		if ((!resp_code) || (resp_code == 1)) {
3569 			return_status = 0;
3570 		} else if (resp_code == 2) {
3571 			return_status = -EIO;
3572 			dev_err(&hdev->pdev->dev,
3573 				"add mac addr failed for uc_overflow.\n");
3574 		} else if (resp_code == 3) {
3575 			return_status = -EIO;
3576 			dev_err(&hdev->pdev->dev,
3577 				"add mac addr failed for mc_overflow.\n");
3578 		} else {
3579 			dev_err(&hdev->pdev->dev,
3580 				"add mac addr failed for undefined, code=%d.\n",
3581 				resp_code);
3582 		}
3583 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
3584 		if (!resp_code) {
3585 			return_status = 0;
3586 		} else if (resp_code == 1) {
3587 			return_status = -EIO;
3588 			dev_dbg(&hdev->pdev->dev,
3589 				"remove mac addr failed for miss.\n");
3590 		} else {
3591 			dev_err(&hdev->pdev->dev,
3592 				"remove mac addr failed for undefined, code=%d.\n",
3593 				resp_code);
3594 		}
3595 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
3596 		if (!resp_code) {
3597 			return_status = 0;
3598 		} else if (resp_code == 1) {
3599 			return_status = -EIO;
3600 			dev_dbg(&hdev->pdev->dev,
3601 				"lookup mac addr failed for miss.\n");
3602 		} else {
3603 			dev_err(&hdev->pdev->dev,
3604 				"lookup mac addr failed for undefined, code=%d.\n",
3605 				resp_code);
3606 		}
3607 	} else {
3608 		return_status = -EIO;
3609 		dev_err(&hdev->pdev->dev,
3610 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
3611 			op);
3612 	}
3613 
3614 	return return_status;
3615 }
3616 
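/* Set or clear the bit of a function id in a mac/vlan table entry's
 * function bitmap: vfid 0-191 lives in desc[1], vfid 192-255 in desc[2],
 * 32 ids per 32-bit data word.
 */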
3617 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
3618 {
3619 	int word_num;
3620 	int bit_num;
3621 
3622 	if (vfid > 255 || vfid < 0)
3623 		return -EIO;
3624 
3625 	if (vfid >= 0 && vfid <= 191) {
3626 		word_num = vfid / 32;
3627 		bit_num  = vfid % 32;
3628 		if (clr)
3629 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3630 		else
3631 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
3632 	} else {
3633 		word_num = (vfid - 192) / 32;
3634 		bit_num  = vfid % 32;
3635 		if (clr)
3636 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
3637 		else
3638 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
3639 	}
3640 
3641 	return 0;
3642 }
3643 
3644 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
3645 {
3646 #define HCLGE_DESC_NUMBER 3
3647 #define HCLGE_FUNC_NUMBER_PER_DESC 6
3648 	int i, j;
3649 
3650 	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
3651 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
3652 			if (desc[i].data[j])
3653 				return false;
3654 
3655 	return true;
3656 }
3657 
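/* Pack the 6-byte MAC address into the table entry layout: bytes 0-3 into
 * mac_addr_hi32 (byte 0 in the lowest bits) and bytes 4-5 into
 * mac_addr_lo16.
 */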
3658 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
3659 				   const u8 *addr)
3660 {
3661 	const unsigned char *mac_addr = addr;
3662 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
3663 		       (mac_addr[0]) | (mac_addr[1] << 8);
3664 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
3665 
3666 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
3667 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
3668 }
3669 
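/* Derive the 12-bit MTA table index from the two high bytes of a multicast
 * address; the right shift depends on the configured DMAC selection mode.
 */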
3670 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
3671 					   const u8 *addr)
3672 {
3673 	u16 high_val = addr[1] | (addr[0] << 8);
3674 	struct hclge_dev *hdev = vport->back;
3675 	u32 rsh = 4 - hdev->mta_mac_sel_type;
3676 	u16 ret_val = (high_val >> rsh) & 0xfff;
3677 
3678 	return ret_val;
3679 }
3680 
3681 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
3682 				     enum hclge_mta_dmac_sel_type mta_mac_sel,
3683 				     bool enable)
3684 {
3685 	struct hclge_mta_filter_mode_cmd *req;
3686 	struct hclge_desc desc;
3687 	int ret;
3688 
3689 	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
3690 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
3691 
3692 	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
3693 		     enable);
3694 	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
3695 		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
3696 
3697 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3698 	if (ret) {
3699 		dev_err(&hdev->pdev->dev,
3700 			"Config mat filter mode failed for cmd_send, ret =%d.\n",
3701 			ret);
3702 		return ret;
3703 	}
3704 
3705 	return 0;
3706 }
3707 
3708 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
3709 			      u8 func_id,
3710 			      bool enable)
3711 {
3712 	struct hclge_cfg_func_mta_filter_cmd *req;
3713 	struct hclge_desc desc;
3714 	int ret;
3715 
3716 	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
3717 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
3718 
3719 	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
3720 		     enable);
3721 	req->function_id = func_id;
3722 
3723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3724 	if (ret) {
3725 		dev_err(&hdev->pdev->dev,
3726 			"Config func_id enable failed for cmd_send, ret =%d.\n",
3727 			ret);
3728 		return ret;
3729 	}
3730 
3731 	return 0;
3732 }
3733 
3734 static int hclge_set_mta_table_item(struct hclge_vport *vport,
3735 				    u16 idx,
3736 				    bool enable)
3737 {
3738 	struct hclge_dev *hdev = vport->back;
3739 	struct hclge_cfg_func_mta_item_cmd *req;
3740 	struct hclge_desc desc;
3741 	u16 item_idx = 0;
3742 	int ret;
3743 
3744 	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
3745 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
3746 	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
3747 
3748 	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
3749 		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
3750 	req->item_idx = cpu_to_le16(item_idx);
3751 
3752 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3753 	if (ret) {
3754 		dev_err(&hdev->pdev->dev,
3755 			"Config mta table item failed for cmd_send, ret =%d.\n",
3756 			ret);
3757 		return ret;
3758 	}
3759 
3760 	return 0;
3761 }
3762 
3763 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
3764 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
3765 {
3766 	struct hclge_dev *hdev = vport->back;
3767 	struct hclge_desc desc;
3768 	u8 resp_code;
3769 	u16 retval;
3770 	int ret;
3771 
3772 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
3773 
3774 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3775 
3776 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3777 	if (ret) {
3778 		dev_err(&hdev->pdev->dev,
3779 			"del mac addr failed for cmd_send, ret =%d.\n",
3780 			ret);
3781 		return ret;
3782 	}
3783 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3784 	retval = le16_to_cpu(desc.retval);
3785 
3786 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3787 					     HCLGE_MAC_VLAN_REMOVE);
3788 }
3789 
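/* Look up @req in the MAC_VLAN table. A multicast entry spans three
 * descriptors (its function id bitmap lives in desc[1]/desc[2]), so the
 * lookup is sent as a chained three-descriptor command; a unicast lookup
 * needs only one descriptor.
 */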
3790 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
3791 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
3792 				     struct hclge_desc *desc,
3793 				     bool is_mc)
3794 {
3795 	struct hclge_dev *hdev = vport->back;
3796 	u8 resp_code;
3797 	u16 retval;
3798 	int ret;
3799 
3800 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
3801 	if (is_mc) {
3802 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3803 		memcpy(desc[0].data,
3804 		       req,
3805 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3806 		hclge_cmd_setup_basic_desc(&desc[1],
3807 					   HCLGE_OPC_MAC_VLAN_ADD,
3808 					   true);
3809 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3810 		hclge_cmd_setup_basic_desc(&desc[2],
3811 					   HCLGE_OPC_MAC_VLAN_ADD,
3812 					   true);
3813 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
3814 	} else {
3815 		memcpy(desc[0].data,
3816 		       req,
3817 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3818 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
3819 	}
3820 	if (ret) {
3821 		dev_err(&hdev->pdev->dev,
3822 			"lookup mac addr failed for cmd_send, ret =%d.\n",
3823 			ret);
3824 		return ret;
3825 	}
3826 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
3827 	retval = le16_to_cpu(desc[0].retval);
3828 
3829 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
3830 					     HCLGE_MAC_VLAN_LKUP);
3831 }
3832 
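/* Write @req into the MAC_VLAN table. For a unicast entry a single
 * descriptor is built locally; for a multicast entry the three descriptors
 * filled by the preceding lookup are reused, so the updated function id
 * bitmap is written back together with the entry.
 */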
3833 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
3834 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
3835 				  struct hclge_desc *mc_desc)
3836 {
3837 	struct hclge_dev *hdev = vport->back;
3838 	int cfg_status;
3839 	u8 resp_code;
3840 	u16 retval;
3841 	int ret;
3842 
3843 	if (!mc_desc) {
3844 		struct hclge_desc desc;
3845 
3846 		hclge_cmd_setup_basic_desc(&desc,
3847 					   HCLGE_OPC_MAC_VLAN_ADD,
3848 					   false);
3849 		memcpy(desc.data, req,
3850 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3851 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3852 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
3853 		retval = le16_to_cpu(desc.retval);
3854 
3855 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3856 							   resp_code,
3857 							   HCLGE_MAC_VLAN_ADD);
3858 	} else {
3859 		hclge_cmd_reuse_desc(&mc_desc[0], false);
3860 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3861 		hclge_cmd_reuse_desc(&mc_desc[1], false);
3862 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3863 		hclge_cmd_reuse_desc(&mc_desc[2], false);
3864 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
3865 		memcpy(mc_desc[0].data, req,
3866 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
3867 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
3868 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
3869 		retval = le16_to_cpu(mc_desc[0].retval);
3870 
3871 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
3872 							   resp_code,
3873 							   HCLGE_MAC_VLAN_ADD);
3874 	}
3875 
3876 	if (ret) {
3877 		dev_err(&hdev->pdev->dev,
3878 			"add mac addr failed for cmd_send, ret =%d.\n",
3879 			ret);
3880 		return ret;
3881 	}
3882 
3883 	return cfg_status;
3884 }
3885 
3886 static int hclge_add_uc_addr(struct hnae3_handle *handle,
3887 			     const unsigned char *addr)
3888 {
3889 	struct hclge_vport *vport = hclge_get_vport(handle);
3890 
3891 	return hclge_add_uc_addr_common(vport, addr);
3892 }
3893 
3894 int hclge_add_uc_addr_common(struct hclge_vport *vport,
3895 			     const unsigned char *addr)
3896 {
3897 	struct hclge_dev *hdev = vport->back;
3898 	struct hclge_mac_vlan_tbl_entry_cmd req;
3900 	u16 egress_port = 0;
3901 
3902 	/* mac addr check */
3903 	if (is_zero_ether_addr(addr) ||
3904 	    is_broadcast_ether_addr(addr) ||
3905 	    is_multicast_ether_addr(addr)) {
3906 		dev_err(&hdev->pdev->dev,
3907 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
3908 			 addr,
3909 			 is_zero_ether_addr(addr),
3910 			 is_broadcast_ether_addr(addr),
3911 			 is_multicast_ether_addr(addr));
3912 		return -EINVAL;
3913 	}
3914 
3915 	memset(&req, 0, sizeof(req));
3916 	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3917 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3918 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
3919 	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3920 
3921 	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
3922 	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
3923 	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
3924 		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
3925 	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
3926 		       HCLGE_MAC_EPORT_PFID_S, 0);
3927 
3928 	req.egress_port = cpu_to_le16(egress_port);
3929 
3930 	hclge_prepare_mac_addr(&req, addr);
3931 
	return hclge_add_mac_vlan_tbl(vport, &req, NULL);
3935 }
3936 
3937 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
3938 			    const unsigned char *addr)
3939 {
3940 	struct hclge_vport *vport = hclge_get_vport(handle);
3941 
3942 	return hclge_rm_uc_addr_common(vport, addr);
3943 }
3944 
3945 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
3946 			    const unsigned char *addr)
3947 {
3948 	struct hclge_dev *hdev = vport->back;
3949 	struct hclge_mac_vlan_tbl_entry_cmd req;
3951 
3952 	/* mac addr check */
3953 	if (is_zero_ether_addr(addr) ||
3954 	    is_broadcast_ether_addr(addr) ||
3955 	    is_multicast_ether_addr(addr)) {
3956 		dev_dbg(&hdev->pdev->dev,
3957 			"Remove mac err! invalid mac:%pM.\n",
3958 			 addr);
3959 		return -EINVAL;
3960 	}
3961 
3962 	memset(&req, 0, sizeof(req));
3963 	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3964 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3965 	hclge_prepare_mac_addr(&req, addr);
	return hclge_remove_mac_vlan_tbl(vport, &req);
3969 }
3970 
3971 static int hclge_add_mc_addr(struct hnae3_handle *handle,
3972 			     const unsigned char *addr)
3973 {
3974 	struct hclge_vport *vport = hclge_get_vport(handle);
3975 
	return hclge_add_mc_addr_common(vport, addr);
3977 }
3978 
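/* Add a multicast address on behalf of @vport: look the address up in the
 * MAC_VLAN table, set this vport's bit in the function id bitmap (creating
 * the entry if it does not exist yet), write the entry back and enable the
 * matching MTA table item.
 */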
3979 int hclge_add_mc_addr_common(struct hclge_vport *vport,
3980 			     const unsigned char *addr)
3981 {
3982 	struct hclge_dev *hdev = vport->back;
3983 	struct hclge_mac_vlan_tbl_entry_cmd req;
3984 	struct hclge_desc desc[3];
3985 	u16 tbl_idx;
3986 	int status;
3987 
3988 	/* mac addr check */
3989 	if (!is_multicast_ether_addr(addr)) {
3990 		dev_err(&hdev->pdev->dev,
3991 			"Add mc mac err! invalid mac:%pM.\n",
3992 			 addr);
3993 		return -EINVAL;
3994 	}
3995 	memset(&req, 0, sizeof(req));
3996 	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
3997 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
3998 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
3999 	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4000 	hclge_prepare_mac_addr(&req, addr);
4001 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4002 	if (!status) {
		/* This mac addr already exists, update the VFID for it */
4004 		hclge_update_desc_vfid(desc, vport->vport_id, false);
4005 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4006 	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
4011 		hclge_update_desc_vfid(desc, vport->vport_id, false);
4012 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4013 	}
4014 
	if (status)
		return status;

	/* Set the MTA table item for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
4020 }
4021 
4022 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
4023 			    const unsigned char *addr)
4024 {
4025 	struct hclge_vport *vport = hclge_get_vport(handle);
4026 
4027 	return hclge_rm_mc_addr_common(vport, addr);
4028 }
4029 
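/* Remove a multicast address on behalf of @vport: clear this vport's bit in
 * the entry's function id bitmap, delete the entry once no function
 * references it any more, and clear the matching MTA table item.
 */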
4030 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
4031 			    const unsigned char *addr)
4032 {
4033 	struct hclge_dev *hdev = vport->back;
4034 	struct hclge_mac_vlan_tbl_entry_cmd req;
	int status;
4036 	struct hclge_desc desc[3];
4037 	u16 tbl_idx;
4038 
4039 	/* mac addr check */
4040 	if (!is_multicast_ether_addr(addr)) {
4041 		dev_dbg(&hdev->pdev->dev,
4042 			"Remove mc mac err! invalid mac:%pM.\n",
4043 			 addr);
4044 		return -EINVAL;
4045 	}
4046 
4047 	memset(&req, 0, sizeof(req));
4048 	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
4049 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4050 	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
4051 	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
4052 	hclge_prepare_mac_addr(&req, addr);
4053 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
4054 	if (!status) {
		/* This mac addr exists, remove this vport's VFID from it */
4056 		hclge_update_desc_vfid(desc, vport->vport_id, true);
4057 
4058 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
4060 			status = hclge_remove_mac_vlan_tbl(vport, &req);
4061 		else
			/* Not all the vfids are zero, so just update the entry */
4063 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
4064 
4065 	} else {
		/* This mac addr does not exist, so it can't be deleted */
4067 		dev_err(&hdev->pdev->dev,
4068 			"Rm multicast mac addr failed, ret = %d.\n",
4069 			status);
4070 		return -EIO;
4071 	}
4072 
	if (status)
		return status;

	/* Clear the MTA table item for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
4078 }
4079 
4080 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4081 {
4082 	struct hclge_vport *vport = hclge_get_vport(handle);
4083 	struct hclge_dev *hdev = vport->back;
4084 
4085 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
4086 }
4087 
4088 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
4089 {
4090 	const unsigned char *new_addr = (const unsigned char *)p;
4091 	struct hclge_vport *vport = hclge_get_vport(handle);
4092 	struct hclge_dev *hdev = vport->back;
4093 
4094 	/* mac addr check */
4095 	if (is_zero_ether_addr(new_addr) ||
4096 	    is_broadcast_ether_addr(new_addr) ||
4097 	    is_multicast_ether_addr(new_addr)) {
4098 		dev_err(&hdev->pdev->dev,
4099 			"Change uc mac err! invalid mac:%p.\n",
4100 			 new_addr);
4101 		return -EINVAL;
4102 	}
4103 
4104 	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
4105 
4106 	if (!hclge_add_uc_addr(handle, new_addr)) {
4107 		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4108 		return 0;
4109 	}
4110 
4111 	return -EIO;
4112 }
4113 
4114 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4115 				      bool filter_en)
4116 {
4117 	struct hclge_vlan_filter_ctrl_cmd *req;
4118 	struct hclge_desc desc;
4119 	int ret;
4120 
4121 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4122 
4123 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4124 	req->vlan_type = vlan_type;
4125 	req->vlan_fe = filter_en;
4126 
4127 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4128 	if (ret) {
4129 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4130 			ret);
4131 		return ret;
4132 	}
4133 
4134 	return 0;
4135 }
4136 
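/* Program @vlan into the VF VLAN filter table. The per-VF bitmap is wider
 * than one descriptor, so it is split across two descriptors of
 * HCLGE_MAX_VF_BYTES each; @is_kill selects whether the filter is added or
 * removed for the VF marked in the bitmap.
 */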
4137 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4138 			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
4139 {
4140 #define HCLGE_MAX_VF_BYTES  16
4141 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
4142 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
4143 	struct hclge_desc desc[2];
4144 	u8 vf_byte_val;
4145 	u8 vf_byte_off;
4146 	int ret;
4147 
4148 	hclge_cmd_setup_basic_desc(&desc[0],
4149 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4150 	hclge_cmd_setup_basic_desc(&desc[1],
4151 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4152 
4153 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4154 
4155 	vf_byte_off = vfid / 8;
4156 	vf_byte_val = 1 << (vfid % 8);
4157 
4158 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4159 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
4160 
4161 	req0->vlan_id  = cpu_to_le16(vlan);
4162 	req0->vlan_cfg = is_kill;
4163 
4164 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4165 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4166 	else
4167 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4168 
4169 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
4170 	if (ret) {
4171 		dev_err(&hdev->pdev->dev,
4172 			"Send vf vlan command fail, ret =%d.\n",
4173 			ret);
4174 		return ret;
4175 	}
4176 
4177 	if (!is_kill) {
4178 		if (!req0->resp_code || req0->resp_code == 1)
4179 			return 0;
4180 
4181 		dev_err(&hdev->pdev->dev,
4182 			"Add vf vlan filter fail, ret =%d.\n",
4183 			req0->resp_code);
4184 	} else {
4185 		if (!req0->resp_code)
4186 			return 0;
4187 
4188 		dev_err(&hdev->pdev->dev,
4189 			"Kill vf vlan filter fail, ret =%d.\n",
4190 			req0->resp_code);
4191 	}
4192 
4193 	return -EIO;
4194 }
4195 
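/* Program @vlan_id into the port VLAN filter table. The table is written in
 * blocks of 160 VLANs: vlan_offset selects the block and vlan_offset_bitmap
 * marks the VLAN within it. The same VLAN is then also programmed into the
 * VF VLAN table for function 0.
 */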
4196 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
4197 				      __be16 proto, u16 vlan_id,
4198 				      bool is_kill)
4199 {
4200 	struct hclge_vport *vport = hclge_get_vport(handle);
4201 	struct hclge_dev *hdev = vport->back;
4202 	struct hclge_vlan_filter_pf_cfg_cmd *req;
4203 	struct hclge_desc desc;
4204 	u8 vlan_offset_byte_val;
4205 	u8 vlan_offset_byte;
4206 	u8 vlan_offset_160;
4207 	int ret;
4208 
4209 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4210 
4211 	vlan_offset_160 = vlan_id / 160;
4212 	vlan_offset_byte = (vlan_id % 160) / 8;
4213 	vlan_offset_byte_val = 1 << (vlan_id % 8);
4214 
4215 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
4216 	req->vlan_offset = vlan_offset_160;
4217 	req->vlan_cfg = is_kill;
4218 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4219 
4220 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4221 	if (ret) {
4222 		dev_err(&hdev->pdev->dev,
4223 			"port vlan command, send fail, ret =%d.\n",
4224 			ret);
4225 		return ret;
4226 	}
4227 
4228 	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
4229 	if (ret) {
4230 		dev_err(&hdev->pdev->dev,
4231 			"Set pf vlan filter config fail, ret =%d.\n",
4232 			ret);
4233 		return -EIO;
4234 	}
4235 
4236 	return 0;
4237 }
4238 
4239 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4240 				    u16 vlan, u8 qos, __be16 proto)
4241 {
4242 	struct hclge_vport *vport = hclge_get_vport(handle);
4243 	struct hclge_dev *hdev = vport->back;
4244 
4245 	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4246 		return -EINVAL;
4247 	if (proto != htons(ETH_P_8021Q))
4248 		return -EPROTONOSUPPORT;
4249 
4250 	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
4251 }
4252 
4253 static int hclge_init_vlan_config(struct hclge_dev *hdev)
4254 {
4255 #define HCLGE_VLAN_TYPE_VF_TABLE   0
4256 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
4257 	struct hnae3_handle *handle;
4258 	int ret;
4259 
4260 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
4261 					 true);
4262 	if (ret)
4263 		return ret;
4264 
4265 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
4266 					 true);
4267 	if (ret)
4268 		return ret;
4269 
4270 	handle = &hdev->vport[0].nic;
4271 	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4272 }
4273 
4274 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
4275 {
4276 	struct hclge_vport *vport = hclge_get_vport(handle);
4277 	struct hclge_config_max_frm_size_cmd *req;
4278 	struct hclge_dev *hdev = vport->back;
4279 	struct hclge_desc desc;
4280 	int ret;
4281 
4282 	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
4283 		return -EINVAL;
4284 
4285 	hdev->mps = new_mtu;
4286 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4287 
4288 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4289 	req->max_frm_size = cpu_to_le16(new_mtu);
4290 
4291 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4292 	if (ret) {
4293 		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
4294 		return ret;
4295 	}
4296 
4297 	return 0;
4298 }
4299 
4300 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
4301 				    bool enable)
4302 {
4303 	struct hclge_reset_tqp_queue_cmd *req;
4304 	struct hclge_desc desc;
4305 	int ret;
4306 
4307 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
4308 
4309 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4310 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4311 	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
4312 
4313 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4314 	if (ret) {
4315 		dev_err(&hdev->pdev->dev,
4316 			"Send tqp reset cmd error, status =%d\n", ret);
4317 		return ret;
4318 	}
4319 
4320 	return 0;
4321 }
4322 
4323 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
4324 {
4325 	struct hclge_reset_tqp_queue_cmd *req;
4326 	struct hclge_desc desc;
4327 	int ret;
4328 
4329 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
4330 
4331 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
4332 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
4333 
4334 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4335 	if (ret) {
4336 		dev_err(&hdev->pdev->dev,
4337 			"Get reset status error, status =%d\n", ret);
4338 		return ret;
4339 	}
4340 
4341 	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
4342 }
4343 
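/* Reset a single TQP: disable the queue, assert the per-queue reset, poll
 * the reset status (up to HCLGE_TQP_RESET_TRY_TIMES tries, 20 ms apart) and
 * finally deassert the reset again.
 */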
4344 static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
4345 {
4346 	struct hclge_vport *vport = hclge_get_vport(handle);
4347 	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status = 0;
4350 	int ret;
4351 
4352 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
4353 	if (ret) {
4354 		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
4355 		return;
4356 	}
4357 
4358 	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
4359 	if (ret) {
4360 		dev_warn(&hdev->pdev->dev,
4361 			 "Send reset tqp cmd fail, ret = %d\n", ret);
4362 		return;
4363 	}
4364 
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (!reset_status) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}
4378 
4379 	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
4380 	if (ret) {
4381 		dev_warn(&hdev->pdev->dev,
4382 			 "Deassert the soft reset fail, ret = %d\n", ret);
4383 		return;
4384 	}
4385 }
4386 
4387 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
4388 {
4389 	struct hclge_vport *vport = hclge_get_vport(handle);
4390 	struct hclge_dev *hdev = vport->back;
4391 
4392 	return hdev->fw_version;
4393 }
4394 
4395 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
4396 				 u32 *rx_en, u32 *tx_en)
4397 {
4398 	struct hclge_vport *vport = hclge_get_vport(handle);
4399 	struct hclge_dev *hdev = vport->back;
4400 
4401 	*auto_neg = hclge_get_autoneg(handle);
4402 
4403 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
4404 		*rx_en = 0;
4405 		*tx_en = 0;
4406 		return;
4407 	}
4408 
4409 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
4410 		*rx_en = 1;
4411 		*tx_en = 0;
4412 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
4413 		*tx_en = 1;
4414 		*rx_en = 0;
4415 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
4416 		*rx_en = 1;
4417 		*tx_en = 1;
4418 	} else {
4419 		*rx_en = 0;
4420 		*tx_en = 0;
4421 	}
4422 }
4423 
4424 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
4425 					  u8 *auto_neg, u32 *speed, u8 *duplex)
4426 {
4427 	struct hclge_vport *vport = hclge_get_vport(handle);
4428 	struct hclge_dev *hdev = vport->back;
4429 
4430 	if (speed)
4431 		*speed = hdev->hw.mac.speed;
4432 	if (duplex)
4433 		*duplex = hdev->hw.mac.duplex;
4434 	if (auto_neg)
4435 		*auto_neg = hdev->hw.mac.autoneg;
4436 }
4437 
4438 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
4439 {
4440 	struct hclge_vport *vport = hclge_get_vport(handle);
4441 	struct hclge_dev *hdev = vport->back;
4442 
4443 	if (media_type)
4444 		*media_type = hdev->hw.mac.media_type;
4445 }
4446 
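/* Report the PHY MDI/MDI-X control setting and the resolved MDI-X status
 * for ethtool. The relevant bits live on a separate PHY register page, so
 * the page is switched before the reads and restored to the copper page
 * afterwards.
 */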
4447 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
4448 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
4449 {
4450 	struct hclge_vport *vport = hclge_get_vport(handle);
4451 	struct hclge_dev *hdev = vport->back;
4452 	struct phy_device *phydev = hdev->hw.mac.phydev;
4453 	int mdix_ctrl, mdix, retval, is_resolved;
4454 
4455 	if (!phydev) {
4456 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4457 		*tp_mdix = ETH_TP_MDI_INVALID;
4458 		return;
4459 	}
4460 
4461 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
4462 
4463 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
4464 	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
4465 				   HCLGE_PHY_MDIX_CTRL_S);
4466 
4467 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
4468 	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
4469 	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
4470 
4471 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
4472 
4473 	switch (mdix_ctrl) {
4474 	case 0x0:
4475 		*tp_mdix_ctrl = ETH_TP_MDI;
4476 		break;
4477 	case 0x1:
4478 		*tp_mdix_ctrl = ETH_TP_MDI_X;
4479 		break;
4480 	case 0x3:
4481 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
4482 		break;
4483 	default:
4484 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
4485 		break;
4486 	}
4487 
4488 	if (!is_resolved)
4489 		*tp_mdix = ETH_TP_MDI_INVALID;
4490 	else if (mdix)
4491 		*tp_mdix = ETH_TP_MDI_X;
4492 	else
4493 		*tp_mdix = ETH_TP_MDI;
4494 }
4495 
4496 static int hclge_init_client_instance(struct hnae3_client *client,
4497 				      struct hnae3_ae_dev *ae_dev)
4498 {
4499 	struct hclge_dev *hdev = ae_dev->priv;
4500 	struct hclge_vport *vport;
4501 	int i, ret;
4502 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4504 		vport = &hdev->vport[i];
4505 
4506 		switch (client->type) {
4507 		case HNAE3_CLIENT_KNIC:
4508 
4509 			hdev->nic_client = client;
4510 			vport->nic.client = client;
4511 			ret = client->ops->init_instance(&vport->nic);
4512 			if (ret)
4513 				goto err;
4514 
4515 			if (hdev->roce_client &&
4516 			    hnae3_dev_roce_supported(hdev)) {
4517 				struct hnae3_client *rc = hdev->roce_client;
4518 
4519 				ret = hclge_init_roce_base_info(vport);
4520 				if (ret)
4521 					goto err;
4522 
4523 				ret = rc->ops->init_instance(&vport->roce);
4524 				if (ret)
4525 					goto err;
4526 			}
4527 
4528 			break;
4529 		case HNAE3_CLIENT_UNIC:
4530 			hdev->nic_client = client;
4531 			vport->nic.client = client;
4532 
4533 			ret = client->ops->init_instance(&vport->nic);
4534 			if (ret)
4535 				goto err;
4536 
4537 			break;
4538 		case HNAE3_CLIENT_ROCE:
4539 			if (hnae3_dev_roce_supported(hdev)) {
4540 				hdev->roce_client = client;
4541 				vport->roce.client = client;
4542 			}
4543 
4544 			if (hdev->roce_client && hdev->nic_client) {
4545 				ret = hclge_init_roce_base_info(vport);
4546 				if (ret)
4547 					goto err;
4548 
4549 				ret = client->ops->init_instance(&vport->roce);
4550 				if (ret)
4551 					goto err;
4552 			}
4553 		}
4554 	}
4555 
4556 	return 0;
4557 err:
4558 	return ret;
4559 }
4560 
4561 static void hclge_uninit_client_instance(struct hnae3_client *client,
4562 					 struct hnae3_ae_dev *ae_dev)
4563 {
4564 	struct hclge_dev *hdev = ae_dev->priv;
4565 	struct hclge_vport *vport;
4566 	int i;
4567 
4568 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4569 		vport = &hdev->vport[i];
4570 		if (hdev->roce_client) {
4571 			hdev->roce_client->ops->uninit_instance(&vport->roce,
4572 								0);
4573 			hdev->roce_client = NULL;
4574 			vport->roce.client = NULL;
4575 		}
4576 		if (client->type == HNAE3_CLIENT_ROCE)
4577 			return;
4578 		if (client->ops->uninit_instance) {
4579 			client->ops->uninit_instance(&vport->nic, 0);
4580 			hdev->nic_client = NULL;
4581 			vport->nic.client = NULL;
4582 		}
4583 	}
4584 }
4585 
4586 static int hclge_pci_init(struct hclge_dev *hdev)
4587 {
4588 	struct pci_dev *pdev = hdev->pdev;
4589 	struct hclge_hw *hw;
4590 	int ret;
4591 
4592 	ret = pci_enable_device(pdev);
4593 	if (ret) {
4594 		dev_err(&pdev->dev, "failed to enable PCI device\n");
4595 		goto err_no_drvdata;
4596 	}
4597 
4598 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4599 	if (ret) {
4600 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4601 		if (ret) {
4602 			dev_err(&pdev->dev,
4603 				"can't set consistent PCI DMA");
4604 			goto err_disable_device;
4605 		}
4606 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
4607 	}
4608 
4609 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
4610 	if (ret) {
4611 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
4612 		goto err_disable_device;
4613 	}
4614 
4615 	pci_set_master(pdev);
4616 	hw = &hdev->hw;
4617 	hw->back = hdev;
4618 	hw->io_base = pcim_iomap(pdev, 2, 0);
4619 	if (!hw->io_base) {
4620 		dev_err(&pdev->dev, "Can't map configuration register space\n");
4621 		ret = -ENOMEM;
4622 		goto err_clr_master;
4623 	}
4624 
4625 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
4626 
4627 	return 0;
4628 err_clr_master:
4629 	pci_clear_master(pdev);
4630 	pci_release_regions(pdev);
4631 err_disable_device:
4632 	pci_disable_device(pdev);
4633 err_no_drvdata:
4634 	pci_set_drvdata(pdev, NULL);
4635 
4636 	return ret;
4637 }
4638 
4639 static void hclge_pci_uninit(struct hclge_dev *hdev)
4640 {
4641 	struct pci_dev *pdev = hdev->pdev;
4642 
4643 	pci_free_irq_vectors(pdev);
4644 	pci_clear_master(pdev);
4645 	pci_release_mem_regions(pdev);
4646 	pci_disable_device(pdev);
4647 }
4648 
4649 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
4650 {
4651 	struct pci_dev *pdev = ae_dev->pdev;
4652 	struct hclge_dev *hdev;
4653 	int ret;
4654 
4655 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
4656 	if (!hdev) {
4657 		ret = -ENOMEM;
4658 		goto err_hclge_dev;
4659 	}
4660 
4661 	hdev->pdev = pdev;
4662 	hdev->ae_dev = ae_dev;
4663 	hdev->reset_type = HNAE3_NONE_RESET;
4664 	ae_dev->priv = hdev;
4665 
4666 	ret = hclge_pci_init(hdev);
4667 	if (ret) {
4668 		dev_err(&pdev->dev, "PCI init failed\n");
4669 		goto err_pci_init;
4670 	}
4671 
4672 	/* Firmware command queue initialize */
4673 	ret = hclge_cmd_queue_init(hdev);
4674 	if (ret) {
4675 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
4676 		return ret;
4677 	}
4678 
4679 	/* Firmware command initialize */
4680 	ret = hclge_cmd_init(hdev);
4681 	if (ret)
4682 		goto err_cmd_init;
4683 
4684 	ret = hclge_get_cap(hdev);
4685 	if (ret) {
4686 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4687 			ret);
4688 		return ret;
4689 	}
4690 
4691 	ret = hclge_configure(hdev);
4692 	if (ret) {
4693 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4694 		return ret;
4695 	}
4696 
4697 	ret = hclge_init_msi(hdev);
4698 	if (ret) {
4699 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
4700 		return ret;
4701 	}
4702 
4703 	ret = hclge_misc_irq_init(hdev);
4704 	if (ret) {
4705 		dev_err(&pdev->dev,
4706 			"Misc IRQ(vector0) init error, ret = %d.\n",
4707 			ret);
4708 		return ret;
4709 	}
4710 
4711 	ret = hclge_alloc_tqps(hdev);
4712 	if (ret) {
4713 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
4714 		return ret;
4715 	}
4716 
4717 	ret = hclge_alloc_vport(hdev);
4718 	if (ret) {
4719 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
4720 		return ret;
4721 	}
4722 
4723 	ret = hclge_map_tqp(hdev);
4724 	if (ret) {
4725 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
4726 		return ret;
4727 	}
4728 
4729 	ret = hclge_mac_mdio_config(hdev);
4730 	if (ret) {
4731 		dev_warn(&hdev->pdev->dev,
4732 			 "mdio config fail ret=%d\n", ret);
4733 		return ret;
4734 	}
4735 
4736 	ret = hclge_mac_init(hdev);
4737 	if (ret) {
4738 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4739 		return ret;
4740 	}
4741 	ret = hclge_buffer_alloc(hdev);
4742 	if (ret) {
4743 		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4744 		return  ret;
4745 	}
4746 
4747 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4748 	if (ret) {
4749 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4750 		return ret;
4751 	}
4752 
4753 	ret = hclge_init_vlan_config(hdev);
4754 	if (ret) {
4755 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4756 		return  ret;
4757 	}
4758 
4759 	ret = hclge_tm_schd_init(hdev);
4760 	if (ret) {
4761 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4762 		return ret;
4763 	}
4764 
4765 	ret = hclge_rss_init_hw(hdev);
4766 	if (ret) {
4767 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4768 		return ret;
4769 	}
4770 
4771 	hclge_dcb_ops_set(hdev);
4772 
4773 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
4774 	INIT_WORK(&hdev->service_task, hclge_service_task);
4775 
4776 	/* Enable MISC vector(vector0) */
4777 	hclge_enable_vector(&hdev->misc_vector, true);
4778 
4779 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
4780 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
4781 
4782 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
4783 	return 0;
4784 
4785 err_cmd_init:
4786 	pci_release_regions(pdev);
4787 err_pci_init:
4788 	pci_set_drvdata(pdev, NULL);
4789 err_hclge_dev:
4790 	return ret;
4791 }
4792 
4793 static void hclge_stats_clear(struct hclge_dev *hdev)
4794 {
4795 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
4796 }
4797 
4798 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
4799 {
4800 	struct hclge_dev *hdev = ae_dev->priv;
4801 	struct pci_dev *pdev = ae_dev->pdev;
4802 	int ret;
4803 
4804 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
4805 
4806 	hclge_stats_clear(hdev);
4807 
4808 	ret = hclge_cmd_init(hdev);
4809 	if (ret) {
4810 		dev_err(&pdev->dev, "Cmd queue init failed\n");
4811 		return ret;
4812 	}
4813 
4814 	ret = hclge_get_cap(hdev);
4815 	if (ret) {
4816 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
4817 			ret);
4818 		return ret;
4819 	}
4820 
4821 	ret = hclge_configure(hdev);
4822 	if (ret) {
4823 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
4824 		return ret;
4825 	}
4826 
4827 	ret = hclge_map_tqp(hdev);
4828 	if (ret) {
4829 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
4830 		return ret;
4831 	}
4832 
4833 	ret = hclge_mac_init(hdev);
4834 	if (ret) {
4835 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
4836 		return ret;
4837 	}
4838 
4839 	ret = hclge_buffer_alloc(hdev);
4840 	if (ret) {
4841 		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
4842 		return ret;
4843 	}
4844 
4845 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
4846 	if (ret) {
4847 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
4848 		return ret;
4849 	}
4850 
4851 	ret = hclge_init_vlan_config(hdev);
4852 	if (ret) {
4853 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
4854 		return ret;
4855 	}
4856 
4857 	ret = hclge_tm_schd_init(hdev);
4858 	if (ret) {
4859 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
4860 		return ret;
4861 	}
4862 
4863 	ret = hclge_rss_init_hw(hdev);
4864 	if (ret) {
4865 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
4866 		return ret;
4867 	}
4868 
4869 	/* Enable MISC vector(vector0) */
4870 	hclge_enable_vector(&hdev->misc_vector, true);
4871 
4872 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
4873 		 HCLGE_DRIVER_NAME);
4874 
4875 	return 0;
4876 }
4877 
4878 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
4879 {
4880 	struct hclge_dev *hdev = ae_dev->priv;
4881 	struct hclge_mac *mac = &hdev->hw.mac;
4882 
4883 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
4884 
4885 	if (IS_ENABLED(CONFIG_PCI_IOV))
4886 		hclge_disable_sriov(hdev);
4887 
4888 	if (hdev->service_timer.function)
4889 		del_timer_sync(&hdev->service_timer);
4890 	if (hdev->service_task.func)
4891 		cancel_work_sync(&hdev->service_task);
4892 
4893 	if (mac->phydev)
4894 		mdiobus_unregister(mac->mdio_bus);
4895 
4896 	/* Disable MISC vector(vector0) */
4897 	hclge_enable_vector(&hdev->misc_vector, false);
4898 	hclge_free_vector(hdev, 0);
4899 	hclge_destroy_cmd_queue(&hdev->hw);
4900 	hclge_pci_uninit(hdev);
4901 	ae_dev->priv = NULL;
4902 }
4903 
4904 static const struct hnae3_ae_ops hclge_ops = {
4905 	.init_ae_dev = hclge_init_ae_dev,
4906 	.uninit_ae_dev = hclge_uninit_ae_dev,
4907 	.init_client_instance = hclge_init_client_instance,
4908 	.uninit_client_instance = hclge_uninit_client_instance,
4909 	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
4910 	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
4911 	.get_vector = hclge_get_vector,
4912 	.set_promisc_mode = hclge_set_promisc_mode,
4913 	.set_loopback = hclge_set_loopback,
4914 	.start = hclge_ae_start,
4915 	.stop = hclge_ae_stop,
4916 	.get_status = hclge_get_status,
4917 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
4918 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
4919 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
4920 	.get_media_type = hclge_get_media_type,
4921 	.get_rss_key_size = hclge_get_rss_key_size,
4922 	.get_rss_indir_size = hclge_get_rss_indir_size,
4923 	.get_rss = hclge_get_rss,
4924 	.set_rss = hclge_set_rss,
4925 	.set_rss_tuple = hclge_set_rss_tuple,
4926 	.get_rss_tuple = hclge_get_rss_tuple,
4927 	.get_tc_size = hclge_get_tc_size,
4928 	.get_mac_addr = hclge_get_mac_addr,
4929 	.set_mac_addr = hclge_set_mac_addr,
4930 	.add_uc_addr = hclge_add_uc_addr,
4931 	.rm_uc_addr = hclge_rm_uc_addr,
4932 	.add_mc_addr = hclge_add_mc_addr,
4933 	.rm_mc_addr = hclge_rm_mc_addr,
4934 	.set_autoneg = hclge_set_autoneg,
4935 	.get_autoneg = hclge_get_autoneg,
4936 	.get_pauseparam = hclge_get_pauseparam,
4937 	.set_mtu = hclge_set_mtu,
4938 	.reset_queue = hclge_reset_tqp,
4939 	.get_stats = hclge_get_stats,
4940 	.update_stats = hclge_update_stats,
4941 	.get_strings = hclge_get_strings,
4942 	.get_sset_count = hclge_get_sset_count,
4943 	.get_fw_version = hclge_get_fw_version,
4944 	.get_mdix_mode = hclge_get_mdix_mode,
4945 	.set_vlan_filter = hclge_set_port_vlan_filter,
4946 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
4947 	.reset_event = hclge_reset_event,
4948 };
4949 
4950 static struct hnae3_ae_algo ae_algo = {
4951 	.ops = &hclge_ops,
4952 	.name = HCLGE_NAME,
4953 	.pdev_id_table = ae_algo_pci_tbl,
4954 };
4955 
4956 static int hclge_init(void)
4957 {
4958 	pr_info("%s is initializing\n", HCLGE_NAME);
4959 
4960 	return hnae3_register_ae_algo(&ae_algo);
4961 }
4962 
4963 static void hclge_exit(void)
4964 {
4965 	hnae3_unregister_ae_algo(&ae_algo);
4966 }
4967 module_init(hclge_init);
4968 module_exit(hclge_exit);
4969 
4970 MODULE_LICENSE("GPL");
4971 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4972 MODULE_DESCRIPTION("HCLGE Driver");
4973 MODULE_VERSION(HCLGE_MOD_VERSION);
4974