xref: /openbmc/linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h (revision f79e4d5f92a129a1159c973735007d4ddc8541f3)
1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #ifndef __HCLGE_MAIN_H
11 #define __HCLGE_MAIN_H
12 #include <linux/fs.h>
13 #include <linux/types.h>
14 #include <linux/phy.h>
15 #include <linux/if_vlan.h>
16 
17 #include "hclge_cmd.h"
18 #include "hnae3.h"
19 
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_ROCE_VECTOR_OFFSET	96

/* PF configuration is read in 32-byte blocks from firmware */
#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

/* Interrupt vector register layout */
#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000

/* RSS indirection table, hash key and hash algorithm selection */
#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	0xf
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

/* RSS input tuple selection bits */
#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

/* Per-TC RSS queue counts (powers of two, selected by TC size index) */
#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

/* Multicast table (MTA) entry count */
#define HCLGE_MTA_TBL_SIZE		4096

#define HCLGE_TQP_RESET_TRY_TIMES	10

/* PHY page numbers used when accessing paged PHY registers */
#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		(5)
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B	(6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	(11)

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD           64
#define HCLGE_VF_NUM_PER_BYTE          8

/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0x0
#define HCLGE_CORE_RESET_BIT		0x1
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

/* Frame size limits; default includes Ethernet header, FCS and one VLAN tag */
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728

/* Link speed ability bits */
#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)
118 
/* Driver/device lifecycle and service-task state flags.
 * NOTE(review): hclge_dev::state is an unsigned long, so these are
 * presumably bit numbers used with set_bit()/test_bit() — the callers
 * are not visible in this header.
 */
enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_MAX		/* number of state bits, not a real state */
};
133 
/* Classification of an event signalled on the misc (vector 0) interrupt:
 * reset request, mailbox (CMDQ RX) message, or anything else.
 */
enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_OTHER,
};
139 
/* NOTE(review): "ENBALE" is a misspelling of "ENABLE"; the name is part of
 * the driver's interface, so it is flagged here rather than renamed.
 */
#define HCLGE_MPF_ENBALE 1

/* Hardware capability summary for this function */
struct hclge_caps {
	u16 num_tqp;		/* number of task queue pairs */
	u16 num_buffer_cell;
	u32 flag;
	u16 vmdq;
};
147 
/* MAC link speeds; enum values are the speed in Mbps */
enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_10M	= 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M	= 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G	= 1000,		/* 1000 Mbps   = 1 Gbps */
	HCLGE_MAC_SPEED_10G	= 10000,	/* 10000 Mbps  = 10 Gbps */
	HCLGE_MAC_SPEED_25G	= 25000,	/* 25000 Mbps  = 25 Gbps */
	HCLGE_MAC_SPEED_40G	= 40000,	/* 40000 Mbps  = 40 Gbps */
	HCLGE_MAC_SPEED_50G	= 50000,	/* 50000 Mbps  = 50 Gbps */
	HCLGE_MAC_SPEED_100G	= 100000	/* 100000 Mbps = 100 Gbps */
};
158 
/* MAC duplex mode */
enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};
163 
/* Which 12-bit slice of the destination MAC address indexes the
 * multicast table (MTA); names give the selected bit range.
 */
enum hclge_mta_dmac_sel_type {
	HCLGE_MAC_ADDR_47_36,
	HCLGE_MAC_ADDR_46_35,
	HCLGE_MAC_ADDR_45_34,
	HCLGE_MAC_ADDR_44_33,
};
170 
/* MAC state: address, link parameters and the attached PHY (if any) */
struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;		/* in Mbps, see enum HCLGE_MAC_SPEED */
	int link;	/* store the link status of mac & phy (if PHY exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};
186 
/* Low-level hardware handle: register BAR mapping, MAC, command queue */
struct hclge_hw {
	void __iomem *io_base;	/* mapped device register space */
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
	struct hclge_caps caps;
	void *back;		/* back-pointer to owning hclge_dev */
};
195 
/* TQP stats
 * NOTE(review): "hlcge" is a long-standing transposition of "hclge";
 * renaming would break every user of this header, so only flagged here.
 */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics ,opcode id:  0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics ,opcode id:  0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
203 
/* One task queue pair (Tx/Rx queue) owned by this PF */
struct hclge_tqp {
	struct device *dev;	/* Device for DMA mapping */
	struct hnae3_queue q;	/* generic queue handle exposed to hnae3 */
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;	/* true once this TQP has been handed to a vport */
};
212 
/* Flow-control modes: link-level pause (rx/tx/both), priority flow
 * control (PFC), or none.
 */
enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};
221 
#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
/* Per priority-group (PG) scheduling configuration */
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;		/* TCs that belong to this PG */
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];	/* DWRR weight per member TC */
};
232 
/* Per traffic-class (TC) scheduling configuration */
struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;		/* priority group this TC belongs to */
	u32 bw_limit;
};
239 
/* Static device configuration read at probe time */
struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;	/* descriptors per TQP ring */
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;	/* HCLGE_SUPPORT_*G_BIT mask */
};
253 
/* Traffic-management (scheduler) configuration for this PF */
struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;      /* It must be 1 if vNET-Base schd */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];	/* user priority -> TC map */
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
};
264 
/* Maps an ethtool stats string to the byte offset of the counter
 * inside the stats structure it describes.
 */
struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};
269 
/* all 64bit stats, opcode id: 0x0030
 * NOTE(review): field order mirrors the firmware query response layout —
 * do not reorder.
 */
struct hclge_64_bit_stats {
	/* query_igu_stat */
	u64 igu_rx_oversize_pkt;
	u64 igu_rx_undersize_pkt;
	u64 igu_rx_out_all_pkt;
	u64 igu_rx_uni_pkt;
	u64 igu_rx_multi_pkt;
	u64 igu_rx_broad_pkt;
	u64 rsv0;		/* reserved slot in the response */

	/* query_egu_stat */
	u64 egu_tx_out_all_pkt;
	u64 egu_tx_uni_pkt;
	u64 egu_tx_multi_pkt;
	u64 egu_tx_broad_pkt;

	/* ssu_ppp packet stats */
	u64 ssu_ppp_mac_key_num;
	u64 ssu_ppp_host_key_num;
	u64 ppp_ssu_mac_rlt_num;
	u64 ppp_ssu_host_rlt_num;

	/* ssu_tx_in_out_dfx_stats */
	u64 ssu_tx_in_num;
	u64 ssu_tx_out_num;
	/* ssu_rx_in_out_dfx_stats */
	u64 ssu_rx_in_num;
	u64 ssu_rx_out_num;
};
300 
/* all 32bit stats, opcode id: 0x0031
 * NOTE(review): counters are 32-bit in hardware (per the opcode name) but
 * held as u64 here — presumably accumulated by the driver; confirm in the
 * .c file. Field order mirrors the query response — do not reorder.
 */
struct hclge_32_bit_stats {
	u64 igu_rx_err_pkt;
	u64 igu_rx_no_eof_pkt;
	u64 igu_rx_no_sof_pkt;
	u64 egu_tx_1588_pkt;
	u64 egu_tx_err_pkt;
	u64 ssu_full_drop_num;
	u64 ssu_part_drop_num;
	u64 ppp_key_drop_num;
	u64 ppp_rlt_drop_num;
	u64 ssu_key_drop_num;
	u64 pkt_curr_buf_cnt;
	u64 qcn_fb_rcv_cnt;
	u64 qcn_fb_drop_cnt;
	u64 qcn_fb_invaild_cnt;
	u64 rsv0;		/* reserved slot in the response */
	u64 rx_packet_tc0_in_cnt;
	u64 rx_packet_tc1_in_cnt;
	u64 rx_packet_tc2_in_cnt;
	u64 rx_packet_tc3_in_cnt;
	u64 rx_packet_tc4_in_cnt;
	u64 rx_packet_tc5_in_cnt;
	u64 rx_packet_tc6_in_cnt;
	u64 rx_packet_tc7_in_cnt;
	u64 rx_packet_tc0_out_cnt;
	u64 rx_packet_tc1_out_cnt;
	u64 rx_packet_tc2_out_cnt;
	u64 rx_packet_tc3_out_cnt;
	u64 rx_packet_tc4_out_cnt;
	u64 rx_packet_tc5_out_cnt;
	u64 rx_packet_tc6_out_cnt;
	u64 rx_packet_tc7_out_cnt;

	/* Tx packet level statistics */
	u64 tx_packet_tc0_in_cnt;
	u64 tx_packet_tc1_in_cnt;
	u64 tx_packet_tc2_in_cnt;
	u64 tx_packet_tc3_in_cnt;
	u64 tx_packet_tc4_in_cnt;
	u64 tx_packet_tc5_in_cnt;
	u64 tx_packet_tc6_in_cnt;
	u64 tx_packet_tc7_in_cnt;
	u64 tx_packet_tc0_out_cnt;
	u64 tx_packet_tc1_out_cnt;
	u64 tx_packet_tc2_out_cnt;
	u64 tx_packet_tc3_out_cnt;
	u64 tx_packet_tc4_out_cnt;
	u64 tx_packet_tc5_out_cnt;
	u64 tx_packet_tc6_out_cnt;
	u64 tx_packet_tc7_out_cnt;

	/* packet buffer statistics */
	u64 pkt_curr_buf_tc0_cnt;
	u64 pkt_curr_buf_tc1_cnt;
	u64 pkt_curr_buf_tc2_cnt;
	u64 pkt_curr_buf_tc3_cnt;
	u64 pkt_curr_buf_tc4_cnt;
	u64 pkt_curr_buf_tc5_cnt;
	u64 pkt_curr_buf_tc6_cnt;
	u64 pkt_curr_buf_tc7_cnt;

	u64 mb_uncopy_num;
	u64 lo_pri_unicast_rlt_drop_num;
	u64 hi_pri_multicast_rlt_drop_num;
	u64 lo_pri_multicast_rlt_drop_num;
	u64 rx_oq_drop_pkt_cnt;
	u64 tx_oq_drop_pkt_cnt;
	u64 nic_l2_err_drop_pkt_cnt;
	u64 roc_l2_err_drop_pkt_cnt;
};
372 
/* MAC stats, opcode id: 0x0032
 * NOTE(review): field order mirrors the firmware query response layout —
 * do not reorder.
 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;		/* reserved slot in the response */
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;		/* reserved slot in the response */
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};
460 
#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
/* Aggregate of all hardware statistics plus the update timer counter */
struct hclge_hw_stats {
	struct hclge_mac_stats      mac_stats;
	struct hclge_64_bit_stats   all_64_bit_stats;
	struct hclge_32_bit_stats   all_32_bit_stats;
	u32 stats_timer;	/* ticks since last full stats refresh */
};
468 
/* Ethertype values used to recognize/insert VLAN tags.
 * "ot" = outer tag, "in" = inner tag, "fst"/"sec" = first/second match.
 */
struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};
477 
#define HCLGE_VPORT_NUM 256
/* Per-PF driver context: hardware handle, resources, scheduling state,
 * interrupt vectors, service tasks and attached clients.
 */
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;		/* HCLGE_STATE_* bit flags */

	enum hnae3_reset_type reset_type;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	/* Base task tqp physical id of this PF */
	u16 base_tqp_pid;
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */

	/* Num of guaranteed filters for this PF */
	u16 fdir_pf_filter_count;
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;		/* array of num_tqps TQPs */
	struct hclge_vport *vport;	/* array of num_alloc_vport vports */

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 mps; /* Max packet size */

	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
	bool enable_mta; /* Multicast filter enable */

	struct hclge_vlan_type_cfg vlan_type_cfg;

	/* per-VLAN-id bitmap of which vports have that VLAN configured */
	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
569 
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16  default_tag1;	/* The default inner vlan tag to insert */
	u16  default_tag2;	/* The default outer vlan tag to insert */
};
581 
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether strip inner vlan tag */
	bool strip_tag2_en;	/* Whether strip outer vlan tag */
	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
};
589 
/* Per-flow-type RSS input tuple enables (HCLGE_*_BIT masks) */
struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};
600 
/* One virtual port (PF itself or a VF) and its queue/RSS/VLAN state */
struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;		/* queue-set offset in the scheduler */
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;		/* DWRR scheduling weight */

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	int vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	bool accept_mta_mc; /* whether to accept mta filter multicast */
	/* shadow copy of the hardware MTA table bits set for this vport */
	unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
};
628 
/* Fill a promiscuous-mode parameter block for the given vport */
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

/* Unicast/multicast MAC address table management for a vport */
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

/* Multicast table (MTA) filter configuration */
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable);
int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
/* Map (or unmap, en == false) a ring chain onto an interrupt vector */
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);
654 
655 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
656 {
657 	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
658 
659 	return tqp->index;
660 }
661 
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
/* Add (is_kill == false) or remove (is_kill == true) a VLAN filter */
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

/* Mailbox and reset entry points */
void hclge_mbx_handler(struct hclge_dev *hdev);
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
#endif
677