/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM		8

#define HCLGE_RD_FIRST_STATS_NUM        2
#define HCLGE_RD_OTHER_STATS_NUM        4

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000

#define HCLGE_CMDQ_TX_ADDR_L_REG	0x27000
#define HCLGE_CMDQ_TX_ADDR_H_REG	0x27004
#define HCLGE_CMDQ_TX_DEPTH_REG		0x27008
#define HCLGE_CMDQ_TX_TAIL_REG		0x27010
#define HCLGE_CMDQ_TX_HEAD_REG		0x27014
#define HCLGE_CMDQ_RX_ADDR_L_REG	0x27018
#define HCLGE_CMDQ_RX_ADDR_H_REG	0x2701C
#define HCLGE_CMDQ_RX_DEPTH_REG		0x27020
#define HCLGE_CMDQ_RX_TAIL_REG		0x27024
#define HCLGE_CMDQ_RX_HEAD_REG		0x27028
#define HCLGE_CMDQ_INTR_SRC_REG		0x27100
#define HCLGE_CMDQ_INTR_STS_REG		0x27104
#define HCLGE_CMDQ_INTR_EN_REG		0x27108
#define HCLGE_CMDQ_INTR_GEN_REG		0x2710C

/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG	0x20600
#define HCLGE_RAS_OTHER_STS_REG		0x20B00
#define HCLGE_FUNC_RESET_STS_REG	0x20C00
#define HCLGE_GRO_EN_REG		0x28000

/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG	0x80000
#define HCLGE_RING_RX_ADDR_H_REG	0x80004
#define HCLGE_RING_RX_BD_NUM_REG	0x80008
#define HCLGE_RING_RX_BD_LENGTH_REG	0x8000C
#define HCLGE_RING_RX_MERGE_EN_REG	0x80014
#define HCLGE_RING_RX_TAIL_REG		0x80018
#define HCLGE_RING_RX_HEAD_REG		0x8001C
#define HCLGE_RING_RX_FBD_NUM_REG	0x80020
#define HCLGE_RING_RX_OFFSET_REG	0x80024
#define HCLGE_RING_RX_FBD_OFFSET_REG	0x80028
#define HCLGE_RING_RX_STASH_REG		0x80030
#define HCLGE_RING_RX_BD_ERR_REG	0x80034
#define HCLGE_RING_TX_ADDR_L_REG	0x80040
#define HCLGE_RING_TX_ADDR_H_REG	0x80044
#define HCLGE_RING_TX_BD_NUM_REG	0x80048
#define HCLGE_RING_TX_PRIORITY_REG	0x8004C
#define HCLGE_RING_TX_TC_REG		0x80050
#define HCLGE_RING_TX_MERGE_EN_REG	0x80054
#define HCLGE_RING_TX_TAIL_REG		0x80058
#define HCLGE_RING_TX_HEAD_REG		0x8005C
#define HCLGE_RING_TX_FBD_NUM_REG	0x80060
#define HCLGE_RING_TX_OFFSET_REG	0x80064
#define HCLGE_RING_TX_EBD_NUM_REG	0x80068
#define HCLGE_RING_TX_EBD_OFFSET_REG	0x80070
#define HCLGE_RING_TX_BD_ERR_REG	0x80074
#define HCLGE_RING_EN_REG		0x80090

/* bar registers for tqp interrupt */
#define HCLGE_TQP_INTR_CTRL_REG		0x20000
#define HCLGE_TQP_INTR_GL0_REG		0x20100
#define HCLGE_TQP_INTR_GL1_REG		0x20200
#define HCLGE_TQP_INTR_GL2_REG		0x20300
#define HCLGE_TQP_INTR_RL_REG		0x20900

#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
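
/* Worked example (assuming HCLGE_RSS_CFG_TBL_SIZE, defined in hclge_cmd.h,
 * is 16 indirection entries per command descriptor): programming the
 * 512-entry table takes HCLGE_RSS_CFG_TBL_NUM = 512 / 16 = 32 descriptors.
 */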

#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
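
/* Worked example: with HCLGE_UMV_TBL_SIZE = 3072 shared across at most
 * HCLGE_MAX_PF_NUM = 8 PFs, each PF gets 3072 / 8 = 384 unicast MAC-VLAN
 * entries by default.
 */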

#define HCLGE_TQP_RESET_TRY_TIMES	200

#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		5
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B		6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11

#define HCLGE_GET_DFX_REG_TYPE_CNT	4

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD           64

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define PF_VPORT_ID			0

#define HCLGE_PF_ID_S			0
#define HCLGE_PF_ID_M			GENMASK(2, 0)
#define HCLGE_VF_ID_S			3
#define HCLGE_VF_ID_M			GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B		11
#define HCLGE_NETWORK_PORT_ID_S		0
#define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)
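
/* Illustrative decode of a function id built from the fields above
 * (a sketch, not a driver API): for a HOST_PORT entry,
 *	pf_id = (id & HCLGE_PF_ID_M) >> HCLGE_PF_ID_S;	// bits 2:0
 *	vf_id = (id & HCLGE_VF_ID_M) >> HCLGE_VF_ID_S;	// bits 10:3
 * with bit HCLGE_PORT_TYPE_B selecting host vs. network port, while a
 * NETWORK_PORT entry carries the port number in HCLGE_NETWORK_PORT_ID_M.
 */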

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG		0x20600
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_MISC_VECTOR_INT_STS	0x20800
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0
#define HCLGE_CORE_RESET_BIT		1
#define HCLGE_IMP_RESET_BIT		2
#define HCLGE_RESET_INT_M		GENMASK(7, 5)
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGE_VECTOR0_IMP_RESET_INT_B	1
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B	4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B	5U

#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728
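
/* HCLGE_MAC_DEFAULT_FRAME works out to 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 2 * 4 (two VLAN_HLEN tags) + 1500 (ETH_DATA_LEN) = 1526 bytes, i.e. a
 * standard-MTU frame with room for two VLAN tags and the FCS.
 */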

#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)
/* to be compatible with existing boards */
#define HCLGE_SUPPORT_40G_BIT		BIT(5)
#define HCLGE_SUPPORT_100M_BIT		BIT(6)
#define HCLGE_SUPPORT_10M_BIT		BIT(7)
#define HCLGE_SUPPORT_GE \
	(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_NIC_REGISTERED,
	HCLGE_STATE_ROCE_REGISTERED,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_LINK_UPDATING,
	HCLGE_STATE_RST_FAIL,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_ERR,
	HCLGE_VECTOR0_EVENT_OTHER,
};

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,		/* unknown */
	HCLGE_MAC_SPEED_10M	= 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M	= 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G	= 1000,		/* 1000 Mbps   = 1 Gbps */
	HCLGE_MAC_SPEED_10G	= 10000,	/* 10000 Mbps  = 10 Gbps */
	HCLGE_MAC_SPEED_25G	= 25000,	/* 25000 Mbps  = 25 Gbps */
	HCLGE_MAC_SPEED_40G	= 40000,	/* 40000 Mbps  = 40 Gbps */
	HCLGE_MAC_SPEED_50G	= 50000,	/* 50000 Mbps  = 50 Gbps */
	HCLGE_MAC_SPEED_100G	= 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

#define QUERY_SFP_SPEED		0
#define QUERY_ACTIVE_SPEED	1

struct hclge_mac {
	u8 mac_id;
	u8 phy_addr;
	u8 flag;
	u8 media_type;	/* port media type, e.g. fibre/copper/backplane */
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u8 support_autoneg;
	u8 speed_type;	/* 0: sfp speed, 1: active speed */
	u32 speed;
	u32 max_speed;
	u32 speed_ability; /* speed ability supported by current media */
	u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
	u32 fec_mode; /* active fec mode */
	u32 user_fec_mode;
	u32 fec_ability;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

enum hclge_link_fail_code {
	HCLGE_LF_NORMAL,
	HCLGE_LF_REF_CLOCK_LOST,
	HCLGE_LF_XSFP_TX_DISABLE,
	HCLGE_LF_XSFP_ABSENT,
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;      /* must be 1 for vNET-based scheduling */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* whether to allow packet drop on each TC */
	u8 pfc_en;	/* PFC enabled or not for user priority */
};

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
	u64 mac_tx_pfc_pause_pkt_num;
	u64 mac_rx_pfc_pause_pkt_num;
	u64 mac_tx_ctrl_pkt_num;
	u64 mac_rx_ctrl_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL	300UL

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
	MAX_STAGE_NUM,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet,
 * INNER_XXX indicates tuples in the inner header of a tunnel packet or
 *           the tuples of a non-tunnel packet
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length; /* in bits */
};

#define MAX_KEY_LENGTH	400
#define MAX_KEY_DWORDS	DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES	(MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH	32
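
/* The 400-bit MAX_KEY_LENGTH works out to MAX_KEY_DWORDS =
 * DIV_ROUND_UP(400 / 8, 4) = 13 dwords, i.e. MAX_KEY_BYTES = 52 bytes
 * per TCAM key.
 */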

/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM	4096
#define HCLGE_ARFS_EXPIRE_INTERVAL	5UL

enum HCLGE_FD_ACTIVE_RULE_TYPE {
	HCLGE_FD_RULE_NONE,
	HCLGE_FD_ARFS_ACTIVE,
	HCLGE_FD_EP_ACTIVE,
};

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u16 max_key_length; /* in bits */
	u32 proto_support;
	u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
	u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};

#define IPV4_INDEX	3
#define IPV6_SIZE	4
struct hclge_fd_rule_tuples {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	/* Holds both IPv4 and IPv6 addresses.
	 * An IPv4 address is stored in the last word, src/dst_ip[3].
	 */
	u32 src_ip[IPV6_SIZE];
	u32 dst_ip[IPV6_SIZE];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};
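
/* Illustrative use of IPV4_INDEX (a sketch, not the driver's exact code):
 * an IPv4 rule fills only the last word of the address arrays, e.g.
 *	tuples->src_ip[IPV4_INDEX] = ipv4_src_addr;
 * leaving src_ip[0..2] zero, while an IPv6 rule uses all IPV6_SIZE words.
 */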

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
	u16 flow_id;	/* only used for arfs */
	enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

struct hclge_vport_mac_addr_cfg {
	struct list_head node;
	int hd_tbl_status;
	u8 mac_addr[ETH_ALEN];
};

enum HCLGE_MAC_ADDR_TYPE {
	HCLGE_MAC_ADDR_UC,
	HCLGE_MAC_ADDR_MC
};

struct hclge_vport_vlan_cfg {
	struct list_head node;
	int hd_tbl_status;
	u16 vlan_id;
};

struct hclge_rst_stats {
	u32 reset_done_cnt;	/* the number of resets completed */
	u32 hw_reset_done_cnt;	/* the number of HW resets completed */
	u32 pf_rst_cnt;		/* the number of PF resets */
	u32 flr_rst_cnt;	/* the number of FLRs */
	u32 global_rst_cnt;	/* the number of GLOBAL resets */
	u32 imp_rst_cnt;	/* the number of IMP resets */
	u32 reset_cnt;		/* the number of resets */
	u32 reset_fail_cnt;	/* the number of failed resets */
};

/* time and register status when a mac tunnel interrupt occurs */
struct hclge_mac_tnl_stats {
	u64 time;
	u32 status;
};

#define HCLGE_RESET_INTERVAL	(10 * HZ)
#define HCLGE_WAIT_RESET_DONE	100

#pragma pack(1)
struct hclge_vf_vlan_cfg {
	u8 mbx_cmd;
	u8 subcode;
	u8 is_kill;
	u16 vlan;
	u16 proto;
};

#pragma pack()
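
/* With #pragma pack(1), sizeof(struct hclge_vf_vlan_cfg) is 7 bytes
 * (three u8 fields plus two u16 fields); without packing the compiler
 * would pad it to 8 bytes and the mailbox message layout would differ.
 */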

/* For each bit of a TCAM entry, a pair of 'x' and 'y' bits indicates
 * which value to match, as below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |   invalid      |
 * ----------------------------------
 * Then for an input key (k) and mask (v), the values are calculated by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
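
/* Worked example (illustrative): for a key k = 0b1010 with mask v = 0b1100
 * (the two low bits are "don't care"):
 *	calc_x(x, k, v) gives x = ~0b1010 & 0b1100 = 0b0100
 *	calc_y(y, k, v) gives y = (0b1010 ^ ~0b1100) & 0b1010 = 0b1000
 * so bit 3 matches '1', bit 2 matches '0', and bits 1-0 always hit,
 * matching the table above.
 */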

#define HCLGE_MAC_TNL_LOG_SIZE	8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_mac_stats mac_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	struct hclge_rst_stats rst_stats;
	struct semaphore reset_sem;	/* protect reset process */
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base tqp physical id of this PF */
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;		/* desc num per tx queue */
	u16 num_rx_desc;		/* desc num per rx queue */
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_nic_msi;	/* Num of nic vectors for this PF */
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list reset_timer;
	struct delayed_work service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 tx_buf_size; /* Tx buffer size for each TC */
	u32 dv_buf_size; /* Dv buffer size for each TC */

	u32 mps; /* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
	unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
	u16 hclge_fd_rule_num;
	unsigned long serv_processed_cnt;
	unsigned long last_serv_processed;
	unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
	enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
	u8 fd_en;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, the same for the PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */

	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
		      HCLGE_MAC_TNL_LOG_SIZE);

	/* affinity mask and notify for misc interrupt */
	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether to accept tag1 packets from host */
	bool accept_untag1;	/* Whether to accept untag1 packets from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether to insert inner vlan tag */
	bool insert_tag2_en;	/* Whether to insert outer vlan tag */
	u16  default_tag1;	/* The default inner vlan tag to insert */
	u16  default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	u8 rx_vlan_offload_en;	/* Whether to enable rx vlan offload */
	u8 strip_tag1_en;	/* Whether to strip inner vlan tag */
	u8 strip_tag2_en;	/* Whether to strip outer vlan tag */
	u8 vlan1_vlan_prionly;	/* Inner VLAN Tag up to descriptor Enable */
	u8 vlan2_vlan_prionly;	/* Outer VLAN Tag up to descriptor Enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vlan_info {
	u16 vlan_proto; /* so far only 802.1Q is supported */
	u16 qos;
	u16 vlan_tag;
};

struct hclge_port_base_vlan_config {
	u16 state;
	struct hclge_vlan_info vlan_info;
};

struct hclge_vf_info {
	int link_state;
	u8 mac[ETH_ALEN];
	u32 spoofchk;
	u32 max_tx_rate;
	u32 trusted;
	u16 promisc_enable;
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u32 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;

	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
	struct hclge_port_base_vlan_config port_base_vlan_cfg;
	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	u16 used_umv_num;

	u16 vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */
	struct hclge_vf_info vf_info;

	struct list_head uc_mac_list;   /* Store VF unicast table */
	struct list_head mc_mac_list;   /* Store VF multicast table */
	struct list_head vlan_list;     /* Store VF vlan table */
};

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
				struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
#endif