// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM		8

#define HCLGE_RD_FIRST_STATS_NUM        2
#define HCLGE_RD_OTHER_STATS_NUM        4

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
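/* Note: the PF configuration block is fetched HCLGE_CFG_RD_LEN_BYTES bytes per
 * command descriptor (HCLGE_CFG_RD_LEN_BYTES is defined in hclge_cmd.h), so
 * HCLGE_PF_CFG_DESC_NUM is simply the number of descriptors needed to read the
 * whole HCLGE_PF_CFG_BLOCK_SIZE-byte block.
 */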

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000
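/* Illustrative sketch only (not defined by this header): the driver is
 * expected to locate the control register of vector i at roughly
 *	HCLGE_VECTOR_REG_BASE + i * HCLGE_VECTOR_REG_OFFSET
 * while HCLGE_VECTOR_VF_OFFSET is assumed to be the register-space stride
 * between functions.
 */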

#define HCLGE_CMDQ_TX_ADDR_L_REG	0x27000
#define HCLGE_CMDQ_TX_ADDR_H_REG	0x27004
#define HCLGE_CMDQ_TX_DEPTH_REG		0x27008
#define HCLGE_CMDQ_TX_TAIL_REG		0x27010
#define HCLGE_CMDQ_TX_HEAD_REG		0x27014
#define HCLGE_CMDQ_RX_ADDR_L_REG	0x27018
#define HCLGE_CMDQ_RX_ADDR_H_REG	0x2701C
#define HCLGE_CMDQ_RX_DEPTH_REG		0x27020
#define HCLGE_CMDQ_RX_TAIL_REG		0x27024
#define HCLGE_CMDQ_RX_HEAD_REG		0x27028
#define HCLGE_CMDQ_INTR_SRC_REG		0x27100
#define HCLGE_CMDQ_INTR_STS_REG		0x27104
#define HCLGE_CMDQ_INTR_EN_REG		0x27108
#define HCLGE_CMDQ_INTR_GEN_REG		0x2710C

/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG	0x20600
#define HCLGE_RAS_OTHER_STS_REG		0x20B00
#define HCLGE_FUNC_RESET_STS_REG	0x20C00
#define HCLGE_GRO_EN_REG		0x28000

/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG	0x80000
#define HCLGE_RING_RX_ADDR_H_REG	0x80004
#define HCLGE_RING_RX_BD_NUM_REG	0x80008
#define HCLGE_RING_RX_BD_LENGTH_REG	0x8000C
#define HCLGE_RING_RX_MERGE_EN_REG	0x80014
#define HCLGE_RING_RX_TAIL_REG		0x80018
#define HCLGE_RING_RX_HEAD_REG		0x8001C
#define HCLGE_RING_RX_FBD_NUM_REG	0x80020
#define HCLGE_RING_RX_OFFSET_REG	0x80024
#define HCLGE_RING_RX_FBD_OFFSET_REG	0x80028
#define HCLGE_RING_RX_STASH_REG		0x80030
#define HCLGE_RING_RX_BD_ERR_REG	0x80034
#define HCLGE_RING_TX_ADDR_L_REG	0x80040
#define HCLGE_RING_TX_ADDR_H_REG	0x80044
#define HCLGE_RING_TX_BD_NUM_REG	0x80048
#define HCLGE_RING_TX_PRIORITY_REG	0x8004C
#define HCLGE_RING_TX_TC_REG		0x80050
#define HCLGE_RING_TX_MERGE_EN_REG	0x80054
#define HCLGE_RING_TX_TAIL_REG		0x80058
#define HCLGE_RING_TX_HEAD_REG		0x8005C
#define HCLGE_RING_TX_FBD_NUM_REG	0x80060
#define HCLGE_RING_TX_OFFSET_REG	0x80064
#define HCLGE_RING_TX_EBD_NUM_REG	0x80068
#define HCLGE_RING_TX_EBD_OFFSET_REG	0x80070
#define HCLGE_RING_TX_BD_ERR_REG	0x80074
#define HCLGE_RING_EN_REG		0x80090

/* bar registers for tqp interrupt */
#define HCLGE_TQP_INTR_CTRL_REG		0x20000
#define HCLGE_TQP_INTR_GL0_REG		0x20100
#define HCLGE_TQP_INTR_GL1_REG		0x20200
#define HCLGE_TQP_INTR_GL2_REG		0x20300
#define HCLGE_TQP_INTR_RL_REG		0x20900

#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
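/* The RSS indirection table is programmed HCLGE_RSS_CFG_TBL_SIZE entries
 * (defined in hclge_cmd.h) per command, so HCLGE_RSS_CFG_TBL_NUM is the number
 * of commands needed to cover all HCLGE_RSS_IND_TBL_SIZE entries.
 */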

#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
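/* With the defaults above this works out to 3072 / 8 = 384 unicast MAC/VLAN
 * table entries reserved per PF.
 */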

#define HCLGE_TQP_RESET_TRY_TIMES	10

#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		5
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B		6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11

/* Factors used to calculate the byte offset and bitmap bit of a VF number */
#define HCLGE_VF_NUM_PER_CMD           64
#define HCLGE_VF_NUM_PER_BYTE          8
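/* Illustrative sketch (not part of this header): a VF number maps into the
 * per-command bitmap roughly as
 *	byte = (vf_id % HCLGE_VF_NUM_PER_CMD) / HCLGE_VF_NUM_PER_BYTE;
 *	bit  = vf_id % HCLGE_VF_NUM_PER_BYTE;
 * i.e. 64 VFs per command descriptor and 8 VFs per bitmap byte.
 */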

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define HCLGE_PF_ID_S			0
#define HCLGE_PF_ID_M			GENMASK(2, 0)
#define HCLGE_VF_ID_S			3
#define HCLGE_VF_ID_M			GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B		11
#define HCLGE_NETWORK_PORT_ID_S		0
#define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG		0x20600
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_MISC_VECTOR_INT_STS	0x20800
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0
#define HCLGE_CORE_RESET_BIT		1
#define HCLGE_IMP_RESET_BIT		2
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGE_VECTOR0_IMP_RESET_INT_B	1

#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728
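/* With the standard Ethernet constants (ETH_HLEN = 14, ETH_FCS_LEN = 4,
 * VLAN_HLEN = 4, ETH_DATA_LEN = 1500), HCLGE_MAC_DEFAULT_FRAME evaluates to
 * 14 + 4 + 8 + 1500 = 1526 bytes.
 */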

#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)
#define HCLGE_SUPPORT_100M_BIT		BIT(6)
#define HCLGE_SUPPORT_10M_BIT		BIT(7)
#define HCLGE_SUPPORT_GE \
	(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_ERR,
	HCLGE_VECTOR0_EVENT_OTHER,
};

#define HCLGE_MPF_ENBALE 1

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,		/* unknown */
	HCLGE_MAC_SPEED_10M	= 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M	= 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G	= 1000,		/* 1000 Mbps   = 1 Gbps */
	HCLGE_MAC_SPEED_10G	= 10000,	/* 10000 Mbps  = 10 Gbps */
	HCLGE_MAC_SPEED_25G	= 25000,	/* 25000 Mbps  = 25 Gbps */
	HCLGE_MAC_SPEED_40G	= 40000,	/* 40000 Mbps  = 40 Gbps */
	HCLGE_MAC_SPEED_50G	= 50000,	/* 50000 Mbps  = 50 Gbps */
	HCLGE_MAC_SPEED_100G	= 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;      /* Must be 1 for vNET-based scheduling */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* Whether packet drop is allowed on this TC */
	u8 pfc_en;	/* PFC enabled or not for user priority */
};

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
	u64 mac_tx_pfc_pause_pkt_num;
	u64 mac_rx_pfc_pause_pkt_num;
	u64 mac_tx_ctrl_pkt_num;
	u64 mac_rx_ctrl_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
struct hclge_hw_stats {
	struct hclge_mac_stats      mac_stats;
	u32 stats_timer;
};

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
 * INNER_XXX indicates tuples in the tunneled header of a tunnel packet or
 *           tuples of a non-tunnel packet
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length;
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

#define MAX_KEY_LENGTH	400
#define MAX_KEY_DWORDS	DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES	(MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH	32
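/* With MAX_KEY_LENGTH = 400 bits: 400 / 8 = 50 key bytes, so
 * MAX_KEY_DWORDS = DIV_ROUND_UP(50, 4) = 13 and MAX_KEY_BYTES = 52.
 */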

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u16 max_key_length;
	u32 proto_support;
	u32 rule_num[2]; /* rule entry number */
	u16 cnt_num[2]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[2];
};

struct hclge_fd_rule_tuples {
	u8 src_mac[6];
	u8 dst_mac[6];
	u32 src_ip[4];
	u32 dst_ip[4];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

struct hclge_vport_mac_addr_cfg {
	struct list_head node;
	int hd_tbl_status;
	u8 mac_addr[ETH_ALEN];
};

enum HCLGE_MAC_ADDR_TYPE {
	HCLGE_MAC_ADDR_UC,
	HCLGE_MAC_ADDR_MC
};

struct hclge_vport_vlan_cfg {
	struct list_head node;
	int hd_tbl_status;
	u16 vlan_id;
};

/* For each bit of TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |   invalid      |
 * ----------------------------------
 * Then for input key (k) and mask (v), we can calculate the value by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
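/* Worked example (illustrative only): for one key byte k = 0xB2 (1011 0010b)
 * with mask v = 0xF0 (care about the high nibble only):
 *	x = (~k) & v     = 0x4D & 0xF0 = 0x40
 *	y = (k ^ ~v) & k = 0xBD & 0xB2 = 0xB0
 * The don't-care low-nibble bits come out as (x, y) = (0, 0) ("always hit"),
 * while the cared-for high-nibble bits are (0, 1) where k is 1 and (1, 0)
 * where k is 0, matching the table above.
 */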

#define HCLGE_VPORT_NUM 256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client reset is pending to be served */
	unsigned long reset_count;	/* number of resets that have been done */
	u32 reset_fail_cnt;
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;		/* desc num per tx queue */
	u16 num_rx_desc;		/* desc num per rx queue */
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct timer_list reset_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 tx_buf_size; /* Tx buffer size for each TC */
	u32 dv_buf_size; /* Dv buffer size for each TC */

	u32 mps; /* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	u16 hclge_fd_rule_num;
	u8 fd_en;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, the same for the PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */

	struct mutex vport_cfg_mutex;   /* Protect stored vf table */
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether to accept tag1 packets from host */
	bool accept_untag1;	/* Whether to accept untag1 packets from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether to insert the inner vlan tag */
	bool insert_tag2_en;	/* Whether to insert the outer vlan tag */
	u16  default_tag1;	/* The default inner vlan tag to insert */
	u16  default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether to strip the inner vlan tag */
	bool strip_tag2_en;	/* Whether to strip the outer vlan tag */
	bool vlan1_vlan_prionly;/* Inner VLAN tag up to descriptor enable */
	bool vlan2_vlan_prionly;/* Outer VLAN tag up to descriptor enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	u16 used_umv_num;

	int vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */

	struct list_head uc_mac_list;   /* Store VF unicast table */
	struct list_head mc_mac_list;   /* Store VF multicast table */
	struct list_head vlan_list;     /* Store VF vlan table */
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
			       bool is_write_tbl);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
#endif