// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM		8

#define HCLGE_INVALID_VPORT 0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000

#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)

#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

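/* unicast MAC VLAN (UMV) table: 3072 entries in total; divided across
 * HCLGE_MAX_PF_NUM (8) PFs this gives 384 entries per PF by default
 */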
#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)

#define HCLGE_TQP_RESET_TRY_TIMES	10

#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		5
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B		6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD           64
#define HCLGE_VF_NUM_PER_BYTE          8

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define HCLGE_PF_ID_S			0
#define HCLGE_PF_ID_M			GENMASK(2, 0)
#define HCLGE_VF_ID_S			3
#define HCLGE_VF_ID_M			GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B		11
#define HCLGE_NETWORK_PORT_ID_S		0
#define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG		0x20600
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_MISC_VECTOR_INT_STS	0x20800
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0
#define HCLGE_CORE_RESET_BIT		1
#define HCLGE_IMP_RESET_BIT		2
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bit definitions */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register (RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event (= MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGE_VECTOR0_IMP_RESET_INT_B	1

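/* default MTU-sized frame: L2 header (ETH_HLEN, 14 bytes) + FCS
 * (ETH_FCS_LEN, 4) + two VLAN tags (2 * VLAN_HLEN, 8) + payload
 * (ETH_DATA_LEN, 1500) = 1526 bytes
 */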
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728

#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_OTHER,
};

#define HCLGE_MPF_ENBALE 1

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,		/* unknown */
	HCLGE_MAC_SPEED_10M	= 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M	= 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G	= 1000,		/* 1000 Mbps   = 1 Gbps */
	HCLGE_MAC_SPEED_10G	= 10000,	/* 10000 Mbps  = 10 Gbps */
	HCLGE_MAC_SPEED_25G	= 25000,	/* 25000 Mbps  = 25 Gbps */
	HCLGE_MAC_SPEED_40G	= 40000,	/* 40000 Mbps  = 40 Gbps */
	HCLGE_MAC_SPEED_50G	= 50000,	/* 50000 Mbps  = 50 Gbps */
	HCLGE_MAC_SPEED_100G	= 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u32 speed;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;      /* It must be 1 for vNET-based scheduling */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* whether packet drop is allowed on each TC */
};

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* MAC stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
struct hclge_hw_stats {
	struct hclge_mac_stats      mac_stats;
	u32 stats_timer;
};

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
};

/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
 * INNER_XXX indicates tuples in the inner header of a tunnel packet or
 *           the tuples of a non-tunnel packet
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length;
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

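/* A flow director key is at most 400 bits wide: 400 / 8 = 50 bytes,
 * rounded up to 13 dwords, i.e. 52 bytes of key storage.
 */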
#define MAX_KEY_LENGTH	400
#define MAX_KEY_DWORDS	DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES	(MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH	32

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u8 fd_en;
	u16 max_key_length;
	u32 proto_support;
	u32 rule_num[2]; /* rule entry number */
	u16 cnt_num[2]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[2];
};

struct hclge_fd_rule_tuples {
	u8 src_mac[6];
	u8 dst_mac[6];
	u32 src_ip[4];
	u32 dst_ip[4];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

/* For each bit of a TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |   invalid      |
 * ----------------------------------
 * Then for an input key (k) and mask (v), we can calculate the value by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
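
/* For example, for a key bit k = 1 that must match with mask bit v = 1,
 * calc_x gives x = ~1 & 1 = 0 and calc_y gives y = (1 ^ ~1) & 1 = 1,
 * i.e. the "match '1'" row above; for a masked-out bit (v = 0) both
 * macros yield 0, i.e. the "always hit" row.
 */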

#define HCLGE_VPORT_NUM 256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client reset is pending to be served */
	unsigned long reset_count;	/* the number of resets that have been done */
	u32 reset_fail_cnt;
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base tqp physical id of this PF */
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_desc;
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE		1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE		2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list service_timer;
	struct timer_list reset_timer;
	struct work_struct service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 mps; /* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	u16 hclge_fd_rule_num;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, it's the same for the PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether to accept tag1 packets from host */
	bool accept_untag1;	/* Whether to accept untag1 packets from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether to insert inner vlan tag */
	bool insert_tag2_en;	/* Whether to insert outer vlan tag */
	u16  default_tag1;	/* The default inner vlan tag to insert */
	u16  default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	bool strip_tag1_en;	/* Whether to strip inner vlan tag */
	bool strip_tag2_en;	/* Whether to strip outer vlan tag */
	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8  dwrr;

	struct hclge_tx_vtag_cfg  txvlan_cfg;
	struct hclge_rx_vtag_cfg  rxvlan_cfg;

	u16 used_umv_num;

	int vport_id;
	struct hclge_dev *back;  /* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */
};

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
#endif