1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #ifndef BNXT_H
12 #define BNXT_H
13 
14 #define DRV_MODULE_NAME		"bnxt_en"
15 
16 /* DO NOT CHANGE DRV_VER_* defines
17  * FIXME: Delete them
18  */
19 #define DRV_VER_MAJ	1
20 #define DRV_VER_MIN	10
21 #define DRV_VER_UPD	1
22 
23 #include <linux/interrupt.h>
24 #include <linux/rhashtable.h>
25 #include <linux/crash_dump.h>
26 #include <net/devlink.h>
27 #include <net/dst_metadata.h>
28 #include <net/xdp.h>
29 #include <linux/dim.h>
30 #ifdef CONFIG_TEE_BNXT_FW
31 #include <linux/firmware/broadcom/tee_bnxt_fw.h>
32 #endif
33 
34 extern struct list_head bnxt_block_cb_list;
35 
36 struct page_pool;
37 
38 struct tx_bd {
39 	__le32 tx_bd_len_flags_type;
40 	#define TX_BD_TYPE					(0x3f << 0)
41 	 #define TX_BD_TYPE_SHORT_TX_BD				 (0x00 << 0)
42 	 #define TX_BD_TYPE_LONG_TX_BD				 (0x10 << 0)
43 	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
44 	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
45 	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
46 	 #define TX_BD_FLAGS_BD_CNT_SHIFT			 8
47 	#define TX_BD_FLAGS_LHINT				(3 << 13)
48 	 #define TX_BD_FLAGS_LHINT_SHIFT			 13
49 	 #define TX_BD_FLAGS_LHINT_512_AND_SMALLER		 (0 << 13)
50 	 #define TX_BD_FLAGS_LHINT_512_TO_1023			 (1 << 13)
51 	 #define TX_BD_FLAGS_LHINT_1024_TO_2047			 (2 << 13)
52 	 #define TX_BD_FLAGS_LHINT_2048_AND_LARGER		 (3 << 13)
53 	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
54 	#define TX_BD_LEN					(0xffff << 16)
55 	 #define TX_BD_LEN_SHIFT				 16
56 
57 	u32 tx_bd_opaque;
58 	__le64 tx_bd_haddr;
59 } __packed;
60 
61 struct tx_bd_ext {
62 	__le32 tx_bd_hsize_lflags;
63 	#define TX_BD_FLAGS_TCP_UDP_CHKSUM			(1 << 0)
64 	#define TX_BD_FLAGS_IP_CKSUM				(1 << 1)
65 	#define TX_BD_FLAGS_NO_CRC				(1 << 2)
66 	#define TX_BD_FLAGS_STAMP				(1 << 3)
67 	#define TX_BD_FLAGS_T_IP_CHKSUM				(1 << 4)
68 	#define TX_BD_FLAGS_LSO					(1 << 5)
69 	#define TX_BD_FLAGS_IPID_FMT				(1 << 6)
70 	#define TX_BD_FLAGS_T_IPID				(1 << 7)
71 	#define TX_BD_HSIZE					(0xff << 16)
72 	 #define TX_BD_HSIZE_SHIFT				 16
73 
74 	__le32 tx_bd_mss;
75 	__le32 tx_bd_cfa_action;
76 	#define TX_BD_CFA_ACTION				(0xffff << 16)
77 	 #define TX_BD_CFA_ACTION_SHIFT				 16
78 
79 	__le32 tx_bd_cfa_meta;
80 	#define TX_BD_CFA_META_MASK                             0xfffffff
81 	#define TX_BD_CFA_META_VID_MASK                         0xfff
82 	#define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
83 	 #define TX_BD_CFA_META_PRI_SHIFT                        12
84 	#define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
85 	 #define TX_BD_CFA_META_TPID_SHIFT                       16
86 	#define TX_BD_CFA_META_KEY                              (0xf << 28)
87 	 #define TX_BD_CFA_META_KEY_SHIFT			 28
88 	#define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
89 };
90 
91 struct rx_bd {
92 	__le32 rx_bd_len_flags_type;
93 	#define RX_BD_TYPE					(0x3f << 0)
94 	 #define RX_BD_TYPE_RX_PACKET_BD			 0x4
95 	 #define RX_BD_TYPE_RX_BUFFER_BD			 0x5
96 	 #define RX_BD_TYPE_RX_AGG_BD				 0x6
97 	 #define RX_BD_TYPE_16B_BD_SIZE				 (0 << 4)
98 	 #define RX_BD_TYPE_32B_BD_SIZE				 (1 << 4)
99 	 #define RX_BD_TYPE_48B_BD_SIZE				 (2 << 4)
100 	 #define RX_BD_TYPE_64B_BD_SIZE				 (3 << 4)
101 	#define RX_BD_FLAGS_SOP					(1 << 6)
102 	#define RX_BD_FLAGS_EOP					(1 << 7)
103 	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
104 	 #define RX_BD_FLAGS_1_BUFFER_PACKET			 (0 << 8)
105 	 #define RX_BD_FLAGS_2_BUFFER_PACKET			 (1 << 8)
106 	 #define RX_BD_FLAGS_3_BUFFER_PACKET			 (2 << 8)
107 	 #define RX_BD_FLAGS_4_BUFFER_PACKET			 (3 << 8)
108 	#define RX_BD_LEN					(0xffff << 16)
109 	 #define RX_BD_LEN_SHIFT				 16
110 
111 	u32 rx_bd_opaque;
112 	__le64 rx_bd_haddr;
113 };
114 
115 struct tx_cmp {
116 	__le32 tx_cmp_flags_type;
117 	#define CMP_TYPE					(0x3f << 0)
118 	 #define CMP_TYPE_TX_L2_CMP				 0
119 	 #define CMP_TYPE_RX_L2_CMP				 17
120 	 #define CMP_TYPE_RX_AGG_CMP				 18
121 	 #define CMP_TYPE_RX_L2_TPA_START_CMP			 19
122 	 #define CMP_TYPE_RX_L2_TPA_END_CMP			 21
123 	 #define CMP_TYPE_RX_TPA_AGG_CMP			 22
124 	 #define CMP_TYPE_STATUS_CMP				 32
125 	 #define CMP_TYPE_REMOTE_DRIVER_REQ			 34
126 	 #define CMP_TYPE_REMOTE_DRIVER_RESP			 36
127 	 #define CMP_TYPE_ERROR_STATUS				 48
128 	 #define CMPL_BASE_TYPE_STAT_EJECT			 0x1aUL
129 	 #define CMPL_BASE_TYPE_HWRM_DONE			 0x20UL
130 	 #define CMPL_BASE_TYPE_HWRM_FWD_REQ			 0x22UL
131 	 #define CMPL_BASE_TYPE_HWRM_FWD_RESP			 0x24UL
132 	 #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT		 0x2eUL
133 
134 	#define TX_CMP_FLAGS_ERROR				(1 << 6)
135 	#define TX_CMP_FLAGS_PUSH				(1 << 7)
136 
137 	u32 tx_cmp_opaque;
138 	__le32 tx_cmp_errors_v;
139 	#define TX_CMP_V					(1 << 0)
140 	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
141 	 #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		 0
142 	 #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		 2
143 	 #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG	 4
144 	 #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		 5
145 	 #define TX_CMP_ERRORS_ZERO_LENGTH_PKT			 (1 << 4)
146 	 #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			 (1 << 5)
147 	 #define TX_CMP_ERRORS_DMA_ERROR			 (1 << 6)
148 	 #define TX_CMP_ERRORS_HINT_TOO_SHORT			 (1 << 7)
149 
	__le32 tx_cmp_unused_3;
151 };
152 
153 struct rx_cmp {
154 	__le32 rx_cmp_len_flags_type;
155 	#define RX_CMP_CMP_TYPE					(0x3f << 0)
156 	#define RX_CMP_FLAGS_ERROR				(1 << 6)
157 	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
158 	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
159 	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
160 	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
161 	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
162 	 #define RX_CMP_FLAGS_ITYPE_IP				 (1 << 12)
163 	 #define RX_CMP_FLAGS_ITYPE_TCP				 (2 << 12)
164 	 #define RX_CMP_FLAGS_ITYPE_UDP				 (3 << 12)
165 	 #define RX_CMP_FLAGS_ITYPE_FCOE			 (4 << 12)
166 	 #define RX_CMP_FLAGS_ITYPE_ROCE			 (5 << 12)
167 	 #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS			 (8 << 12)
168 	 #define RX_CMP_FLAGS_ITYPE_PTP_W_TS			 (9 << 12)
169 	#define RX_CMP_LEN					(0xffff << 16)
170 	 #define RX_CMP_LEN_SHIFT				 16
171 
172 	u32 rx_cmp_opaque;
173 	__le32 rx_cmp_misc_v1;
174 	#define RX_CMP_V1					(1 << 0)
175 	#define RX_CMP_AGG_BUFS					(0x1f << 1)
176 	 #define RX_CMP_AGG_BUFS_SHIFT				 1
177 	#define RX_CMP_RSS_HASH_TYPE				(0x7f << 9)
178 	 #define RX_CMP_RSS_HASH_TYPE_SHIFT			 9
179 	#define RX_CMP_PAYLOAD_OFFSET				(0xff << 16)
180 	 #define RX_CMP_PAYLOAD_OFFSET_SHIFT			 16
181 
182 	__le32 rx_cmp_rss_hash;
183 };
184 
185 #define RX_CMP_HASH_VALID(rxcmp)				\
186 	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
187 
188 #define RSS_PROFILE_ID_MASK	0x1f
189 
190 #define RX_CMP_HASH_TYPE(rxcmp)					\
191 	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
192 	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
193 
194 struct rx_cmp_ext {
195 	__le32 rx_cmp_flags2;
196 	#define RX_CMP_FLAGS2_IP_CS_CALC			0x1
197 	#define RX_CMP_FLAGS2_L4_CS_CALC			(0x1 << 1)
198 	#define RX_CMP_FLAGS2_T_IP_CS_CALC			(0x1 << 2)
199 	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)
200 	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)
201 	__le32 rx_cmp_meta_data;
202 	#define RX_CMP_FLAGS2_METADATA_TCI_MASK			0xffff
203 	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff
204 	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000
205 	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16
206 	__le32 rx_cmp_cfa_code_errors_v2;
207 	#define RX_CMP_V					(1 << 0)
208 	#define RX_CMPL_ERRORS_MASK				(0x7fff << 1)
209 	 #define RX_CMPL_ERRORS_SFT				 1
210 	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK		(0x7 << 1)
211 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER		 (0x0 << 1)
212 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT	 (0x1 << 1)
213 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
214 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT		 (0x3 << 1)
215 	#define RX_CMPL_ERRORS_IP_CS_ERROR			(0x1 << 4)
216 	#define RX_CMPL_ERRORS_L4_CS_ERROR			(0x1 << 5)
217 	#define RX_CMPL_ERRORS_T_IP_CS_ERROR			(0x1 << 6)
218 	#define RX_CMPL_ERRORS_T_L4_CS_ERROR			(0x1 << 7)
219 	#define RX_CMPL_ERRORS_CRC_ERROR			(0x1 << 8)
220 	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK			(0x7 << 9)
221 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR		 (0x0 << 9)
222 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION	 (0x1 << 9)
223 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN	 (0x2 << 9)
224 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR	 (0x3 << 9)
225 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR	 (0x4 << 9)
226 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR	 (0x5 << 9)
227 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL	 (0x6 << 9)
228 	#define RX_CMPL_ERRORS_PKT_ERROR_MASK			(0xf << 12)
229 	 #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR		 (0x0 << 12)
230 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION	 (0x1 << 12)
231 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN	 (0x2 << 12)
232 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL		 (0x3 << 12)
233 	 #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR	 (0x4 << 12)
234 	 #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR	 (0x5 << 12)
235 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN	 (0x6 << 12)
236 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
237 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN	 (0x8 << 12)
238 
239 	#define RX_CMPL_CFA_CODE_MASK				(0xffff << 16)
240 	 #define RX_CMPL_CFA_CODE_SFT				 16
241 
242 	__le32 rx_cmp_unused3;
243 };
244 
245 #define RX_CMP_L2_ERRORS						\
246 	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
247 
248 #define RX_CMP_L4_CS_BITS						\
249 	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
250 
251 #define RX_CMP_L4_CS_ERR_BITS						\
252 	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
253 
254 #define RX_CMP_L4_CS_OK(rxcmp1)						\
255 	    (((rxcmp1)->rx_cmp_flags2 &	RX_CMP_L4_CS_BITS) &&		\
256 	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
257 
258 #define RX_CMP_ENCAP(rxcmp1)						\
259 	    ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &			\
260 	     RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
261 
262 #define RX_CMP_CFA_CODE(rxcmpl1)					\
263 	((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) &		\
264 	  RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
265 
266 struct rx_agg_cmp {
267 	__le32 rx_agg_cmp_len_flags_type;
268 	#define RX_AGG_CMP_TYPE					(0x3f << 0)
269 	#define RX_AGG_CMP_LEN					(0xffff << 16)
270 	 #define RX_AGG_CMP_LEN_SHIFT				 16
271 	u32 rx_agg_cmp_opaque;
272 	__le32 rx_agg_cmp_v;
273 	#define RX_AGG_CMP_V					(1 << 0)
274 	#define RX_AGG_CMP_AGG_ID				(0xffff << 16)
275 	 #define RX_AGG_CMP_AGG_ID_SHIFT			 16
276 	__le32 rx_agg_cmp_unused;
277 };
278 
279 #define TPA_AGG_AGG_ID(rx_agg)				\
280 	((le32_to_cpu((rx_agg)->rx_agg_cmp_v) &		\
281 	 RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
282 
283 struct rx_tpa_start_cmp {
284 	__le32 rx_tpa_start_cmp_len_flags_type;
285 	#define RX_TPA_START_CMP_TYPE				(0x3f << 0)
286 	#define RX_TPA_START_CMP_FLAGS				(0x3ff << 6)
287 	 #define RX_TPA_START_CMP_FLAGS_SHIFT			 6
288 	#define RX_TPA_START_CMP_FLAGS_ERROR			(0x1 << 6)
289 	#define RX_TPA_START_CMP_FLAGS_PLACEMENT		(0x7 << 7)
290 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT		 7
291 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
292 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
293 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
294 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS	 (0x6 << 7)
295 	#define RX_TPA_START_CMP_FLAGS_RSS_VALID		(0x1 << 10)
296 	#define RX_TPA_START_CMP_FLAGS_TIMESTAMP		(0x1 << 11)
297 	#define RX_TPA_START_CMP_FLAGS_ITYPES			(0xf << 12)
298 	 #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT		 12
299 	 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP		 (0x2 << 12)
300 	#define RX_TPA_START_CMP_LEN				(0xffff << 16)
301 	 #define RX_TPA_START_CMP_LEN_SHIFT			 16
302 
303 	u32 rx_tpa_start_cmp_opaque;
304 	__le32 rx_tpa_start_cmp_misc_v1;
305 	#define RX_TPA_START_CMP_V1				(0x1 << 0)
306 	#define RX_TPA_START_CMP_RSS_HASH_TYPE			(0x7f << 9)
307 	 #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT		 9
308 	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
309 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
310 	#define RX_TPA_START_CMP_AGG_ID_P5			(0xffff << 16)
311 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5		 16
312 
313 	__le32 rx_tpa_start_cmp_rss_hash;
314 };
315 
316 #define TPA_START_HASH_VALID(rx_tpa_start)				\
317 	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
318 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
319 
320 #define TPA_START_HASH_TYPE(rx_tpa_start)				\
321 	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
322 	   RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
323 	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
324 
325 #define TPA_START_AGG_ID(rx_tpa_start)					\
326 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
327 	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
328 
329 #define TPA_START_AGG_ID_P5(rx_tpa_start)				\
330 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
331 	 RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
332 
333 #define TPA_START_ERROR(rx_tpa_start)					\
334 	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
335 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
336 
337 struct rx_tpa_start_cmp_ext {
338 	__le32 rx_tpa_start_cmp_flags2;
339 	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC		(0x1 << 0)
340 	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC		(0x1 << 1)
341 	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC		(0x1 << 2)
342 	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
343 	#define RX_TPA_START_CMP_FLAGS2_IP_TYPE			(0x1 << 8)
344 	#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID		(0x1 << 9)
345 	#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT		(0x3 << 10)
346 	 #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT	 10
347 	#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL		(0xffff << 16)
348 	 #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT	 16
349 
350 	__le32 rx_tpa_start_cmp_metadata;
351 	__le32 rx_tpa_start_cmp_cfa_code_v2;
352 	#define RX_TPA_START_CMP_V2				(0x1 << 0)
353 	#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK	(0x7 << 1)
354 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT	 1
355 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER	 (0x0 << 1)
356 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
357 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH	 (0x5 << 1)
358 	#define RX_TPA_START_CMP_CFA_CODE			(0xffff << 16)
359 	 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT		 16
360 	__le32 rx_tpa_start_cmp_hdr_info;
361 };
362 
363 #define TPA_START_CFA_CODE(rx_tpa_start)				\
364 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
365 	 RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
366 
367 #define TPA_START_IS_IPV6(rx_tpa_start)				\
368 	(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &		\
369 	    cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
370 
371 #define TPA_START_ERROR_CODE(rx_tpa_start)				\
372 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
373 	  RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >>			\
374 	 RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
375 
376 struct rx_tpa_end_cmp {
377 	__le32 rx_tpa_end_cmp_len_flags_type;
378 	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
379 	#define RX_TPA_END_CMP_FLAGS				(0x3ff << 6)
380 	 #define RX_TPA_END_CMP_FLAGS_SHIFT			 6
381 	#define RX_TPA_END_CMP_FLAGS_PLACEMENT			(0x7 << 7)
382 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT		 7
383 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
384 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
385 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
386 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS		 (0x6 << 7)
387 	#define RX_TPA_END_CMP_FLAGS_RSS_VALID			(0x1 << 10)
388 	#define RX_TPA_END_CMP_FLAGS_ITYPES			(0xf << 12)
389 	 #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT		 12
390 	 #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP			 (0x2 << 12)
391 	#define RX_TPA_END_CMP_LEN				(0xffff << 16)
392 	 #define RX_TPA_END_CMP_LEN_SHIFT			 16
393 
394 	u32 rx_tpa_end_cmp_opaque;
395 	__le32 rx_tpa_end_cmp_misc_v1;
396 	#define RX_TPA_END_CMP_V1				(0x1 << 0)
397 	#define RX_TPA_END_CMP_AGG_BUFS				(0x3f << 1)
398 	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT			 1
399 	#define RX_TPA_END_CMP_TPA_SEGS				(0xff << 8)
400 	 #define RX_TPA_END_CMP_TPA_SEGS_SHIFT			 8
401 	#define RX_TPA_END_CMP_PAYLOAD_OFFSET			(0xff << 16)
402 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
403 	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
404 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
405 	#define RX_TPA_END_CMP_AGG_ID_P5			(0xffff << 16)
406 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5			 16
407 
408 	__le32 rx_tpa_end_cmp_tsdelta;
409 	#define RX_TPA_END_GRO_TS				(0x1 << 31)
410 };
411 
412 #define TPA_END_AGG_ID(rx_tpa_end)					\
413 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
414 	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
415 
416 #define TPA_END_AGG_ID_P5(rx_tpa_end)					\
417 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
418 	 RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
419 
420 #define TPA_END_PAYLOAD_OFF(rx_tpa_end)					\
421 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
422 	 RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
423 
424 #define TPA_END_AGG_BUFS(rx_tpa_end)					\
425 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
426 	 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
427 
428 #define TPA_END_TPA_SEGS(rx_tpa_end)					\
429 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
430 	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
431 
432 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
433 	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &		\
434 		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
435 
436 #define TPA_END_GRO(rx_tpa_end)						\
437 	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
438 	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
439 
440 #define TPA_END_GRO_TS(rx_tpa_end)					\
441 	(!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &			\
442 	    cpu_to_le32(RX_TPA_END_GRO_TS)))
443 
444 struct rx_tpa_end_cmp_ext {
445 	__le32 rx_tpa_end_cmp_dup_acks;
446 	#define RX_TPA_END_CMP_TPA_DUP_ACKS			(0xf << 0)
447 	#define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5		(0xff << 16)
448 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5		 16
449 	#define RX_TPA_END_CMP_AGG_BUFS_P5			(0xff << 24)
450 	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5		 24
451 
452 	__le32 rx_tpa_end_cmp_seg_len;
453 	#define RX_TPA_END_CMP_TPA_SEG_LEN			(0xffff << 0)
454 
455 	__le32 rx_tpa_end_cmp_errors_v2;
456 	#define RX_TPA_END_CMP_V2				(0x1 << 0)
457 	#define RX_TPA_END_CMP_ERRORS				(0x3 << 1)
458 	#define RX_TPA_END_CMP_ERRORS_P5			(0x7 << 1)
459 	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
460 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER	 (0x0 << 1)
461 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
462 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT	 (0x3 << 1)
463 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR	 (0x4 << 1)
464 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH	 (0x5 << 1)
465 
466 	u32 rx_tpa_end_cmp_start_opaque;
467 };
468 
469 #define TPA_END_ERRORS(rx_tpa_end_ext)					\
470 	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &			\
471 	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
472 
473 #define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext)				\
474 	((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &	\
475 	 RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >>				\
476 	RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
477 
478 #define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext)				\
479 	((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &	\
480 	 RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
481 
482 #define EVENT_DATA1_RESET_NOTIFY_FATAL(data1)				\
483 	(((data1) &							\
484 	  ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
485 	 ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
486 
487 #define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1)				\
488 	!!((data1) &							\
489 	   ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
490 
491 #define EVENT_DATA1_RECOVERY_ENABLED(data1)				\
492 	!!((data1) &							\
493 	   ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
494 
495 struct nqe_cn {
496 	__le16	type;
497 	#define NQ_CN_TYPE_MASK           0x3fUL
498 	#define NQ_CN_TYPE_SFT            0
499 	#define NQ_CN_TYPE_CQ_NOTIFICATION  0x30UL
500 	#define NQ_CN_TYPE_LAST            NQ_CN_TYPE_CQ_NOTIFICATION
501 	__le16	reserved16;
502 	__le32	cq_handle_low;
503 	__le32	v;
504 	#define NQ_CN_V     0x1UL
505 	__le32	cq_handle_high;
506 };
507 
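/* Legacy 32-bit doorbell format: bits 0-23 carry the ring index, bit 26
 * marks the index as valid, bit 27 disables the IRQ and bits 28-31 hold
 * the doorbell key selecting the ring type.
 */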
508 #define DB_IDX_MASK						0xffffff
509 #define DB_IDX_VALID						(0x1 << 26)
510 #define DB_IRQ_DIS						(0x1 << 27)
511 #define DB_KEY_TX						(0x0 << 28)
512 #define DB_KEY_RX						(0x1 << 28)
513 #define DB_KEY_CP						(0x2 << 28)
514 #define DB_KEY_ST						(0x3 << 28)
515 #define DB_KEY_TX_PUSH						(0x4 << 28)
516 #define DB_LONG_TX_PUSH						(0x2 << 24)
517 
518 #define BNXT_MIN_ROCE_CP_RINGS	2
519 #define BNXT_MIN_ROCE_STAT_CTXS	1
520 
521 /* 64-bit doorbell */
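/* Bits 0-23 carry the ring index, bits 32-51 the XID, bit 56 selects the
 * L2 path and bits 60-63 encode the doorbell type (DBR_TYPE_*).
 */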
522 #define DBR_INDEX_MASK					0x0000000000ffffffULL
523 #define DBR_XID_MASK					0x000fffff00000000ULL
524 #define DBR_XID_SFT					32
525 #define DBR_PATH_L2					(0x1ULL << 56)
526 #define DBR_TYPE_SQ					(0x0ULL << 60)
527 #define DBR_TYPE_RQ					(0x1ULL << 60)
528 #define DBR_TYPE_SRQ					(0x2ULL << 60)
529 #define DBR_TYPE_SRQ_ARM				(0x3ULL << 60)
530 #define DBR_TYPE_CQ					(0x4ULL << 60)
531 #define DBR_TYPE_CQ_ARMSE				(0x5ULL << 60)
532 #define DBR_TYPE_CQ_ARMALL				(0x6ULL << 60)
533 #define DBR_TYPE_CQ_ARMENA				(0x7ULL << 60)
534 #define DBR_TYPE_SRQ_ARMENA				(0x8ULL << 60)
535 #define DBR_TYPE_CQ_CUTOFF_ACK				(0x9ULL << 60)
536 #define DBR_TYPE_NQ					(0xaULL << 60)
537 #define DBR_TYPE_NQ_ARM					(0xbULL << 60)
538 #define DBR_TYPE_NULL					(0xfULL << 60)
539 
540 #define DB_PF_OFFSET_P5					0x10000
541 #define DB_VF_OFFSET_P5					0x4000
542 
543 #define INVALID_HW_RING_ID	((u16)-1)
544 
545 /* The hardware supports certain page sizes.  Use the supported page sizes
546  * to allocate the rings.
547  */
548 #if (PAGE_SHIFT < 12)
549 #define BNXT_PAGE_SHIFT	12
550 #elif (PAGE_SHIFT <= 13)
551 #define BNXT_PAGE_SHIFT	PAGE_SHIFT
552 #elif (PAGE_SHIFT < 16)
553 #define BNXT_PAGE_SHIFT	13
554 #else
555 #define BNXT_PAGE_SHIFT	16
556 #endif
557 
558 #define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
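/* e.g. 4K and 8K kernel pages map 1:1, 16K and 32K pages fall back to an
 * 8K ring page, and 64K (or larger) pages use the 64K maximum.
 */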
559 
560 /* The RXBD length is 16-bit so we can only support page sizes < 64K */
561 #if (PAGE_SHIFT > 15)
562 #define BNXT_RX_PAGE_SHIFT 15
563 #else
564 #define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
565 #endif
566 
567 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
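/* e.g. with 64K kernel pages the RX buffer size is capped at 32K so that
 * the length still fits in the 16-bit RXBD length field.
 */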
568 
569 #define BNXT_MAX_MTU		9500
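/* In RX page mode (XDP) each buffer is a single page, so the largest MTU is
 * one page minus the XDP headroom, NET_IP_ALIGN pad and VLAN Ethernet header.
 */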
570 #define BNXT_MAX_PAGE_MODE_MTU	\
571 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
572 	 XDP_PACKET_HEADROOM)
573 
574 #define BNXT_MIN_PKT_SIZE	52
575 
576 #define BNXT_DEFAULT_RX_RING_SIZE	511
577 #define BNXT_DEFAULT_TX_RING_SIZE	511
578 
579 #define MAX_TPA		64
580 #define MAX_TPA_P5	256
581 #define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)
582 #define MAX_TPA_SEGS_P5	0x3f
583 
584 #if (BNXT_PAGE_SHIFT == 16)
585 #define MAX_RX_PAGES	1
586 #define MAX_RX_AGG_PAGES	4
587 #define MAX_TX_PAGES	1
588 #define MAX_CP_PAGES	8
589 #else
590 #define MAX_RX_PAGES	8
591 #define MAX_RX_AGG_PAGES	32
592 #define MAX_TX_PAGES	8
593 #define MAX_CP_PAGES	64
594 #endif
595 
596 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
597 #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
598 #define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
599 
600 #define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
601 #define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
602 
603 #define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
604 
605 #define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
606 #define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
607 
608 #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
609 
610 #define BNXT_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
611 #define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
612 #define BNXT_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)
613 
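/* All BD and completion descriptors are 16 bytes, so each ring page holds
 * 2^(BNXT_PAGE_SHIFT - 4) entries.  The *_RING() macros below return the
 * page index for a software ring index and *_IDX() the entry within it.
 */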
614 #define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
615 #define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))
616 
617 #define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
618 #define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))
619 
620 #define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
621 #define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))
622 
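/* Completion and NQ entries carry a valid bit that the producer toggles on
 * each pass through the ring.  The raw consumer index is kept unmasked, so
 * the *_CMP_VALID() macros can compare an entry's V bit against the phase
 * implied by (raw_cons & bp->cp_bit) to decide whether the entry is new.
 */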
623 #define TX_CMP_VALID(txcmp, raw_cons)					\
624 	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
625 	 !((raw_cons) & bp->cp_bit))
626 
627 #define RX_CMP_VALID(rxcmp1, raw_cons)					\
628 	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
629 	 !((raw_cons) & bp->cp_bit))
630 
631 #define RX_AGG_CMP_VALID(agg, raw_cons)				\
632 	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==	\
633 	 !((raw_cons) & bp->cp_bit))
634 
635 #define NQ_CMP_VALID(nqcmp, raw_cons)				\
636 	(!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
637 
638 #define TX_CMP_TYPE(txcmp)					\
639 	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
640 
641 #define RX_CMP_TYPE(rxcmp)					\
642 	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
643 
644 #define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
645 
646 #define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
647 
648 #define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
649 
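/* ADV_RAW_CMP()/NEXT_RAW_CMP() advance the unmasked raw consumer so the
 * wrap phase used by the validity checks above is preserved; RING_CMP()
 * masks the raw value down to an actual ring entry when it is accessed.
 */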
650 #define ADV_RAW_CMP(idx, n)	((idx) + (n))
651 #define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
652 #define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
653 #define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
654 
655 #define BNXT_HWRM_MAX_REQ_LEN		(bp->hwrm_max_req_len)
656 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
657 #define DFLT_HWRM_CMD_TIMEOUT		500
658 #define SHORT_HWRM_CMD_TIMEOUT		20
659 #define HWRM_CMD_TIMEOUT		(bp->hwrm_cmd_timeout)
660 #define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
661 #define HWRM_COREDUMP_TIMEOUT		((HWRM_CMD_TIMEOUT) * 12)
662 #define BNXT_HWRM_REQ_MAX_SIZE		128
663 #define BNXT_HWRM_REQS_PER_PAGE		(BNXT_PAGE_SIZE /	\
664 					 BNXT_HWRM_REQ_MAX_SIZE)
665 #define HWRM_SHORT_MIN_TIMEOUT		3
666 #define HWRM_SHORT_MAX_TIMEOUT		10
667 #define HWRM_SHORT_TIMEOUT_COUNTER	5
668 
669 #define HWRM_MIN_TIMEOUT		25
670 #define HWRM_MAX_TIMEOUT		40
671 
672 #define HWRM_TOTAL_TIMEOUT(n)	(((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ?	\
673 	((n) * HWRM_SHORT_MIN_TIMEOUT) :				\
674 	(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +		\
675 	 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
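/* e.g. with the values above, HWRM_TOTAL_TIMEOUT(10) = 5 * 3 + 5 * 25 = 140:
 * the first HWRM_SHORT_TIMEOUT_COUNTER polls use the short delay and the
 * remaining polls use HWRM_MIN_TIMEOUT.
 */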
676 
677 #define HWRM_VALID_BIT_DELAY_USEC	150
678 
679 #define BNXT_HWRM_CHNL_CHIMP	0
680 #define BNXT_HWRM_CHNL_KONG	1
681 
682 #define BNXT_RX_EVENT		1
683 #define BNXT_AGG_EVENT		2
684 #define BNXT_TX_EVENT		4
685 #define BNXT_REDIRECT_EVENT	8
686 
687 struct bnxt_sw_tx_bd {
688 	union {
689 		struct sk_buff		*skb;
690 		struct xdp_frame	*xdpf;
691 	};
692 	DEFINE_DMA_UNMAP_ADDR(mapping);
693 	DEFINE_DMA_UNMAP_LEN(len);
694 	u8			is_gso;
695 	u8			is_push;
696 	u8			action;
697 	union {
698 		unsigned short		nr_frags;
699 		u16			rx_prod;
700 	};
701 };
702 
703 struct bnxt_sw_rx_bd {
704 	void			*data;
705 	u8			*data_ptr;
706 	dma_addr_t		mapping;
707 };
708 
709 struct bnxt_sw_rx_agg_bd {
710 	struct page		*page;
711 	unsigned int		offset;
712 	dma_addr_t		mapping;
713 };
714 
715 struct bnxt_ring_mem_info {
716 	int			nr_pages;
717 	int			page_size;
718 	u16			flags;
719 #define BNXT_RMEM_VALID_PTE_FLAG	1
720 #define BNXT_RMEM_RING_PTE_FLAG		2
721 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
722 
723 	u16			depth;
724 	u8			init_val;
725 
726 	void			**pg_arr;
727 	dma_addr_t		*dma_arr;
728 
729 	__le64			*pg_tbl;
730 	dma_addr_t		pg_tbl_map;
731 
732 	int			vmem_size;
733 	void			**vmem;
734 };
735 
736 struct bnxt_ring_struct {
737 	struct bnxt_ring_mem_info	ring_mem;
738 
739 	u16			fw_ring_id; /* Ring id filled by Chimp FW */
740 	union {
741 		u16		grp_idx;
742 		u16		map_idx; /* Used by cmpl rings */
743 	};
744 	u32			handle;
745 	u8			queue_id;
746 };
747 
748 struct tx_push_bd {
749 	__le32			doorbell;
750 	__le32			tx_bd_len_flags_type;
751 	u32			tx_bd_opaque;
752 	struct tx_bd_ext	txbd2;
753 };
754 
755 struct tx_push_buffer {
756 	struct tx_push_bd	push_bd;
757 	u32			data[25];
758 };
759 
760 struct bnxt_db_info {
761 	void __iomem		*doorbell;
762 	union {
763 		u64		db_key64;
764 		u32		db_key32;
765 	};
766 };
767 
768 struct bnxt_tx_ring_info {
769 	struct bnxt_napi	*bnapi;
770 	u16			tx_prod;
771 	u16			tx_cons;
772 	u16			txq_index;
773 	struct bnxt_db_info	tx_db;
774 
775 	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];
776 	struct bnxt_sw_tx_bd	*tx_buf_ring;
777 
778 	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];
779 
780 	struct tx_push_buffer	*tx_push;
781 	dma_addr_t		tx_push_mapping;
782 	__le64			data_mapping;
783 
784 #define BNXT_DEV_STATE_CLOSING	0x1
785 	u32			dev_state;
786 
787 	struct bnxt_ring_struct	tx_ring_struct;
788 };
789 
790 #define BNXT_LEGACY_COAL_CMPL_PARAMS					\
791 	(RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN |		\
792 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX |		\
793 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET |		\
794 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE |			\
795 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR |		\
796 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
797 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR |		\
798 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
799 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
800 
801 #define BNXT_COAL_CMPL_ENABLES						\
802 	(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
803 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
804 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
805 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
806 
807 #define BNXT_COAL_CMPL_MIN_TMR_ENABLE					\
808 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
809 
810 #define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE			\
811 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
812 
813 struct bnxt_coal_cap {
814 	u32			cmpl_params;
815 	u32			nq_params;
816 	u16			num_cmpl_dma_aggr_max;
817 	u16			num_cmpl_dma_aggr_during_int_max;
818 	u16			cmpl_aggr_dma_tmr_max;
819 	u16			cmpl_aggr_dma_tmr_during_int_max;
820 	u16			int_lat_tmr_min_max;
821 	u16			int_lat_tmr_max_max;
822 	u16			num_cmpl_aggr_int_max;
823 	u16			timer_units;
824 };
825 
826 struct bnxt_coal {
827 	u16			coal_ticks;
828 	u16			coal_ticks_irq;
829 	u16			coal_bufs;
830 	u16			coal_bufs_irq;
831 			/* RING_IDLE enabled when coal ticks < idle_thresh  */
832 	u16			idle_thresh;
833 	u8			bufs_per_record;
834 	u8			budget;
835 };
836 
837 struct bnxt_tpa_info {
838 	void			*data;
839 	u8			*data_ptr;
840 	dma_addr_t		mapping;
841 	u16			len;
842 	unsigned short		gso_type;
843 	u32			flags2;
844 	u32			metadata;
845 	enum pkt_hash_types	hash_type;
846 	u32			rss_hash;
847 	u32			hdr_info;
848 
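/* hdr_info from the TPA start completion packs the header layout:
 * bits 0-8 outer L3 offset, bits 9-17 inner L2 offset, bits 18-26 inner L3
 * offset and bits 27-31 the L4 header size (0 is treated as 32).
 */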
849 #define BNXT_TPA_L4_SIZE(hdr_info)	\
850 	(((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
851 
852 #define BNXT_TPA_INNER_L3_OFF(hdr_info)	\
853 	(((hdr_info) >> 18) & 0x1ff)
854 
855 #define BNXT_TPA_INNER_L2_OFF(hdr_info)	\
856 	(((hdr_info) >> 9) & 0x1ff)
857 
858 #define BNXT_TPA_OUTER_L3_OFF(hdr_info)	\
859 	((hdr_info) & 0x1ff)
860 
861 	u16			cfa_code; /* cfa_code in TPA start compl */
862 	u8			agg_count;
863 	struct rx_agg_cmp	*agg_arr;
864 };
865 
866 #define BNXT_AGG_IDX_BMAP_SIZE	(MAX_TPA_P5 / BITS_PER_LONG)
867 
868 struct bnxt_tpa_idx_map {
869 	u16		agg_id_tbl[1024];
870 	unsigned long	agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
871 };
872 
873 struct bnxt_rx_ring_info {
874 	struct bnxt_napi	*bnapi;
875 	u16			rx_prod;
876 	u16			rx_agg_prod;
877 	u16			rx_sw_agg_prod;
878 	u16			rx_next_cons;
879 	struct bnxt_db_info	rx_db;
880 	struct bnxt_db_info	rx_agg_db;
881 
882 	struct bpf_prog		*xdp_prog;
883 
884 	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];
885 	struct bnxt_sw_rx_bd	*rx_buf_ring;
886 
887 	struct rx_bd		*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
888 	struct bnxt_sw_rx_agg_bd	*rx_agg_ring;
889 
890 	unsigned long		*rx_agg_bmap;
891 	u16			rx_agg_bmap_size;
892 
893 	struct page		*rx_page;
894 	unsigned int		rx_page_offset;
895 
896 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
897 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
898 
899 	struct bnxt_tpa_info	*rx_tpa;
900 	struct bnxt_tpa_idx_map *rx_tpa_idx_map;
901 
902 	struct bnxt_ring_struct	rx_ring_struct;
903 	struct bnxt_ring_struct	rx_agg_ring_struct;
904 	struct xdp_rxq_info	xdp_rxq;
905 	struct page_pool	*page_pool;
906 };
907 
908 struct bnxt_rx_sw_stats {
909 	u64			rx_l4_csum_errors;
910 	u64			rx_buf_errors;
911 };
912 
913 struct bnxt_cmn_sw_stats {
914 	u64			missed_irqs;
915 };
916 
917 struct bnxt_sw_stats {
918 	struct bnxt_rx_sw_stats rx;
919 	struct bnxt_cmn_sw_stats cmn;
920 };
921 
922 struct bnxt_cp_ring_info {
923 	struct bnxt_napi	*bnapi;
924 	u32			cp_raw_cons;
925 	struct bnxt_db_info	cp_db;
926 
927 	u8			had_work_done:1;
928 	u8			has_more_work:1;
929 
930 	u32			last_cp_raw_cons;
931 
932 	struct bnxt_coal	rx_ring_coal;
933 	u64			rx_packets;
934 	u64			rx_bytes;
935 	u64			event_ctr;
936 
937 	struct dim		dim;
938 
939 	union {
940 		struct tx_cmp	*cp_desc_ring[MAX_CP_PAGES];
941 		struct nqe_cn	*nq_desc_ring[MAX_CP_PAGES];
942 	};
943 
944 	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
945 
946 	struct ctx_hw_stats	*hw_stats;
947 	dma_addr_t		hw_stats_map;
948 	u32			hw_stats_ctx_id;
949 
950 	struct bnxt_sw_stats	sw_stats;
951 
952 	struct bnxt_ring_struct	cp_ring_struct;
953 
954 	struct bnxt_cp_ring_info *cp_ring_arr[2];
955 #define BNXT_RX_HDL	0
956 #define BNXT_TX_HDL	1
957 };
958 
959 struct bnxt_napi {
960 	struct napi_struct	napi;
961 	struct bnxt		*bp;
962 
963 	int			index;
964 	struct bnxt_cp_ring_info	cp_ring;
965 	struct bnxt_rx_ring_info	*rx_ring;
966 	struct bnxt_tx_ring_info	*tx_ring;
967 
968 	void			(*tx_int)(struct bnxt *, struct bnxt_napi *,
969 					  int);
970 	int			tx_pkts;
971 	u8			events;
972 
973 	u32			flags;
974 #define BNXT_NAPI_FLAG_XDP	0x1
975 
976 	bool			in_reset;
977 };
978 
979 struct bnxt_irq {
980 	irq_handler_t	handler;
981 	unsigned int	vector;
982 	u8		requested:1;
983 	u8		have_cpumask:1;
984 	char		name[IFNAMSIZ + 2];
985 	cpumask_var_t	cpu_mask;
986 };
987 
988 #define HWRM_RING_ALLOC_TX	0x1
989 #define HWRM_RING_ALLOC_RX	0x2
990 #define HWRM_RING_ALLOC_AGG	0x4
991 #define HWRM_RING_ALLOC_CMPL	0x8
992 #define HWRM_RING_ALLOC_NQ	0x10
993 
994 #define INVALID_STATS_CTX_ID	-1
995 
996 struct bnxt_ring_grp_info {
997 	u16	fw_stats_ctx;
998 	u16	fw_grp_id;
999 	u16	rx_fw_ring_id;
1000 	u16	agg_fw_ring_id;
1001 	u16	cp_fw_ring_id;
1002 };
1003 
1004 struct bnxt_vnic_info {
1005 	u16		fw_vnic_id; /* returned by Chimp during alloc */
1006 #define BNXT_MAX_CTX_PER_VNIC	8
1007 	u16		fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
1008 	u16		fw_l2_ctx_id;
1009 #define BNXT_MAX_UC_ADDRS	4
1010 	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
1011 				/* index 0 always dev_addr */
1012 	u16		uc_filter_count;
1013 	u8		*uc_list;
1014 
1015 	u16		*fw_grp_ids;
1016 	dma_addr_t	rss_table_dma_addr;
1017 	__le16		*rss_table;
1018 	dma_addr_t	rss_hash_key_dma_addr;
1019 	u64		*rss_hash_key;
1020 	u32		rx_mask;
1021 
1022 	u8		*mc_list;
1023 	int		mc_list_size;
1024 	int		mc_list_count;
1025 	dma_addr_t	mc_list_mapping;
1026 #define BNXT_MAX_MC_ADDRS	16
1027 
1028 	u32		flags;
1029 #define BNXT_VNIC_RSS_FLAG	1
1030 #define BNXT_VNIC_RFS_FLAG	2
1031 #define BNXT_VNIC_MCAST_FLAG	4
1032 #define BNXT_VNIC_UCAST_FLAG	8
1033 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
1034 };
1035 
1036 struct bnxt_hw_resc {
1037 	u16	min_rsscos_ctxs;
1038 	u16	max_rsscos_ctxs;
1039 	u16	min_cp_rings;
1040 	u16	max_cp_rings;
1041 	u16	resv_cp_rings;
1042 	u16	min_tx_rings;
1043 	u16	max_tx_rings;
1044 	u16	resv_tx_rings;
1045 	u16	max_tx_sch_inputs;
1046 	u16	min_rx_rings;
1047 	u16	max_rx_rings;
1048 	u16	resv_rx_rings;
1049 	u16	min_hw_ring_grps;
1050 	u16	max_hw_ring_grps;
1051 	u16	resv_hw_ring_grps;
1052 	u16	min_l2_ctxs;
1053 	u16	max_l2_ctxs;
1054 	u16	min_vnics;
1055 	u16	max_vnics;
1056 	u16	resv_vnics;
1057 	u16	min_stat_ctxs;
1058 	u16	max_stat_ctxs;
1059 	u16	resv_stat_ctxs;
1060 	u16	max_nqs;
1061 	u16	max_irqs;
1062 	u16	resv_irqs;
1063 };
1064 
1065 #if defined(CONFIG_BNXT_SRIOV)
1066 struct bnxt_vf_info {
1067 	u16	fw_fid;
1068 	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
1069 	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
1070 					 * stored by PF.
1071 					 */
1072 	u16	vlan;
1073 	u16	func_qcfg_flags;
1074 	u32	flags;
1075 #define BNXT_VF_QOS		0x1
1076 #define BNXT_VF_SPOOFCHK	0x2
1077 #define BNXT_VF_LINK_FORCED	0x4
1078 #define BNXT_VF_LINK_UP		0x8
1079 #define BNXT_VF_TRUST		0x10
1080 	u32	min_tx_rate;
1081 	u32	max_tx_rate;
1082 	void	*hwrm_cmd_req_addr;
1083 	dma_addr_t	hwrm_cmd_req_dma_addr;
1084 };
1085 #endif
1086 
1087 struct bnxt_pf_info {
1088 #define BNXT_FIRST_PF_FID	1
1089 #define BNXT_FIRST_VF_FID	128
1090 	u16	fw_fid;
1091 	u16	port_id;
1092 	u8	mac_addr[ETH_ALEN];
1093 	u32	first_vf_id;
1094 	u16	active_vfs;
1095 	u16	registered_vfs;
1096 	u16	max_vfs;
1097 	u32	max_encap_records;
1098 	u32	max_decap_records;
1099 	u32	max_tx_em_flows;
1100 	u32	max_tx_wm_flows;
1101 	u32	max_rx_em_flows;
1102 	u32	max_rx_wm_flows;
1103 	unsigned long	*vf_event_bmap;
1104 	u16	hwrm_cmd_req_pages;
1105 	u8	vf_resv_strategy;
1106 #define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
1107 #define BNXT_VF_RESV_STRATEGY_MINIMAL	1
1108 #define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC	2
1109 	void			*hwrm_cmd_req_addr[4];
1110 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
1111 	struct bnxt_vf_info	*vf;
1112 };
1113 
1114 struct bnxt_ntuple_filter {
1115 	struct hlist_node	hash;
1116 	u8			dst_mac_addr[ETH_ALEN];
1117 	u8			src_mac_addr[ETH_ALEN];
1118 	struct flow_keys	fkeys;
1119 	__le64			filter_id;
1120 	u16			sw_id;
1121 	u8			l2_fltr_idx;
1122 	u16			rxq;
1123 	u32			flow_id;
1124 	unsigned long		state;
1125 #define BNXT_FLTR_VALID		0
1126 #define BNXT_FLTR_UPDATE	1
1127 };
1128 
1129 struct bnxt_link_info {
1130 	u8			phy_type;
1131 	u8			media_type;
1132 	u8			transceiver;
1133 	u8			phy_addr;
1134 	u8			phy_link_status;
1135 #define BNXT_LINK_NO_LINK	PORT_PHY_QCFG_RESP_LINK_NO_LINK
1136 #define BNXT_LINK_SIGNAL	PORT_PHY_QCFG_RESP_LINK_SIGNAL
1137 #define BNXT_LINK_LINK		PORT_PHY_QCFG_RESP_LINK_LINK
1138 	u8			wire_speed;
1139 	u8			loop_back;
1140 	u8			link_up;
1141 	u8			duplex;
1142 #define BNXT_LINK_DUPLEX_HALF	PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
1143 #define BNXT_LINK_DUPLEX_FULL	PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
1144 	u8			pause;
1145 #define BNXT_LINK_PAUSE_TX	PORT_PHY_QCFG_RESP_PAUSE_TX
1146 #define BNXT_LINK_PAUSE_RX	PORT_PHY_QCFG_RESP_PAUSE_RX
1147 #define BNXT_LINK_PAUSE_BOTH	(PORT_PHY_QCFG_RESP_PAUSE_RX | \
1148 				 PORT_PHY_QCFG_RESP_PAUSE_TX)
1149 	u8			lp_pause;
1150 	u8			auto_pause_setting;
1151 	u8			force_pause_setting;
1152 	u8			duplex_setting;
1153 	u8			auto_mode;
1154 #define BNXT_AUTO_MODE(mode)	((mode) > BNXT_LINK_AUTO_NONE && \
1155 				 (mode) <= BNXT_LINK_AUTO_MSK)
1156 #define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
1157 #define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
1158 #define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
1159 #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
1160 #define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
1161 #define PHY_VER_LEN		3
1162 	u8			phy_ver[PHY_VER_LEN];
1163 	u16			link_speed;
1164 #define BNXT_LINK_SPEED_100MB	PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
1165 #define BNXT_LINK_SPEED_1GB	PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
1166 #define BNXT_LINK_SPEED_2GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
1167 #define BNXT_LINK_SPEED_2_5GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
1168 #define BNXT_LINK_SPEED_10GB	PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
1169 #define BNXT_LINK_SPEED_20GB	PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
1170 #define BNXT_LINK_SPEED_25GB	PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
1171 #define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
1172 #define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
1173 #define BNXT_LINK_SPEED_100GB	PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
1174 	u16			support_speeds;
1175 	u16			auto_link_speeds;	/* fw adv setting */
1176 #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
1177 #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
1178 #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
1179 #define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
1180 #define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
1181 #define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
1182 #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
1183 #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
1184 #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
1185 #define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
1186 	u16			support_auto_speeds;
1187 	u16			lp_auto_link_speeds;
1188 	u16			force_link_speed;
1189 	u32			preemphasis;
1190 	u8			module_status;
1191 	u16			fec_cfg;
1192 #define BNXT_FEC_AUTONEG	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
1193 #define BNXT_FEC_ENC_BASE_R	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
1194 #define BNXT_FEC_ENC_RS		PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
1195 
1196 	/* copy of requested setting from ethtool cmd */
1197 	u8			autoneg;
1198 #define BNXT_AUTONEG_SPEED		1
1199 #define BNXT_AUTONEG_FLOW_CTRL		2
1200 	u8			req_duplex;
1201 	u8			req_flow_ctrl;
1202 	u16			req_link_speed;
1203 	u16			advertising;	/* user adv setting */
1204 	bool			force_link_chng;
1205 
1206 	bool			phy_retry;
1207 	unsigned long		phy_retry_expires;
1208 
1209 	/* a copy of phy_qcfg output used to report link
1210 	 * info to VF
1211 	 */
1212 	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
1213 };
1214 
1215 #define BNXT_MAX_QUEUE	8
1216 
1217 struct bnxt_queue_info {
1218 	u8	queue_id;
1219 	u8	queue_profile;
1220 };
1221 
1222 #define BNXT_MAX_LED			4
1223 
1224 struct bnxt_led_info {
1225 	u8	led_id;
1226 	u8	led_type;
1227 	u8	led_group_id;
1228 	u8	unused;
1229 	__le16	led_state_caps;
1230 #define BNXT_LED_ALT_BLINK_CAP(x)	((x) &	\
1231 	cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
1232 
1233 	__le16	led_color_caps;
1234 };
1235 
1236 #define BNXT_MAX_TEST	8
1237 
1238 struct bnxt_test_info {
1239 	u8 offline_mask;
1240 	u8 flags;
1241 #define BNXT_TEST_FL_EXT_LPBK		0x1
1242 #define BNXT_TEST_FL_AN_PHY_LPBK	0x2
1243 	u16 timeout;
1244 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
1245 };
1246 
1247 #define BNXT_GRCPF_REG_CHIMP_COMM		0x0
1248 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
1249 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT		0x400
1250 #define BNXT_CAG_REG_LEGACY_INT_STATUS		0x4014
1251 #define BNXT_CAG_REG_BASE			0x300000
1252 
1253 #define BNXT_GRCPF_REG_KONG_COMM		0xA00
1254 #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER	0xB00
1255 
1256 #define BNXT_GRC_BASE_MASK			0xfffff000
1257 #define BNXT_GRC_OFFSET_MASK			0x00000ffc
1258 
1259 struct bnxt_tc_flow_stats {
1260 	u64		packets;
1261 	u64		bytes;
1262 };
1263 
1264 #ifdef CONFIG_BNXT_FLOWER_OFFLOAD
1265 struct bnxt_flower_indr_block_cb_priv {
1266 	struct net_device *tunnel_netdev;
1267 	struct bnxt *bp;
1268 	struct list_head list;
1269 };
1270 #endif
1271 
1272 struct bnxt_tc_info {
1273 	bool				enabled;
1274 
1275 	/* hash table to store TC offloaded flows */
1276 	struct rhashtable		flow_table;
1277 	struct rhashtable_params	flow_ht_params;
1278 
1279 	/* hash table to store L2 keys of TC flows */
1280 	struct rhashtable		l2_table;
1281 	struct rhashtable_params	l2_ht_params;
1282 	/* hash table to store L2 keys for TC tunnel decap */
1283 	struct rhashtable		decap_l2_table;
1284 	struct rhashtable_params	decap_l2_ht_params;
1285 	/* hash table to store tunnel decap entries */
1286 	struct rhashtable		decap_table;
1287 	struct rhashtable_params	decap_ht_params;
1288 	/* hash table to store tunnel encap entries */
1289 	struct rhashtable		encap_table;
1290 	struct rhashtable_params	encap_ht_params;
1291 
1292 	/* lock to atomically add/del an l2 node when a flow is
1293 	 * added or deleted.
1294 	 */
1295 	struct mutex			lock;
1296 
1297 	/* Fields used for batching stats query */
1298 	struct rhashtable_iter		iter;
1299 #define BNXT_FLOW_STATS_BATCH_MAX	10
1300 	struct bnxt_tc_stats_batch {
1301 		void			  *flow_node;
1302 		struct bnxt_tc_flow_stats hw_stats;
1303 	} stats_batch[BNXT_FLOW_STATS_BATCH_MAX];
1304 
1305 	/* Stat counter mask (width) */
1306 	u64				bytes_mask;
1307 	u64				packets_mask;
1308 };
1309 
1310 struct bnxt_vf_rep_stats {
1311 	u64			packets;
1312 	u64			bytes;
1313 	u64			dropped;
1314 };
1315 
1316 struct bnxt_vf_rep {
1317 	struct bnxt			*bp;
1318 	struct net_device		*dev;
1319 	struct metadata_dst		*dst;
1320 	u16				vf_idx;
1321 	u16				tx_cfa_action;
1322 	u16				rx_cfa_code;
1323 
1324 	struct bnxt_vf_rep_stats	rx_stats;
1325 	struct bnxt_vf_rep_stats	tx_stats;
1326 };
1327 
1328 #define PTU_PTE_VALID             0x1UL
1329 #define PTU_PTE_LAST              0x2UL
1330 #define PTU_PTE_NEXT_TO_LAST      0x4UL
1331 
1332 #define MAX_CTX_PAGES	(BNXT_PAGE_SIZE / 8)
1333 #define MAX_CTX_TOTAL_PAGES	(MAX_CTX_PAGES * MAX_CTX_PAGES)
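/* Each page-table entry is an 8-byte DMA address, so one BNXT_PAGE_SIZE page
 * of PTEs maps MAX_CTX_PAGES context pages, and a two-level table maps
 * MAX_CTX_PAGES * MAX_CTX_PAGES pages in total.
 */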
1334 
1335 struct bnxt_ctx_pg_info {
1336 	u32		entries;
1337 	u32		nr_pages;
1338 	void		*ctx_pg_arr[MAX_CTX_PAGES];
1339 	dma_addr_t	ctx_dma_arr[MAX_CTX_PAGES];
1340 	struct bnxt_ring_mem_info ring_mem;
1341 	struct bnxt_ctx_pg_info **ctx_pg_tbl;
1342 };
1343 
1344 struct bnxt_ctx_mem_info {
1345 	u32	qp_max_entries;
1346 	u16	qp_min_qp1_entries;
1347 	u16	qp_max_l2_entries;
1348 	u16	qp_entry_size;
1349 	u16	srq_max_l2_entries;
1350 	u32	srq_max_entries;
1351 	u16	srq_entry_size;
1352 	u16	cq_max_l2_entries;
1353 	u32	cq_max_entries;
1354 	u16	cq_entry_size;
1355 	u16	vnic_max_vnic_entries;
1356 	u16	vnic_max_ring_table_entries;
1357 	u16	vnic_entry_size;
1358 	u32	stat_max_entries;
1359 	u16	stat_entry_size;
1360 	u16	tqm_entry_size;
1361 	u32	tqm_min_entries_per_ring;
1362 	u32	tqm_max_entries_per_ring;
1363 	u32	mrav_max_entries;
1364 	u16	mrav_entry_size;
1365 	u16	tim_entry_size;
1366 	u32	tim_max_entries;
1367 	u16	mrav_num_entries_units;
1368 	u8	tqm_entries_multiple;
1369 	u8	ctx_kind_initializer;
1370 	u8	tqm_fp_rings_count;
1371 
1372 	u32	flags;
1373 	#define BNXT_CTX_FLAG_INITED	0x01
1374 
1375 	struct bnxt_ctx_pg_info qp_mem;
1376 	struct bnxt_ctx_pg_info srq_mem;
1377 	struct bnxt_ctx_pg_info cq_mem;
1378 	struct bnxt_ctx_pg_info vnic_mem;
1379 	struct bnxt_ctx_pg_info stat_mem;
1380 	struct bnxt_ctx_pg_info mrav_mem;
1381 	struct bnxt_ctx_pg_info tim_mem;
1382 	struct bnxt_ctx_pg_info *tqm_mem[9];
1383 };
1384 
1385 struct bnxt_fw_health {
1386 	u32 flags;
1387 	u32 polling_dsecs;
1388 	u32 master_func_wait_dsecs;
1389 	u32 normal_func_wait_dsecs;
1390 	u32 post_reset_wait_dsecs;
1391 	u32 post_reset_max_wait_dsecs;
1392 	u32 regs[4];
1393 	u32 mapped_regs[4];
1394 #define BNXT_FW_HEALTH_REG		0
1395 #define BNXT_FW_HEARTBEAT_REG		1
1396 #define BNXT_FW_RESET_CNT_REG		2
1397 #define BNXT_FW_RESET_INPROG_REG	3
1398 	u32 fw_reset_inprog_reg_mask;
1399 	u32 last_fw_heartbeat;
1400 	u32 last_fw_reset_cnt;
1401 	u8 enabled:1;
1402 	u8 master:1;
1403 	u8 fatal:1;
1404 	u8 tmr_multiplier;
1405 	u8 tmr_counter;
1406 	u8 fw_reset_seq_cnt;
1407 	u32 fw_reset_seq_regs[16];
1408 	u32 fw_reset_seq_vals[16];
1409 	u32 fw_reset_seq_delay_msec[16];
1410 	struct devlink_health_reporter	*fw_reporter;
1411 	struct devlink_health_reporter *fw_reset_reporter;
1412 	struct devlink_health_reporter *fw_fatal_reporter;
1413 };
1414 
1415 struct bnxt_fw_reporter_ctx {
1416 	unsigned long sp_event;
1417 };
1418 
1419 #define BNXT_FW_HEALTH_REG_TYPE_MASK	3
1420 #define BNXT_FW_HEALTH_REG_TYPE_CFG	0
1421 #define BNXT_FW_HEALTH_REG_TYPE_GRC	1
1422 #define BNXT_FW_HEALTH_REG_TYPE_BAR0	2
1423 #define BNXT_FW_HEALTH_REG_TYPE_BAR1	3
1424 
1425 #define BNXT_FW_HEALTH_REG_TYPE(reg)	((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
1426 #define BNXT_FW_HEALTH_REG_OFF(reg)	((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
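/* Each fw_health register descriptor packs the address-space type (CFG, GRC,
 * BAR0 or BAR1) into its low 2 bits and the register offset into the rest;
 * the two macros above split a descriptor into those parts.
 */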
1427 
1428 #define BNXT_FW_HEALTH_WIN_BASE		0x3000
1429 #define BNXT_FW_HEALTH_WIN_MAP_OFF	8
1430 
1431 #define BNXT_FW_STATUS_HEALTHY		0x8000
1432 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
1433 
1434 struct bnxt {
1435 	void __iomem		*bar0;
1436 	void __iomem		*bar1;
1437 	void __iomem		*bar2;
1438 
1439 	u32			reg_base;
1440 	u16			chip_num;
1441 #define CHIP_NUM_57301		0x16c8
1442 #define CHIP_NUM_57302		0x16c9
1443 #define CHIP_NUM_57304		0x16ca
1444 #define CHIP_NUM_58700		0x16cd
1445 #define CHIP_NUM_57402		0x16d0
1446 #define CHIP_NUM_57404		0x16d1
1447 #define CHIP_NUM_57406		0x16d2
1448 #define CHIP_NUM_57407		0x16d5
1449 
1450 #define CHIP_NUM_57311		0x16ce
1451 #define CHIP_NUM_57312		0x16cf
1452 #define CHIP_NUM_57314		0x16df
1453 #define CHIP_NUM_57317		0x16e0
1454 #define CHIP_NUM_57412		0x16d6
1455 #define CHIP_NUM_57414		0x16d7
1456 #define CHIP_NUM_57416		0x16d8
1457 #define CHIP_NUM_57417		0x16d9
1458 #define CHIP_NUM_57412L		0x16da
1459 #define CHIP_NUM_57414L		0x16db
1460 
1461 #define CHIP_NUM_5745X		0xd730
1462 #define CHIP_NUM_57452		0xc452
1463 #define CHIP_NUM_57454		0xc454
1464 
1465 #define CHIP_NUM_57508		0x1750
1466 #define CHIP_NUM_57504		0x1751
1467 #define CHIP_NUM_57502		0x1752
1468 
1469 #define CHIP_NUM_58802		0xd802
1470 #define CHIP_NUM_58804		0xd804
1471 #define CHIP_NUM_58808		0xd808
1472 
1473 	u8			chip_rev;
1474 
1475 #define BNXT_CHIP_NUM_5730X(chip_num)		\
1476 	((chip_num) >= CHIP_NUM_57301 &&	\
1477 	 (chip_num) <= CHIP_NUM_57304)
1478 
1479 #define BNXT_CHIP_NUM_5740X(chip_num)		\
1480 	(((chip_num) >= CHIP_NUM_57402 &&	\
1481 	  (chip_num) <= CHIP_NUM_57406) ||	\
1482 	 (chip_num) == CHIP_NUM_57407)
1483 
1484 #define BNXT_CHIP_NUM_5731X(chip_num)		\
1485 	((chip_num) == CHIP_NUM_57311 ||	\
1486 	 (chip_num) == CHIP_NUM_57312 ||	\
1487 	 (chip_num) == CHIP_NUM_57314 ||	\
1488 	 (chip_num) == CHIP_NUM_57317)
1489 
1490 #define BNXT_CHIP_NUM_5741X(chip_num)		\
1491 	((chip_num) >= CHIP_NUM_57412 &&	\
1492 	 (chip_num) <= CHIP_NUM_57414L)
1493 
1494 #define BNXT_CHIP_NUM_58700(chip_num)		\
1495 	 ((chip_num) == CHIP_NUM_58700)
1496 
1497 #define BNXT_CHIP_NUM_5745X(chip_num)		\
1498 	((chip_num) == CHIP_NUM_5745X ||	\
1499 	 (chip_num) == CHIP_NUM_57452 ||	\
1500 	 (chip_num) == CHIP_NUM_57454)
1501 
1502 
1503 #define BNXT_CHIP_NUM_57X0X(chip_num)		\
1504 	(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
1505 
1506 #define BNXT_CHIP_NUM_57X1X(chip_num)		\
1507 	(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
1508 
1509 #define BNXT_CHIP_NUM_588XX(chip_num)		\
1510 	((chip_num) == CHIP_NUM_58802 ||	\
1511 	 (chip_num) == CHIP_NUM_58804 ||        \
1512 	 (chip_num) == CHIP_NUM_58808)
1513 
1514 #define BNXT_VPD_FLD_LEN	32
1515 	char			board_partno[BNXT_VPD_FLD_LEN];
1516 	char			board_serialno[BNXT_VPD_FLD_LEN];
1517 
1518 	struct net_device	*dev;
1519 	struct pci_dev		*pdev;
1520 
1521 	atomic_t		intr_sem;
1522 
1523 	u32			flags;
1524 	#define BNXT_FLAG_CHIP_P5	0x1
1525 	#define BNXT_FLAG_VF		0x2
1526 	#define BNXT_FLAG_LRO		0x4
1527 #ifdef CONFIG_INET
1528 	#define BNXT_FLAG_GRO		0x8
1529 #else
1530 	/* Cannot support hardware GRO if CONFIG_INET is not set */
1531 	#define BNXT_FLAG_GRO		0x0
1532 #endif
1533 	#define BNXT_FLAG_TPA		(BNXT_FLAG_LRO | BNXT_FLAG_GRO)
1534 	#define BNXT_FLAG_JUMBO		0x10
1535 	#define BNXT_FLAG_STRIP_VLAN	0x20
1536 	#define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
1537 					 BNXT_FLAG_LRO)
1538 	#define BNXT_FLAG_USING_MSIX	0x40
1539 	#define BNXT_FLAG_MSIX_CAP	0x80
1540 	#define BNXT_FLAG_RFS		0x100
1541 	#define BNXT_FLAG_SHARED_RINGS	0x200
1542 	#define BNXT_FLAG_PORT_STATS	0x400
1543 	#define BNXT_FLAG_UDP_RSS_CAP	0x800
1544 	#define BNXT_FLAG_EEE_CAP	0x1000
1545 	#define BNXT_FLAG_NEW_RSS_CAP	0x2000
1546 	#define BNXT_FLAG_WOL_CAP	0x4000
1547 	#define BNXT_FLAG_ROCEV1_CAP	0x8000
1548 	#define BNXT_FLAG_ROCEV2_CAP	0x10000
1549 	#define BNXT_FLAG_ROCE_CAP	(BNXT_FLAG_ROCEV1_CAP |	\
1550 					 BNXT_FLAG_ROCEV2_CAP)
1551 	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
1552 	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
1553 	#define BNXT_FLAG_MULTI_HOST	0x100000
1554 	#define BNXT_FLAG_DSN_VALID	0x200000
1555 	#define BNXT_FLAG_DOUBLE_DB	0x400000
1556 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
1557 	#define BNXT_FLAG_DIM		0x2000000
1558 	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
1559 	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
1560 	#define BNXT_FLAG_PCIE_STATS	0x40000000
1561 
1562 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
1563 					    BNXT_FLAG_RFS |		\
1564 					    BNXT_FLAG_STRIP_VLAN)
1565 
1566 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
1567 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
1568 #define BNXT_NPAR(bp)		((bp)->port_partition_type)
1569 #define BNXT_MH(bp)		((bp)->flags & BNXT_FLAG_MULTI_HOST)
1570 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
1571 #define BNXT_PHY_CFG_ABLE(bp)	(BNXT_SINGLE_PF(bp) ||			\
1572 				 ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
1573 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
1574 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
1575 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
1576 				 (!((bp)->flags & BNXT_FLAG_CHIP_P5) ||	\
1577 				  (bp)->max_tpa_v2) && !is_kdump_kernel())
1578 
1579 /* Chip class phase 5 */
1580 #define BNXT_CHIP_P5(bp)			\
1581 	((bp)->chip_num == CHIP_NUM_57508 ||	\
1582 	 (bp)->chip_num == CHIP_NUM_57504 ||	\
1583 	 (bp)->chip_num == CHIP_NUM_57502)
1584 
1585 /* Chip class phase 4.x */
1586 #define BNXT_CHIP_P4(bp)			\
1587 	(BNXT_CHIP_NUM_57X1X((bp)->chip_num) ||	\
1588 	 BNXT_CHIP_NUM_5745X((bp)->chip_num) ||	\
1589 	 BNXT_CHIP_NUM_588XX((bp)->chip_num) ||	\
1590 	 (BNXT_CHIP_NUM_58700((bp)->chip_num) &&	\
1591 	  !BNXT_CHIP_TYPE_NITRO_A0(bp)))
1592 
1593 #define BNXT_CHIP_P4_PLUS(bp)			\
1594 	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
1595 
1596 	struct bnxt_en_dev	*edev;
1597 	struct bnxt_en_dev *	(*ulp_probe)(struct net_device *);
1598 
1599 	struct bnxt_napi	**bnapi;
1600 
1601 	struct bnxt_rx_ring_info	*rx_ring;
1602 	struct bnxt_tx_ring_info	*tx_ring;
1603 	u16			*tx_ring_map;
1604 
1605 	struct sk_buff *	(*gro_func)(struct bnxt_tpa_info *, int, int,
1606 					    struct sk_buff *);
1607 
1608 	struct sk_buff *	(*rx_skb_func)(struct bnxt *,
1609 					       struct bnxt_rx_ring_info *,
1610 					       u16, void *, u8 *, dma_addr_t,
1611 					       unsigned int);
1612 
1613 	u16			max_tpa_v2;
1614 	u16			max_tpa;
1615 	u32			rx_buf_size;
	u32			rx_buf_use_size;	/* usable size */
1617 	u16			rx_offset;
1618 	u16			rx_dma_offset;
1619 	enum dma_data_direction	rx_dir;
1620 	u32			rx_ring_size;
1621 	u32			rx_agg_ring_size;
1622 	u32			rx_copy_thresh;
1623 	u32			rx_ring_mask;
1624 	u32			rx_agg_ring_mask;
1625 	int			rx_nr_pages;
1626 	int			rx_agg_nr_pages;
1627 	int			rx_nr_rings;
1628 	int			rsscos_nr_ctxs;
1629 
1630 	u32			tx_ring_size;
1631 	u32			tx_ring_mask;
1632 	int			tx_nr_pages;
1633 	int			tx_nr_rings;
1634 	int			tx_nr_rings_per_tc;
1635 	int			tx_nr_rings_xdp;
1636 
1637 	int			tx_wake_thresh;
1638 	int			tx_push_thresh;
1639 	int			tx_push_size;
1640 
1641 	u32			cp_ring_size;
1642 	u32			cp_ring_mask;
1643 	u32			cp_bit;
1644 	int			cp_nr_pages;
1645 	int			cp_nr_rings;
1646 
1647 	/* grp_info indexed by completion ring index */
1648 	struct bnxt_ring_grp_info	*grp_info;
1649 	struct bnxt_vnic_info	*vnic_info;
1650 	int			nr_vnics;
1651 	u32			rss_hash_cfg;
1652 
1653 	u16			max_mtu;
1654 	u8			max_tc;
1655 	u8			max_lltc;	/* lossless TCs */
1656 	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
1657 	u8			tc_to_qidx[BNXT_MAX_QUEUE];
1658 	u8			q_ids[BNXT_MAX_QUEUE];
1659 	u8			max_q;
1660 
1661 	unsigned int		current_interval;
1662 #define BNXT_TIMER_INTERVAL	HZ
1663 
1664 	struct timer_list	timer;
1665 
1666 	unsigned long		state;
1667 #define BNXT_STATE_OPEN		0
1668 #define BNXT_STATE_IN_SP_TASK	1
1669 #define BNXT_STATE_READ_STATS	2
1670 #define BNXT_STATE_FW_RESET_DET 3
1671 #define BNXT_STATE_IN_FW_RESET	4
1672 #define BNXT_STATE_ABORT_ERR	5
1673 #define BNXT_STATE_FW_FATAL_COND	6
1674 #define BNXT_STATE_DRV_REGISTERED	7
1675 
1676 	struct bnxt_irq	*irq_tbl;
1677 	int			total_irqs;
1678 	u8			mac_addr[ETH_ALEN];
1679 
1680 #ifdef CONFIG_BNXT_DCB
1681 	struct ieee_pfc		*ieee_pfc;
1682 	struct ieee_ets		*ieee_ets;
1683 	u8			dcbx_cap;
1684 	u8			default_pri;
1685 	u8			max_dscp_value;
1686 #endif /* CONFIG_BNXT_DCB */
1687 
1688 	u32			msg_enable;
1689 
1690 	u32			fw_cap;
1691 	#define BNXT_FW_CAP_SHORT_CMD			0x00000001
1692 	#define BNXT_FW_CAP_LLDP_AGENT			0x00000002
1693 	#define BNXT_FW_CAP_DCBX_AGENT			0x00000004
1694 	#define BNXT_FW_CAP_NEW_RM			0x00000008
1695 	#define BNXT_FW_CAP_IF_CHANGE			0x00000010
1696 	#define BNXT_FW_CAP_KONG_MB_CHNL		0x00000080
1697 	#define BNXT_FW_CAP_OVS_64BIT_HANDLE		0x00000400
1698 	#define BNXT_FW_CAP_TRUSTED_VF			0x00000800
1699 	#define BNXT_FW_CAP_ERROR_RECOVERY		0x00002000
1700 	#define BNXT_FW_CAP_PKG_VER			0x00004000
1701 	#define BNXT_FW_CAP_CFA_ADV_FLOW		0x00008000
1702 	#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2	0x00010000
1703 	#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED	0x00020000
1704 	#define BNXT_FW_CAP_EXT_STATS_SUPPORTED		0x00040000
1705 	#define BNXT_FW_CAP_ERR_RECOVER_RELOAD		0x00100000
1706 	#define BNXT_FW_CAP_HOT_RESET			0x00200000
1707 	#define BNXT_FW_CAP_SHARED_PORT_CFG		0x00400000
1708 
1709 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
1710 	u32			hwrm_spec_code;
1711 	u16			hwrm_cmd_seq;
1712 	u16			hwrm_cmd_kong_seq;
1713 	u16			hwrm_intr_seq_id;
1714 	void			*hwrm_short_cmd_req_addr;
1715 	dma_addr_t		hwrm_short_cmd_req_dma_addr;
1716 	void			*hwrm_cmd_resp_addr;
1717 	dma_addr_t		hwrm_cmd_resp_dma_addr;
1718 	void			*hwrm_cmd_kong_resp_addr;
1719 	dma_addr_t		hwrm_cmd_kong_resp_dma_addr;
1720 
1721 	struct rtnl_link_stats64	net_stats_prev;
1722 	struct rx_port_stats	*hw_rx_port_stats;
1723 	struct tx_port_stats	*hw_tx_port_stats;
1724 	struct rx_port_stats_ext	*hw_rx_port_stats_ext;
1725 	struct tx_port_stats_ext	*hw_tx_port_stats_ext;
1726 	struct pcie_ctx_hw_stats	*hw_pcie_stats;
1727 	dma_addr_t		hw_rx_port_stats_map;
1728 	dma_addr_t		hw_tx_port_stats_map;
1729 	dma_addr_t		hw_rx_port_stats_ext_map;
1730 	dma_addr_t		hw_tx_port_stats_ext_map;
1731 	dma_addr_t		hw_pcie_stats_map;
1732 	int			hw_port_stats_size;
1733 	u16			fw_rx_stats_ext_size;
1734 	u16			fw_tx_stats_ext_size;
1735 	u16			hw_ring_stats_size;
1736 	u8			pri2cos_idx[8];
1737 	u8			pri2cos_valid;
1738 
1739 	u16			hwrm_max_req_len;
1740 	u16			hwrm_max_ext_req_len;
1741 	int			hwrm_cmd_timeout;
1742 	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
1743 	struct hwrm_ver_get_output	ver_resp;
1744 #define FW_VER_STR_LEN		32
1745 #define BC_HWRM_STR_LEN		21
1746 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
1747 	char			fw_ver_str[FW_VER_STR_LEN];
1748 	char			hwrm_ver_supp[FW_VER_STR_LEN];
1749 	__be16			vxlan_port;
1750 	u8			vxlan_port_cnt;
1751 	__le16			vxlan_fw_dst_port_id;
1752 	__be16			nge_port;
1753 	u8			nge_port_cnt;
1754 	__le16			nge_fw_dst_port_id;
1755 	u8			port_partition_type;
1756 	u8			port_count;
1757 	u16			br_mode;
1758 
1759 	struct bnxt_coal_cap	coal_cap;
1760 	struct bnxt_coal	rx_coal;
1761 	struct bnxt_coal	tx_coal;
1762 
1763 	u32			stats_coal_ticks;
1764 #define BNXT_DEF_STATS_COAL_TICKS	 1000000
1765 #define BNXT_MIN_STATS_COAL_TICKS	  250000
1766 #define BNXT_MAX_STATS_COAL_TICKS	 1000000
1767 
1768 	struct work_struct	sp_task;
1769 	unsigned long		sp_event;
1770 #define BNXT_RX_MASK_SP_EVENT		0
1771 #define BNXT_RX_NTP_FLTR_SP_EVENT	1
1772 #define BNXT_LINK_CHNG_SP_EVENT		2
1773 #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
1774 #define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
1775 #define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
1776 #define BNXT_RESET_TASK_SP_EVENT	6
1777 #define BNXT_RST_RING_SP_EVENT		7
1778 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT	8
1779 #define BNXT_PERIODIC_STATS_SP_EVENT	9
1780 #define BNXT_HWRM_PORT_MODULE_SP_EVENT	10
1781 #define BNXT_RESET_TASK_SILENT_SP_EVENT	11
1782 #define BNXT_GENEVE_ADD_PORT_SP_EVENT	12
1783 #define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
1784 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
1785 #define BNXT_FLOW_STATS_SP_EVENT	15
1786 #define BNXT_UPDATE_PHY_SP_EVENT	16
1787 #define BNXT_RING_COAL_NOW_SP_EVENT	17
1788 #define BNXT_FW_RESET_NOTIFY_SP_EVENT	18
1789 #define BNXT_FW_EXCEPTION_SP_EVENT	19
1790 #define BNXT_LINK_CFG_CHANGE_SP_EVENT	21
1791 
1792 	struct delayed_work	fw_reset_task;
1793 	int			fw_reset_state;
1794 #define BNXT_FW_RESET_STATE_POLL_VF	1
1795 #define BNXT_FW_RESET_STATE_RESET_FW	2
1796 #define BNXT_FW_RESET_STATE_ENABLE_DEV	3
1797 #define BNXT_FW_RESET_STATE_POLL_FW	4
1798 #define BNXT_FW_RESET_STATE_OPENING	5
1799 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN	6
1800 
1801 	u16			fw_reset_min_dsecs;
1802 #define BNXT_DFLT_FW_RST_MIN_DSECS	20
1803 	u16			fw_reset_max_dsecs;
1804 #define BNXT_DFLT_FW_RST_MAX_DSECS	60
1805 	unsigned long		fw_reset_timestamp;
1806 
1807 	struct bnxt_fw_health	*fw_health;
1808 
1809 	struct bnxt_hw_resc	hw_resc;
1810 	struct bnxt_pf_info	pf;
1811 	struct bnxt_ctx_mem_info	*ctx;
1812 #ifdef CONFIG_BNXT_SRIOV
1813 	int			nr_vfs;
1814 	struct bnxt_vf_info	vf;
1815 	wait_queue_head_t	sriov_cfg_wait;
1816 	bool			sriov_cfg;
1817 #define BNXT_SRIOV_CFG_WAIT_TMO	msecs_to_jiffies(10000)
1818 
1819 	/* lock to protect VF-rep creation/cleanup via
1820 	 * multiple paths such as ->sriov_configure() and
1821 	 * devlink ->eswitch_mode_set()
1822 	 */
1823 	struct mutex		sriov_lock;
1824 #endif
1825 
1826 #if BITS_PER_LONG == 32
1827 	/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
1828 	spinlock_t		db_lock;
1829 #endif
1830 	int			db_size;
1831 
1832 #define BNXT_NTP_FLTR_MAX_FLTR	4096
1833 #define BNXT_NTP_FLTR_HASH_SIZE	512
1834 #define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
1835 	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
1836 	spinlock_t		ntp_fltr_lock;	/* for hash table add, del */
1837 
1838 	unsigned long		*ntp_fltr_bmap;
1839 	int			ntp_fltr_count;
1840 
1841 	/* To protect link related settings during link changes and
1842 	 * ethtool settings changes.
1843 	 */
1844 	struct mutex		link_lock;
1845 	struct bnxt_link_info	link_info;
1846 	struct ethtool_eee	eee;
1847 	u32			lpi_tmr_lo;
1848 	u32			lpi_tmr_hi;
1849 
1850 	u8			num_tests;
1851 	struct bnxt_test_info	*test_info;
1852 
1853 	u8			wol_filter_id;
1854 	u8			wol;
1855 
1856 	u8			num_leds;
1857 	struct bnxt_led_info	leds[BNXT_MAX_LED];
1858 	u16			dump_flag;
1859 #define BNXT_DUMP_LIVE		0
1860 #define BNXT_DUMP_CRASH		1
1861 
1862 	struct bpf_prog		*xdp_prog;
1863 
1864 	/* devlink interface and vf-rep structs */
1865 	struct devlink		*dl;
1866 	struct devlink_port	dl_port;
1867 	enum devlink_eswitch_mode eswitch_mode;
1868 	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
1869 	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
1870 	u8			dsn[8];
1871 	struct bnxt_tc_info	*tc_info;
1872 	struct list_head	tc_indr_block_list;
1873 	struct dentry		*debugfs_pdev;
1874 	struct device		*hwmon_dev;
1875 };
1876 
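/* The stats offset macros below convert a counter's byte offset within its
 * hardware stats structure into an index of 64-bit counters (hence the
 * division by 8).  The sizeof(struct rx_port_stats) + 512 term in
 * BNXT_TX_STATS_OFFSET() reflects the TX block being laid out after the RX
 * block, plus a 512-byte gap, in the shared port-stats DMA buffer.
 */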
1877 #define BNXT_RX_STATS_OFFSET(counter)			\
1878 	(offsetof(struct rx_port_stats, counter) / 8)
1879 
1880 #define BNXT_TX_STATS_OFFSET(counter)			\
1881 	((offsetof(struct tx_port_stats, counter) +	\
1882 	  sizeof(struct rx_port_stats) + 512) / 8)
1883 
1884 #define BNXT_RX_STATS_EXT_OFFSET(counter)		\
1885 	(offsetof(struct rx_port_stats_ext, counter) / 8)
1886 
1887 #define BNXT_TX_STATS_EXT_OFFSET(counter)		\
1888 	(offsetof(struct tx_port_stats_ext, counter) / 8)
1889 
1890 #define BNXT_PCIE_STATS_OFFSET(counter)			\
1891 	(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
1892 
1893 #define I2C_DEV_ADDR_A0				0xa0
1894 #define I2C_DEV_ADDR_A2				0xa2
1895 #define SFF_DIAG_SUPPORT_OFFSET			0x5c
1896 #define SFF_MODULE_ID_SFP			0x3
1897 #define SFF_MODULE_ID_QSFP			0xc
1898 #define SFF_MODULE_ID_QSFP_PLUS			0xd
1899 #define SFF_MODULE_ID_QSFP28			0x11
1900 #define BNXT_MAX_PHY_I2C_RESP_SIZE		64
1901 
1902 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
1903 {
1904 	/* Tell compiler to fetch tx indices from memory. */
1905 	barrier();
1906 
1907 	return bp->tx_ring_size -
1908 		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
1909 }
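
/* Illustrative helper (an editor's sketch, not part of the upstream driver):
 * mirrors how bnxt_tx_avail() is compared against tx_wake_thresh when
 * deciding whether a stopped TX queue has enough free descriptors to be
 * restarted.
 */
static inline bool bnxt_tx_ring_has_room(struct bnxt *bp,
					 struct bnxt_tx_ring_info *txr)
{
	/* More free descriptors than the wake threshold means the queue
	 * can safely accept another worst-case packet.
	 */
	return bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh;
}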
1910 
1911 #if BITS_PER_LONG == 32
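/* On 32-bit hosts a 64-bit doorbell write is emulated as two 32-bit writes
 * (low word first) serialized by bp->db_lock; note that this macro expects
 * a local variable named 'bp' to be in scope at the call site.
 */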
1912 #define writeq(val64, db)			\
1913 do {						\
1914 	spin_lock(&bp->db_lock);		\
1915 	writel((val64) & 0xffffffff, db);	\
1916 	writel((val64) >> 32, (db) + 4);	\
1917 	spin_unlock(&bp->db_lock);		\
1918 } while (0)
1919 
1920 #define writeq_relaxed writeq
1921 #endif
1922 
1923 /* For TX and RX ring doorbells with no ordering guarantee */
1924 static inline void bnxt_db_write_relaxed(struct bnxt *bp,
1925 					 struct bnxt_db_info *db, u32 idx)
1926 {
1927 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
1928 		writeq_relaxed(db->db_key64 | idx, db->doorbell);
1929 	} else {
1930 		u32 db_val = db->db_key32 | idx;
1931 
1932 		writel_relaxed(db_val, db->doorbell);
1933 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
1934 			writel_relaxed(db_val, db->doorbell);
1935 	}
1936 }
1937 
1938 /* For TX and RX ring doorbells */
1939 static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
1940 				 u32 idx)
1941 {
1942 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
1943 		writeq(db->db_key64 | idx, db->doorbell);
1944 	} else {
1945 		u32 db_val = db->db_key32 | idx;
1946 
1947 		writel(db_val, db->doorbell);
1948 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
1949 			writel(db_val, db->doorbell);
1950 	}
1951 }
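
/* Example (editor's illustration of the usage in bnxt.c): after the xmit
 * path fills the TX BDs and advances the producer index, it rings the TX
 * ring's doorbell with the new producer value, e.g.:
 *
 *	bnxt_db_write(bp, &txr->tx_db, prod);
 */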
1952 
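/* CFA (flow offload) HWRM commands are steered to the secondary "Kong"
 * mailbox channel when the firmware advertises BNXT_FW_CAP_KONG_MB_CHNL;
 * the helpers below identify those requests and select the matching
 * response buffer and sequence counter.
 */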
1953 static inline bool bnxt_cfa_hwrm_message(u16 req_type)
1954 {
1955 	switch (req_type) {
1956 	case HWRM_CFA_ENCAP_RECORD_ALLOC:
1957 	case HWRM_CFA_ENCAP_RECORD_FREE:
1958 	case HWRM_CFA_DECAP_FILTER_ALLOC:
1959 	case HWRM_CFA_DECAP_FILTER_FREE:
1960 	case HWRM_CFA_EM_FLOW_ALLOC:
1961 	case HWRM_CFA_EM_FLOW_FREE:
1962 	case HWRM_CFA_EM_FLOW_CFG:
1963 	case HWRM_CFA_FLOW_ALLOC:
1964 	case HWRM_CFA_FLOW_FREE:
1965 	case HWRM_CFA_FLOW_INFO:
1966 	case HWRM_CFA_FLOW_FLUSH:
1967 	case HWRM_CFA_FLOW_STATS:
1968 	case HWRM_CFA_METER_PROFILE_ALLOC:
1969 	case HWRM_CFA_METER_PROFILE_FREE:
1970 	case HWRM_CFA_METER_PROFILE_CFG:
1971 	case HWRM_CFA_METER_INSTANCE_ALLOC:
1972 	case HWRM_CFA_METER_INSTANCE_FREE:
1973 		return true;
1974 	default:
1975 		return false;
1976 	}
1977 }
1978 
1979 static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
1980 {
1981 	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
1982 		bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
1983 }
1984 
1985 static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
1986 {
1987 	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
1988 		req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
1989 }
1990 
1991 static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
1992 {
1993 	if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
1994 		return bp->hwrm_cmd_kong_resp_addr;
1995 	else
1996 		return bp->hwrm_cmd_resp_addr;
1997 }
1998 
1999 static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
2000 {
2001 	u16 seq_id;
2002 
2003 	if (dst == BNXT_HWRM_CHNL_CHIMP)
2004 		seq_id = bp->hwrm_cmd_seq++;
2005 	else
2006 		seq_id = bp->hwrm_cmd_kong_seq++;
2007 	return seq_id;
2008 }
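
/* Typical HWRM request flow (editor's sketch; see the callers in bnxt.c for
 * the authoritative pattern).  hwrm_send_message() takes hwrm_cmd_lock
 * internally, while _hwrm_send_message() expects the caller to hold it, as
 * is done when the response buffer must be read under the lock:
 *
 *	struct hwrm_func_qcfg_input req = {0};
 *	int rc;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 */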
2009 
2010 extern const u16 bnxt_lhint_arr[];
2011 
2012 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
2013 		       u16 prod, gfp_t gfp);
2014 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
2015 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
2016 void bnxt_set_tpa_flags(struct bnxt *bp);
2017 void bnxt_set_ring_params(struct bnxt *);
2018 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
2019 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
2020 int _hwrm_send_message(struct bnxt *, void *, u32, int);
2021 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
2022 int hwrm_send_message(struct bnxt *, void *, u32, int);
2023 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
2024 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
2025 			    int bmap_size, bool async_only);
2026 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
2027 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
2028 int bnxt_nq_rings_in_use(struct bnxt *bp);
2029 int bnxt_hwrm_set_coal(struct bnxt *);
2030 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
2031 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
2032 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
2033 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
2034 int bnxt_get_avail_msix(struct bnxt *bp, int num);
2035 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
2036 void bnxt_tx_disable(struct bnxt *bp);
2037 void bnxt_tx_enable(struct bnxt *bp);
2038 int bnxt_hwrm_set_pause(struct bnxt *);
2039 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
2040 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
2041 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
2042 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
2043 int bnxt_hwrm_fw_set_time(struct bnxt *);
2044 int bnxt_open_nic(struct bnxt *, bool, bool);
2045 int bnxt_half_open_nic(struct bnxt *bp);
2046 void bnxt_half_close_nic(struct bnxt *bp);
2047 int bnxt_close_nic(struct bnxt *, bool, bool);
2048 void bnxt_fw_exception(struct bnxt *bp);
2049 void bnxt_fw_reset(struct bnxt *bp);
2050 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
2051 		     int tx_xdp);
2052 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
2053 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
2054 int bnxt_restore_pf_fw_resources(struct bnxt *bp);
2055 int bnxt_get_port_parent_id(struct net_device *dev,
2056 			    struct netdev_phys_item_id *ppid);
2057 void bnxt_dim_work(struct work_struct *work);
2058 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
2059 
2060 #endif
2061