1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #ifndef BNXT_H
12 #define BNXT_H
13 
14 #define DRV_MODULE_NAME		"bnxt_en"
15 #define DRV_MODULE_VERSION	"1.10.0"
16 
17 #define DRV_VER_MAJ	1
18 #define DRV_VER_MIN	10
19 #define DRV_VER_UPD	0
20 
21 #include <linux/interrupt.h>
22 #include <linux/rhashtable.h>
23 #include <net/devlink.h>
24 #include <net/dst_metadata.h>
25 #include <net/xdp.h>
26 #include <linux/net_dim.h>
27 
28 struct tx_bd {
29 	__le32 tx_bd_len_flags_type;
30 	#define TX_BD_TYPE					(0x3f << 0)
31 	 #define TX_BD_TYPE_SHORT_TX_BD				 (0x00 << 0)
32 	 #define TX_BD_TYPE_LONG_TX_BD				 (0x10 << 0)
33 	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
34 	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
35 	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
36 	 #define TX_BD_FLAGS_BD_CNT_SHIFT			 8
37 	#define TX_BD_FLAGS_LHINT				(3 << 13)
38 	 #define TX_BD_FLAGS_LHINT_SHIFT			 13
39 	 #define TX_BD_FLAGS_LHINT_512_AND_SMALLER		 (0 << 13)
40 	 #define TX_BD_FLAGS_LHINT_512_TO_1023			 (1 << 13)
41 	 #define TX_BD_FLAGS_LHINT_1024_TO_2047			 (2 << 13)
42 	 #define TX_BD_FLAGS_LHINT_2048_AND_LARGER		 (3 << 13)
43 	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
44 	#define TX_BD_LEN					(0xffff << 16)
45 	 #define TX_BD_LEN_SHIFT				 16
46 
47 	u32 tx_bd_opaque;
48 	__le64 tx_bd_haddr;
49 } __packed;
50 
51 struct tx_bd_ext {
52 	__le32 tx_bd_hsize_lflags;
53 	#define TX_BD_FLAGS_TCP_UDP_CHKSUM			(1 << 0)
54 	#define TX_BD_FLAGS_IP_CKSUM				(1 << 1)
55 	#define TX_BD_FLAGS_NO_CRC				(1 << 2)
56 	#define TX_BD_FLAGS_STAMP				(1 << 3)
57 	#define TX_BD_FLAGS_T_IP_CHKSUM				(1 << 4)
58 	#define TX_BD_FLAGS_LSO					(1 << 5)
59 	#define TX_BD_FLAGS_IPID_FMT				(1 << 6)
60 	#define TX_BD_FLAGS_T_IPID				(1 << 7)
61 	#define TX_BD_HSIZE					(0xff << 16)
62 	 #define TX_BD_HSIZE_SHIFT				 16
63 
64 	__le32 tx_bd_mss;
65 	__le32 tx_bd_cfa_action;
66 	#define TX_BD_CFA_ACTION				(0xffff << 16)
67 	 #define TX_BD_CFA_ACTION_SHIFT				 16
68 
69 	__le32 tx_bd_cfa_meta;
70 	#define TX_BD_CFA_META_MASK                             0xfffffff
71 	#define TX_BD_CFA_META_VID_MASK                         0xfff
72 	#define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
73 	 #define TX_BD_CFA_META_PRI_SHIFT                        12
74 	#define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
75 	 #define TX_BD_CFA_META_TPID_SHIFT                       16
76 	#define TX_BD_CFA_META_KEY                              (0xf << 28)
77 	 #define TX_BD_CFA_META_KEY_SHIFT			 28
78 	#define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
79 };
80 
81 struct rx_bd {
82 	__le32 rx_bd_len_flags_type;
83 	#define RX_BD_TYPE					(0x3f << 0)
84 	 #define RX_BD_TYPE_RX_PACKET_BD			 0x4
85 	 #define RX_BD_TYPE_RX_BUFFER_BD			 0x5
86 	 #define RX_BD_TYPE_RX_AGG_BD				 0x6
87 	 #define RX_BD_TYPE_16B_BD_SIZE				 (0 << 4)
88 	 #define RX_BD_TYPE_32B_BD_SIZE				 (1 << 4)
89 	 #define RX_BD_TYPE_48B_BD_SIZE				 (2 << 4)
90 	 #define RX_BD_TYPE_64B_BD_SIZE				 (3 << 4)
91 	#define RX_BD_FLAGS_SOP					(1 << 6)
92 	#define RX_BD_FLAGS_EOP					(1 << 7)
93 	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
94 	 #define RX_BD_FLAGS_1_BUFFER_PACKET			 (0 << 8)
95 	 #define RX_BD_FLAGS_2_BUFFER_PACKET			 (1 << 8)
96 	 #define RX_BD_FLAGS_3_BUFFER_PACKET			 (2 << 8)
97 	 #define RX_BD_FLAGS_4_BUFFER_PACKET			 (3 << 8)
98 	#define RX_BD_LEN					(0xffff << 16)
99 	 #define RX_BD_LEN_SHIFT				 16
100 
101 	u32 rx_bd_opaque;
102 	__le64 rx_bd_haddr;
103 };
104 
105 struct tx_cmp {
106 	__le32 tx_cmp_flags_type;
107 	#define CMP_TYPE					(0x3f << 0)
108 	 #define CMP_TYPE_TX_L2_CMP				 0
109 	 #define CMP_TYPE_RX_L2_CMP				 17
110 	 #define CMP_TYPE_RX_AGG_CMP				 18
111 	 #define CMP_TYPE_RX_L2_TPA_START_CMP			 19
112 	 #define CMP_TYPE_RX_L2_TPA_END_CMP			 21
113 	 #define CMP_TYPE_STATUS_CMP				 32
114 	 #define CMP_TYPE_REMOTE_DRIVER_REQ			 34
115 	 #define CMP_TYPE_REMOTE_DRIVER_RESP			 36
116 	 #define CMP_TYPE_ERROR_STATUS				 48
117 	 #define CMPL_BASE_TYPE_STAT_EJECT			 0x1aUL
118 	 #define CMPL_BASE_TYPE_HWRM_DONE			 0x20UL
119 	 #define CMPL_BASE_TYPE_HWRM_FWD_REQ			 0x22UL
120 	 #define CMPL_BASE_TYPE_HWRM_FWD_RESP			 0x24UL
121 	 #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT		 0x2eUL
122 
123 	#define TX_CMP_FLAGS_ERROR				(1 << 6)
124 	#define TX_CMP_FLAGS_PUSH				(1 << 7)
125 
126 	u32 tx_cmp_opaque;
127 	__le32 tx_cmp_errors_v;
128 	#define TX_CMP_V					(1 << 0)
129 	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
130 	 #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		 0
131 	 #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		 2
132 	 #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG	 4
133 	 #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		 5
134 	 #define TX_CMP_ERRORS_ZERO_LENGTH_PKT			 (1 << 4)
135 	 #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			 (1 << 5)
136 	 #define TX_CMP_ERRORS_DMA_ERROR			 (1 << 6)
137 	 #define TX_CMP_ERRORS_HINT_TOO_SHORT			 (1 << 7)
138 
139 	__le32 tx_cmp_unused_3;
140 };
141 
142 struct rx_cmp {
143 	__le32 rx_cmp_len_flags_type;
144 	#define RX_CMP_CMP_TYPE					(0x3f << 0)
145 	#define RX_CMP_FLAGS_ERROR				(1 << 6)
146 	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
147 	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
148 	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
149 	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
150 	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
151 	 #define RX_CMP_FLAGS_ITYPE_IP				 (1 << 12)
152 	 #define RX_CMP_FLAGS_ITYPE_TCP				 (2 << 12)
153 	 #define RX_CMP_FLAGS_ITYPE_UDP				 (3 << 12)
154 	 #define RX_CMP_FLAGS_ITYPE_FCOE			 (4 << 12)
155 	 #define RX_CMP_FLAGS_ITYPE_ROCE			 (5 << 12)
156 	 #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS			 (8 << 12)
157 	 #define RX_CMP_FLAGS_ITYPE_PTP_W_TS			 (9 << 12)
158 	#define RX_CMP_LEN					(0xffff << 16)
159 	 #define RX_CMP_LEN_SHIFT				 16
160 
161 	u32 rx_cmp_opaque;
162 	__le32 rx_cmp_misc_v1;
163 	#define RX_CMP_V1					(1 << 0)
164 	#define RX_CMP_AGG_BUFS					(0x1f << 1)
165 	 #define RX_CMP_AGG_BUFS_SHIFT				 1
166 	#define RX_CMP_RSS_HASH_TYPE				(0x7f << 9)
167 	 #define RX_CMP_RSS_HASH_TYPE_SHIFT			 9
168 	#define RX_CMP_PAYLOAD_OFFSET				(0xff << 16)
169 	 #define RX_CMP_PAYLOAD_OFFSET_SHIFT			 16
170 
171 	__le32 rx_cmp_rss_hash;
172 };
173 
174 #define RX_CMP_HASH_VALID(rxcmp)				\
175 	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
176 
177 #define RSS_PROFILE_ID_MASK	0x1f
178 
179 #define RX_CMP_HASH_TYPE(rxcmp)					\
180 	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
181 	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
182 
183 struct rx_cmp_ext {
184 	__le32 rx_cmp_flags2;
185 	#define RX_CMP_FLAGS2_IP_CS_CALC			0x1
186 	#define RX_CMP_FLAGS2_L4_CS_CALC			(0x1 << 1)
187 	#define RX_CMP_FLAGS2_T_IP_CS_CALC			(0x1 << 2)
188 	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)
189 	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)
190 	__le32 rx_cmp_meta_data;
191 	#define RX_CMP_FLAGS2_METADATA_TCI_MASK			0xffff
192 	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff
193 	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000
194 	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16
195 	__le32 rx_cmp_cfa_code_errors_v2;
196 	#define RX_CMP_V					(1 << 0)
197 	#define RX_CMPL_ERRORS_MASK				(0x7fff << 1)
198 	 #define RX_CMPL_ERRORS_SFT				 1
199 	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK		(0x7 << 1)
200 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER		 (0x0 << 1)
201 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT	 (0x1 << 1)
202 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
203 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT		 (0x3 << 1)
204 	#define RX_CMPL_ERRORS_IP_CS_ERROR			(0x1 << 4)
205 	#define RX_CMPL_ERRORS_L4_CS_ERROR			(0x1 << 5)
206 	#define RX_CMPL_ERRORS_T_IP_CS_ERROR			(0x1 << 6)
207 	#define RX_CMPL_ERRORS_T_L4_CS_ERROR			(0x1 << 7)
208 	#define RX_CMPL_ERRORS_CRC_ERROR			(0x1 << 8)
209 	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK			(0x7 << 9)
210 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR		 (0x0 << 9)
211 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION	 (0x1 << 9)
212 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN	 (0x2 << 9)
213 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR	 (0x3 << 9)
214 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR	 (0x4 << 9)
215 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR	 (0x5 << 9)
216 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL	 (0x6 << 9)
217 	#define RX_CMPL_ERRORS_PKT_ERROR_MASK			(0xf << 12)
218 	 #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR		 (0x0 << 12)
219 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION	 (0x1 << 12)
220 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN	 (0x2 << 12)
221 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL		 (0x3 << 12)
222 	 #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR	 (0x4 << 12)
223 	 #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR	 (0x5 << 12)
224 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN	 (0x6 << 12)
225 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
226 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN	 (0x8 << 12)
227 
228 	#define RX_CMPL_CFA_CODE_MASK				(0xffff << 16)
229 	 #define RX_CMPL_CFA_CODE_SFT				 16
230 
231 	__le32 rx_cmp_unused3;
232 };
233 
234 #define RX_CMP_L2_ERRORS						\
235 	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
236 
237 #define RX_CMP_L4_CS_BITS						\
238 	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
239 
240 #define RX_CMP_L4_CS_ERR_BITS						\
241 	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
242 
243 #define RX_CMP_L4_CS_OK(rxcmp1)						\
244 	    (((rxcmp1)->rx_cmp_flags2 &	RX_CMP_L4_CS_BITS) &&		\
245 	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
246 
247 #define RX_CMP_ENCAP(rxcmp1)						\
248 	    ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &			\
249 	     RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
250 
251 #define RX_CMP_CFA_CODE(rxcmpl1)					\
252 	((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) &		\
253 	  RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
254 
255 struct rx_agg_cmp {
256 	__le32 rx_agg_cmp_len_flags_type;
257 	#define RX_AGG_CMP_TYPE					(0x3f << 0)
258 	#define RX_AGG_CMP_LEN					(0xffff << 16)
259 	 #define RX_AGG_CMP_LEN_SHIFT				 16
260 	u32 rx_agg_cmp_opaque;
261 	__le32 rx_agg_cmp_v;
262 	#define RX_AGG_CMP_V					(1 << 0)
263 	__le32 rx_agg_cmp_unused;
264 };
265 
266 struct rx_tpa_start_cmp {
267 	__le32 rx_tpa_start_cmp_len_flags_type;
268 	#define RX_TPA_START_CMP_TYPE				(0x3f << 0)
269 	#define RX_TPA_START_CMP_FLAGS				(0x3ff << 6)
270 	 #define RX_TPA_START_CMP_FLAGS_SHIFT			 6
271 	#define RX_TPA_START_CMP_FLAGS_PLACEMENT		(0x7 << 7)
272 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT		 7
273 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
274 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
275 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
276 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS	 (0x6 << 7)
277 	#define RX_TPA_START_CMP_FLAGS_RSS_VALID		(0x1 << 10)
278 	#define RX_TPA_START_CMP_FLAGS_ITYPES			(0xf << 12)
279 	 #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT		 12
280 	 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP		 (0x2 << 12)
281 	#define RX_TPA_START_CMP_LEN				(0xffff << 16)
282 	 #define RX_TPA_START_CMP_LEN_SHIFT			 16
283 
284 	u32 rx_tpa_start_cmp_opaque;
285 	__le32 rx_tpa_start_cmp_misc_v1;
286 	#define RX_TPA_START_CMP_V1				(0x1 << 0)
287 	#define RX_TPA_START_CMP_RSS_HASH_TYPE			(0x7f << 9)
288 	 #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT		 9
289 	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
290 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
291 
292 	__le32 rx_tpa_start_cmp_rss_hash;
293 };
294 
295 #define TPA_START_HASH_VALID(rx_tpa_start)				\
296 	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
297 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
298 
299 #define TPA_START_HASH_TYPE(rx_tpa_start)				\
300 	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
301 	   RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
302 	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
303 
304 #define TPA_START_AGG_ID(rx_tpa_start)					\
305 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
306 	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
307 
308 struct rx_tpa_start_cmp_ext {
309 	__le32 rx_tpa_start_cmp_flags2;
310 	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC		(0x1 << 0)
311 	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC		(0x1 << 1)
312 	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC		(0x1 << 2)
313 	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
314 	#define RX_TPA_START_CMP_FLAGS2_IP_TYPE			(0x1 << 8)
315 
316 	__le32 rx_tpa_start_cmp_metadata;
317 	__le32 rx_tpa_start_cmp_cfa_code_v2;
318 	#define RX_TPA_START_CMP_V2				(0x1 << 0)
319 	#define RX_TPA_START_CMP_CFA_CODE			(0xffff << 16)
320 	 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT		 16
321 	__le32 rx_tpa_start_cmp_hdr_info;
322 };
323 
324 #define TPA_START_CFA_CODE(rx_tpa_start)				\
325 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
326 	 RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
327 
328 #define TPA_START_IS_IPV6(rx_tpa_start)				\
329 	(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &		\
330 	    cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
331 
332 struct rx_tpa_end_cmp {
333 	__le32 rx_tpa_end_cmp_len_flags_type;
334 	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
335 	#define RX_TPA_END_CMP_FLAGS				(0x3ff << 6)
336 	 #define RX_TPA_END_CMP_FLAGS_SHIFT			 6
337 	#define RX_TPA_END_CMP_FLAGS_PLACEMENT			(0x7 << 7)
338 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT		 7
339 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
340 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
341 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
342 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS		 (0x6 << 7)
343 	#define RX_TPA_END_CMP_FLAGS_RSS_VALID			(0x1 << 10)
344 	#define RX_TPA_END_CMP_FLAGS_ITYPES			(0xf << 12)
345 	 #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT		 12
346 	 #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP			 (0x2 << 12)
347 	#define RX_TPA_END_CMP_LEN				(0xffff << 16)
348 	 #define RX_TPA_END_CMP_LEN_SHIFT			 16
349 
350 	u32 rx_tpa_end_cmp_opaque;
351 	__le32 rx_tpa_end_cmp_misc_v1;
352 	#define RX_TPA_END_CMP_V1				(0x1 << 0)
353 	#define RX_TPA_END_CMP_AGG_BUFS				(0x3f << 1)
354 	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT			 1
355 	#define RX_TPA_END_CMP_TPA_SEGS				(0xff << 8)
356 	 #define RX_TPA_END_CMP_TPA_SEGS_SHIFT			 8
357 	#define RX_TPA_END_CMP_PAYLOAD_OFFSET			(0xff << 16)
358 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
359 	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
360 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
361 
362 	__le32 rx_tpa_end_cmp_tsdelta;
363 	#define RX_TPA_END_GRO_TS				(0x1 << 31)
364 };
365 
366 #define TPA_END_AGG_ID(rx_tpa_end)					\
367 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
368 	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
369 
370 #define TPA_END_TPA_SEGS(rx_tpa_end)					\
371 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
372 	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
373 
374 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
375 	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &		\
376 		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
377 
378 #define TPA_END_GRO(rx_tpa_end)						\
379 	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
380 	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
381 
382 #define TPA_END_GRO_TS(rx_tpa_end)					\
383 	(!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &			\
384 	    cpu_to_le32(RX_TPA_END_GRO_TS)))
385 
386 struct rx_tpa_end_cmp_ext {
387 	__le32 rx_tpa_end_cmp_dup_acks;
388 	#define RX_TPA_END_CMP_TPA_DUP_ACKS			(0xf << 0)
389 
390 	__le32 rx_tpa_end_cmp_seg_len;
391 	#define RX_TPA_END_CMP_TPA_SEG_LEN			(0xffff << 0)
392 
393 	__le32 rx_tpa_end_cmp_errors_v2;
394 	#define RX_TPA_END_CMP_V2				(0x1 << 0)
395 	#define RX_TPA_END_CMP_ERRORS				(0x3 << 1)
396 	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
397 
398 	u32 rx_tpa_end_cmp_start_opaque;
399 };
400 
401 #define TPA_END_ERRORS(rx_tpa_end_ext)					\
402 	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &			\
403 	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
404 
405 struct nqe_cn {
406 	__le16	type;
407 	#define NQ_CN_TYPE_MASK           0x3fUL
408 	#define NQ_CN_TYPE_SFT            0
409 	#define NQ_CN_TYPE_CQ_NOTIFICATION  0x30UL
410 	#define NQ_CN_TYPE_LAST            NQ_CN_TYPE_CQ_NOTIFICATION
411 	__le16	reserved16;
412 	__le32	cq_handle_low;
413 	__le32	v;
414 	#define NQ_CN_V     0x1UL
415 	__le32	cq_handle_high;
416 };
417 
418 #define DB_IDX_MASK						0xffffff
419 #define DB_IDX_VALID						(0x1 << 26)
420 #define DB_IRQ_DIS						(0x1 << 27)
421 #define DB_KEY_TX						(0x0 << 28)
422 #define DB_KEY_RX						(0x1 << 28)
423 #define DB_KEY_CP						(0x2 << 28)
424 #define DB_KEY_ST						(0x3 << 28)
425 #define DB_KEY_TX_PUSH						(0x4 << 28)
426 #define DB_LONG_TX_PUSH						(0x2 << 24)
427 
428 #define BNXT_MIN_ROCE_CP_RINGS	2
429 #define BNXT_MIN_ROCE_STAT_CTXS	1
430 
431 /* 64-bit doorbell */
432 #define DBR_INDEX_MASK					0x0000000000ffffffULL
433 #define DBR_XID_MASK					0x000fffff00000000ULL
434 #define DBR_XID_SFT					32
435 #define DBR_PATH_L2					(0x1ULL << 56)
436 #define DBR_TYPE_SQ					(0x0ULL << 60)
437 #define DBR_TYPE_RQ					(0x1ULL << 60)
438 #define DBR_TYPE_SRQ					(0x2ULL << 60)
439 #define DBR_TYPE_SRQ_ARM				(0x3ULL << 60)
440 #define DBR_TYPE_CQ					(0x4ULL << 60)
441 #define DBR_TYPE_CQ_ARMSE				(0x5ULL << 60)
442 #define DBR_TYPE_CQ_ARMALL				(0x6ULL << 60)
443 #define DBR_TYPE_CQ_ARMENA				(0x7ULL << 60)
444 #define DBR_TYPE_SRQ_ARMENA				(0x8ULL << 60)
445 #define DBR_TYPE_CQ_CUTOFF_ACK				(0x9ULL << 60)
446 #define DBR_TYPE_NQ					(0xaULL << 60)
447 #define DBR_TYPE_NQ_ARM					(0xbULL << 60)
448 #define DBR_TYPE_NULL					(0xfULL << 60)
449 
450 #define INVALID_HW_RING_ID	((u16)-1)
451 
452 /* The hardware supports certain page sizes.  Use the supported page sizes
453  * to allocate the rings.
454  */
455 #if (PAGE_SHIFT < 12)
456 #define BNXT_PAGE_SHIFT	12
457 #elif (PAGE_SHIFT <= 13)
458 #define BNXT_PAGE_SHIFT	PAGE_SHIFT
459 #elif (PAGE_SHIFT < 16)
460 #define BNXT_PAGE_SHIFT	13
461 #else
462 #define BNXT_PAGE_SHIFT	16
463 #endif
464 
465 #define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
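/* Examples of the selection above: PAGE_SHIFT of 12 or 13 is used as-is
 * (4 KB / 8 KB ring pages); 14 or 15 falls back to 8 KB ring pages; 16
 * and above is capped at 64 KB ring pages.
 */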
466 
467 /* The RXBD length is 16-bit so we can only support page sizes < 64K */
468 #if (PAGE_SHIFT > 15)
469 #define BNXT_RX_PAGE_SHIFT 15
470 #else
471 #define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
472 #endif
473 
474 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
475 
476 #define BNXT_MAX_MTU		9500
477 #define BNXT_MAX_PAGE_MODE_MTU	\
478 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
479 	 XDP_PACKET_HEADROOM)
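/* A rough example, assuming 4 KB pages, NET_IP_ALIGN == 2 and the usual
 * 256-byte XDP_PACKET_HEADROOM: 4096 - 18 (VLAN_ETH_HLEN) - 2 - 256 =
 * 3820 bytes.  The exact value is architecture dependent.
 */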
480 
481 #define BNXT_MIN_PKT_SIZE	52
482 
483 #define BNXT_DEFAULT_RX_RING_SIZE	511
484 #define BNXT_DEFAULT_TX_RING_SIZE	511
485 
486 #define MAX_TPA		64
487 
488 #if (BNXT_PAGE_SHIFT == 16)
489 #define MAX_RX_PAGES	1
490 #define MAX_RX_AGG_PAGES	4
491 #define MAX_TX_PAGES	1
492 #define MAX_CP_PAGES	8
493 #else
494 #define MAX_RX_PAGES	8
495 #define MAX_RX_AGG_PAGES	32
496 #define MAX_TX_PAGES	8
497 #define MAX_CP_PAGES	64
498 #endif
499 
500 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
501 #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
502 #define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
503 
504 #define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
505 #define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
506 
507 #define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
508 
509 #define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
510 #define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
511 
512 #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
513 
514 #define BNXT_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
515 #define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
516 #define BNXT_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)
517 
518 #define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
519 #define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))
520 
521 #define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
522 #define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))
523 
524 #define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
525 #define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))
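/* Worked example with 4 KB ring pages: sizeof(struct rx_bd) is 16 bytes,
 * so RX_DESC_CNT is 256 and the shift by (BNXT_PAGE_SHIFT - 4) converts a
 * ring index into a descriptor page index.  RX_RING(300) is 1 and
 * RX_IDX(300) is 44, i.e. entry 44 of the second descriptor page.
 */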
526 
527 #define TX_CMP_VALID(txcmp, raw_cons)					\
528 	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
529 	 !((raw_cons) & bp->cp_bit))
530 
531 #define RX_CMP_VALID(rxcmp1, raw_cons)					\
532 	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
533 	 !((raw_cons) & bp->cp_bit))
534 
535 #define RX_AGG_CMP_VALID(agg, raw_cons)				\
536 	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==	\
537 	 !((raw_cons) & bp->cp_bit))
538 
539 #define NQ_CMP_VALID(nqcmp, raw_cons)				\
540 	(!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
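/* The *_CMP_VALID() macros implement the alternating valid-bit scheme of
 * the completion/notification rings: bp->cp_bit corresponds to one full
 * pass over the ring, so (raw_cons & bp->cp_bit) toggles each time the
 * unmasked consumer index wraps.  An entry is treated as valid only when
 * its V bit matches the phase expected for the current pass (roughly,
 * V set on even-numbered passes and clear on odd-numbered passes).
 */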
541 
542 #define TX_CMP_TYPE(txcmp)					\
543 	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
544 
545 #define RX_CMP_TYPE(rxcmp)					\
546 	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
547 
548 #define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
549 
550 #define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
551 
552 #define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
553 
554 #define ADV_RAW_CMP(idx, n)	((idx) + (n))
555 #define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
556 #define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
557 #define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
558 
559 #define BNXT_HWRM_MAX_REQ_LEN		(bp->hwrm_max_req_len)
560 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
561 #define DFLT_HWRM_CMD_TIMEOUT		500
562 #define HWRM_CMD_TIMEOUT		(bp->hwrm_cmd_timeout)
563 #define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
564 #define HWRM_RESP_ERR_CODE_MASK		0xffff
565 #define HWRM_RESP_LEN_OFFSET		4
566 #define HWRM_RESP_LEN_MASK		0xffff0000
567 #define HWRM_RESP_LEN_SFT		16
568 #define HWRM_RESP_VALID_MASK		0xff000000
569 #define BNXT_HWRM_REQ_MAX_SIZE		128
570 #define BNXT_HWRM_REQS_PER_PAGE		(BNXT_PAGE_SIZE /	\
571 					 BNXT_HWRM_REQ_MAX_SIZE)
572 #define HWRM_SHORT_MIN_TIMEOUT		3
573 #define HWRM_SHORT_MAX_TIMEOUT		10
574 #define HWRM_SHORT_TIMEOUT_COUNTER	5
575 
576 #define HWRM_MIN_TIMEOUT		25
577 #define HWRM_MAX_TIMEOUT		40
578 
579 #define HWRM_TOTAL_TIMEOUT(n)	(((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ?	\
580 	((n) * HWRM_SHORT_MIN_TIMEOUT) :				\
581 	(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +		\
582 	 ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
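/* HWRM_TOTAL_TIMEOUT(n) sums the per-iteration poll delays: the first
 * HWRM_SHORT_TIMEOUT_COUNTER iterations use HWRM_SHORT_MIN_TIMEOUT and
 * the remaining ones use HWRM_MIN_TIMEOUT, e.g.
 * HWRM_TOTAL_TIMEOUT(7) = 5 * 3 + 2 * 25 = 65.
 */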
583 
584 #define HWRM_VALID_BIT_DELAY_USEC	150
585 
586 #define BNXT_HWRM_CHNL_CHIMP	0
587 #define BNXT_HWRM_CHNL_KONG	1
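/* HWRM requests normally target the ChiMP mailbox; when the firmware
 * advertises BNXT_FW_CAP_KONG_MB_CHNL, CFA/flow related requests are
 * steered to the Kong channel instead (see bnxt_kong_hwrm_message()
 * near the end of this file).
 */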
588 
589 #define BNXT_RX_EVENT	1
590 #define BNXT_AGG_EVENT	2
591 #define BNXT_TX_EVENT	4
592 
593 struct bnxt_sw_tx_bd {
594 	struct sk_buff		*skb;
595 	DEFINE_DMA_UNMAP_ADDR(mapping);
596 	u8			is_gso;
597 	u8			is_push;
598 	union {
599 		unsigned short		nr_frags;
600 		u16			rx_prod;
601 	};
602 };
603 
604 struct bnxt_sw_rx_bd {
605 	void			*data;
606 	u8			*data_ptr;
607 	dma_addr_t		mapping;
608 };
609 
610 struct bnxt_sw_rx_agg_bd {
611 	struct page		*page;
612 	unsigned int		offset;
613 	dma_addr_t		mapping;
614 };
615 
616 struct bnxt_ring_mem_info {
617 	int			nr_pages;
618 	int			page_size;
619 	u16			flags;
620 #define BNXT_RMEM_VALID_PTE_FLAG	1
621 #define BNXT_RMEM_RING_PTE_FLAG		2
622 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
623 
624 	u16			depth;
625 
626 	void			**pg_arr;
627 	dma_addr_t		*dma_arr;
628 
629 	__le64			*pg_tbl;
630 	dma_addr_t		pg_tbl_map;
631 
632 	int			vmem_size;
633 	void			**vmem;
634 };
635 
636 struct bnxt_ring_struct {
637 	struct bnxt_ring_mem_info	ring_mem;
638 
639 	u16			fw_ring_id; /* Ring id filled by Chimp FW */
640 	union {
641 		u16		grp_idx;
642 		u16		map_idx; /* Used by cmpl rings */
643 	};
644 	u32			handle;
645 	u8			queue_id;
646 };
647 
648 struct tx_push_bd {
649 	__le32			doorbell;
650 	__le32			tx_bd_len_flags_type;
651 	u32			tx_bd_opaque;
652 	struct tx_bd_ext	txbd2;
653 };
654 
655 struct tx_push_buffer {
656 	struct tx_push_bd	push_bd;
657 	u32			data[25];
658 };
659 
660 struct bnxt_db_info {
661 	void __iomem		*doorbell;
662 	union {
663 		u64		db_key64;
664 		u32		db_key32;
665 	};
666 };
667 
668 struct bnxt_tx_ring_info {
669 	struct bnxt_napi	*bnapi;
670 	u16			tx_prod;
671 	u16			tx_cons;
672 	u16			txq_index;
673 	struct bnxt_db_info	tx_db;
674 
675 	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];
676 	struct bnxt_sw_tx_bd	*tx_buf_ring;
677 
678 	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];
679 
680 	struct tx_push_buffer	*tx_push;
681 	dma_addr_t		tx_push_mapping;
682 	__le64			data_mapping;
683 
684 #define BNXT_DEV_STATE_CLOSING	0x1
685 	u32			dev_state;
686 
687 	struct bnxt_ring_struct	tx_ring_struct;
688 };
689 
690 #define BNXT_LEGACY_COAL_CMPL_PARAMS					\
691 	(RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN |		\
692 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX |		\
693 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET |		\
694 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE |			\
695 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR |		\
696 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
697 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR |		\
698 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
699 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
700 
701 #define BNXT_COAL_CMPL_ENABLES						\
702 	(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
703 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
704 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
705 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
706 
707 #define BNXT_COAL_CMPL_MIN_TMR_ENABLE					\
708 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
709 
710 #define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE			\
711 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
712 
713 struct bnxt_coal_cap {
714 	u32			cmpl_params;
715 	u32			nq_params;
716 	u16			num_cmpl_dma_aggr_max;
717 	u16			num_cmpl_dma_aggr_during_int_max;
718 	u16			cmpl_aggr_dma_tmr_max;
719 	u16			cmpl_aggr_dma_tmr_during_int_max;
720 	u16			int_lat_tmr_min_max;
721 	u16			int_lat_tmr_max_max;
722 	u16			num_cmpl_aggr_int_max;
723 	u16			timer_units;
724 };
725 
726 struct bnxt_coal {
727 	u16			coal_ticks;
728 	u16			coal_ticks_irq;
729 	u16			coal_bufs;
730 	u16			coal_bufs_irq;
731 			/* RING_IDLE enabled when coal ticks < idle_thresh */
732 	u16			idle_thresh;
733 	u8			bufs_per_record;
734 	u8			budget;
735 };
736 
737 struct bnxt_tpa_info {
738 	void			*data;
739 	u8			*data_ptr;
740 	dma_addr_t		mapping;
741 	u16			len;
742 	unsigned short		gso_type;
743 	u32			flags2;
744 	u32			metadata;
745 	enum pkt_hash_types	hash_type;
746 	u32			rss_hash;
747 	u32			hdr_info;
748 
749 #define BNXT_TPA_L4_SIZE(hdr_info)	\
750 	(((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
751 
752 #define BNXT_TPA_INNER_L3_OFF(hdr_info)	\
753 	(((hdr_info) >> 18) & 0x1ff)
754 
755 #define BNXT_TPA_INNER_L2_OFF(hdr_info)	\
756 	(((hdr_info) >> 9) & 0x1ff)
757 
758 #define BNXT_TPA_OUTER_L3_OFF(hdr_info)	\
759 	((hdr_info) & 0x1ff)
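	/* Layout of hdr_info as decoded by the macros above (a summary of
	 * the macros, not an independent spec): bits 31:27 inner L4 header
	 * size (treated as 32 when the field is zero), bits 26:18 inner L3
	 * offset, bits 17:9 inner L2 offset, bits 8:0 outer L3 offset.
	 */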
760 
761 	u16			cfa_code; /* cfa_code in TPA start compl */
762 };
763 
764 struct bnxt_rx_ring_info {
765 	struct bnxt_napi	*bnapi;
766 	u16			rx_prod;
767 	u16			rx_agg_prod;
768 	u16			rx_sw_agg_prod;
769 	u16			rx_next_cons;
770 	struct bnxt_db_info	rx_db;
771 	struct bnxt_db_info	rx_agg_db;
772 
773 	struct bpf_prog		*xdp_prog;
774 
775 	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];
776 	struct bnxt_sw_rx_bd	*rx_buf_ring;
777 
778 	struct rx_bd		*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
779 	struct bnxt_sw_rx_agg_bd	*rx_agg_ring;
780 
781 	unsigned long		*rx_agg_bmap;
782 	u16			rx_agg_bmap_size;
783 
784 	struct page		*rx_page;
785 	unsigned int		rx_page_offset;
786 
787 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
788 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
789 
790 	struct bnxt_tpa_info	*rx_tpa;
791 
792 	struct bnxt_ring_struct	rx_ring_struct;
793 	struct bnxt_ring_struct	rx_agg_ring_struct;
794 	struct xdp_rxq_info	xdp_rxq;
795 };
796 
797 struct bnxt_cp_ring_info {
798 	struct bnxt_napi	*bnapi;
799 	u32			cp_raw_cons;
800 	struct bnxt_db_info	cp_db;
801 
802 	u8			had_work_done:1;
803 	u8			has_more_work:1;
804 
805 	u32			last_cp_raw_cons;
806 
807 	struct bnxt_coal	rx_ring_coal;
808 	u64			rx_packets;
809 	u64			rx_bytes;
810 	u64			event_ctr;
811 
812 	struct net_dim		dim;
813 
814 	union {
815 		struct tx_cmp	*cp_desc_ring[MAX_CP_PAGES];
816 		struct nqe_cn	*nq_desc_ring[MAX_CP_PAGES];
817 	};
818 
819 	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
820 
821 	struct ctx_hw_stats	*hw_stats;
822 	dma_addr_t		hw_stats_map;
823 	u32			hw_stats_ctx_id;
824 	u64			rx_l4_csum_errors;
825 	u64			missed_irqs;
826 
827 	struct bnxt_ring_struct	cp_ring_struct;
828 
829 	struct bnxt_cp_ring_info *cp_ring_arr[2];
830 #define BNXT_RX_HDL	0
831 #define BNXT_TX_HDL	1
832 };
833 
834 struct bnxt_napi {
835 	struct napi_struct	napi;
836 	struct bnxt		*bp;
837 
838 	int			index;
839 	struct bnxt_cp_ring_info	cp_ring;
840 	struct bnxt_rx_ring_info	*rx_ring;
841 	struct bnxt_tx_ring_info	*tx_ring;
842 
843 	void			(*tx_int)(struct bnxt *, struct bnxt_napi *,
844 					  int);
845 	int			tx_pkts;
846 	u8			events;
847 
848 	u32			flags;
849 #define BNXT_NAPI_FLAG_XDP	0x1
850 
851 	bool			in_reset;
852 };
853 
854 struct bnxt_irq {
855 	irq_handler_t	handler;
856 	unsigned int	vector;
857 	u8		requested:1;
858 	u8		have_cpumask:1;
859 	char		name[IFNAMSIZ + 2];
860 	cpumask_var_t	cpu_mask;
861 };
862 
863 #define HWRM_RING_ALLOC_TX	0x1
864 #define HWRM_RING_ALLOC_RX	0x2
865 #define HWRM_RING_ALLOC_AGG	0x4
866 #define HWRM_RING_ALLOC_CMPL	0x8
867 #define HWRM_RING_ALLOC_NQ	0x10
868 
869 #define INVALID_STATS_CTX_ID	-1
870 
871 struct bnxt_ring_grp_info {
872 	u16	fw_stats_ctx;
873 	u16	fw_grp_id;
874 	u16	rx_fw_ring_id;
875 	u16	agg_fw_ring_id;
876 	u16	cp_fw_ring_id;
877 };
878 
879 struct bnxt_vnic_info {
880 	u16		fw_vnic_id; /* returned by Chimp during alloc */
881 #define BNXT_MAX_CTX_PER_VNIC	8
882 	u16		fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
883 	u16		fw_l2_ctx_id;
884 #define BNXT_MAX_UC_ADDRS	4
885 	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
886 				/* index 0 always dev_addr */
887 	u16		uc_filter_count;
888 	u8		*uc_list;
889 
890 	u16		*fw_grp_ids;
891 	dma_addr_t	rss_table_dma_addr;
892 	__le16		*rss_table;
893 	dma_addr_t	rss_hash_key_dma_addr;
894 	u64		*rss_hash_key;
895 	u32		rx_mask;
896 
897 	u8		*mc_list;
898 	int		mc_list_size;
899 	int		mc_list_count;
900 	dma_addr_t	mc_list_mapping;
901 #define BNXT_MAX_MC_ADDRS	16
902 
903 	u32		flags;
904 #define BNXT_VNIC_RSS_FLAG	1
905 #define BNXT_VNIC_RFS_FLAG	2
906 #define BNXT_VNIC_MCAST_FLAG	4
907 #define BNXT_VNIC_UCAST_FLAG	8
908 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
909 };
910 
911 struct bnxt_hw_resc {
912 	u16	min_rsscos_ctxs;
913 	u16	max_rsscos_ctxs;
914 	u16	min_cp_rings;
915 	u16	max_cp_rings;
916 	u16	resv_cp_rings;
917 	u16	min_tx_rings;
918 	u16	max_tx_rings;
919 	u16	resv_tx_rings;
920 	u16	max_tx_sch_inputs;
921 	u16	min_rx_rings;
922 	u16	max_rx_rings;
923 	u16	resv_rx_rings;
924 	u16	min_hw_ring_grps;
925 	u16	max_hw_ring_grps;
926 	u16	resv_hw_ring_grps;
927 	u16	min_l2_ctxs;
928 	u16	max_l2_ctxs;
929 	u16	min_vnics;
930 	u16	max_vnics;
931 	u16	resv_vnics;
932 	u16	min_stat_ctxs;
933 	u16	max_stat_ctxs;
934 	u16	resv_stat_ctxs;
935 	u16	max_nqs;
936 	u16	max_irqs;
937 	u16	resv_irqs;
938 };
939 
940 #if defined(CONFIG_BNXT_SRIOV)
941 struct bnxt_vf_info {
942 	u16	fw_fid;
943 	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
944 	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
945 					 * stored by PF.
946 					 */
947 	u16	vlan;
948 	u16	func_qcfg_flags;
949 	u32	flags;
950 #define BNXT_VF_QOS		0x1
951 #define BNXT_VF_SPOOFCHK	0x2
952 #define BNXT_VF_LINK_FORCED	0x4
953 #define BNXT_VF_LINK_UP		0x8
954 #define BNXT_VF_TRUST		0x10
955 	u32	func_flags; /* func cfg flags */
956 	u32	min_tx_rate;
957 	u32	max_tx_rate;
958 	void	*hwrm_cmd_req_addr;
959 	dma_addr_t	hwrm_cmd_req_dma_addr;
960 };
961 #endif
962 
963 struct bnxt_pf_info {
964 #define BNXT_FIRST_PF_FID	1
965 #define BNXT_FIRST_VF_FID	128
966 	u16	fw_fid;
967 	u16	port_id;
968 	u8	mac_addr[ETH_ALEN];
969 	u32	first_vf_id;
970 	u16	active_vfs;
971 	u16	max_vfs;
972 	u32	max_encap_records;
973 	u32	max_decap_records;
974 	u32	max_tx_em_flows;
975 	u32	max_tx_wm_flows;
976 	u32	max_rx_em_flows;
977 	u32	max_rx_wm_flows;
978 	unsigned long	*vf_event_bmap;
979 	u16	hwrm_cmd_req_pages;
980 	u8	vf_resv_strategy;
981 #define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
982 #define BNXT_VF_RESV_STRATEGY_MINIMAL	1
983 #define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC	2
984 	void			*hwrm_cmd_req_addr[4];
985 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
986 	struct bnxt_vf_info	*vf;
987 };
988 
989 struct bnxt_ntuple_filter {
990 	struct hlist_node	hash;
991 	u8			dst_mac_addr[ETH_ALEN];
992 	u8			src_mac_addr[ETH_ALEN];
993 	struct flow_keys	fkeys;
994 	__le64			filter_id;
995 	u16			sw_id;
996 	u8			l2_fltr_idx;
997 	u16			rxq;
998 	u32			flow_id;
999 	unsigned long		state;
1000 #define BNXT_FLTR_VALID		0
1001 #define BNXT_FLTR_UPDATE	1
1002 };
1003 
1004 struct bnxt_link_info {
1005 	u8			phy_type;
1006 	u8			media_type;
1007 	u8			transceiver;
1008 	u8			phy_addr;
1009 	u8			phy_link_status;
1010 #define BNXT_LINK_NO_LINK	PORT_PHY_QCFG_RESP_LINK_NO_LINK
1011 #define BNXT_LINK_SIGNAL	PORT_PHY_QCFG_RESP_LINK_SIGNAL
1012 #define BNXT_LINK_LINK		PORT_PHY_QCFG_RESP_LINK_LINK
1013 	u8			wire_speed;
1014 	u8			loop_back;
1015 	u8			link_up;
1016 	u8			duplex;
1017 #define BNXT_LINK_DUPLEX_HALF	PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
1018 #define BNXT_LINK_DUPLEX_FULL	PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
1019 	u8			pause;
1020 #define BNXT_LINK_PAUSE_TX	PORT_PHY_QCFG_RESP_PAUSE_TX
1021 #define BNXT_LINK_PAUSE_RX	PORT_PHY_QCFG_RESP_PAUSE_RX
1022 #define BNXT_LINK_PAUSE_BOTH	(PORT_PHY_QCFG_RESP_PAUSE_RX | \
1023 				 PORT_PHY_QCFG_RESP_PAUSE_TX)
1024 	u8			lp_pause;
1025 	u8			auto_pause_setting;
1026 	u8			force_pause_setting;
1027 	u8			duplex_setting;
1028 	u8			auto_mode;
1029 #define BNXT_AUTO_MODE(mode)	((mode) > BNXT_LINK_AUTO_NONE && \
1030 				 (mode) <= BNXT_LINK_AUTO_MSK)
1031 #define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
1032 #define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
1033 #define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
1034 #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
1035 #define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
1036 #define PHY_VER_LEN		3
1037 	u8			phy_ver[PHY_VER_LEN];
1038 	u16			link_speed;
1039 #define BNXT_LINK_SPEED_100MB	PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
1040 #define BNXT_LINK_SPEED_1GB	PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
1041 #define BNXT_LINK_SPEED_2GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
1042 #define BNXT_LINK_SPEED_2_5GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
1043 #define BNXT_LINK_SPEED_10GB	PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
1044 #define BNXT_LINK_SPEED_20GB	PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
1045 #define BNXT_LINK_SPEED_25GB	PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
1046 #define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
1047 #define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
1048 #define BNXT_LINK_SPEED_100GB	PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
1049 	u16			support_speeds;
1050 	u16			auto_link_speeds;	/* fw adv setting */
1051 #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
1052 #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
1053 #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
1054 #define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
1055 #define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
1056 #define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
1057 #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
1058 #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
1059 #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
1060 #define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
1061 	u16			support_auto_speeds;
1062 	u16			lp_auto_link_speeds;
1063 	u16			force_link_speed;
1064 	u32			preemphasis;
1065 	u8			module_status;
1066 	u16			fec_cfg;
1067 #define BNXT_FEC_AUTONEG	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
1068 #define BNXT_FEC_ENC_BASE_R	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
1069 #define BNXT_FEC_ENC_RS		PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
1070 
1071 	/* copy of requested setting from ethtool cmd */
1072 	u8			autoneg;
1073 #define BNXT_AUTONEG_SPEED		1
1074 #define BNXT_AUTONEG_FLOW_CTRL		2
1075 	u8			req_duplex;
1076 	u8			req_flow_ctrl;
1077 	u16			req_link_speed;
1078 	u16			advertising;	/* user adv setting */
1079 	bool			force_link_chng;
1080 
1081 	bool			phy_retry;
1082 	unsigned long		phy_retry_expires;
1083 
1084 	/* a copy of phy_qcfg output used to report link
1085 	 * info to VF
1086 	 */
1087 	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
1088 };
1089 
1090 #define BNXT_MAX_QUEUE	8
1091 
1092 struct bnxt_queue_info {
1093 	u8	queue_id;
1094 	u8	queue_profile;
1095 };
1096 
1097 #define BNXT_MAX_LED			4
1098 
1099 struct bnxt_led_info {
1100 	u8	led_id;
1101 	u8	led_type;
1102 	u8	led_group_id;
1103 	u8	unused;
1104 	__le16	led_state_caps;
1105 #define BNXT_LED_ALT_BLINK_CAP(x)	((x) &	\
1106 	cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
1107 
1108 	__le16	led_color_caps;
1109 };
1110 
1111 #define BNXT_MAX_TEST	8
1112 
1113 struct bnxt_test_info {
1114 	u8 offline_mask;
1115 	u8 flags;
1116 #define BNXT_TEST_FL_EXT_LPBK	0x1
1117 	u16 timeout;
1118 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
1119 };
1120 
1121 #define BNXT_GRCPF_REG_CHIMP_COMM		0x0
1122 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
1123 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT		0x400
1124 #define BNXT_CAG_REG_LEGACY_INT_STATUS		0x4014
1125 #define BNXT_CAG_REG_BASE			0x300000
1126 
1127 #define BNXT_GRCPF_REG_KONG_COMM		0xA00
1128 #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER	0xB00
1129 
1130 struct bnxt_tc_flow_stats {
1131 	u64		packets;
1132 	u64		bytes;
1133 };
1134 
1135 struct bnxt_tc_info {
1136 	bool				enabled;
1137 
1138 	/* hash table to store TC offloaded flows */
1139 	struct rhashtable		flow_table;
1140 	struct rhashtable_params	flow_ht_params;
1141 
1142 	/* hash table to store L2 keys of TC flows */
1143 	struct rhashtable		l2_table;
1144 	struct rhashtable_params	l2_ht_params;
1145 	/* hash table to store L2 keys for TC tunnel decap */
1146 	struct rhashtable		decap_l2_table;
1147 	struct rhashtable_params	decap_l2_ht_params;
1148 	/* hash table to store tunnel decap entries */
1149 	struct rhashtable		decap_table;
1150 	struct rhashtable_params	decap_ht_params;
1151 	/* hash table to store tunnel encap entries */
1152 	struct rhashtable		encap_table;
1153 	struct rhashtable_params	encap_ht_params;
1154 
1155 	/* lock to atomically add/del an l2 node when a flow is
1156 	 * added or deleted.
1157 	 */
1158 	struct mutex			lock;
1159 
1160 	/* Fields used for batching stats query */
1161 	struct rhashtable_iter		iter;
1162 #define BNXT_FLOW_STATS_BATCH_MAX	10
1163 	struct bnxt_tc_stats_batch {
1164 		void			  *flow_node;
1165 		struct bnxt_tc_flow_stats hw_stats;
1166 	} stats_batch[BNXT_FLOW_STATS_BATCH_MAX];
1167 
1168 	/* Stat counter mask (width) */
1169 	u64				bytes_mask;
1170 	u64				packets_mask;
1171 };
1172 
1173 struct bnxt_vf_rep_stats {
1174 	u64			packets;
1175 	u64			bytes;
1176 	u64			dropped;
1177 };
1178 
1179 struct bnxt_vf_rep {
1180 	struct bnxt			*bp;
1181 	struct net_device		*dev;
1182 	struct metadata_dst		*dst;
1183 	u16				vf_idx;
1184 	u16				tx_cfa_action;
1185 	u16				rx_cfa_code;
1186 
1187 	struct bnxt_vf_rep_stats	rx_stats;
1188 	struct bnxt_vf_rep_stats	tx_stats;
1189 };
1190 
1191 #define PTU_PTE_VALID             0x1UL
1192 #define PTU_PTE_LAST              0x2UL
1193 #define PTU_PTE_NEXT_TO_LAST      0x4UL
1194 
1195 #define MAX_CTX_PAGES	(BNXT_PAGE_SIZE / 8)
1196 #define MAX_CTX_TOTAL_PAGES	(MAX_CTX_PAGES * MAX_CTX_PAGES)
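/* Each context page holds BNXT_PAGE_SIZE / 8 64-bit PTEs, so a two-level
 * page table can reference up to MAX_CTX_PAGES * MAX_CTX_PAGES data pages
 * (512 * 512 = 262144 pages with 4 KB ring pages).
 */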
1197 
1198 struct bnxt_ctx_pg_info {
1199 	u32		entries;
1200 	u32		nr_pages;
1201 	void		*ctx_pg_arr[MAX_CTX_PAGES];
1202 	dma_addr_t	ctx_dma_arr[MAX_CTX_PAGES];
1203 	struct bnxt_ring_mem_info ring_mem;
1204 	struct bnxt_ctx_pg_info **ctx_pg_tbl;
1205 };
1206 
1207 struct bnxt_ctx_mem_info {
1208 	u32	qp_max_entries;
1209 	u16	qp_min_qp1_entries;
1210 	u16	qp_max_l2_entries;
1211 	u16	qp_entry_size;
1212 	u16	srq_max_l2_entries;
1213 	u32	srq_max_entries;
1214 	u16	srq_entry_size;
1215 	u16	cq_max_l2_entries;
1216 	u32	cq_max_entries;
1217 	u16	cq_entry_size;
1218 	u16	vnic_max_vnic_entries;
1219 	u16	vnic_max_ring_table_entries;
1220 	u16	vnic_entry_size;
1221 	u32	stat_max_entries;
1222 	u16	stat_entry_size;
1223 	u16	tqm_entry_size;
1224 	u32	tqm_min_entries_per_ring;
1225 	u32	tqm_max_entries_per_ring;
1226 	u32	mrav_max_entries;
1227 	u16	mrav_entry_size;
1228 	u16	tim_entry_size;
1229 	u32	tim_max_entries;
1230 	u8	tqm_entries_multiple;
1231 
1232 	u32	flags;
1233 	#define BNXT_CTX_FLAG_INITED	0x01
1234 
1235 	struct bnxt_ctx_pg_info qp_mem;
1236 	struct bnxt_ctx_pg_info srq_mem;
1237 	struct bnxt_ctx_pg_info cq_mem;
1238 	struct bnxt_ctx_pg_info vnic_mem;
1239 	struct bnxt_ctx_pg_info stat_mem;
1240 	struct bnxt_ctx_pg_info mrav_mem;
1241 	struct bnxt_ctx_pg_info tim_mem;
1242 	struct bnxt_ctx_pg_info *tqm_mem[9];
1243 };
1244 
1245 struct bnxt {
1246 	void __iomem		*bar0;
1247 	void __iomem		*bar1;
1248 	void __iomem		*bar2;
1249 
1250 	u32			reg_base;
1251 	u16			chip_num;
1252 #define CHIP_NUM_57301		0x16c8
1253 #define CHIP_NUM_57302		0x16c9
1254 #define CHIP_NUM_57304		0x16ca
1255 #define CHIP_NUM_58700		0x16cd
1256 #define CHIP_NUM_57402		0x16d0
1257 #define CHIP_NUM_57404		0x16d1
1258 #define CHIP_NUM_57406		0x16d2
1259 #define CHIP_NUM_57407		0x16d5
1260 
1261 #define CHIP_NUM_57311		0x16ce
1262 #define CHIP_NUM_57312		0x16cf
1263 #define CHIP_NUM_57314		0x16df
1264 #define CHIP_NUM_57317		0x16e0
1265 #define CHIP_NUM_57412		0x16d6
1266 #define CHIP_NUM_57414		0x16d7
1267 #define CHIP_NUM_57416		0x16d8
1268 #define CHIP_NUM_57417		0x16d9
1269 #define CHIP_NUM_57412L		0x16da
1270 #define CHIP_NUM_57414L		0x16db
1271 
1272 #define CHIP_NUM_5745X		0xd730
1273 
1274 #define CHIP_NUM_57500		0x1750
1275 
1276 #define CHIP_NUM_58802		0xd802
1277 #define CHIP_NUM_58804		0xd804
1278 #define CHIP_NUM_58808		0xd808
1279 
1280 #define BNXT_CHIP_NUM_5730X(chip_num)		\
1281 	((chip_num) >= CHIP_NUM_57301 &&	\
1282 	 (chip_num) <= CHIP_NUM_57304)
1283 
1284 #define BNXT_CHIP_NUM_5740X(chip_num)		\
1285 	(((chip_num) >= CHIP_NUM_57402 &&	\
1286 	  (chip_num) <= CHIP_NUM_57406) ||	\
1287 	 (chip_num) == CHIP_NUM_57407)
1288 
1289 #define BNXT_CHIP_NUM_5731X(chip_num)		\
1290 	((chip_num) == CHIP_NUM_57311 ||	\
1291 	 (chip_num) == CHIP_NUM_57312 ||	\
1292 	 (chip_num) == CHIP_NUM_57314 ||	\
1293 	 (chip_num) == CHIP_NUM_57317)
1294 
1295 #define BNXT_CHIP_NUM_5741X(chip_num)		\
1296 	((chip_num) >= CHIP_NUM_57412 &&	\
1297 	 (chip_num) <= CHIP_NUM_57414L)
1298 
1299 #define BNXT_CHIP_NUM_58700(chip_num)		\
1300 	 ((chip_num) == CHIP_NUM_58700)
1301 
1302 #define BNXT_CHIP_NUM_5745X(chip_num)		\
1303 	 ((chip_num) == CHIP_NUM_5745X)
1304 
1305 #define BNXT_CHIP_NUM_57X0X(chip_num)		\
1306 	(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
1307 
1308 #define BNXT_CHIP_NUM_57X1X(chip_num)		\
1309 	(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
1310 
1311 #define BNXT_CHIP_NUM_588XX(chip_num)		\
1312 	((chip_num) == CHIP_NUM_58802 ||	\
1313 	 (chip_num) == CHIP_NUM_58804 ||        \
1314 	 (chip_num) == CHIP_NUM_58808)
1315 
1316 	struct net_device	*dev;
1317 	struct pci_dev		*pdev;
1318 
1319 	atomic_t		intr_sem;
1320 
1321 	u32			flags;
1322 	#define BNXT_FLAG_CHIP_P5	0x1
1323 	#define BNXT_FLAG_VF		0x2
1324 	#define BNXT_FLAG_LRO		0x4
1325 #ifdef CONFIG_INET
1326 	#define BNXT_FLAG_GRO		0x8
1327 #else
1328 	/* Cannot support hardware GRO if CONFIG_INET is not set */
1329 	#define BNXT_FLAG_GRO		0x0
1330 #endif
1331 	#define BNXT_FLAG_TPA		(BNXT_FLAG_LRO | BNXT_FLAG_GRO)
1332 	#define BNXT_FLAG_JUMBO		0x10
1333 	#define BNXT_FLAG_STRIP_VLAN	0x20
1334 	#define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
1335 					 BNXT_FLAG_LRO)
1336 	#define BNXT_FLAG_USING_MSIX	0x40
1337 	#define BNXT_FLAG_MSIX_CAP	0x80
1338 	#define BNXT_FLAG_RFS		0x100
1339 	#define BNXT_FLAG_SHARED_RINGS	0x200
1340 	#define BNXT_FLAG_PORT_STATS	0x400
1341 	#define BNXT_FLAG_UDP_RSS_CAP	0x800
1342 	#define BNXT_FLAG_EEE_CAP	0x1000
1343 	#define BNXT_FLAG_NEW_RSS_CAP	0x2000
1344 	#define BNXT_FLAG_WOL_CAP	0x4000
1345 	#define BNXT_FLAG_ROCEV1_CAP	0x8000
1346 	#define BNXT_FLAG_ROCEV2_CAP	0x10000
1347 	#define BNXT_FLAG_ROCE_CAP	(BNXT_FLAG_ROCEV1_CAP |	\
1348 					 BNXT_FLAG_ROCEV2_CAP)
1349 	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
1350 	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
1351 	#define BNXT_FLAG_MULTI_HOST	0x100000
1352 	#define BNXT_FLAG_DOUBLE_DB	0x400000
1353 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
1354 	#define BNXT_FLAG_DIM		0x2000000
1355 	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
1356 	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
1357 
1358 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
1359 					    BNXT_FLAG_RFS |		\
1360 					    BNXT_FLAG_STRIP_VLAN)
1361 
1362 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
1363 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
1364 #define BNXT_NPAR(bp)		((bp)->port_partition_type)
1365 #define BNXT_MH(bp)		((bp)->flags & BNXT_FLAG_MULTI_HOST)
1366 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
1367 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
1368 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
1369 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
1370 				 !(bp->flags & BNXT_FLAG_CHIP_P5))
1371 
1372 /* Chip class phase 5 */
1373 #define BNXT_CHIP_P5(bp)			\
1374 	((bp)->chip_num == CHIP_NUM_57500)
1375 
1376 /* Chip class phase 4.x */
1377 #define BNXT_CHIP_P4(bp)			\
1378 	(BNXT_CHIP_NUM_57X1X((bp)->chip_num) ||	\
1379 	 BNXT_CHIP_NUM_5745X((bp)->chip_num) ||	\
1380 	 BNXT_CHIP_NUM_588XX((bp)->chip_num) ||	\
1381 	 (BNXT_CHIP_NUM_58700((bp)->chip_num) &&	\
1382 	  !BNXT_CHIP_TYPE_NITRO_A0(bp)))
1383 
1384 #define BNXT_CHIP_P4_PLUS(bp)			\
1385 	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
1386 
1387 	struct bnxt_en_dev	*edev;
1388 	struct bnxt_en_dev *	(*ulp_probe)(struct net_device *);
1389 
1390 	struct bnxt_napi	**bnapi;
1391 
1392 	struct bnxt_rx_ring_info	*rx_ring;
1393 	struct bnxt_tx_ring_info	*tx_ring;
1394 	u16			*tx_ring_map;
1395 
1396 	struct sk_buff *	(*gro_func)(struct bnxt_tpa_info *, int, int,
1397 					    struct sk_buff *);
1398 
1399 	struct sk_buff *	(*rx_skb_func)(struct bnxt *,
1400 					       struct bnxt_rx_ring_info *,
1401 					       u16, void *, u8 *, dma_addr_t,
1402 					       unsigned int);
1403 
1404 	u32			rx_buf_size;
1405 	u32			rx_buf_use_size;	/* useable size */
1406 	u16			rx_offset;
1407 	u16			rx_dma_offset;
1408 	enum dma_data_direction	rx_dir;
1409 	u32			rx_ring_size;
1410 	u32			rx_agg_ring_size;
1411 	u32			rx_copy_thresh;
1412 	u32			rx_ring_mask;
1413 	u32			rx_agg_ring_mask;
1414 	int			rx_nr_pages;
1415 	int			rx_agg_nr_pages;
1416 	int			rx_nr_rings;
1417 	int			rsscos_nr_ctxs;
1418 
1419 	u32			tx_ring_size;
1420 	u32			tx_ring_mask;
1421 	int			tx_nr_pages;
1422 	int			tx_nr_rings;
1423 	int			tx_nr_rings_per_tc;
1424 	int			tx_nr_rings_xdp;
1425 
1426 	int			tx_wake_thresh;
1427 	int			tx_push_thresh;
1428 	int			tx_push_size;
1429 
1430 	u32			cp_ring_size;
1431 	u32			cp_ring_mask;
1432 	u32			cp_bit;
1433 	int			cp_nr_pages;
1434 	int			cp_nr_rings;
1435 
1436 	/* grp_info indexed by completion ring index */
1437 	struct bnxt_ring_grp_info	*grp_info;
1438 	struct bnxt_vnic_info	*vnic_info;
1439 	int			nr_vnics;
1440 	u32			rss_hash_cfg;
1441 
1442 	u16			max_mtu;
1443 	u8			max_tc;
1444 	u8			max_lltc;	/* lossless TCs */
1445 	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
1446 	u8			tc_to_qidx[BNXT_MAX_QUEUE];
1447 	u8			q_ids[BNXT_MAX_QUEUE];
1448 	u8			max_q;
1449 
1450 	unsigned int		current_interval;
1451 #define BNXT_TIMER_INTERVAL	HZ
1452 
1453 	struct timer_list	timer;
1454 
1455 	unsigned long		state;
1456 #define BNXT_STATE_OPEN		0
1457 #define BNXT_STATE_IN_SP_TASK	1
1458 #define BNXT_STATE_READ_STATS	2
1459 
1460 	struct bnxt_irq	*irq_tbl;
1461 	int			total_irqs;
1462 	u8			mac_addr[ETH_ALEN];
1463 
1464 #ifdef CONFIG_BNXT_DCB
1465 	struct ieee_pfc		*ieee_pfc;
1466 	struct ieee_ets		*ieee_ets;
1467 	u8			dcbx_cap;
1468 	u8			default_pri;
1469 	u8			max_dscp_value;
1470 #endif /* CONFIG_BNXT_DCB */
1471 
1472 	u32			msg_enable;
1473 
1474 	u32			fw_cap;
1475 	#define BNXT_FW_CAP_SHORT_CMD			0x00000001
1476 	#define BNXT_FW_CAP_LLDP_AGENT			0x00000002
1477 	#define BNXT_FW_CAP_DCBX_AGENT			0x00000004
1478 	#define BNXT_FW_CAP_NEW_RM			0x00000008
1479 	#define BNXT_FW_CAP_IF_CHANGE			0x00000010
1480 	#define BNXT_FW_CAP_KONG_MB_CHNL		0x00000080
1481 	#define BNXT_FW_CAP_OVS_64BIT_HANDLE		0x00000400
1482 	#define BNXT_FW_CAP_TRUSTED_VF			0x00000800
1483 
1484 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
1485 	u32			hwrm_spec_code;
1486 	u16			hwrm_cmd_seq;
1487 	u16                     hwrm_cmd_kong_seq;
1488 	u16			hwrm_intr_seq_id;
1489 	void			*hwrm_short_cmd_req_addr;
1490 	dma_addr_t		hwrm_short_cmd_req_dma_addr;
1491 	void			*hwrm_cmd_resp_addr;
1492 	dma_addr_t		hwrm_cmd_resp_dma_addr;
1493 	void			*hwrm_cmd_kong_resp_addr;
1494 	dma_addr_t		hwrm_cmd_kong_resp_dma_addr;
1495 
1496 	struct rtnl_link_stats64	net_stats_prev;
1497 	struct rx_port_stats	*hw_rx_port_stats;
1498 	struct tx_port_stats	*hw_tx_port_stats;
1499 	struct rx_port_stats_ext	*hw_rx_port_stats_ext;
1500 	struct tx_port_stats_ext	*hw_tx_port_stats_ext;
1501 	dma_addr_t		hw_rx_port_stats_map;
1502 	dma_addr_t		hw_tx_port_stats_map;
1503 	dma_addr_t		hw_rx_port_stats_ext_map;
1504 	dma_addr_t		hw_tx_port_stats_ext_map;
1505 	int			hw_port_stats_size;
1506 	u16			fw_rx_stats_ext_size;
1507 	u16			fw_tx_stats_ext_size;
1508 	u8			pri2cos[8];
1509 	u8			pri2cos_valid;
1510 
1511 	u16			hwrm_max_req_len;
1512 	u16			hwrm_max_ext_req_len;
1513 	int			hwrm_cmd_timeout;
1514 	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
1515 	struct hwrm_ver_get_output	ver_resp;
1516 #define FW_VER_STR_LEN		32
1517 #define BC_HWRM_STR_LEN		21
1518 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
1519 	char			fw_ver_str[FW_VER_STR_LEN];
1520 	__be16			vxlan_port;
1521 	u8			vxlan_port_cnt;
1522 	__le16			vxlan_fw_dst_port_id;
1523 	__be16			nge_port;
1524 	u8			nge_port_cnt;
1525 	__le16			nge_fw_dst_port_id;
1526 	u8			port_partition_type;
1527 	u8			port_count;
1528 	u16			br_mode;
1529 
1530 	struct bnxt_coal_cap	coal_cap;
1531 	struct bnxt_coal	rx_coal;
1532 	struct bnxt_coal	tx_coal;
1533 
1534 	u32			stats_coal_ticks;
1535 #define BNXT_DEF_STATS_COAL_TICKS	 1000000
1536 #define BNXT_MIN_STATS_COAL_TICKS	  250000
1537 #define BNXT_MAX_STATS_COAL_TICKS	 1000000
1538 
1539 	struct work_struct	sp_task;
1540 	unsigned long		sp_event;
1541 #define BNXT_RX_MASK_SP_EVENT		0
1542 #define BNXT_RX_NTP_FLTR_SP_EVENT	1
1543 #define BNXT_LINK_CHNG_SP_EVENT		2
1544 #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
1545 #define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
1546 #define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
1547 #define BNXT_RESET_TASK_SP_EVENT	6
1548 #define BNXT_RST_RING_SP_EVENT		7
1549 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT	8
1550 #define BNXT_PERIODIC_STATS_SP_EVENT	9
1551 #define BNXT_HWRM_PORT_MODULE_SP_EVENT	10
1552 #define BNXT_RESET_TASK_SILENT_SP_EVENT	11
1553 #define BNXT_GENEVE_ADD_PORT_SP_EVENT	12
1554 #define BNXT_GENEVE_DEL_PORT_SP_EVENT	13
1555 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
1556 #define BNXT_FLOW_STATS_SP_EVENT	15
1557 #define BNXT_UPDATE_PHY_SP_EVENT	16
1558 #define BNXT_RING_COAL_NOW_SP_EVENT	17
1559 
1560 	struct bnxt_hw_resc	hw_resc;
1561 	struct bnxt_pf_info	pf;
1562 	struct bnxt_ctx_mem_info	*ctx;
1563 #ifdef CONFIG_BNXT_SRIOV
1564 	int			nr_vfs;
1565 	struct bnxt_vf_info	vf;
1566 	wait_queue_head_t	sriov_cfg_wait;
1567 	bool			sriov_cfg;
1568 #define BNXT_SRIOV_CFG_WAIT_TMO	msecs_to_jiffies(10000)
1569 
1570 	/* lock to protect VF-rep creation/cleanup via
1571 	 * multiple paths such as ->sriov_configure() and
1572 	 * devlink ->eswitch_mode_set()
1573 	 */
1574 	struct mutex		sriov_lock;
1575 #endif
1576 
1577 #if BITS_PER_LONG == 32
1578 	/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
1579 	spinlock_t		db_lock;
1580 #endif
1581 
1582 #define BNXT_NTP_FLTR_MAX_FLTR	4096
1583 #define BNXT_NTP_FLTR_HASH_SIZE	512
1584 #define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
1585 	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
1586 	spinlock_t		ntp_fltr_lock;	/* for hash table add, del */
1587 
1588 	unsigned long		*ntp_fltr_bmap;
1589 	int			ntp_fltr_count;
1590 
1591 	/* To protect link related settings during link changes and
1592 	 * ethtool settings changes.
1593 	 */
1594 	struct mutex		link_lock;
1595 	struct bnxt_link_info	link_info;
1596 	struct ethtool_eee	eee;
1597 	u32			lpi_tmr_lo;
1598 	u32			lpi_tmr_hi;
1599 
1600 	u8			num_tests;
1601 	struct bnxt_test_info	*test_info;
1602 
1603 	u8			wol_filter_id;
1604 	u8			wol;
1605 
1606 	u8			num_leds;
1607 	struct bnxt_led_info	leds[BNXT_MAX_LED];
1608 
1609 	struct bpf_prog		*xdp_prog;
1610 
1611 	/* devlink interface and vf-rep structs */
1612 	struct devlink		*dl;
1613 	struct devlink_port	dl_port;
1614 	enum devlink_eswitch_mode eswitch_mode;
1615 	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
1616 	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
1617 	u8			switch_id[8];
1618 	struct bnxt_tc_info	*tc_info;
1619 	struct dentry		*debugfs_pdev;
1620 	struct dentry		*debugfs_dim;
1621 	struct device		*hwmon_dev;
1622 };
1623 
1624 #define BNXT_RX_STATS_OFFSET(counter)			\
1625 	(offsetof(struct rx_port_stats, counter) / 8)
1626 
1627 #define BNXT_TX_STATS_OFFSET(counter)			\
1628 	((offsetof(struct tx_port_stats, counter) +	\
1629 	  sizeof(struct rx_port_stats) + 512) / 8)
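/* Stats offsets are expressed in units of 64-bit counters (hence the
 * division by 8).  As the macro above encodes, the TX port stats are read
 * from the same DMA buffer as the RX port stats, at an offset of
 * sizeof(struct rx_port_stats) plus 512 bytes of padding.
 */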
1630 
1631 #define BNXT_RX_STATS_EXT_OFFSET(counter)		\
1632 	(offsetof(struct rx_port_stats_ext, counter) / 8)
1633 
1634 #define BNXT_TX_STATS_EXT_OFFSET(counter)		\
1635 	(offsetof(struct tx_port_stats_ext, counter) / 8)
1636 
1637 #define I2C_DEV_ADDR_A0				0xa0
1638 #define I2C_DEV_ADDR_A2				0xa2
1639 #define SFF_DIAG_SUPPORT_OFFSET			0x5c
1640 #define SFF_MODULE_ID_SFP			0x3
1641 #define SFF_MODULE_ID_QSFP			0xc
1642 #define SFF_MODULE_ID_QSFP_PLUS			0xd
1643 #define SFF_MODULE_ID_QSFP28			0x11
1644 #define BNXT_MAX_PHY_I2C_RESP_SIZE		64
1645 
1646 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
1647 {
1648 	/* Tell compiler to fetch tx indices from memory. */
1649 	barrier();
1650 
1651 	return bp->tx_ring_size -
1652 		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
1653 }
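/* Example: with the default 511-entry TX ring and tx_prod == tx_cons,
 * bnxt_tx_avail() returns bp->tx_ring_size (all descriptors free); the
 * subtraction and mask handle 16-bit wraparound of the indices.
 */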
1654 
1655 #if BITS_PER_LONG == 32
1656 #define writeq(val64, db)			\
1657 do {						\
1658 	spin_lock(&bp->db_lock);		\
1659 	writel((val64) & 0xffffffff, db);	\
1660 	writel((val64) >> 32, (db) + 4);	\
1661 	spin_unlock(&bp->db_lock);		\
1662 } while (0)
1663 
1664 #define writeq_relaxed writeq
1665 #endif
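/* Note: the 32-bit writeq()/writeq_relaxed() emulation above expects a
 * local variable named bp to be in scope at the call site, since the
 * macro takes db_lock through it.
 */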
1666 
1667 /* For TX and RX ring doorbells with no ordering guarantee */
1668 static inline void bnxt_db_write_relaxed(struct bnxt *bp,
1669 					 struct bnxt_db_info *db, u32 idx)
1670 {
1671 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
1672 		writeq_relaxed(db->db_key64 | idx, db->doorbell);
1673 	} else {
1674 		u32 db_val = db->db_key32 | idx;
1675 
1676 		writel_relaxed(db_val, db->doorbell);
1677 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
1678 			writel_relaxed(db_val, db->doorbell);
1679 	}
1680 }
1681 
1682 /* For TX and RX ring doorbells */
1683 static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
1684 				 u32 idx)
1685 {
1686 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
1687 		writeq(db->db_key64 | idx, db->doorbell);
1688 	} else {
1689 		u32 db_val = db->db_key32 | idx;
1690 
1691 		writel(db_val, db->doorbell);
1692 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
1693 			writel(db_val, db->doorbell);
1694 	}
1695 }
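/* Illustrative use after advancing a TX producer index (a sketch of the
 * pattern used in bnxt.c, not an additional API):
 *
 *	prod = NEXT_TX(prod);
 *	txr->tx_prod = prod;
 *	bnxt_db_write(bp, &txr->tx_db, prod);
 */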
1696 
1697 static inline bool bnxt_cfa_hwrm_message(u16 req_type)
1698 {
1699 	switch (req_type) {
1700 	case HWRM_CFA_ENCAP_RECORD_ALLOC:
1701 	case HWRM_CFA_ENCAP_RECORD_FREE:
1702 	case HWRM_CFA_DECAP_FILTER_ALLOC:
1703 	case HWRM_CFA_DECAP_FILTER_FREE:
1704 	case HWRM_CFA_NTUPLE_FILTER_ALLOC:
1705 	case HWRM_CFA_NTUPLE_FILTER_FREE:
1706 	case HWRM_CFA_NTUPLE_FILTER_CFG:
1707 	case HWRM_CFA_EM_FLOW_ALLOC:
1708 	case HWRM_CFA_EM_FLOW_FREE:
1709 	case HWRM_CFA_EM_FLOW_CFG:
1710 	case HWRM_CFA_FLOW_ALLOC:
1711 	case HWRM_CFA_FLOW_FREE:
1712 	case HWRM_CFA_FLOW_INFO:
1713 	case HWRM_CFA_FLOW_FLUSH:
1714 	case HWRM_CFA_FLOW_STATS:
1715 	case HWRM_CFA_METER_PROFILE_ALLOC:
1716 	case HWRM_CFA_METER_PROFILE_FREE:
1717 	case HWRM_CFA_METER_PROFILE_CFG:
1718 	case HWRM_CFA_METER_INSTANCE_ALLOC:
1719 	case HWRM_CFA_METER_INSTANCE_FREE:
1720 		return true;
1721 	default:
1722 		return false;
1723 	}
1724 }
1725 
1726 static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
1727 {
1728 	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
1729 		bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
1730 }
1731 
1732 static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
1733 {
1734 	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
1735 		req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
1736 }
1737 
1738 static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
1739 {
1740 	if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
1741 		return bp->hwrm_cmd_kong_resp_addr;
1742 	else
1743 		return bp->hwrm_cmd_resp_addr;
1744 }
1745 
1746 static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
1747 {
1748 	u16 seq_id;
1749 
1750 	if (dst == BNXT_HWRM_CHNL_CHIMP)
1751 		seq_id = bp->hwrm_cmd_seq++;
1752 	else
1753 		seq_id = bp->hwrm_cmd_kong_seq++;
1754 	return seq_id;
1755 }
1756 
1757 extern const u16 bnxt_lhint_arr[];
1758 
1759 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1760 		       u16 prod, gfp_t gfp);
1761 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
1762 void bnxt_set_tpa_flags(struct bnxt *bp);
1763 void bnxt_set_ring_params(struct bnxt *);
1764 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
1765 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
1766 int _hwrm_send_message(struct bnxt *, void *, u32, int);
1767 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
1768 int hwrm_send_message(struct bnxt *, void *, u32, int);
1769 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1770 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
1771 				     int bmap_size);
1772 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
1773 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
1774 int bnxt_nq_rings_in_use(struct bnxt *bp);
1775 int bnxt_hwrm_set_coal(struct bnxt *);
1776 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
1777 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
1778 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
1779 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
1780 int bnxt_get_avail_msix(struct bnxt *bp, int num);
1781 int bnxt_reserve_rings(struct bnxt *bp);
1782 void bnxt_tx_disable(struct bnxt *bp);
1783 void bnxt_tx_enable(struct bnxt *bp);
1784 int bnxt_hwrm_set_pause(struct bnxt *);
1785 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
1786 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
1787 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
1788 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
1789 int bnxt_hwrm_fw_set_time(struct bnxt *);
1790 int bnxt_open_nic(struct bnxt *, bool, bool);
1791 int bnxt_half_open_nic(struct bnxt *bp);
1792 void bnxt_half_close_nic(struct bnxt *bp);
1793 int bnxt_close_nic(struct bnxt *, bool, bool);
1794 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
1795 		     int tx_xdp);
1796 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
1797 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
1798 int bnxt_restore_pf_fw_resources(struct bnxt *bp);
1799 int bnxt_get_port_parent_id(struct net_device *dev,
1800 			    struct netdev_phys_item_id *ppid);
1801 void bnxt_dim_work(struct work_struct *work);
1802 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
1803 
1804 #endif
1805