/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

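/* TX packets no longer than this threshold (in bytes) may be copied
 * inline into the push buffer and written directly through the doorbell
 * BAR, avoiding descriptor and payload DMA reads for small frames.
 */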
#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

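/* Completion ring doorbell flags: DB_KEY_CP selects the completion ring
 * doorbell type, DB_IDX_VALID marks the consumer index field as valid,
 * and DB_IRQ_DIS leaves the ring's interrupt disarmed until re-armed.
 */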
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

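/* Maps the packet length in 512-byte units (see the length >>= 9 in
 * bnxt_start_xmit()) to the TX BD length hint advertised to the
 * hardware.
 */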
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

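/* Main transmit path.  Small packets with no offload flags are copied
 * inline through the doorbell ("push" mode); everything else is DMA
 * mapped and described with long TX BDs, one per fragment.
 */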
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		atomic_long_inc(&dev->tx_dropped);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads.
		 * QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

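		/* push_len below counts 8-byte words of push BD plus
		 * payload, rounded up.  __iowrite64_copy() copies 8-byte
		 * units and __iowrite32_copy() copies 4-byte units, hence
		 * the << 1 conversion for the tail beyond the first 16
		 * words.
		 */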
		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	atomic_long_inc(&dev->tx_dropped);
	return NETDEV_TX_OK;
}

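/* TX completion processing: unmap and free up to nr_pkts completed
 * packets, credit the bytes back to BQL, and wake the queue if it was
 * stopped and enough descriptors have been freed.
 */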
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		bool compl_deferred = false;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				if (!bnxt_get_tx_ts_p5(bp, skb))
					compl_deferred = true;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		if (!compl_deferred)
			dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

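/* Allocate an RX buffer page from the ring's page_pool and DMA map it.
 * The returned mapping is advanced by rx_dma_offset so it points at the
 * start of packet data rather than the start of the page.
 */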
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

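/* Allocate one aggregation ring buffer.  When the system page size is
 * larger than BNXT_RX_PAGE_SIZE, a single page is carved into multiple
 * aggregation buffers, taking an extra page reference per chunk.
 */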
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

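/* Return agg_bufs aggregation buffers, identified by the completion (or
 * P5 TPA) entries starting at idx/start, to the aggregation producer
 * ring so they can be reused after an aborted packet.
 */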
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

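/* Build an skb for page mode RX: the header portion is copied into the
 * linear area and the full page is attached as frag 0, with the frag
 * then adjusted to skip the bytes already pulled into the header.
 */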
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

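/* PFs queue slow-path and FW reset work on the driver's dedicated
 * bnxt_pf_wq workqueue; VFs fall back to the system workqueue.
 */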
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

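/* On P5 chips the firmware agg_id is masked down to a software slot;
 * if that slot is busy, a free one is found via the bitmap, and
 * agg_id_tbl remembers the mapping so later TPA completions can find
 * the same slot.
 */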
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

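/* TPA_START completion: park the current RX buffer in tpa_info for the
 * duration of the aggregation and immediately repost buffers so the RX
 * ring stays full while the TPA flow is in progress.
 */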
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

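/* Finish GRO for an aggregated packet: set the segment count and
 * gso_size from the TPA end completion, let the chip-specific gro_func
 * fix up the network/transport headers, then hand the skb to
 * tcp_gro_complete().
 */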
1545 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1546 					   struct bnxt_tpa_info *tpa_info,
1547 					   struct rx_tpa_end_cmp *tpa_end,
1548 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1549 					   struct sk_buff *skb)
1550 {
1551 #ifdef CONFIG_INET
1552 	int payload_off;
1553 	u16 segs;
1554 
1555 	segs = TPA_END_TPA_SEGS(tpa_end);
1556 	if (segs == 1)
1557 		return skb;
1558 
1559 	NAPI_GRO_CB(skb)->count = segs;
1560 	skb_shinfo(skb)->gso_size =
1561 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1562 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1563 	if (bp->flags & BNXT_FLAG_CHIP_P5)
1564 		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1565 	else
1566 		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1567 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1568 	if (likely(skb))
1569 		tcp_gro_complete(skb);
1570 #endif
1571 	return skb;
1572 }
1573 
1574 /* Given the cfa_code of a received packet determine which
1575  * netdev (vf-rep or PF) the packet is destined to.
1576  */
1577 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1578 {
1579 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1580 
1581 	/* if vf-rep dev is NULL, the must belongs to the PF */
1582 	return dev ? dev : bp->dev;
1583 }
1584 
1585 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1586 					   struct bnxt_cp_ring_info *cpr,
1587 					   u32 *raw_cons,
1588 					   struct rx_tpa_end_cmp *tpa_end,
1589 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1590 					   u8 *event)
1591 {
1592 	struct bnxt_napi *bnapi = cpr->bnapi;
1593 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1594 	u8 *data_ptr, agg_bufs;
1595 	unsigned int len;
1596 	struct bnxt_tpa_info *tpa_info;
1597 	dma_addr_t mapping;
1598 	struct sk_buff *skb;
1599 	u16 idx = 0, agg_id;
1600 	void *data;
1601 	bool gro;
1602 
1603 	if (unlikely(bnapi->in_reset)) {
1604 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1605 
1606 		if (rc < 0)
1607 			return ERR_PTR(-EBUSY);
1608 		return NULL;
1609 	}
1610 
1611 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
1612 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
1613 		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1614 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1615 		tpa_info = &rxr->rx_tpa[agg_id];
1616 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
1617 			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1618 				    agg_bufs, tpa_info->agg_count);
1619 			agg_bufs = tpa_info->agg_count;
1620 		}
1621 		tpa_info->agg_count = 0;
1622 		*event |= BNXT_AGG_EVENT;
1623 		bnxt_free_agg_idx(rxr, agg_id);
1624 		idx = agg_id;
1625 		gro = !!(bp->flags & BNXT_FLAG_GRO);
1626 	} else {
1627 		agg_id = TPA_END_AGG_ID(tpa_end);
1628 		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1629 		tpa_info = &rxr->rx_tpa[agg_id];
1630 		idx = RING_CMP(*raw_cons);
1631 		if (agg_bufs) {
1632 			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1633 				return ERR_PTR(-EBUSY);
1634 
1635 			*event |= BNXT_AGG_EVENT;
1636 			idx = NEXT_CMP(idx);
1637 		}
1638 		gro = !!TPA_END_GRO(tpa_end);
1639 	}
1640 	data = tpa_info->data;
1641 	data_ptr = tpa_info->data_ptr;
1642 	prefetch(data_ptr);
1643 	len = tpa_info->len;
1644 	mapping = tpa_info->mapping;
1645 
1646 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1647 		bnxt_abort_tpa(cpr, idx, agg_bufs);
1648 		if (agg_bufs > MAX_SKB_FRAGS)
1649 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1650 				    agg_bufs, (int)MAX_SKB_FRAGS);
1651 		return NULL;
1652 	}
1653 
1654 	if (len <= bp->rx_copy_thresh) {
1655 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1656 		if (!skb) {
1657 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1658 			cpr->sw_stats.rx.rx_oom_discards += 1;
1659 			return NULL;
1660 		}
1661 	} else {
1662 		u8 *new_data;
1663 		dma_addr_t new_mapping;
1664 
1665 		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1666 		if (!new_data) {
1667 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1668 			cpr->sw_stats.rx.rx_oom_discards += 1;
1669 			return NULL;
1670 		}
1671 
1672 		tpa_info->data = new_data;
1673 		tpa_info->data_ptr = new_data + bp->rx_offset;
1674 		tpa_info->mapping = new_mapping;
1675 
1676 		skb = build_skb(data, 0);
1677 		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1678 				       bp->rx_buf_use_size, bp->rx_dir,
1679 				       DMA_ATTR_WEAK_ORDERING);
1680 
1681 		if (!skb) {
1682 			kfree(data);
1683 			bnxt_abort_tpa(cpr, idx, agg_bufs);
1684 			cpr->sw_stats.rx.rx_oom_discards += 1;
1685 			return NULL;
1686 		}
1687 		skb_reserve(skb, bp->rx_offset);
1688 		skb_put(skb, len);
1689 	}
1690 
1691 	if (agg_bufs) {
1692 		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1693 		if (!skb) {
1694 			/* Page reuse already handled by bnxt_rx_pages(). */
1695 			cpr->sw_stats.rx.rx_oom_discards += 1;
1696 			return NULL;
1697 		}
1698 	}
1699 
1700 	skb->protocol =
1701 		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1702 
1703 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1704 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1705 
1706 	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1707 	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1708 		__be16 vlan_proto = htons(tpa_info->metadata >>
1709 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1710 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1711 
1712 		if (eth_type_vlan(vlan_proto)) {
1713 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1714 		} else {
1715 			dev_kfree_skb(skb);
1716 			return NULL;
1717 		}
1718 	}
1719 
1720 	skb_checksum_none_assert(skb);
1721 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1722 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1723 		skb->csum_level =
1724 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1725 	}
1726 
1727 	if (gro)
1728 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1729 
1730 	return skb;
1731 }
1732 
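/* Stash a TPA aggregation buffer completion (P5 chips) until the
 * corresponding TPA end completion arrives.
 */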
1733 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1734 			 struct rx_agg_cmp *rx_agg)
1735 {
1736 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1737 	struct bnxt_tpa_info *tpa_info;
1738 
1739 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1740 	tpa_info = &rxr->rx_tpa[agg_id];
1741 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1742 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1743 }
1744 
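/* Pass a completed SKB up the stack, diverting it to the VF
 * representor if it does not belong to the PF.
 */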
1745 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1746 			     struct sk_buff *skb)
1747 {
1748 	if (skb->dev != bp->dev) {
1749 		/* this packet belongs to a vf-rep */
1750 		bnxt_vf_rep_rx(bp, skb);
1751 		return;
1752 	}
1753 	skb_record_rx_queue(skb, bnapi->index);
1754 	napi_gro_receive(&bnapi->napi, skb);
1755 }
1756 
1757 /* returns the following:
1758  * 1       - 1 packet successfully received
1759  * 0       - successful TPA_START, packet not completed yet
1760  * -EBUSY  - completion ring does not have all the agg buffers yet
1761  * -ENOMEM - packet aborted due to out of memory
1762  * -EIO    - packet aborted due to hw error indicated in BD
1763  */
1764 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1765 		       u32 *raw_cons, u8 *event)
1766 {
1767 	struct bnxt_napi *bnapi = cpr->bnapi;
1768 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1769 	struct net_device *dev = bp->dev;
1770 	struct rx_cmp *rxcmp;
1771 	struct rx_cmp_ext *rxcmp1;
1772 	u32 tmp_raw_cons = *raw_cons;
1773 	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1774 	struct bnxt_sw_rx_bd *rx_buf;
1775 	unsigned int len;
1776 	u8 *data_ptr, agg_bufs, cmp_type;
1777 	dma_addr_t dma_addr;
1778 	struct sk_buff *skb;
1779 	u32 flags, misc;
1780 	void *data;
1781 	int rc = 0;
1782 
1783 	rxcmp = (struct rx_cmp *)
1784 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1785 
1786 	cmp_type = RX_CMP_TYPE(rxcmp);
1787 
1788 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1789 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1790 		goto next_rx_no_prod_no_len;
1791 	}
1792 
1793 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1794 	cp_cons = RING_CMP(tmp_raw_cons);
1795 	rxcmp1 = (struct rx_cmp_ext *)
1796 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1797 
1798 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1799 		return -EBUSY;
1800 
	/* The validity test of the entry must be done before
	 * reading any further.
	 */
1804 	dma_rmb();
1805 	prod = rxr->rx_prod;
1806 
1807 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1808 		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1809 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
1810 
1811 		*event |= BNXT_RX_EVENT;
1812 		goto next_rx_no_prod_no_len;
1813 
1814 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1815 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1816 				   (struct rx_tpa_end_cmp *)rxcmp,
1817 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1818 
1819 		if (IS_ERR(skb))
1820 			return -EBUSY;
1821 
1822 		rc = -ENOMEM;
1823 		if (likely(skb)) {
1824 			bnxt_deliver_skb(bp, bnapi, skb);
1825 			rc = 1;
1826 		}
1827 		*event |= BNXT_RX_EVENT;
1828 		goto next_rx_no_prod_no_len;
1829 	}
1830 
1831 	cons = rxcmp->rx_cmp_opaque;
1832 	if (unlikely(cons != rxr->rx_next_cons)) {
1833 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1834 
		/* 0xffff is a forced error, don't print it */
1836 		if (rxr->rx_next_cons != 0xffff)
1837 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1838 				    cons, rxr->rx_next_cons);
1839 		bnxt_sched_reset(bp, rxr);
1840 		if (rc1)
1841 			return rc1;
1842 		goto next_rx_no_prod_no_len;
1843 	}
1844 	rx_buf = &rxr->rx_buf_ring[cons];
1845 	data = rx_buf->data;
1846 	data_ptr = rx_buf->data_ptr;
1847 	prefetch(data_ptr);
1848 
1849 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1850 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1851 
1852 	if (agg_bufs) {
1853 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1854 			return -EBUSY;
1855 
1856 		cp_cons = NEXT_CMP(cp_cons);
1857 		*event |= BNXT_AGG_EVENT;
1858 	}
1859 	*event |= BNXT_RX_EVENT;
1860 
1861 	rx_buf->data = NULL;
1862 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1863 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1864 
1865 		bnxt_reuse_rx_data(rxr, cons, data);
1866 		if (agg_bufs)
1867 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1868 					       false);
1869 
1870 		rc = -EIO;
1871 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1872 			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1873 			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1874 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1875 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
1876 						 rx_err);
1877 				bnxt_sched_reset(bp, rxr);
1878 			}
1879 		}
1880 		goto next_rx_no_len;
1881 	}
1882 
1883 	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1884 	len = flags >> RX_CMP_LEN_SHIFT;
1885 	dma_addr = rx_buf->mapping;
1886 
1887 	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1888 		rc = 1;
1889 		goto next_rx;
1890 	}
1891 
1892 	if (len <= bp->rx_copy_thresh) {
1893 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1894 		bnxt_reuse_rx_data(rxr, cons, data);
1895 		if (!skb) {
1896 			if (agg_bufs)
1897 				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1898 						       agg_bufs, false);
1899 			cpr->sw_stats.rx.rx_oom_discards += 1;
1900 			rc = -ENOMEM;
1901 			goto next_rx;
1902 		}
1903 	} else {
1904 		u32 payload;
1905 
1906 		if (rx_buf->data_ptr == data_ptr)
1907 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
1908 		else
1909 			payload = 0;
1910 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1911 				      payload | len);
1912 		if (!skb) {
1913 			cpr->sw_stats.rx.rx_oom_discards += 1;
1914 			rc = -ENOMEM;
1915 			goto next_rx;
1916 		}
1917 	}
1918 
1919 	if (agg_bufs) {
1920 		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1921 		if (!skb) {
1922 			cpr->sw_stats.rx.rx_oom_discards += 1;
1923 			rc = -ENOMEM;
1924 			goto next_rx;
1925 		}
1926 	}
1927 
1928 	if (RX_CMP_HASH_VALID(rxcmp)) {
1929 		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1930 		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1931 
1932 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1933 		if (hash_type != 1 && hash_type != 3)
1934 			type = PKT_HASH_TYPE_L3;
1935 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1936 	}
1937 
1938 	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1939 	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1940 
1941 	if ((rxcmp1->rx_cmp_flags2 &
1942 	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1943 	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1944 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1945 		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1946 		__be16 vlan_proto = htons(meta_data >>
1947 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1948 
1949 		if (eth_type_vlan(vlan_proto)) {
1950 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1951 		} else {
1952 			dev_kfree_skb(skb);
1953 			goto next_rx;
1954 		}
1955 	}
1956 
1957 	skb_checksum_none_assert(skb);
1958 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
1959 		if (dev->features & NETIF_F_RXCSUM) {
1960 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1961 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1962 		}
1963 	} else {
1964 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1965 			if (dev->features & NETIF_F_RXCSUM)
1966 				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1967 		}
1968 	}
1969 
1970 	if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1971 		     RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1972 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
1973 			u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1974 			u64 ns, ts;
1975 
1976 			if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1977 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1978 
1979 				spin_lock_bh(&ptp->ptp_lock);
1980 				ns = timecounter_cyc2time(&ptp->tc, ts);
1981 				spin_unlock_bh(&ptp->ptp_lock);
1982 				memset(skb_hwtstamps(skb), 0,
1983 				       sizeof(*skb_hwtstamps(skb)));
1984 				skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1985 			}
1986 		}
1987 	}
1988 	bnxt_deliver_skb(bp, bnapi, skb);
1989 	rc = 1;
1990 
1991 next_rx:
1992 	cpr->rx_packets += 1;
1993 	cpr->rx_bytes += len;
1994 
1995 next_rx_no_len:
1996 	rxr->rx_prod = NEXT_RX(prod);
1997 	rxr->rx_next_cons = NEXT_RX(cons);
1998 
1999 next_rx_no_prod_no_len:
2000 	*raw_cons = tmp_raw_cons;
2001 
2002 	return rc;
2003 }
2004 
2005 /* In netpoll mode, if we are using a combined completion ring, we need to
2006  * discard the rx packets and recycle the buffers.
2007  */
2008 static int bnxt_force_rx_discard(struct bnxt *bp,
2009 				 struct bnxt_cp_ring_info *cpr,
2010 				 u32 *raw_cons, u8 *event)
2011 {
2012 	u32 tmp_raw_cons = *raw_cons;
2013 	struct rx_cmp_ext *rxcmp1;
2014 	struct rx_cmp *rxcmp;
2015 	u16 cp_cons;
2016 	u8 cmp_type;
2017 	int rc;
2018 
2019 	cp_cons = RING_CMP(tmp_raw_cons);
2020 	rxcmp = (struct rx_cmp *)
2021 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2022 
2023 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2024 	cp_cons = RING_CMP(tmp_raw_cons);
2025 	rxcmp1 = (struct rx_cmp_ext *)
2026 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2027 
2028 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2029 		return -EBUSY;
2030 
	/* The validity test of the entry must be done before
	 * reading any further.
	 */
2034 	dma_rmb();
2035 	cmp_type = RX_CMP_TYPE(rxcmp);
2036 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2037 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2038 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2039 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2040 		struct rx_tpa_end_cmp_ext *tpa_end1;
2041 
2042 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2043 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2044 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2045 	}
2046 	rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2047 	if (rc && rc != -EBUSY)
2048 		cpr->sw_stats.rx.rx_netpoll_discards += 1;
2049 	return rc;
2050 }
2051 
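/* Read a firmware health register.  Depending on the register type,
 * the value may come from PCI config space, a mapped GRC window, or
 * one of the BARs.
 */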
2052 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2053 {
2054 	struct bnxt_fw_health *fw_health = bp->fw_health;
2055 	u32 reg = fw_health->regs[reg_idx];
2056 	u32 reg_type, reg_off, val = 0;
2057 
2058 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2059 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2060 	switch (reg_type) {
2061 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
2062 		pci_read_config_dword(bp->pdev, reg_off, &val);
2063 		break;
2064 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
2065 		reg_off = fw_health->mapped_regs[reg_idx];
2066 		fallthrough;
2067 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2068 		val = readl(bp->bar0 + reg_off);
2069 		break;
2070 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2071 		val = readl(bp->bar1 + reg_off);
2072 		break;
2073 	}
2074 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2075 		val &= fw_health->fw_reset_inprog_reg_mask;
2076 	return val;
2077 }
2078 
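/* Map a firmware aggregation ring ID back to its ring group index. */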
2079 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2080 {
2081 	int i;
2082 
2083 	for (i = 0; i < bp->rx_nr_rings; i++) {
2084 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2085 		struct bnxt_ring_grp_info *grp_info;
2086 
2087 		grp_info = &bp->grp_info[grp_idx];
2088 		if (grp_info->agg_fw_ring_id == ring_id)
2089 			return grp_idx;
2090 	}
2091 	return INVALID_HW_RING_ID;
2092 }
2093 
2094 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2095 {
2096 	switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2097 	case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2098 		netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2099 			   BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2100 		break;
	default:
		netdev_err(bp->dev, "FW reported unknown error type %u\n",
			   BNXT_EVENT_ERROR_REPORT_TYPE(data1));
		break;
2104 	}
2105 }
2106 
2107 #define BNXT_GET_EVENT_PORT(data)	\
2108 	((data) &			\
2109 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2110 
2111 #define BNXT_EVENT_RING_TYPE(data2)	\
2112 	((data2) &			\
2113 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2114 
2115 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2116 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2117 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2118 
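/* Decode a firmware async event completion and set the appropriate
 * slow-path event bits so the work can be handled in process context.
 */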
2119 static int bnxt_async_event_process(struct bnxt *bp,
2120 				    struct hwrm_async_event_cmpl *cmpl)
2121 {
2122 	u16 event_id = le16_to_cpu(cmpl->event_id);
2123 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2124 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2125 
2126 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2127 	switch (event_id) {
2128 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2129 		struct bnxt_link_info *link_info = &bp->link_info;
2130 
2131 		if (BNXT_VF(bp))
2132 			goto async_event_process_exit;
2133 
2134 		/* print unsupported speed warning in forced speed mode only */
2135 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2136 		    (data1 & 0x20000)) {
2137 			u16 fw_speed = link_info->force_link_speed;
2138 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2139 
2140 			if (speed != SPEED_UNKNOWN)
2141 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2142 					    speed);
2143 		}
2144 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2145 	}
2146 		fallthrough;
2147 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2148 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2149 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2150 		fallthrough;
2151 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2152 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2153 		break;
2154 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2155 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2156 		break;
2157 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2158 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2159 
2160 		if (BNXT_VF(bp))
2161 			break;
2162 
2163 		if (bp->pf.port_id != port_id)
2164 			break;
2165 
2166 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2167 		break;
2168 	}
2169 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2170 		if (BNXT_PF(bp))
2171 			goto async_event_process_exit;
2172 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2173 		break;
2174 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2175 		char *fatal_str = "non-fatal";
2176 
2177 		if (!bp->fw_health)
2178 			goto async_event_process_exit;
2179 
2180 		bp->fw_reset_timestamp = jiffies;
2181 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2182 		if (!bp->fw_reset_min_dsecs)
2183 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2184 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2185 		if (!bp->fw_reset_max_dsecs)
2186 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2187 		if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2188 			fatal_str = "fatal";
2189 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2190 		}
2191 		netif_warn(bp, hw, bp->dev,
2192 			   "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2193 			   fatal_str, data1, data2,
2194 			   bp->fw_reset_min_dsecs * 100,
2195 			   bp->fw_reset_max_dsecs * 100);
2196 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2197 		break;
2198 	}
2199 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2200 		struct bnxt_fw_health *fw_health = bp->fw_health;
2201 
2202 		if (!fw_health)
2203 			goto async_event_process_exit;
2204 
2205 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2206 			fw_health->enabled = false;
2207 			netif_info(bp, drv, bp->dev,
2208 				   "Error recovery info: error recovery[0]\n");
2209 			break;
2210 		}
2211 		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2212 		fw_health->tmr_multiplier =
2213 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2214 				     bp->current_interval * 10);
2215 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2216 		if (!fw_health->enabled)
2217 			fw_health->last_fw_heartbeat =
2218 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2219 		fw_health->last_fw_reset_cnt =
2220 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2221 		netif_info(bp, drv, bp->dev,
2222 			   "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2223 			   fw_health->master, fw_health->last_fw_reset_cnt,
2224 			   bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2225 		if (!fw_health->enabled) {
2226 			/* Make sure tmr_counter is set and visible to
2227 			 * bnxt_health_check() before setting enabled to true.
2228 			 */
2229 			smp_wmb();
2230 			fw_health->enabled = true;
2231 		}
2232 		goto async_event_process_exit;
2233 	}
2234 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2235 		netif_notice(bp, hw, bp->dev,
2236 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2237 			     data1, data2);
2238 		goto async_event_process_exit;
2239 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2240 		struct bnxt_rx_ring_info *rxr;
2241 		u16 grp_idx;
2242 
2243 		if (bp->flags & BNXT_FLAG_CHIP_P5)
2244 			goto async_event_process_exit;
2245 
2246 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2247 			    BNXT_EVENT_RING_TYPE(data2), data1);
2248 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2249 			goto async_event_process_exit;
2250 
2251 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2252 		if (grp_idx == INVALID_HW_RING_ID) {
2253 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2254 				    data1);
2255 			goto async_event_process_exit;
2256 		}
2257 		rxr = bp->bnapi[grp_idx]->rx_ring;
2258 		bnxt_sched_reset(bp, rxr);
2259 		goto async_event_process_exit;
2260 	}
2261 	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2262 		struct bnxt_fw_health *fw_health = bp->fw_health;
2263 
2264 		netif_notice(bp, hw, bp->dev,
2265 			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2266 			     data1, data2);
2267 		if (fw_health) {
2268 			fw_health->echo_req_data1 = data1;
2269 			fw_health->echo_req_data2 = data2;
2270 			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2271 			break;
2272 		}
2273 		goto async_event_process_exit;
2274 	}
2275 	case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2276 		bnxt_ptp_pps_event(bp, data1, data2);
2277 		goto async_event_process_exit;
2278 	}
2279 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2280 		bnxt_event_error_report(bp, data1, data2);
2281 		goto async_event_process_exit;
2282 	}
2283 	case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2284 		u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2285 
2286 		hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2287 		goto async_event_process_exit;
2288 	}
2289 	default:
2290 		goto async_event_process_exit;
2291 	}
2292 	bnxt_queue_sp_work(bp);
2293 async_event_process_exit:
2294 	bnxt_ulp_async_events(bp, cmpl);
2295 	return 0;
2296 }
2297 
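/* Dispatch an HWRM completion: DONE completions update the pending
 * request token, forwarded VF requests are queued for the slow path,
 * and async events are decoded immediately.
 */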
2298 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2299 {
2300 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2301 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2302 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2303 				(struct hwrm_fwd_req_cmpl *)txcmp;
2304 
2305 	switch (cmpl_type) {
2306 	case CMPL_BASE_TYPE_HWRM_DONE:
2307 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2308 		hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2309 		break;
2310 
2311 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2312 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2313 
2314 		if ((vf_id < bp->pf.first_vf_id) ||
2315 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2316 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2317 				   vf_id);
2318 			return -EINVAL;
2319 		}
2320 
2321 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2322 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2323 		bnxt_queue_sp_work(bp);
2324 		break;
2325 
2326 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2327 		bnxt_async_event_process(bp,
2328 					 (struct hwrm_async_event_cmpl *)txcmp);
2329 		break;
2330 
2331 	default:
2332 		break;
2333 	}
2334 
2335 	return 0;
2336 }
2337 
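/* MSI-X interrupt handler: prefetch the next completion entry and
 * let NAPI do the rest.
 */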
2338 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2339 {
2340 	struct bnxt_napi *bnapi = dev_instance;
2341 	struct bnxt *bp = bnapi->bp;
2342 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2343 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2344 
2345 	cpr->event_ctr++;
2346 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2347 	napi_schedule(&bnapi->napi);
2348 	return IRQ_HANDLED;
2349 }
2350 
2351 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2352 {
2353 	u32 raw_cons = cpr->cp_raw_cons;
2354 	u16 cons = RING_CMP(raw_cons);
2355 	struct tx_cmp *txcmp;
2356 
2357 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2358 
2359 	return TX_CMP_VALID(txcmp, raw_cons);
2360 }
2361 
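/* Legacy INTA handler.  The interrupt line may be shared, so verify
 * that there is work or that our interrupt status bit is set before
 * scheduling NAPI.
 */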
2362 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2363 {
2364 	struct bnxt_napi *bnapi = dev_instance;
2365 	struct bnxt *bp = bnapi->bp;
2366 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2367 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2368 	u32 int_status;
2369 
2370 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2371 
2372 	if (!bnxt_has_work(bp, cpr)) {
2373 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2374 		/* return if erroneous interrupt */
2375 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2376 			return IRQ_NONE;
2377 	}
2378 
2379 	/* disable ring IRQ */
2380 	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2381 
2382 	/* Return here if interrupt is shared and is disabled. */
2383 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2384 		return IRQ_HANDLED;
2385 
2386 	napi_schedule(&bnapi->napi);
2387 	return IRQ_HANDLED;
2388 }
2389 
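/* Core completion ring poll loop.  Processes TX, RX, and HWRM
 * completions until the ring is empty or the NAPI budget is consumed.
 */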
2390 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2391 			    int budget)
2392 {
2393 	struct bnxt_napi *bnapi = cpr->bnapi;
2394 	u32 raw_cons = cpr->cp_raw_cons;
2395 	u32 cons;
2396 	int tx_pkts = 0;
2397 	int rx_pkts = 0;
2398 	u8 event = 0;
2399 	struct tx_cmp *txcmp;
2400 
2401 	cpr->has_more_work = 0;
2402 	cpr->had_work_done = 1;
2403 	while (1) {
2404 		int rc;
2405 
2406 		cons = RING_CMP(raw_cons);
2407 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2408 
2409 		if (!TX_CMP_VALID(txcmp, raw_cons))
2410 			break;
2411 
		/* The validity test of the entry must be done before
		 * reading any further.
		 */
2415 		dma_rmb();
2416 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2417 			tx_pkts++;
			/* Return the full budget so NAPI will be
			 * rescheduled to process the remaining TX
			 * completions.
			 */
2419 			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2420 				rx_pkts = budget;
2421 				raw_cons = NEXT_RAW_CMP(raw_cons);
2422 				if (budget)
2423 					cpr->has_more_work = 1;
2424 				break;
2425 			}
2426 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2427 			if (likely(budget))
2428 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2429 			else
2430 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2431 							   &event);
2432 			if (likely(rc >= 0))
2433 				rx_pkts += rc;
2434 			/* Increment rx_pkts when rc is -ENOMEM to count towards
2435 			 * the NAPI budget.  Otherwise, we may potentially loop
2436 			 * here forever if we consistently cannot allocate
2437 			 * buffers.
2438 			 */
2439 			else if (rc == -ENOMEM && budget)
2440 				rx_pkts++;
2441 			else if (rc == -EBUSY)	/* partial completion */
2442 				break;
2443 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
2444 				     CMPL_BASE_TYPE_HWRM_DONE) ||
2445 				    (TX_CMP_TYPE(txcmp) ==
2446 				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2447 				    (TX_CMP_TYPE(txcmp) ==
2448 				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2449 			bnxt_hwrm_handler(bp, txcmp);
2450 		}
2451 		raw_cons = NEXT_RAW_CMP(raw_cons);
2452 
2453 		if (rx_pkts && rx_pkts == budget) {
2454 			cpr->has_more_work = 1;
2455 			break;
2456 		}
2457 	}
2458 
2459 	if (event & BNXT_REDIRECT_EVENT)
2460 		xdp_do_flush_map();
2461 
2462 	if (event & BNXT_TX_EVENT) {
2463 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2464 		u16 prod = txr->tx_prod;
2465 
2466 		/* Sync BD data before updating doorbell */
2467 		wmb();
2468 
2469 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2470 	}
2471 
2472 	cpr->cp_raw_cons = raw_cons;
2473 	bnapi->tx_pkts += tx_pkts;
2474 	bnapi->events |= event;
2475 	return rx_pkts;
2476 }
2477 
2478 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2479 {
2480 	if (bnapi->tx_pkts) {
2481 		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2482 		bnapi->tx_pkts = 0;
2483 	}
2484 
2485 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2486 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2487 
2488 		if (bnapi->events & BNXT_AGG_EVENT)
2489 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2490 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2491 	}
2492 	bnapi->events = 0;
2493 }
2494 
2495 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2496 			  int budget)
2497 {
2498 	struct bnxt_napi *bnapi = cpr->bnapi;
2499 	int rx_pkts;
2500 
2501 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2502 
2503 	/* ACK completion ring before freeing tx ring and producing new
2504 	 * buffers in rx/agg rings to prevent overflowing the completion
2505 	 * ring.
2506 	 */
2507 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2508 
2509 	__bnxt_poll_work_done(bp, bnapi);
2510 	return rx_pkts;
2511 }
2512 
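/* NAPI poll handler for the special Nitro A0 ring.  RX packets on this
 * ring are flagged with a forced error so the buffers are recycled.
 */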
2513 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2514 {
2515 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2516 	struct bnxt *bp = bnapi->bp;
2517 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2518 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2519 	struct tx_cmp *txcmp;
2520 	struct rx_cmp_ext *rxcmp1;
2521 	u32 cp_cons, tmp_raw_cons;
2522 	u32 raw_cons = cpr->cp_raw_cons;
2523 	u32 rx_pkts = 0;
2524 	u8 event = 0;
2525 
2526 	while (1) {
2527 		int rc;
2528 
2529 		cp_cons = RING_CMP(raw_cons);
2530 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2531 
2532 		if (!TX_CMP_VALID(txcmp, raw_cons))
2533 			break;
2534 
		/* The validity test of the entry must be done before
		 * reading any further.
		 */
2538 		dma_rmb();
2539 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2540 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2541 			cp_cons = RING_CMP(tmp_raw_cons);
2542 			rxcmp1 = (struct rx_cmp_ext *)
2543 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2544 
2545 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2546 				break;
2547 
2548 			/* force an error to recycle the buffer */
2549 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2550 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2551 
2552 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2553 			if (likely(rc == -EIO) && budget)
2554 				rx_pkts++;
2555 			else if (rc == -EBUSY)	/* partial completion */
2556 				break;
2557 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
2558 				    CMPL_BASE_TYPE_HWRM_DONE)) {
2559 			bnxt_hwrm_handler(bp, txcmp);
2560 		} else {
2561 			netdev_err(bp->dev,
2562 				   "Invalid completion received on special ring\n");
2563 		}
2564 		raw_cons = NEXT_RAW_CMP(raw_cons);
2565 
2566 		if (rx_pkts == budget)
2567 			break;
2568 	}
2569 
2570 	cpr->cp_raw_cons = raw_cons;
2571 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2572 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2573 
2574 	if (event & BNXT_AGG_EVENT)
2575 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2576 
2577 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2578 		napi_complete_done(napi, rx_pkts);
2579 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2580 	}
2581 	return rx_pkts;
2582 }
2583 
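/* NAPI poll handler for non-P5 chips. */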
2584 static int bnxt_poll(struct napi_struct *napi, int budget)
2585 {
2586 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2587 	struct bnxt *bp = bnapi->bp;
2588 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2589 	int work_done = 0;
2590 
2591 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2592 		napi_complete(napi);
2593 		return 0;
2594 	}
2595 	while (1) {
2596 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2597 
2598 		if (work_done >= budget) {
2599 			if (!budget)
2600 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2601 			break;
2602 		}
2603 
2604 		if (!bnxt_has_work(bp, cpr)) {
2605 			if (napi_complete_done(napi, work_done))
2606 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2607 			break;
2608 		}
2609 	}
2610 	if (bp->flags & BNXT_FLAG_DIM) {
2611 		struct dim_sample dim_sample = {};
2612 
2613 		dim_update_sample(cpr->event_ctr,
2614 				  cpr->rx_packets,
2615 				  cpr->rx_bytes,
2616 				  &dim_sample);
2617 		net_dim(&cpr->dim, dim_sample);
2618 	}
2619 	return work_done;
2620 }
2621 
2622 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2623 {
2624 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2625 	int i, work_done = 0;
2626 
2627 	for (i = 0; i < 2; i++) {
2628 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2629 
2630 		if (cpr2) {
2631 			work_done += __bnxt_poll_work(bp, cpr2,
2632 						      budget - work_done);
2633 			cpr->has_more_work |= cpr2->has_more_work;
2634 		}
2635 	}
2636 	return work_done;
2637 }
2638 
2639 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2640 				 u64 dbr_type)
2641 {
2642 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2643 	int i;
2644 
2645 	for (i = 0; i < 2; i++) {
2646 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2647 		struct bnxt_db_info *db;
2648 
2649 		if (cpr2 && cpr2->had_work_done) {
2650 			db = &cpr2->cp_db;
2651 			bnxt_writeq(bp, db->db_key64 | dbr_type |
2652 				    RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2653 			cpr2->had_work_done = 0;
2654 		}
2655 	}
2656 	__bnxt_poll_work_done(bp, bnapi);
2657 }
2658 
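/* NAPI poll handler for P5 chips.  The NQ is polled for notifications,
 * and each completion ring referenced by a notification is then polled
 * for actual TX/RX work.
 */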
2659 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2660 {
2661 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2662 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2663 	u32 raw_cons = cpr->cp_raw_cons;
2664 	struct bnxt *bp = bnapi->bp;
2665 	struct nqe_cn *nqcmp;
2666 	int work_done = 0;
2667 	u32 cons;
2668 
2669 	if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2670 		napi_complete(napi);
2671 		return 0;
2672 	}
2673 	if (cpr->has_more_work) {
2674 		cpr->has_more_work = 0;
2675 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2676 	}
2677 	while (1) {
2678 		cons = RING_CMP(raw_cons);
2679 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2680 
2681 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2682 			if (cpr->has_more_work)
2683 				break;
2684 
2685 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2686 			cpr->cp_raw_cons = raw_cons;
2687 			if (napi_complete_done(napi, work_done))
2688 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2689 						  cpr->cp_raw_cons);
2690 			return work_done;
2691 		}
2692 
		/* The validity test of the entry must be done before
		 * reading any further.
		 */
2696 		dma_rmb();
2697 
2698 		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2699 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2700 			struct bnxt_cp_ring_info *cpr2;
2701 
2702 			cpr2 = cpr->cp_ring_arr[idx];
2703 			work_done += __bnxt_poll_work(bp, cpr2,
2704 						      budget - work_done);
2705 			cpr->has_more_work |= cpr2->has_more_work;
2706 		} else {
2707 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2708 		}
2709 		raw_cons = NEXT_RAW_CMP(raw_cons);
2710 	}
2711 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2712 	if (raw_cons != cpr->cp_raw_cons) {
2713 		cpr->cp_raw_cons = raw_cons;
2714 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2715 	}
2716 	return work_done;
2717 }
2718 
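/* Unmap and free all SKBs and XDP frames still queued on the TX rings. */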
2719 static void bnxt_free_tx_skbs(struct bnxt *bp)
2720 {
2721 	int i, max_idx;
2722 	struct pci_dev *pdev = bp->pdev;
2723 
2724 	if (!bp->tx_ring)
2725 		return;
2726 
2727 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2728 	for (i = 0; i < bp->tx_nr_rings; i++) {
2729 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2730 		int j;
2731 
2732 		if (!txr->tx_buf_ring)
2733 			continue;
2734 
2735 		for (j = 0; j < max_idx;) {
2736 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2737 			struct sk_buff *skb;
2738 			int k, last;
2739 
2740 			if (i < bp->tx_nr_rings_xdp &&
2741 			    tx_buf->action == XDP_REDIRECT) {
2742 				dma_unmap_single(&pdev->dev,
2743 					dma_unmap_addr(tx_buf, mapping),
2744 					dma_unmap_len(tx_buf, len),
2745 					DMA_TO_DEVICE);
2746 				xdp_return_frame(tx_buf->xdpf);
2747 				tx_buf->action = 0;
2748 				tx_buf->xdpf = NULL;
2749 				j++;
2750 				continue;
2751 			}
2752 
2753 			skb = tx_buf->skb;
2754 			if (!skb) {
2755 				j++;
2756 				continue;
2757 			}
2758 
2759 			tx_buf->skb = NULL;
2760 
2761 			if (tx_buf->is_push) {
2762 				dev_kfree_skb(skb);
2763 				j += 2;
2764 				continue;
2765 			}
2766 
2767 			dma_unmap_single(&pdev->dev,
2768 					 dma_unmap_addr(tx_buf, mapping),
2769 					 skb_headlen(skb),
2770 					 DMA_TO_DEVICE);
2771 
2772 			last = tx_buf->nr_frags;
2773 			j += 2;
2774 			for (k = 0; k < last; k++, j++) {
2775 				int ring_idx = j & bp->tx_ring_mask;
2776 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2777 
2778 				tx_buf = &txr->tx_buf_ring[ring_idx];
2779 				dma_unmap_page(
2780 					&pdev->dev,
2781 					dma_unmap_addr(tx_buf, mapping),
2782 					skb_frag_size(frag), DMA_TO_DEVICE);
2783 			}
2784 			dev_kfree_skb(skb);
2785 		}
2786 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2787 	}
2788 }
2789 
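/* Unmap and free all TPA, RX, and aggregation buffers of one RX ring. */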
2790 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2791 {
2792 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2793 	struct pci_dev *pdev = bp->pdev;
2794 	struct bnxt_tpa_idx_map *map;
2795 	int i, max_idx, max_agg_idx;
2796 
2797 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2798 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2799 	if (!rxr->rx_tpa)
2800 		goto skip_rx_tpa_free;
2801 
2802 	for (i = 0; i < bp->max_tpa; i++) {
2803 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2804 		u8 *data = tpa_info->data;
2805 
2806 		if (!data)
2807 			continue;
2808 
2809 		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2810 				       bp->rx_buf_use_size, bp->rx_dir,
2811 				       DMA_ATTR_WEAK_ORDERING);
2812 
2813 		tpa_info->data = NULL;
2814 
2815 		kfree(data);
2816 	}
2817 
2818 skip_rx_tpa_free:
2819 	if (!rxr->rx_buf_ring)
2820 		goto skip_rx_buf_free;
2821 
2822 	for (i = 0; i < max_idx; i++) {
2823 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2824 		dma_addr_t mapping = rx_buf->mapping;
2825 		void *data = rx_buf->data;
2826 
2827 		if (!data)
2828 			continue;
2829 
2830 		rx_buf->data = NULL;
2831 		if (BNXT_RX_PAGE_MODE(bp)) {
2832 			mapping -= bp->rx_dma_offset;
2833 			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2834 					     bp->rx_dir,
2835 					     DMA_ATTR_WEAK_ORDERING);
2836 			page_pool_recycle_direct(rxr->page_pool, data);
2837 		} else {
2838 			dma_unmap_single_attrs(&pdev->dev, mapping,
2839 					       bp->rx_buf_use_size, bp->rx_dir,
2840 					       DMA_ATTR_WEAK_ORDERING);
2841 			kfree(data);
2842 		}
2843 	}
2844 
2845 skip_rx_buf_free:
2846 	if (!rxr->rx_agg_ring)
2847 		goto skip_rx_agg_free;
2848 
2849 	for (i = 0; i < max_agg_idx; i++) {
2850 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2851 		struct page *page = rx_agg_buf->page;
2852 
2853 		if (!page)
2854 			continue;
2855 
2856 		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2857 				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2858 				     DMA_ATTR_WEAK_ORDERING);
2859 
2860 		rx_agg_buf->page = NULL;
2861 		__clear_bit(i, rxr->rx_agg_bmap);
2862 
2863 		__free_page(page);
2864 	}
2865 
2866 skip_rx_agg_free:
2867 	if (rxr->rx_page) {
2868 		__free_page(rxr->rx_page);
2869 		rxr->rx_page = NULL;
2870 	}
2871 	map = rxr->rx_tpa_idx_map;
2872 	if (map)
2873 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2874 }
2875 
2876 static void bnxt_free_rx_skbs(struct bnxt *bp)
2877 {
2878 	int i;
2879 
2880 	if (!bp->rx_ring)
2881 		return;
2882 
2883 	for (i = 0; i < bp->rx_nr_rings; i++)
2884 		bnxt_free_one_rx_ring_skbs(bp, i);
2885 }
2886 
2887 static void bnxt_free_skbs(struct bnxt *bp)
2888 {
2889 	bnxt_free_tx_skbs(bp);
2890 	bnxt_free_rx_skbs(bp);
2891 }
2892 
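/* Initialize context memory with the firmware-specified init value,
 * either over the entire block or at a fixed offset within each unit.
 */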
2893 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2894 {
2895 	u8 init_val = mem_init->init_val;
2896 	u16 offset = mem_init->offset;
2897 	u8 *p2 = p;
2898 	int i;
2899 
2900 	if (!init_val)
2901 		return;
2902 	if (offset == BNXT_MEM_INVALID_OFFSET) {
2903 		memset(p, init_val, len);
2904 		return;
2905 	}
2906 	for (i = 0; i < len; i += mem_init->size)
2907 		*(p2 + i + offset) = init_val;
2908 }
2909 
2910 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2911 {
2912 	struct pci_dev *pdev = bp->pdev;
2913 	int i;
2914 
2915 	if (!rmem->pg_arr)
2916 		goto skip_pages;
2917 
2918 	for (i = 0; i < rmem->nr_pages; i++) {
2919 		if (!rmem->pg_arr[i])
2920 			continue;
2921 
2922 		dma_free_coherent(&pdev->dev, rmem->page_size,
2923 				  rmem->pg_arr[i], rmem->dma_arr[i]);
2924 
2925 		rmem->pg_arr[i] = NULL;
2926 	}
2927 skip_pages:
2928 	if (rmem->pg_tbl) {
2929 		size_t pg_tbl_size = rmem->nr_pages * 8;
2930 
2931 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2932 			pg_tbl_size = rmem->page_size;
2933 		dma_free_coherent(&pdev->dev, pg_tbl_size,
2934 				  rmem->pg_tbl, rmem->pg_tbl_map);
2935 		rmem->pg_tbl = NULL;
2936 	}
2937 	if (rmem->vmem_size && *rmem->vmem) {
2938 		vfree(*rmem->vmem);
2939 		*rmem->vmem = NULL;
2940 	}
2941 }
2942 
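/* Allocate the DMA-coherent pages backing a ring.  If the ring spans
 * multiple pages, also allocate a page table and fill it with the page
 * addresses and the PTE bits expected by the chip.
 */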
2943 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2944 {
2945 	struct pci_dev *pdev = bp->pdev;
2946 	u64 valid_bit = 0;
2947 	int i;
2948 
2949 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2950 		valid_bit = PTU_PTE_VALID;
2951 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2952 		size_t pg_tbl_size = rmem->nr_pages * 8;
2953 
2954 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2955 			pg_tbl_size = rmem->page_size;
2956 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2957 						  &rmem->pg_tbl_map,
2958 						  GFP_KERNEL);
2959 		if (!rmem->pg_tbl)
2960 			return -ENOMEM;
2961 	}
2962 
2963 	for (i = 0; i < rmem->nr_pages; i++) {
2964 		u64 extra_bits = valid_bit;
2965 
2966 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2967 						     rmem->page_size,
2968 						     &rmem->dma_arr[i],
2969 						     GFP_KERNEL);
2970 		if (!rmem->pg_arr[i])
2971 			return -ENOMEM;
2972 
2973 		if (rmem->mem_init)
2974 			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2975 					  rmem->page_size);
2976 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
2977 			if (i == rmem->nr_pages - 2 &&
2978 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2979 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
2980 			else if (i == rmem->nr_pages - 1 &&
2981 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2982 				extra_bits |= PTU_PTE_LAST;
2983 			rmem->pg_tbl[i] =
2984 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2985 		}
2986 	}
2987 
2988 	if (rmem->vmem_size) {
2989 		*rmem->vmem = vzalloc(rmem->vmem_size);
2990 		if (!(*rmem->vmem))
2991 			return -ENOMEM;
2992 	}
2993 	return 0;
2994 }
2995 
2996 static void bnxt_free_tpa_info(struct bnxt *bp)
2997 {
2998 	int i;
2999 
3000 	for (i = 0; i < bp->rx_nr_rings; i++) {
3001 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3002 
3003 		kfree(rxr->rx_tpa_idx_map);
3004 		rxr->rx_tpa_idx_map = NULL;
3005 		if (rxr->rx_tpa) {
3006 			kfree(rxr->rx_tpa[0].agg_arr);
3007 			rxr->rx_tpa[0].agg_arr = NULL;
3008 		}
3009 		kfree(rxr->rx_tpa);
3010 		rxr->rx_tpa = NULL;
3011 	}
3012 }
3013 
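/* Allocate per-ring TPA state.  On P5 chips, one array of aggregation
 * completions is shared by all TPA IDs of a ring, carved up into
 * MAX_SKB_FRAGS-sized chunks.
 */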
3014 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3015 {
3016 	int i, j, total_aggs = 0;
3017 
3018 	bp->max_tpa = MAX_TPA;
3019 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
3020 		if (!bp->max_tpa_v2)
3021 			return 0;
3022 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3023 		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3024 	}
3025 
3026 	for (i = 0; i < bp->rx_nr_rings; i++) {
3027 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3028 		struct rx_agg_cmp *agg;
3029 
3030 		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3031 				      GFP_KERNEL);
3032 		if (!rxr->rx_tpa)
3033 			return -ENOMEM;
3034 
3035 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3036 			continue;
3037 		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3038 		rxr->rx_tpa[0].agg_arr = agg;
3039 		if (!agg)
3040 			return -ENOMEM;
3041 		for (j = 1; j < bp->max_tpa; j++)
3042 			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3043 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3044 					      GFP_KERNEL);
3045 		if (!rxr->rx_tpa_idx_map)
3046 			return -ENOMEM;
3047 	}
3048 	return 0;
3049 }
3050 
3051 static void bnxt_free_rx_rings(struct bnxt *bp)
3052 {
3053 	int i;
3054 
3055 	if (!bp->rx_ring)
3056 		return;
3057 
3058 	bnxt_free_tpa_info(bp);
3059 	for (i = 0; i < bp->rx_nr_rings; i++) {
3060 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3061 		struct bnxt_ring_struct *ring;
3062 
3063 		if (rxr->xdp_prog)
3064 			bpf_prog_put(rxr->xdp_prog);
3065 
3066 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3067 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3068 
3069 		page_pool_destroy(rxr->page_pool);
3070 		rxr->page_pool = NULL;
3071 
3072 		kfree(rxr->rx_agg_bmap);
3073 		rxr->rx_agg_bmap = NULL;
3074 
3075 		ring = &rxr->rx_ring_struct;
3076 		bnxt_free_ring(bp, &ring->ring_mem);
3077 
3078 		ring = &rxr->rx_agg_ring_struct;
3079 		bnxt_free_ring(bp, &ring->ring_mem);
3080 	}
3081 }
3082 
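/* Create the page pool that backs the RX buffers of one ring. */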
3083 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3084 				   struct bnxt_rx_ring_info *rxr)
3085 {
3086 	struct page_pool_params pp = { 0 };
3087 
3088 	pp.pool_size = bp->rx_ring_size;
3089 	pp.nid = dev_to_node(&bp->pdev->dev);
3090 	pp.dev = &bp->pdev->dev;
3091 	pp.dma_dir = DMA_BIDIRECTIONAL;
3092 
3093 	rxr->page_pool = page_pool_create(&pp);
3094 	if (IS_ERR(rxr->page_pool)) {
3095 		int err = PTR_ERR(rxr->page_pool);
3096 
3097 		rxr->page_pool = NULL;
3098 		return err;
3099 	}
3100 	return 0;
3101 }
3102 
3103 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3104 {
3105 	int i, rc = 0, agg_rings = 0;
3106 
3107 	if (!bp->rx_ring)
3108 		return -ENOMEM;
3109 
3110 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
3111 		agg_rings = 1;
3112 
3113 	for (i = 0; i < bp->rx_nr_rings; i++) {
3114 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3115 		struct bnxt_ring_struct *ring;
3116 
3117 		ring = &rxr->rx_ring_struct;
3118 
3119 		rc = bnxt_alloc_rx_page_pool(bp, rxr);
3120 		if (rc)
3121 			return rc;
3122 
3123 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3124 		if (rc < 0)
3125 			return rc;
3126 
3127 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3128 						MEM_TYPE_PAGE_POOL,
3129 						rxr->page_pool);
3130 		if (rc) {
3131 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
3132 			return rc;
3133 		}
3134 
3135 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3136 		if (rc)
3137 			return rc;
3138 
3139 		ring->grp_idx = i;
3140 		if (agg_rings) {
3141 			u16 mem_size;
3142 
3143 			ring = &rxr->rx_agg_ring_struct;
3144 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3145 			if (rc)
3146 				return rc;
3147 
3148 			ring->grp_idx = i;
3149 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3150 			mem_size = rxr->rx_agg_bmap_size / 8;
3151 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3152 			if (!rxr->rx_agg_bmap)
3153 				return -ENOMEM;
3154 		}
3155 	}
3156 	if (bp->flags & BNXT_FLAG_TPA)
3157 		rc = bnxt_alloc_tpa_info(bp);
3158 	return rc;
3159 }
3160 
3161 static void bnxt_free_tx_rings(struct bnxt *bp)
3162 {
3163 	int i;
3164 	struct pci_dev *pdev = bp->pdev;
3165 
3166 	if (!bp->tx_ring)
3167 		return;
3168 
3169 	for (i = 0; i < bp->tx_nr_rings; i++) {
3170 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3171 		struct bnxt_ring_struct *ring;
3172 
3173 		if (txr->tx_push) {
3174 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3175 					  txr->tx_push, txr->tx_push_mapping);
3176 			txr->tx_push = NULL;
3177 		}
3178 
3179 		ring = &txr->tx_ring_struct;
3180 
3181 		bnxt_free_ring(bp, &ring->ring_mem);
3182 	}
3183 }
3184 
3185 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3186 {
3187 	int i, j, rc;
3188 	struct pci_dev *pdev = bp->pdev;
3189 
3190 	bp->tx_push_size = 0;
3191 	if (bp->tx_push_thresh) {
3192 		int push_size;
3193 
3194 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3195 					bp->tx_push_thresh);
3196 
3197 		if (push_size > 256) {
3198 			push_size = 0;
3199 			bp->tx_push_thresh = 0;
3200 		}
3201 
3202 		bp->tx_push_size = push_size;
3203 	}
3204 
3205 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3206 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3207 		struct bnxt_ring_struct *ring;
3208 		u8 qidx;
3209 
3210 		ring = &txr->tx_ring_struct;
3211 
3212 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3213 		if (rc)
3214 			return rc;
3215 
3216 		ring->grp_idx = txr->bnapi->index;
3217 		if (bp->tx_push_size) {
3218 			dma_addr_t mapping;
3219 
			/* One pre-allocated DMA buffer to back up
			 * the TX push operation
			 */
3223 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
3224 						bp->tx_push_size,
3225 						&txr->tx_push_mapping,
3226 						GFP_KERNEL);
3227 
3228 			if (!txr->tx_push)
3229 				return -ENOMEM;
3230 
3231 			mapping = txr->tx_push_mapping +
3232 				sizeof(struct tx_push_bd);
3233 			txr->data_mapping = cpu_to_le64(mapping);
3234 		}
3235 		qidx = bp->tc_to_qidx[j];
3236 		ring->queue_id = bp->q_info[qidx].queue_id;
3237 		if (i < bp->tx_nr_rings_xdp)
3238 			continue;
3239 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3240 			j++;
3241 	}
3242 	return 0;
3243 }
3244 
3245 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3246 {
3247 	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3248 
3249 	kfree(cpr->cp_desc_ring);
3250 	cpr->cp_desc_ring = NULL;
3251 	ring->ring_mem.pg_arr = NULL;
3252 	kfree(cpr->cp_desc_mapping);
3253 	cpr->cp_desc_mapping = NULL;
3254 	ring->ring_mem.dma_arr = NULL;
3255 }
3256 
3257 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3258 {
3259 	cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3260 	if (!cpr->cp_desc_ring)
3261 		return -ENOMEM;
3262 	cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3263 				       GFP_KERNEL);
3264 	if (!cpr->cp_desc_mapping)
3265 		return -ENOMEM;
3266 	return 0;
3267 }
3268 
3269 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3270 {
3271 	int i;
3272 
3273 	if (!bp->bnapi)
3274 		return;
3275 	for (i = 0; i < bp->cp_nr_rings; i++) {
3276 		struct bnxt_napi *bnapi = bp->bnapi[i];
3277 
3278 		if (!bnapi)
3279 			continue;
3280 		bnxt_free_cp_arrays(&bnapi->cp_ring);
3281 	}
3282 }
3283 
3284 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3285 {
3286 	int i, n = bp->cp_nr_pages;
3287 
3288 	for (i = 0; i < bp->cp_nr_rings; i++) {
3289 		struct bnxt_napi *bnapi = bp->bnapi[i];
3290 		int rc;
3291 
3292 		if (!bnapi)
3293 			continue;
3294 		rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3295 		if (rc)
3296 			return rc;
3297 	}
3298 	return 0;
3299 }
3300 
3301 static void bnxt_free_cp_rings(struct bnxt *bp)
3302 {
3303 	int i;
3304 
3305 	if (!bp->bnapi)
3306 		return;
3307 
3308 	for (i = 0; i < bp->cp_nr_rings; i++) {
3309 		struct bnxt_napi *bnapi = bp->bnapi[i];
3310 		struct bnxt_cp_ring_info *cpr;
3311 		struct bnxt_ring_struct *ring;
3312 		int j;
3313 
3314 		if (!bnapi)
3315 			continue;
3316 
3317 		cpr = &bnapi->cp_ring;
3318 		ring = &cpr->cp_ring_struct;
3319 
3320 		bnxt_free_ring(bp, &ring->ring_mem);
3321 
3322 		for (j = 0; j < 2; j++) {
3323 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3324 
3325 			if (cpr2) {
3326 				ring = &cpr2->cp_ring_struct;
3327 				bnxt_free_ring(bp, &ring->ring_mem);
3328 				bnxt_free_cp_arrays(cpr2);
3329 				kfree(cpr2);
3330 				cpr->cp_ring_arr[j] = NULL;
3331 			}
3332 		}
3333 	}
3334 }
3335 
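/* Allocate one dedicated completion ring (P5 chips), which will hang
 * off the NQ via cp_ring_arr[].
 */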
3336 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3337 {
3338 	struct bnxt_ring_mem_info *rmem;
3339 	struct bnxt_ring_struct *ring;
3340 	struct bnxt_cp_ring_info *cpr;
3341 	int rc;
3342 
3343 	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3344 	if (!cpr)
3345 		return NULL;
3346 
3347 	rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3348 	if (rc) {
3349 		bnxt_free_cp_arrays(cpr);
3350 		kfree(cpr);
3351 		return NULL;
3352 	}
3353 	ring = &cpr->cp_ring_struct;
3354 	rmem = &ring->ring_mem;
3355 	rmem->nr_pages = bp->cp_nr_pages;
3356 	rmem->page_size = HW_CMPD_RING_SIZE;
3357 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
3358 	rmem->dma_arr = cpr->cp_desc_mapping;
3359 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3360 	rc = bnxt_alloc_ring(bp, rmem);
3361 	if (rc) {
3362 		bnxt_free_ring(bp, rmem);
3363 		bnxt_free_cp_arrays(cpr);
3364 		kfree(cpr);
3365 		cpr = NULL;
3366 	}
3367 	return cpr;
3368 }
3369 
3370 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3371 {
3372 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3373 	int i, rc, ulp_base_vec, ulp_msix;
3374 
3375 	ulp_msix = bnxt_get_ulp_msix_num(bp);
3376 	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3377 	for (i = 0; i < bp->cp_nr_rings; i++) {
3378 		struct bnxt_napi *bnapi = bp->bnapi[i];
3379 		struct bnxt_cp_ring_info *cpr;
3380 		struct bnxt_ring_struct *ring;
3381 
3382 		if (!bnapi)
3383 			continue;
3384 
3385 		cpr = &bnapi->cp_ring;
3386 		cpr->bnapi = bnapi;
3387 		ring = &cpr->cp_ring_struct;
3388 
3389 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3390 		if (rc)
3391 			return rc;
3392 
3393 		if (ulp_msix && i >= ulp_base_vec)
3394 			ring->map_idx = i + ulp_msix;
3395 		else
3396 			ring->map_idx = i;
3397 
3398 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3399 			continue;
3400 
3401 		if (i < bp->rx_nr_rings) {
3402 			struct bnxt_cp_ring_info *cpr2 =
3403 				bnxt_alloc_cp_sub_ring(bp);
3404 
3405 			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3406 			if (!cpr2)
3407 				return -ENOMEM;
3408 			cpr2->bnapi = bnapi;
3409 		}
3410 		if ((sh && i < bp->tx_nr_rings) ||
3411 		    (!sh && i >= bp->rx_nr_rings)) {
3412 			struct bnxt_cp_ring_info *cpr2 =
3413 				bnxt_alloc_cp_sub_ring(bp);
3414 
3415 			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3416 			if (!cpr2)
3417 				return -ENOMEM;
3418 			cpr2->bnapi = bnapi;
3419 		}
3420 	}
3421 	return 0;
3422 }
3423 
3424 static void bnxt_init_ring_struct(struct bnxt *bp)
3425 {
3426 	int i;
3427 
3428 	for (i = 0; i < bp->cp_nr_rings; i++) {
3429 		struct bnxt_napi *bnapi = bp->bnapi[i];
3430 		struct bnxt_ring_mem_info *rmem;
3431 		struct bnxt_cp_ring_info *cpr;
3432 		struct bnxt_rx_ring_info *rxr;
3433 		struct bnxt_tx_ring_info *txr;
3434 		struct bnxt_ring_struct *ring;
3435 
3436 		if (!bnapi)
3437 			continue;
3438 
3439 		cpr = &bnapi->cp_ring;
3440 		ring = &cpr->cp_ring_struct;
3441 		rmem = &ring->ring_mem;
3442 		rmem->nr_pages = bp->cp_nr_pages;
3443 		rmem->page_size = HW_CMPD_RING_SIZE;
3444 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
3445 		rmem->dma_arr = cpr->cp_desc_mapping;
3446 		rmem->vmem_size = 0;
3447 
3448 		rxr = bnapi->rx_ring;
3449 		if (!rxr)
3450 			goto skip_rx;
3451 
3452 		ring = &rxr->rx_ring_struct;
3453 		rmem = &ring->ring_mem;
3454 		rmem->nr_pages = bp->rx_nr_pages;
3455 		rmem->page_size = HW_RXBD_RING_SIZE;
3456 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
3457 		rmem->dma_arr = rxr->rx_desc_mapping;
3458 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3459 		rmem->vmem = (void **)&rxr->rx_buf_ring;
3460 
3461 		ring = &rxr->rx_agg_ring_struct;
3462 		rmem = &ring->ring_mem;
3463 		rmem->nr_pages = bp->rx_agg_nr_pages;
3464 		rmem->page_size = HW_RXBD_RING_SIZE;
3465 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3466 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
3467 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3468 		rmem->vmem = (void **)&rxr->rx_agg_ring;
3469 
3470 skip_rx:
3471 		txr = bnapi->tx_ring;
3472 		if (!txr)
3473 			continue;
3474 
3475 		ring = &txr->tx_ring_struct;
3476 		rmem = &ring->ring_mem;
3477 		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_TXBD_RING_SIZE;
3479 		rmem->pg_arr = (void **)txr->tx_desc_ring;
3480 		rmem->dma_arr = txr->tx_desc_mapping;
3481 		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3482 		rmem->vmem = (void **)&txr->tx_buf_ring;
3483 	}
3484 }
3485 
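/* Initialize every RX BD in the ring pages with the given type/flags
 * and an opaque value equal to the producer index, which is used to
 * locate the SW buffer on completion.
 */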
3486 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3487 {
3488 	int i;
3489 	u32 prod;
3490 	struct rx_bd **rx_buf_ring;
3491 
3492 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3493 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3494 		int j;
3495 		struct rx_bd *rxbd;
3496 
3497 		rxbd = rx_buf_ring[i];
3498 		if (!rxbd)
3499 			continue;
3500 
3501 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3502 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3503 			rxbd->rx_bd_opaque = prod;
3504 		}
3505 	}
3506 }
3507 
3508 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3509 {
3510 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3511 	struct net_device *dev = bp->dev;
3512 	u32 prod;
3513 	int i;
3514 
3515 	prod = rxr->rx_prod;
3516 	for (i = 0; i < bp->rx_ring_size; i++) {
3517 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3518 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3519 				    ring_nr, i, bp->rx_ring_size);
3520 			break;
3521 		}
3522 		prod = NEXT_RX(prod);
3523 	}
3524 	rxr->rx_prod = prod;
3525 
3526 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3527 		return 0;
3528 
3529 	prod = rxr->rx_agg_prod;
3530 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
3531 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
3534 			break;
3535 		}
3536 		prod = NEXT_RX_AGG(prod);
3537 	}
3538 	rxr->rx_agg_prod = prod;
3539 
3540 	if (rxr->rx_tpa) {
3541 		dma_addr_t mapping;
3542 		u8 *data;
3543 
3544 		for (i = 0; i < bp->max_tpa; i++) {
3545 			data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3546 			if (!data)
3547 				return -ENOMEM;
3548 
3549 			rxr->rx_tpa[i].data = data;
3550 			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3551 			rxr->rx_tpa[i].mapping = mapping;
3552 		}
3553 	}
3554 	return 0;
3555 }
3556 
3557 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3558 {
3559 	struct bnxt_rx_ring_info *rxr;
3560 	struct bnxt_ring_struct *ring;
3561 	u32 type;
3562 
3563 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3564 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3565 
3566 	if (NET_IP_ALIGN == 2)
3567 		type |= RX_BD_FLAGS_SOP;
3568 
3569 	rxr = &bp->rx_ring[ring_nr];
3570 	ring = &rxr->rx_ring_struct;
3571 	bnxt_init_rxbd_pages(ring, type);
3572 
3573 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3574 		bpf_prog_add(bp->xdp_prog, 1);
3575 		rxr->xdp_prog = bp->xdp_prog;
3576 	}
3577 	ring->fw_ring_id = INVALID_HW_RING_ID;
3578 
3579 	ring = &rxr->rx_agg_ring_struct;
3580 	ring->fw_ring_id = INVALID_HW_RING_ID;
3581 
3582 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3583 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3584 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3585 
3586 		bnxt_init_rxbd_pages(ring, type);
3587 	}
3588 
3589 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
3590 }
3591 
3592 static void bnxt_init_cp_rings(struct bnxt *bp)
3593 {
3594 	int i, j;
3595 
3596 	for (i = 0; i < bp->cp_nr_rings; i++) {
3597 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3598 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3599 
3600 		ring->fw_ring_id = INVALID_HW_RING_ID;
3601 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3602 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3603 		for (j = 0; j < 2; j++) {
3604 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3605 
3606 			if (!cpr2)
3607 				continue;
3608 
3609 			ring = &cpr2->cp_ring_struct;
3610 			ring->fw_ring_id = INVALID_HW_RING_ID;
3611 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3612 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3613 		}
3614 	}
3615 }
3616 
3617 static int bnxt_init_rx_rings(struct bnxt *bp)
3618 {
3619 	int i, rc = 0;
3620 
3621 	if (BNXT_RX_PAGE_MODE(bp)) {
3622 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3623 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3624 	} else {
3625 		bp->rx_offset = BNXT_RX_OFFSET;
3626 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3627 	}
3628 
3629 	for (i = 0; i < bp->rx_nr_rings; i++) {
3630 		rc = bnxt_init_one_rx_ring(bp, i);
3631 		if (rc)
3632 			break;
3633 	}
3634 
3635 	return rc;
3636 }
3637 
3638 static int bnxt_init_tx_rings(struct bnxt *bp)
3639 {
3640 	u16 i;
3641 
3642 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3643 				   MAX_SKB_FRAGS + 1);
3644 
3645 	for (i = 0; i < bp->tx_nr_rings; i++) {
3646 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3647 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3648 
3649 		ring->fw_ring_id = INVALID_HW_RING_ID;
3650 	}
3651 
3652 	return 0;
3653 }
3654 
3655 static void bnxt_free_ring_grps(struct bnxt *bp)
3656 {
3657 	kfree(bp->grp_info);
3658 	bp->grp_info = NULL;
3659 }
3660 
3661 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3662 {
3663 	int i;
3664 
3665 	if (irq_re_init) {
3666 		bp->grp_info = kcalloc(bp->cp_nr_rings,
3667 				       sizeof(struct bnxt_ring_grp_info),
3668 				       GFP_KERNEL);
3669 		if (!bp->grp_info)
3670 			return -ENOMEM;
3671 	}
3672 	for (i = 0; i < bp->cp_nr_rings; i++) {
3673 		if (irq_re_init)
3674 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3675 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3676 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3677 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3678 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3679 	}
3680 	return 0;
3681 }
3682 
3683 static void bnxt_free_vnics(struct bnxt *bp)
3684 {
3685 	kfree(bp->vnic_info);
3686 	bp->vnic_info = NULL;
3687 	bp->nr_vnics = 0;
3688 }
3689 
3690 static int bnxt_alloc_vnics(struct bnxt *bp)
3691 {
3692 	int num_vnics = 1;
3693 
3694 #ifdef CONFIG_RFS_ACCEL
3695 	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3696 		num_vnics += bp->rx_nr_rings;
3697 #endif
3698 
3699 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3700 		num_vnics++;
3701 
3702 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3703 				GFP_KERNEL);
3704 	if (!bp->vnic_info)
3705 		return -ENOMEM;
3706 
3707 	bp->nr_vnics = num_vnics;
3708 	return 0;
3709 }
3710 
3711 static void bnxt_init_vnics(struct bnxt *bp)
3712 {
3713 	int i;
3714 
3715 	for (i = 0; i < bp->nr_vnics; i++) {
3716 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3717 		int j;
3718 
3719 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
3720 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3721 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3722 
3723 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3724 
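		/* VNIC 0 owns the RSS hash key; all other VNICs copy it so
		 * that every VNIC hashes flows identically.
		 */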
3725 		if (bp->vnic_info[i].rss_hash_key) {
3726 			if (i == 0)
3727 				prandom_bytes(vnic->rss_hash_key,
3728 					      HW_HASH_KEY_SIZE);
3729 			else
3730 				memcpy(vnic->rss_hash_key,
3731 				       bp->vnic_info[0].rss_hash_key,
3732 				       HW_HASH_KEY_SIZE);
3733 		}
3734 	}
3735 }
3736 
3737 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3738 {
3739 	int pages;
3740 
3741 	pages = ring_size / desc_per_pg;
3742 
3743 	if (!pages)
3744 		return 1;
3745 
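	/* Reserve one extra page, then round up to the next power of 2 so
	 * the ring masks work, e.g. a ring_size of 511 with 256 descriptors
	 * per page yields 2 pages.
	 */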
3746 	pages++;
3747 
3748 	while (pages & (pages - 1))
3749 		pages++;
3750 
3751 	return pages;
3752 }
3753 
3754 void bnxt_set_tpa_flags(struct bnxt *bp)
3755 {
3756 	bp->flags &= ~BNXT_FLAG_TPA;
3757 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3758 		return;
3759 	if (bp->dev->features & NETIF_F_LRO)
3760 		bp->flags |= BNXT_FLAG_LRO;
3761 	else if (bp->dev->features & NETIF_F_GRO_HW)
3762 		bp->flags |= BNXT_FLAG_GRO;
3763 }
3764 
3765 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3766  * be set on entry.
3767  */
3768 void bnxt_set_ring_params(struct bnxt *bp)
3769 {
3770 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3771 	u32 agg_factor = 0, agg_ring_size = 0;
3772 
3773 	/* 8 for CRC and VLAN */
3774 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3775 
3776 	rx_space = rx_size + NET_SKB_PAD +
3777 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3778 
3779 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3780 	ring_size = bp->rx_ring_size;
3781 	bp->rx_agg_ring_size = 0;
3782 	bp->rx_agg_nr_pages = 0;
3783 
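	/* A TPA aggregation can span up to 64K, so reserve up to 4 extra
	 * aggregation buffers per RX entry, fewer when BNXT_RX_PAGE_SIZE is
	 * large enough that 64K fits in less than 4 pages.
	 */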
3784 	if (bp->flags & BNXT_FLAG_TPA)
3785 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3786 
3787 	bp->flags &= ~BNXT_FLAG_JUMBO;
3788 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3789 		u32 jumbo_factor;
3790 
3791 		bp->flags |= BNXT_FLAG_JUMBO;
3792 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3793 		if (jumbo_factor > agg_factor)
3794 			agg_factor = jumbo_factor;
3795 	}
3796 	if (agg_factor) {
3797 		if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3798 			ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3799 			netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3800 				    bp->rx_ring_size, ring_size);
3801 			bp->rx_ring_size = ring_size;
3802 		}
3803 		agg_ring_size = ring_size * agg_factor;
3804 
3805 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3806 							RX_DESC_CNT);
3807 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3808 			u32 tmp = agg_ring_size;
3809 
3810 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3811 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3812 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3813 				    tmp, agg_ring_size);
3814 		}
3815 		bp->rx_agg_ring_size = agg_ring_size;
3816 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3817 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3818 		rx_space = rx_size + NET_SKB_PAD +
3819 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3820 	}
3821 
3822 	bp->rx_buf_use_size = rx_size;
3823 	bp->rx_buf_size = rx_space;
3824 
3825 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3826 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3827 
3828 	ring_size = bp->tx_ring_size;
3829 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3830 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3831 
3832 	max_rx_cmpl = bp->rx_ring_size;
	/* max_tpa must be added because TPA_START completions are
	 * recycled immediately, so TPA completions are not bounded by
	 * the RX ring size.
	 */
3837 	if (bp->flags & BNXT_FLAG_TPA)
3838 		max_rx_cmpl += bp->max_tpa;
3839 	/* RX and TPA completions are 32-byte, all others are 16-byte */
3840 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3841 	bp->cp_ring_size = ring_size;
3842 
3843 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3844 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
3845 		bp->cp_nr_pages = MAX_CP_PAGES;
3846 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3847 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3848 			    ring_size, bp->cp_ring_size);
3849 	}
3850 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3851 	bp->cp_ring_mask = bp->cp_bit - 1;
3852 }
3853 
3854 /* Changing allocation mode of RX rings.
3855  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3856  */
3857 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3858 {
3859 	if (page_mode) {
3860 		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3861 			return -EOPNOTSUPP;
3862 		bp->dev->max_mtu =
3863 			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3864 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3865 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
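		/* XDP programs may write to the packet, so RX buffers must
		 * be DMA-mapped bidirectionally in page mode.
		 */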
3866 		bp->rx_dir = DMA_BIDIRECTIONAL;
3867 		bp->rx_skb_func = bnxt_rx_page_skb;
3868 		/* Disable LRO or GRO_HW */
3869 		netdev_update_features(bp->dev);
3870 	} else {
3871 		bp->dev->max_mtu = bp->max_mtu;
3872 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3873 		bp->rx_dir = DMA_FROM_DEVICE;
3874 		bp->rx_skb_func = bnxt_rx_skb;
3875 	}
3876 	return 0;
3877 }
3878 
3879 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3880 {
3881 	int i;
3882 	struct bnxt_vnic_info *vnic;
3883 	struct pci_dev *pdev = bp->pdev;
3884 
3885 	if (!bp->vnic_info)
3886 		return;
3887 
3888 	for (i = 0; i < bp->nr_vnics; i++) {
3889 		vnic = &bp->vnic_info[i];
3890 
3891 		kfree(vnic->fw_grp_ids);
3892 		vnic->fw_grp_ids = NULL;
3893 
3894 		kfree(vnic->uc_list);
3895 		vnic->uc_list = NULL;
3896 
3897 		if (vnic->mc_list) {
3898 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3899 					  vnic->mc_list, vnic->mc_list_mapping);
3900 			vnic->mc_list = NULL;
3901 		}
3902 
3903 		if (vnic->rss_table) {
3904 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3905 					  vnic->rss_table,
3906 					  vnic->rss_table_dma_addr);
3907 			vnic->rss_table = NULL;
3908 		}
3909 
3910 		vnic->rss_hash_key = NULL;
3911 		vnic->flags = 0;
3912 	}
3913 }
3914 
3915 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3916 {
3917 	int i, rc = 0, size;
3918 	struct bnxt_vnic_info *vnic;
3919 	struct pci_dev *pdev = bp->pdev;
3920 	int max_rings;
3921 
3922 	for (i = 0; i < bp->nr_vnics; i++) {
3923 		vnic = &bp->vnic_info[i];
3924 
3925 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3926 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3927 
3928 			if (mem_size > 0) {
3929 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3930 				if (!vnic->uc_list) {
3931 					rc = -ENOMEM;
3932 					goto out;
3933 				}
3934 			}
3935 		}
3936 
3937 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3938 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3939 			vnic->mc_list =
3940 				dma_alloc_coherent(&pdev->dev,
3941 						   vnic->mc_list_size,
3942 						   &vnic->mc_list_mapping,
3943 						   GFP_KERNEL);
3944 			if (!vnic->mc_list) {
3945 				rc = -ENOMEM;
3946 				goto out;
3947 			}
3948 		}
3949 
3950 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3951 			goto vnic_skip_grps;
3952 
3953 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3954 			max_rings = bp->rx_nr_rings;
3955 		else
3956 			max_rings = 1;
3957 
3958 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3959 		if (!vnic->fw_grp_ids) {
3960 			rc = -ENOMEM;
3961 			goto out;
3962 		}
3963 vnic_skip_grps:
3964 		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3965 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3966 			continue;
3967 
3968 		/* Allocate rss table and hash key */
3969 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3970 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3971 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3972 
3973 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3974 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3975 						     vnic->rss_table_size,
3976 						     &vnic->rss_table_dma_addr,
3977 						     GFP_KERNEL);
3978 		if (!vnic->rss_table) {
3979 			rc = -ENOMEM;
3980 			goto out;
3981 		}
3982 
3983 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3984 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3985 	}
3986 	return 0;
3987 
3988 out:
3989 	return rc;
3990 }
3991 
3992 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3993 {
3994 	struct bnxt_hwrm_wait_token *token;
3995 
3996 	dma_pool_destroy(bp->hwrm_dma_pool);
3997 	bp->hwrm_dma_pool = NULL;
3998 
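	/* Mark requests still waiting on a firmware response as cancelled
	 * so that their waiters bail out instead of timing out.
	 */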
3999 	rcu_read_lock();
4000 	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4001 		WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4002 	rcu_read_unlock();
4003 }
4004 
4005 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4006 {
4007 	bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4008 					    BNXT_HWRM_DMA_SIZE,
4009 					    BNXT_HWRM_DMA_ALIGN, 0);
4010 	if (!bp->hwrm_dma_pool)
4011 		return -ENOMEM;
4012 
4013 	INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4014 
4015 	return 0;
4016 }
4017 
4018 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4019 {
4020 	kfree(stats->hw_masks);
4021 	stats->hw_masks = NULL;
4022 	kfree(stats->sw_stats);
4023 	stats->sw_stats = NULL;
4024 	if (stats->hw_stats) {
4025 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4026 				  stats->hw_stats_map);
4027 		stats->hw_stats = NULL;
4028 	}
4029 }
4030 
4031 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4032 				bool alloc_masks)
4033 {
4034 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4035 					     &stats->hw_stats_map, GFP_KERNEL);
4036 	if (!stats->hw_stats)
4037 		return -ENOMEM;
4038 
4039 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4040 	if (!stats->sw_stats)
4041 		goto stats_mem_err;
4042 
4043 	if (alloc_masks) {
4044 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4045 		if (!stats->hw_masks)
4046 			goto stats_mem_err;
4047 	}
4048 	return 0;
4049 
4050 stats_mem_err:
4051 	bnxt_free_stats_mem(bp, stats);
4052 	return -ENOMEM;
4053 }
4054 
4055 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4056 {
4057 	int i;
4058 
4059 	for (i = 0; i < count; i++)
4060 		mask_arr[i] = mask;
4061 }
4062 
4063 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4064 {
4065 	int i;
4066 
4067 	for (i = 0; i < count; i++)
4068 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4069 }
4070 
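/* The HWRM requests below follow a common pattern: hwrm_req_init() builds
 * the request in DMA-able memory, hwrm_req_hold() keeps the response buffer
 * valid past hwrm_req_send(), and hwrm_req_drop() releases both.
 */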
4071 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4072 				    struct bnxt_stats_mem *stats)
4073 {
4074 	struct hwrm_func_qstats_ext_output *resp;
4075 	struct hwrm_func_qstats_ext_input *req;
4076 	__le64 *hw_masks;
4077 	int rc;
4078 
4079 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4080 	    !(bp->flags & BNXT_FLAG_CHIP_P5))
4081 		return -EOPNOTSUPP;
4082 
4083 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4084 	if (rc)
4085 		return rc;
4086 
4087 	req->fid = cpu_to_le16(0xffff);
4088 	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4089 
4090 	resp = hwrm_req_hold(bp, req);
4091 	rc = hwrm_req_send(bp, req);
4092 	if (!rc) {
4093 		hw_masks = &resp->rx_ucast_pkts;
4094 		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4095 	}
4096 	hwrm_req_drop(bp, req);
4097 	return rc;
4098 }
4099 
4100 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4101 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4102 
4103 static void bnxt_init_stats(struct bnxt *bp)
4104 {
4105 	struct bnxt_napi *bnapi = bp->bnapi[0];
4106 	struct bnxt_cp_ring_info *cpr;
4107 	struct bnxt_stats_mem *stats;
4108 	__le64 *rx_stats, *tx_stats;
4109 	int rc, rx_count, tx_count;
4110 	u64 *rx_masks, *tx_masks;
4111 	u64 mask;
4112 	u8 flags;
4113 
4114 	cpr = &bnapi->cp_ring;
4115 	stats = &cpr->stats;
4116 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4117 	if (rc) {
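		/* No mask information from firmware; assume 48-bit wide
		 * counters on P5 chips and full 64-bit counters otherwise.
		 */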
4118 		if (bp->flags & BNXT_FLAG_CHIP_P5)
4119 			mask = (1ULL << 48) - 1;
4120 		else
4121 			mask = -1ULL;
4122 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4123 	}
4124 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
4125 		stats = &bp->port_stats;
4126 		rx_stats = stats->hw_stats;
4127 		rx_masks = stats->hw_masks;
4128 		rx_count = sizeof(struct rx_port_stats) / 8;
4129 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4130 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4131 		tx_count = sizeof(struct tx_port_stats) / 8;
4132 
4133 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4134 		rc = bnxt_hwrm_port_qstats(bp, flags);
4135 		if (rc) {
4136 			mask = (1ULL << 40) - 1;
4137 
4138 			bnxt_fill_masks(rx_masks, mask, rx_count);
4139 			bnxt_fill_masks(tx_masks, mask, tx_count);
4140 		} else {
4141 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4142 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4143 			bnxt_hwrm_port_qstats(bp, 0);
4144 		}
4145 	}
4146 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4147 		stats = &bp->rx_port_stats_ext;
4148 		rx_stats = stats->hw_stats;
4149 		rx_masks = stats->hw_masks;
4150 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
4151 		stats = &bp->tx_port_stats_ext;
4152 		tx_stats = stats->hw_stats;
4153 		tx_masks = stats->hw_masks;
4154 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
4155 
4156 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4157 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4158 		if (rc) {
4159 			mask = (1ULL << 40) - 1;
4160 
4161 			bnxt_fill_masks(rx_masks, mask, rx_count);
4162 			if (tx_stats)
4163 				bnxt_fill_masks(tx_masks, mask, tx_count);
4164 		} else {
4165 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4166 			if (tx_stats)
4167 				bnxt_copy_hw_masks(tx_masks, tx_stats,
4168 						   tx_count);
4169 			bnxt_hwrm_port_qstats_ext(bp, 0);
4170 		}
4171 	}
4172 }
4173 
4174 static void bnxt_free_port_stats(struct bnxt *bp)
4175 {
4176 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
4177 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4178 
4179 	bnxt_free_stats_mem(bp, &bp->port_stats);
4180 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4181 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4182 }
4183 
4184 static void bnxt_free_ring_stats(struct bnxt *bp)
4185 {
4186 	int i;
4187 
4188 	if (!bp->bnapi)
4189 		return;
4190 
4191 	for (i = 0; i < bp->cp_nr_rings; i++) {
4192 		struct bnxt_napi *bnapi = bp->bnapi[i];
4193 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4194 
4195 		bnxt_free_stats_mem(bp, &cpr->stats);
4196 	}
4197 }
4198 
4199 static int bnxt_alloc_stats(struct bnxt *bp)
4200 {
4201 	u32 size, i;
4202 	int rc;
4203 
4204 	size = bp->hw_ring_stats_size;
4205 
4206 	for (i = 0; i < bp->cp_nr_rings; i++) {
4207 		struct bnxt_napi *bnapi = bp->bnapi[i];
4208 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4209 
4210 		cpr->stats.len = size;
4211 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4212 		if (rc)
4213 			return rc;
4214 
4215 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4216 	}
4217 
4218 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4219 		return 0;
4220 
4221 	if (bp->port_stats.hw_stats)
4222 		goto alloc_ext_stats;
4223 
4224 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4225 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4226 	if (rc)
4227 		return rc;
4228 
4229 	bp->flags |= BNXT_FLAG_PORT_STATS;
4230 
4231 alloc_ext_stats:
	/* Display extended statistics only if FW supports them */
4233 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4234 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4235 			return 0;
4236 
4237 	if (bp->rx_port_stats_ext.hw_stats)
4238 		goto alloc_tx_ext_stats;
4239 
4240 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4241 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4242 	/* Extended stats are optional */
4243 	if (rc)
4244 		return 0;
4245 
4246 alloc_tx_ext_stats:
4247 	if (bp->tx_port_stats_ext.hw_stats)
4248 		return 0;
4249 
4250 	if (bp->hwrm_spec_code >= 0x10902 ||
4251 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4252 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4253 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4254 		/* Extended stats are optional */
4255 		if (rc)
4256 			return 0;
4257 	}
4258 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4259 	return 0;
4260 }
4261 
4262 static void bnxt_clear_ring_indices(struct bnxt *bp)
4263 {
4264 	int i;
4265 
4266 	if (!bp->bnapi)
4267 		return;
4268 
4269 	for (i = 0; i < bp->cp_nr_rings; i++) {
4270 		struct bnxt_napi *bnapi = bp->bnapi[i];
4271 		struct bnxt_cp_ring_info *cpr;
4272 		struct bnxt_rx_ring_info *rxr;
4273 		struct bnxt_tx_ring_info *txr;
4274 
4275 		if (!bnapi)
4276 			continue;
4277 
4278 		cpr = &bnapi->cp_ring;
4279 		cpr->cp_raw_cons = 0;
4280 
4281 		txr = bnapi->tx_ring;
4282 		if (txr) {
4283 			txr->tx_prod = 0;
4284 			txr->tx_cons = 0;
4285 		}
4286 
4287 		rxr = bnapi->rx_ring;
4288 		if (rxr) {
4289 			rxr->rx_prod = 0;
4290 			rxr->rx_agg_prod = 0;
4291 			rxr->rx_sw_agg_prod = 0;
4292 			rxr->rx_next_cons = 0;
4293 		}
4294 	}
4295 }
4296 
4297 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4298 {
4299 #ifdef CONFIG_RFS_ACCEL
4300 	int i;
4301 
	/* We are under rtnl_lock and all our NAPIs have been disabled, so
	 * it is safe to delete the hash table.
	 */
4305 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4306 		struct hlist_head *head;
4307 		struct hlist_node *tmp;
4308 		struct bnxt_ntuple_filter *fltr;
4309 
4310 		head = &bp->ntp_fltr_hash_tbl[i];
4311 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4312 			hlist_del(&fltr->hash);
4313 			kfree(fltr);
4314 		}
4315 	}
4316 	if (irq_reinit) {
4317 		kfree(bp->ntp_fltr_bmap);
4318 		bp->ntp_fltr_bmap = NULL;
4319 	}
4320 	bp->ntp_fltr_count = 0;
4321 #endif
4322 }
4323 
4324 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4325 {
4326 #ifdef CONFIG_RFS_ACCEL
4327 	int i, rc = 0;
4328 
4329 	if (!(bp->flags & BNXT_FLAG_RFS))
4330 		return 0;
4331 
4332 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4333 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4334 
4335 	bp->ntp_fltr_count = 0;
4336 	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4337 				    sizeof(long),
4338 				    GFP_KERNEL);
4339 
4340 	if (!bp->ntp_fltr_bmap)
4341 		rc = -ENOMEM;
4342 
4343 	return rc;
4344 #else
4345 	return 0;
4346 #endif
4347 }
4348 
4349 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4350 {
4351 	bnxt_free_vnic_attributes(bp);
4352 	bnxt_free_tx_rings(bp);
4353 	bnxt_free_rx_rings(bp);
4354 	bnxt_free_cp_rings(bp);
4355 	bnxt_free_all_cp_arrays(bp);
4356 	bnxt_free_ntp_fltrs(bp, irq_re_init);
4357 	if (irq_re_init) {
4358 		bnxt_free_ring_stats(bp);
4359 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4360 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4361 			bnxt_free_port_stats(bp);
4362 		bnxt_free_ring_grps(bp);
4363 		bnxt_free_vnics(bp);
4364 		kfree(bp->tx_ring_map);
4365 		bp->tx_ring_map = NULL;
4366 		kfree(bp->tx_ring);
4367 		bp->tx_ring = NULL;
4368 		kfree(bp->rx_ring);
4369 		bp->rx_ring = NULL;
4370 		kfree(bp->bnapi);
4371 		bp->bnapi = NULL;
4372 	} else {
4373 		bnxt_clear_ring_indices(bp);
4374 	}
4375 }
4376 
4377 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4378 {
4379 	int i, j, rc, size, arr_size;
4380 	void *bnapi;
4381 
4382 	if (irq_re_init) {
4383 		/* Allocate bnapi mem pointer array and mem block for
4384 		 * all queues
4385 		 */
4386 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4387 				bp->cp_nr_rings);
4388 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4389 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4390 		if (!bnapi)
4391 			return -ENOMEM;
4392 
4393 		bp->bnapi = bnapi;
4394 		bnapi += arr_size;
4395 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4396 			bp->bnapi[i] = bnapi;
4397 			bp->bnapi[i]->index = i;
4398 			bp->bnapi[i]->bp = bp;
4399 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
4400 				struct bnxt_cp_ring_info *cpr =
4401 					&bp->bnapi[i]->cp_ring;
4402 
4403 				cpr->cp_ring_struct.ring_mem.flags =
4404 					BNXT_RMEM_RING_PTE_FLAG;
4405 			}
4406 		}
4407 
4408 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
4409 				      sizeof(struct bnxt_rx_ring_info),
4410 				      GFP_KERNEL);
4411 		if (!bp->rx_ring)
4412 			return -ENOMEM;
4413 
4414 		for (i = 0; i < bp->rx_nr_rings; i++) {
4415 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4416 
4417 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
4418 				rxr->rx_ring_struct.ring_mem.flags =
4419 					BNXT_RMEM_RING_PTE_FLAG;
4420 				rxr->rx_agg_ring_struct.ring_mem.flags =
4421 					BNXT_RMEM_RING_PTE_FLAG;
4422 			}
4423 			rxr->bnapi = bp->bnapi[i];
4424 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4425 		}
4426 
4427 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
4428 				      sizeof(struct bnxt_tx_ring_info),
4429 				      GFP_KERNEL);
4430 		if (!bp->tx_ring)
4431 			return -ENOMEM;
4432 
4433 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4434 					  GFP_KERNEL);
4435 
4436 		if (!bp->tx_ring_map)
4437 			return -ENOMEM;
4438 
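		/* With shared completion rings, TX ring i shares bnapi[i]
		 * with RX ring i; otherwise the TX rings use their own
		 * bnapi instances after the RX rings.
		 */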
4439 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4440 			j = 0;
4441 		else
4442 			j = bp->rx_nr_rings;
4443 
4444 		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4445 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4446 
4447 			if (bp->flags & BNXT_FLAG_CHIP_P5)
4448 				txr->tx_ring_struct.ring_mem.flags =
4449 					BNXT_RMEM_RING_PTE_FLAG;
4450 			txr->bnapi = bp->bnapi[j];
4451 			bp->bnapi[j]->tx_ring = txr;
4452 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4453 			if (i >= bp->tx_nr_rings_xdp) {
4454 				txr->txq_index = i - bp->tx_nr_rings_xdp;
4455 				bp->bnapi[j]->tx_int = bnxt_tx_int;
4456 			} else {
4457 				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4458 				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4459 			}
4460 		}
4461 
4462 		rc = bnxt_alloc_stats(bp);
4463 		if (rc)
4464 			goto alloc_mem_err;
4465 		bnxt_init_stats(bp);
4466 
4467 		rc = bnxt_alloc_ntp_fltrs(bp);
4468 		if (rc)
4469 			goto alloc_mem_err;
4470 
4471 		rc = bnxt_alloc_vnics(bp);
4472 		if (rc)
4473 			goto alloc_mem_err;
4474 	}
4475 
4476 	rc = bnxt_alloc_all_cp_arrays(bp);
4477 	if (rc)
4478 		goto alloc_mem_err;
4479 
4480 	bnxt_init_ring_struct(bp);
4481 
4482 	rc = bnxt_alloc_rx_rings(bp);
4483 	if (rc)
4484 		goto alloc_mem_err;
4485 
4486 	rc = bnxt_alloc_tx_rings(bp);
4487 	if (rc)
4488 		goto alloc_mem_err;
4489 
4490 	rc = bnxt_alloc_cp_rings(bp);
4491 	if (rc)
4492 		goto alloc_mem_err;
4493 
4494 	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4495 				  BNXT_VNIC_UCAST_FLAG;
4496 	rc = bnxt_alloc_vnic_attributes(bp);
4497 	if (rc)
4498 		goto alloc_mem_err;
4499 	return 0;
4500 
4501 alloc_mem_err:
4502 	bnxt_free_mem(bp, true);
4503 	return rc;
4504 }
4505 
4506 static void bnxt_disable_int(struct bnxt *bp)
4507 {
4508 	int i;
4509 
4510 	if (!bp->bnapi)
4511 		return;
4512 
4513 	for (i = 0; i < bp->cp_nr_rings; i++) {
4514 		struct bnxt_napi *bnapi = bp->bnapi[i];
4515 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4516 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4517 
4518 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
4519 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4520 	}
4521 }
4522 
4523 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4524 {
4525 	struct bnxt_napi *bnapi = bp->bnapi[n];
4526 	struct bnxt_cp_ring_info *cpr;
4527 
4528 	cpr = &bnapi->cp_ring;
4529 	return cpr->cp_ring_struct.map_idx;
4530 }
4531 
4532 static void bnxt_disable_int_sync(struct bnxt *bp)
4533 {
4534 	int i;
4535 
4536 	if (!bp->irq_tbl)
4537 		return;
4538 
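	/* A non-zero intr_sem keeps the NAPI handlers from re-arming
	 * interrupts; synchronize_irq() then waits out any handler that
	 * is already running.
	 */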
4539 	atomic_inc(&bp->intr_sem);
4540 
4541 	bnxt_disable_int(bp);
4542 	for (i = 0; i < bp->cp_nr_rings; i++) {
4543 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4544 
4545 		synchronize_irq(bp->irq_tbl[map_idx].vector);
4546 	}
4547 }
4548 
4549 static void bnxt_enable_int(struct bnxt *bp)
4550 {
4551 	int i;
4552 
4553 	atomic_set(&bp->intr_sem, 0);
4554 	for (i = 0; i < bp->cp_nr_rings; i++) {
4555 		struct bnxt_napi *bnapi = bp->bnapi[i];
4556 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4557 
4558 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4559 	}
4560 }
4561 
4562 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4563 			    bool async_only)
4564 {
4565 	DECLARE_BITMAP(async_events_bmap, 256);
4566 	u32 *events = (u32 *)async_events_bmap;
4567 	struct hwrm_func_drv_rgtr_output *resp;
4568 	struct hwrm_func_drv_rgtr_input *req;
4569 	u32 flags;
4570 	int rc, i;
4571 
4572 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4573 	if (rc)
4574 		return rc;
4575 
4576 	req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4577 				   FUNC_DRV_RGTR_REQ_ENABLES_VER |
4578 				   FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4579 
4580 	req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4581 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4582 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4583 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4584 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4585 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4586 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4587 	req->flags = cpu_to_le32(flags);
4588 	req->ver_maj_8b = DRV_VER_MAJ;
4589 	req->ver_min_8b = DRV_VER_MIN;
4590 	req->ver_upd_8b = DRV_VER_UPD;
4591 	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4592 	req->ver_min = cpu_to_le16(DRV_VER_MIN);
4593 	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4594 
4595 	if (BNXT_PF(bp)) {
4596 		u32 data[8];
4597 		int i;
4598 
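		/* Build a 256-bit bitmap (8 x u32) of the HWRM commands
		 * that firmware should forward from VFs to the PF driver.
		 */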
4599 		memset(data, 0, sizeof(data));
4600 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4601 			u16 cmd = bnxt_vf_req_snif[i];
4602 			unsigned int bit, idx;
4603 
4604 			idx = cmd / 32;
4605 			bit = cmd % 32;
4606 			data[idx] |= 1 << bit;
4607 		}
4608 
4609 		for (i = 0; i < 8; i++)
4610 			req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4611 
4612 		req->enables |=
4613 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4614 	}
4615 
4616 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4617 		req->flags |= cpu_to_le32(
4618 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4619 
4620 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
4621 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4622 		u16 event_id = bnxt_async_events_arr[i];
4623 
4624 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4625 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4626 			continue;
		__set_bit(event_id, async_events_bmap);
4628 	}
4629 	if (bmap && bmap_size) {
4630 		for (i = 0; i < bmap_size; i++) {
4631 			if (test_bit(i, bmap))
4632 				__set_bit(i, async_events_bmap);
4633 		}
4634 	}
4635 	for (i = 0; i < 8; i++)
4636 		req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4637 
4638 	if (async_only)
4639 		req->enables =
4640 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4641 
4642 	resp = hwrm_req_hold(bp, req);
4643 	rc = hwrm_req_send(bp, req);
4644 	if (!rc) {
4645 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4646 		if (resp->flags &
4647 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4648 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4649 	}
4650 	hwrm_req_drop(bp, req);
4651 	return rc;
4652 }
4653 
4654 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4655 {
4656 	struct hwrm_func_drv_unrgtr_input *req;
4657 	int rc;
4658 
4659 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4660 		return 0;
4661 
4662 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4663 	if (rc)
4664 		return rc;
4665 	return hwrm_req_send(bp, req);
4666 }
4667 
4668 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4669 {
4670 	struct hwrm_tunnel_dst_port_free_input *req;
4671 	int rc;
4672 
4673 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4674 	    bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4675 		return 0;
4676 	if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4677 	    bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4678 		return 0;
4679 
4680 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4681 	if (rc)
4682 		return rc;
4683 
4684 	req->tunnel_type = tunnel_type;
4685 
4686 	switch (tunnel_type) {
4687 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4688 		req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4689 		bp->vxlan_port = 0;
4690 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4691 		break;
4692 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4693 		req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4694 		bp->nge_port = 0;
4695 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4696 		break;
4697 	default:
4698 		break;
4699 	}
4700 
4701 	rc = hwrm_req_send(bp, req);
4702 	if (rc)
4703 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4704 			   rc);
4705 	return rc;
4706 }
4707 
4708 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4709 					   u8 tunnel_type)
4710 {
4711 	struct hwrm_tunnel_dst_port_alloc_output *resp;
4712 	struct hwrm_tunnel_dst_port_alloc_input *req;
4713 	int rc;
4714 
4715 	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4716 	if (rc)
4717 		return rc;
4718 
4719 	req->tunnel_type = tunnel_type;
4720 	req->tunnel_dst_port_val = port;
4721 
4722 	resp = hwrm_req_hold(bp, req);
4723 	rc = hwrm_req_send(bp, req);
4724 	if (rc) {
4725 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4726 			   rc);
4727 		goto err_out;
4728 	}
4729 
4730 	switch (tunnel_type) {
4731 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4732 		bp->vxlan_port = port;
4733 		bp->vxlan_fw_dst_port_id =
4734 			le16_to_cpu(resp->tunnel_dst_port_id);
4735 		break;
4736 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4737 		bp->nge_port = port;
4738 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4739 		break;
4740 	default:
4741 		break;
4742 	}
4743 
4744 err_out:
4745 	hwrm_req_drop(bp, req);
4746 	return rc;
4747 }
4748 
4749 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4750 {
4751 	struct hwrm_cfa_l2_set_rx_mask_input *req;
4752 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4753 	int rc;
4754 
4755 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4756 	if (rc)
4757 		return rc;
4758 
4759 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4760 	req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4761 	req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4762 	req->mask = cpu_to_le32(vnic->rx_mask);
4763 	return hwrm_req_send_silent(bp, req);
4764 }
4765 
4766 #ifdef CONFIG_RFS_ACCEL
4767 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4768 					    struct bnxt_ntuple_filter *fltr)
4769 {
4770 	struct hwrm_cfa_ntuple_filter_free_input *req;
4771 	int rc;
4772 
4773 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4774 	if (rc)
4775 		return rc;
4776 
4777 	req->ntuple_filter_id = fltr->filter_id;
4778 	return hwrm_req_send(bp, req);
4779 }
4780 
4781 #define BNXT_NTP_FLTR_FLAGS					\
4782 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
4783 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
4784 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
4785 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
4786 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
4787 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
4788 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
4789 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
4790 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
4791 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
4792 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
4793 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
4794 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
4795 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4796 
4797 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
4798 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4799 
4800 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4801 					     struct bnxt_ntuple_filter *fltr)
4802 {
4803 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4804 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
4805 	struct flow_keys *keys = &fltr->fkeys;
4806 	struct bnxt_vnic_info *vnic;
4807 	u32 flags = 0;
4808 	int rc;
4809 
4810 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4811 	if (rc)
4812 		return rc;
4813 
4814 	req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4815 
4816 	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4817 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4818 		req->dst_id = cpu_to_le16(fltr->rxq);
4819 	} else {
4820 		vnic = &bp->vnic_info[fltr->rxq + 1];
4821 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4822 	}
4823 	req->flags = cpu_to_le32(flags);
4824 	req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4825 
4826 	req->ethertype = htons(ETH_P_IP);
4827 	memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4828 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4829 	req->ip_protocol = keys->basic.ip_proto;
4830 
4831 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4832 		int i;
4833 
4834 		req->ethertype = htons(ETH_P_IPV6);
4835 		req->ip_addr_type =
4836 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4837 		*(struct in6_addr *)&req->src_ipaddr[0] =
4838 			keys->addrs.v6addrs.src;
4839 		*(struct in6_addr *)&req->dst_ipaddr[0] =
4840 			keys->addrs.v6addrs.dst;
4841 		for (i = 0; i < 4; i++) {
4842 			req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4843 			req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4844 		}
4845 	} else {
4846 		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4847 		req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4848 		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4849 		req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4850 	}
4851 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4852 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4853 		req->tunnel_type =
4854 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4855 	}
4856 
4857 	req->src_port = keys->ports.src;
4858 	req->src_port_mask = cpu_to_be16(0xffff);
4859 	req->dst_port = keys->ports.dst;
4860 	req->dst_port_mask = cpu_to_be16(0xffff);
4861 
4862 	resp = hwrm_req_hold(bp, req);
4863 	rc = hwrm_req_send(bp, req);
4864 	if (!rc)
4865 		fltr->filter_id = resp->ntuple_filter_id;
4866 	hwrm_req_drop(bp, req);
4867 	return rc;
4868 }
4869 #endif
4870 
4871 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4872 				     u8 *mac_addr)
4873 {
4874 	struct hwrm_cfa_l2_filter_alloc_output *resp;
4875 	struct hwrm_cfa_l2_filter_alloc_input *req;
4876 	int rc;
4877 
4878 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4879 	if (rc)
4880 		return rc;
4881 
4882 	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4883 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4884 		req->flags |=
4885 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4886 	req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4887 	req->enables =
4888 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4889 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4890 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4891 	memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4892 	req->l2_addr_mask[0] = 0xff;
4893 	req->l2_addr_mask[1] = 0xff;
4894 	req->l2_addr_mask[2] = 0xff;
4895 	req->l2_addr_mask[3] = 0xff;
4896 	req->l2_addr_mask[4] = 0xff;
4897 	req->l2_addr_mask[5] = 0xff;
4898 
4899 	resp = hwrm_req_hold(bp, req);
4900 	rc = hwrm_req_send(bp, req);
4901 	if (!rc)
4902 		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4903 							resp->l2_filter_id;
4904 	hwrm_req_drop(bp, req);
4905 	return rc;
4906 }
4907 
4908 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4909 {
4910 	struct hwrm_cfa_l2_filter_free_input *req;
4911 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4912 	int rc;
4913 
4914 	/* Any associated ntuple filters will also be cleared by firmware. */
4915 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4916 	if (rc)
4917 		return rc;
4918 	hwrm_req_hold(bp, req);
4919 	for (i = 0; i < num_of_vnics; i++) {
4920 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4921 
4922 		for (j = 0; j < vnic->uc_filter_count; j++) {
4923 			req->l2_filter_id = vnic->fw_l2_filter_id[j];
4924 
4925 			rc = hwrm_req_send(bp, req);
4926 		}
4927 		vnic->uc_filter_count = 0;
4928 	}
4929 	hwrm_req_drop(bp, req);
4930 	return rc;
4931 }
4932 
4933 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4934 {
4935 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4936 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4937 	struct hwrm_vnic_tpa_cfg_input *req;
4938 	int rc;
4939 
4940 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4941 		return 0;
4942 
4943 	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4944 	if (rc)
4945 		return rc;
4946 
4947 	if (tpa_flags) {
4948 		u16 mss = bp->dev->mtu - 40;
4949 		u32 nsegs, n, segs = 0, flags;
4950 
4951 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4952 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4953 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4954 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4955 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4956 		if (tpa_flags & BNXT_FLAG_GRO)
4957 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4958 
4959 		req->flags = cpu_to_le32(flags);
4960 
4961 		req->enables =
4962 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4963 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4964 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4965 
		/* The maximum number of aggregated segments is sent to the
		 * firmware in log2 units, and the first packet is not
		 * counted as part of these units.
		 */
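		/* For example, an mss of 1460 with a 4K BNXT_RX_PAGE_SIZE
		 * gives n = 2 and nsegs = (MAX_SKB_FRAGS - 1) * 2.
		 */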
4969 		if (mss <= BNXT_RX_PAGE_SIZE) {
4970 			n = BNXT_RX_PAGE_SIZE / mss;
4971 			nsegs = (MAX_SKB_FRAGS - 1) * n;
4972 		} else {
4973 			n = mss / BNXT_RX_PAGE_SIZE;
4974 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
4975 				n++;
4976 			nsegs = (MAX_SKB_FRAGS - n) / n;
4977 		}
4978 
4979 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
4980 			segs = MAX_TPA_SEGS_P5;
4981 			max_aggs = bp->max_tpa;
4982 		} else {
4983 			segs = ilog2(nsegs);
4984 		}
4985 		req->max_agg_segs = cpu_to_le16(segs);
4986 		req->max_aggs = cpu_to_le16(max_aggs);
4987 
4988 		req->min_agg_len = cpu_to_le32(512);
4989 	}
4990 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4991 
4992 	return hwrm_req_send(bp, req);
4993 }
4994 
4995 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4996 {
4997 	struct bnxt_ring_grp_info *grp_info;
4998 
4999 	grp_info = &bp->grp_info[ring->grp_idx];
5000 	return grp_info->cp_fw_ring_id;
5001 }
5002 
5003 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5004 {
5005 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5006 		struct bnxt_napi *bnapi = rxr->bnapi;
5007 		struct bnxt_cp_ring_info *cpr;
5008 
5009 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5010 		return cpr->cp_ring_struct.fw_ring_id;
5011 	} else {
5012 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5013 	}
5014 }
5015 
5016 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5017 {
5018 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5019 		struct bnxt_napi *bnapi = txr->bnapi;
5020 		struct bnxt_cp_ring_info *cpr;
5021 
5022 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5023 		return cpr->cp_ring_struct.fw_ring_id;
5024 	} else {
5025 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5026 	}
5027 }
5028 
5029 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5030 {
5031 	int entries;
5032 
5033 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5034 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5035 	else
5036 		entries = HW_HASH_INDEX_SIZE;
5037 
5038 	bp->rss_indir_tbl_entries = entries;
5039 	bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5040 					  GFP_KERNEL);
5041 	if (!bp->rss_indir_tbl)
5042 		return -ENOMEM;
5043 	return 0;
5044 }
5045 
5046 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5047 {
5048 	u16 max_rings, max_entries, pad, i;
5049 
5050 	if (!bp->rx_nr_rings)
5051 		return;
5052 
5053 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5054 		max_rings = bp->rx_nr_rings - 1;
5055 	else
5056 		max_rings = bp->rx_nr_rings;
5057 
5058 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5059 
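	/* ethtool_rxfh_indir_default() spreads the entries across the RX
	 * rings round-robin: entry i maps to ring i % max_rings.
	 */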
5060 	for (i = 0; i < max_entries; i++)
5061 		bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5062 
5063 	pad = bp->rss_indir_tbl_entries - max_entries;
5064 	if (pad)
5065 		memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5066 }
5067 
5068 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5069 {
5070 	u16 i, tbl_size, max_ring = 0;
5071 
5072 	if (!bp->rss_indir_tbl)
5073 		return 0;
5074 
5075 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5076 	for (i = 0; i < tbl_size; i++)
5077 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5078 	return max_ring;
5079 }
5080 
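/* Return the number of RSS contexts required: P5 chips need one context per
 * BNXT_RSS_TABLE_ENTRIES_P5 RX rings, Nitro A0 uses two, and all other
 * chips need only one.
 */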
5081 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5082 {
5083 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5084 		return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5085 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5086 		return 2;
5087 	return 1;
5088 }
5089 
5090 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5091 {
5092 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5093 	u16 i, j;
5094 
5095 	/* Fill the RSS indirection table with ring group ids */
5096 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5097 		if (!no_rss)
5098 			j = bp->rss_indir_tbl[i];
5099 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5100 	}
5101 }
5102 
5103 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5104 				      struct bnxt_vnic_info *vnic)
5105 {
5106 	__le16 *ring_tbl = vnic->rss_table;
5107 	struct bnxt_rx_ring_info *rxr;
5108 	u16 tbl_size, i;
5109 
5110 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5111 
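	/* Each P5 RSS table entry is a pair of ring IDs: the RX ring
	 * followed by the completion ring servicing it.
	 */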
5112 	for (i = 0; i < tbl_size; i++) {
5113 		u16 ring_id, j;
5114 
5115 		j = bp->rss_indir_tbl[i];
5116 		rxr = &bp->rx_ring[j];
5117 
5118 		ring_id = rxr->rx_ring_struct.fw_ring_id;
5119 		*ring_tbl++ = cpu_to_le16(ring_id);
5120 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5121 		*ring_tbl++ = cpu_to_le16(ring_id);
5122 	}
5123 }
5124 
5125 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5126 {
5127 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5128 		__bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5129 	else
5130 		__bnxt_fill_hw_rss_tbl(bp, vnic);
5131 }
5132 
5133 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5134 {
5135 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5136 	struct hwrm_vnic_rss_cfg_input *req;
5137 	int rc;
5138 
5139 	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5140 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5141 		return 0;
5142 
5143 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5144 	if (rc)
5145 		return rc;
5146 
5147 	if (set_rss) {
5148 		bnxt_fill_hw_rss_tbl(bp, vnic);
5149 		req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5150 		req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5151 		req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5152 		req->hash_key_tbl_addr =
5153 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
5154 	}
5155 	req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5156 	return hwrm_req_send(bp, req);
5157 }
5158 
5159 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5160 {
5161 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5162 	struct hwrm_vnic_rss_cfg_input *req;
5163 	dma_addr_t ring_tbl_map;
5164 	u32 i, nr_ctxs;
5165 	int rc;
5166 
5167 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5168 	if (rc)
5169 		return rc;
5170 
5171 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5172 	if (!set_rss)
5173 		return hwrm_req_send(bp, req);
5174 
5175 	bnxt_fill_hw_rss_tbl(bp, vnic);
5176 	req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5177 	req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5178 	req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5179 	ring_tbl_map = vnic->rss_table_dma_addr;
5180 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5181 
5182 	hwrm_req_hold(bp, req);
5183 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5184 		req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5185 		req->ring_table_pair_index = i;
5186 		req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5187 		rc = hwrm_req_send(bp, req);
5188 		if (rc)
5189 			goto exit;
5190 	}
5191 
5192 exit:
5193 	hwrm_req_drop(bp, req);
5194 	return rc;
5195 }
5196 
5197 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5198 {
5199 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5200 	struct hwrm_vnic_plcmodes_cfg_input *req;
5201 	int rc;
5202 
5203 	rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5204 	if (rc)
5205 		return rc;
5206 
5207 	req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5208 				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5209 				 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5210 	req->enables =
5211 		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5212 			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5213 	/* thresholds not implemented in firmware yet */
5214 	req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5215 	req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5216 	req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5217 	return hwrm_req_send(bp, req);
5218 }
5219 
5220 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5221 					u16 ctx_idx)
5222 {
5223 	struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5224 
5225 	if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5226 		return;
5227 
5228 	req->rss_cos_lb_ctx_id =
5229 		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5230 
5231 	hwrm_req_send(bp, req);
5232 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5233 }
5234 
5235 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5236 {
5237 	int i, j;
5238 
5239 	for (i = 0; i < bp->nr_vnics; i++) {
5240 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5241 
5242 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5243 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5244 				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5245 		}
5246 	}
5247 	bp->rsscos_nr_ctxs = 0;
5248 }
5249 
5250 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5251 {
5252 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5253 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5254 	int rc;
5255 
5256 	rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5257 	if (rc)
5258 		return rc;
5259 
5260 	resp = hwrm_req_hold(bp, req);
5261 	rc = hwrm_req_send(bp, req);
5262 	if (!rc)
5263 		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5264 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
5265 	hwrm_req_drop(bp, req);
5266 
5267 	return rc;
5268 }
5269 
5270 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5271 {
5272 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5273 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5274 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5275 }
5276 
5277 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5278 {
5279 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5280 	struct hwrm_vnic_cfg_input *req;
5281 	unsigned int ring = 0, grp_idx;
5282 	u16 def_vlan = 0;
5283 	int rc;
5284 
5285 	rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5286 	if (rc)
5287 		return rc;
5288 
5289 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5290 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5291 
5292 		req->default_rx_ring_id =
5293 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5294 		req->default_cmpl_ring_id =
5295 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5296 		req->enables =
5297 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5298 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5299 		goto vnic_mru;
5300 	}
5301 	req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS is supported for now; COS and LB rules are TBD */
5303 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5304 		req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5305 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5306 					   VNIC_CFG_REQ_ENABLES_MRU);
5307 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5308 		req->rss_rule =
5309 			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5310 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5311 					   VNIC_CFG_REQ_ENABLES_MRU);
5312 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5313 	} else {
5314 		req->rss_rule = cpu_to_le16(0xffff);
5315 	}
5316 
5317 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5318 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5319 		req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5320 		req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5321 	} else {
5322 		req->cos_rule = cpu_to_le16(0xffff);
5323 	}
5324 
5325 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5326 		ring = 0;
5327 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5328 		ring = vnic_id - 1;
5329 	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5330 		ring = bp->rx_nr_rings - 1;
5331 
5332 	grp_idx = bp->rx_ring[ring].bnapi->index;
5333 	req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5334 	req->lb_rule = cpu_to_le16(0xffff);
5335 vnic_mru:
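	/* The MRU is the MTU plus the Ethernet header and one VLAN tag. */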
5336 	req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5337 
5338 	req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5339 #ifdef CONFIG_BNXT_SRIOV
5340 	if (BNXT_VF(bp))
5341 		def_vlan = bp->vf.vlan;
5342 #endif
5343 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5344 		req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5345 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5346 		req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5347 
5348 	return hwrm_req_send(bp, req);
5349 }
5350 
5351 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5352 {
5353 	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5354 		struct hwrm_vnic_free_input *req;
5355 
5356 		if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5357 			return;
5358 
5359 		req->vnic_id =
5360 			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5361 
5362 		hwrm_req_send(bp, req);
5363 		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5364 	}
5365 }
5366 
5367 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5368 {
5369 	u16 i;
5370 
5371 	for (i = 0; i < bp->nr_vnics; i++)
5372 		bnxt_hwrm_vnic_free_one(bp, i);
5373 }
5374 
5375 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5376 				unsigned int start_rx_ring_idx,
5377 				unsigned int nr_rings)
5378 {
5379 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5380 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5381 	struct hwrm_vnic_alloc_output *resp;
5382 	struct hwrm_vnic_alloc_input *req;
5383 	int rc;
5384 
5385 	rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5386 	if (rc)
5387 		return rc;
5388 
5389 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5390 		goto vnic_no_ring_grps;
5391 
5392 	/* map ring groups to this vnic */
5393 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5394 		grp_idx = bp->rx_ring[i].bnapi->index;
5395 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5396 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5397 				   j, nr_rings);
5398 			break;
5399 		}
5400 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5401 	}
5402 
5403 vnic_no_ring_grps:
5404 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5405 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5406 	if (vnic_id == 0)
5407 		req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5408 
5409 	resp = hwrm_req_hold(bp, req);
5410 	rc = hwrm_req_send(bp, req);
5411 	if (!rc)
5412 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5413 	hwrm_req_drop(bp, req);
5414 	return rc;
5415 }
5416 
5417 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5418 {
5419 	struct hwrm_vnic_qcaps_output *resp;
5420 	struct hwrm_vnic_qcaps_input *req;
5421 	int rc;
5422 
5423 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5424 	bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5425 	if (bp->hwrm_spec_code < 0x10600)
5426 		return 0;
5427 
5428 	rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5429 	if (rc)
5430 		return rc;
5431 
5432 	resp = hwrm_req_hold(bp, req);
5433 	rc = hwrm_req_send(bp, req);
5434 	if (!rc) {
5435 		u32 flags = le32_to_cpu(resp->flags);
5436 
5437 		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5438 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5439 			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5440 		if (flags &
5441 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5442 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5443 
5444 		/* Older P5 firmware before EXT_HW_STATS support did not set
5445 		 * VLAN_STRIP_CAP properly.
5446 		 */
5447 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5448 		    (BNXT_CHIP_P5_THOR(bp) &&
5449 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5450 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5451 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5452 		if (bp->max_tpa_v2) {
5453 			if (BNXT_CHIP_P5_THOR(bp))
5454 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5455 			else
5456 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5457 		}
5458 	}
5459 	hwrm_req_drop(bp, req);
5460 	return rc;
5461 }
5462 
5463 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5464 {
5465 	struct hwrm_ring_grp_alloc_output *resp;
5466 	struct hwrm_ring_grp_alloc_input *req;
5467 	int rc;
5468 	u16 i;
5469 
5470 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5471 		return 0;
5472 
5473 	rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5474 	if (rc)
5475 		return rc;
5476 
5477 	resp = hwrm_req_hold(bp, req);
5478 	for (i = 0; i < bp->rx_nr_rings; i++) {
5479 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5480 
5481 		req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5482 		req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5483 		req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5484 		req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5485 
5486 		rc = hwrm_req_send(bp, req);
5487 
5488 		if (rc)
5489 			break;
5490 
5491 		bp->grp_info[grp_idx].fw_grp_id =
5492 			le32_to_cpu(resp->ring_group_id);
5493 	}
5494 	hwrm_req_drop(bp, req);
5495 	return rc;
5496 }
5497 
5498 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5499 {
5500 	struct hwrm_ring_grp_free_input *req;
5501 	u16 i;
5502 
5503 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5504 		return;
5505 
5506 	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5507 		return;
5508 
5509 	hwrm_req_hold(bp, req);
5510 	for (i = 0; i < bp->cp_nr_rings; i++) {
5511 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5512 			continue;
5513 		req->ring_group_id =
5514 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
5515 
5516 		hwrm_req_send(bp, req);
5517 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5518 	}
5519 	hwrm_req_drop(bp, req);
5520 }
5521 
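/* Build and send one HWRM_RING_ALLOC request.  This fills in the ring
 * page table (or the single page address for one-page rings), the
 * logical id used for doorbell/MSI-X association, and the ring-type
 * specific fields (TX->CP, RX->stats, AGG->RX, CP->NQ associations).
 * On success the firmware ring id is saved in ring->fw_ring_id.
 */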
5522 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5523 				    struct bnxt_ring_struct *ring,
5524 				    u32 ring_type, u32 map_index)
5525 {
5526 	struct hwrm_ring_alloc_output *resp;
5527 	struct hwrm_ring_alloc_input *req;
5528 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5529 	struct bnxt_ring_grp_info *grp_info;
5530 	int rc, err = 0;
5531 	u16 ring_id;
5532 
5533 	rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5534 	if (rc)
5535 		goto exit;
5536 
5537 	req->enables = 0;
5538 	if (rmem->nr_pages > 1) {
5539 		req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5540 		/* Page size is in log2 units */
5541 		req->page_size = BNXT_PAGE_SHIFT;
5542 		req->page_tbl_depth = 1;
5543 	} else {
5544 		req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5545 	}
5546 	req->fbo = 0;
5547 	/* Association of ring index with doorbell index and MSIX number */
5548 	req->logical_id = cpu_to_le16(map_index);
5549 
5550 	switch (ring_type) {
5551 	case HWRM_RING_ALLOC_TX: {
5552 		struct bnxt_tx_ring_info *txr;
5553 
5554 		txr = container_of(ring, struct bnxt_tx_ring_info,
5555 				   tx_ring_struct);
5556 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5557 		/* Association of transmit ring with completion ring */
5558 		grp_info = &bp->grp_info[ring->grp_idx];
5559 		req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5560 		req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5561 		req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5562 		req->queue_id = cpu_to_le16(ring->queue_id);
5563 		break;
5564 	}
5565 	case HWRM_RING_ALLOC_RX:
5566 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5567 		req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5568 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5569 			u16 flags = 0;
5570 
5571 			/* Association of rx ring with stats context */
5572 			grp_info = &bp->grp_info[ring->grp_idx];
5573 			req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5574 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5575 			req->enables |= cpu_to_le32(
5576 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5577 			if (NET_IP_ALIGN == 2)
5578 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5579 			req->flags = cpu_to_le16(flags);
5580 		}
5581 		break;
5582 	case HWRM_RING_ALLOC_AGG:
5583 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5584 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5585 			/* Association of agg ring with rx ring */
5586 			grp_info = &bp->grp_info[ring->grp_idx];
5587 			req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5588 			req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5589 			req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5590 			req->enables |= cpu_to_le32(
5591 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5592 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5593 		} else {
5594 			req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5595 		}
5596 		req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5597 		break;
5598 	case HWRM_RING_ALLOC_CMPL:
5599 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5600 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5601 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5602 			/* Association of cp ring with nq */
5603 			grp_info = &bp->grp_info[map_index];
5604 			req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5605 			req->cq_handle = cpu_to_le64(ring->handle);
5606 			req->enables |= cpu_to_le32(
5607 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5608 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5609 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5610 		}
5611 		break;
5612 	case HWRM_RING_ALLOC_NQ:
5613 		req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5614 		req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5615 		if (bp->flags & BNXT_FLAG_USING_MSIX)
5616 			req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5617 		break;
5618 	default:
5619 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5620 			   ring_type);
5621 		hwrm_req_drop(bp, req);	/* release the unsent request */
		return -EINVAL;
5622 	}
5623 
5624 	resp = hwrm_req_hold(bp, req);
5625 	rc = hwrm_req_send(bp, req);
5626 	err = le16_to_cpu(resp->error_code);
5627 	ring_id = le16_to_cpu(resp->ring_id);
5628 	hwrm_req_drop(bp, req);
5629 
5630 exit:
5631 	if (rc || err) {
5632 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5633 			   ring_type, rc, err);
5634 		return -EIO;
5635 	}
5636 	ring->fw_ring_id = ring_id;
5637 	return rc;
5638 }
5639 
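/* Direct firmware async event notifications to the given completion
 * ring: HWRM_FUNC_CFG on the PF, HWRM_FUNC_VF_CFG on VFs.
 */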
5640 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5641 {
5642 	int rc;
5643 
5644 	if (BNXT_PF(bp)) {
5645 		struct hwrm_func_cfg_input *req;
5646 
5647 		rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5648 		if (rc)
5649 			return rc;
5650 
5651 		req->fid = cpu_to_le16(0xffff);
5652 		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5653 		req->async_event_cr = cpu_to_le16(idx);
5654 		return hwrm_req_send(bp, req);
5655 	} else {
5656 		struct hwrm_func_vf_cfg_input *req;
5657 
5658 		rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5659 		if (rc)
5660 			return rc;
5661 
5662 		req->enables =
5663 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5664 		req->async_event_cr = cpu_to_le16(idx);
5665 		return hwrm_req_send(bp, req);
5666 	}
5667 }
5668 
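/* Initialize the doorbell address and key for a newly allocated ring.
 * P5 chips use a 64-bit doorbell at a fixed PF/VF offset with the ring
 * id (xid) encoded in the key; legacy chips use a 32-bit doorbell at a
 * 0x80-byte stride per MSI-X vector.
 */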
5669 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5670 			u32 map_idx, u32 xid)
5671 {
5672 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5673 		if (BNXT_PF(bp))
5674 			db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5675 		else
5676 			db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5677 		switch (ring_type) {
5678 		case HWRM_RING_ALLOC_TX:
5679 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5680 			break;
5681 		case HWRM_RING_ALLOC_RX:
5682 		case HWRM_RING_ALLOC_AGG:
5683 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5684 			break;
5685 		case HWRM_RING_ALLOC_CMPL:
5686 			db->db_key64 = DBR_PATH_L2;
5687 			break;
5688 		case HWRM_RING_ALLOC_NQ:
5689 			db->db_key64 = DBR_PATH_L2;
5690 			break;
5691 		}
5692 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
5693 	} else {
5694 		db->doorbell = bp->bar1 + map_idx * 0x80;
5695 		switch (ring_type) {
5696 		case HWRM_RING_ALLOC_TX:
5697 			db->db_key32 = DB_KEY_TX;
5698 			break;
5699 		case HWRM_RING_ALLOC_RX:
5700 		case HWRM_RING_ALLOC_AGG:
5701 			db->db_key32 = DB_KEY_RX;
5702 			break;
5703 		case HWRM_RING_ALLOC_CMPL:
5704 			db->db_key32 = DB_KEY_CP;
5705 			break;
5706 		}
5707 	}
5708 }
5709 
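/* Allocate all rings with the firmware in dependency order: NQs (or
 * completion rings on legacy chips) first, then TX, RX and AGG rings.
 * The vector is kept masked while each NQ is created so a stray
 * interrupt is not serviced before its doorbell has been set up.
 */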
5710 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5711 {
5712 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5713 	int i, rc = 0;
5714 	u32 type;
5715 
5716 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5717 		type = HWRM_RING_ALLOC_NQ;
5718 	else
5719 		type = HWRM_RING_ALLOC_CMPL;
5720 	for (i = 0; i < bp->cp_nr_rings; i++) {
5721 		struct bnxt_napi *bnapi = bp->bnapi[i];
5722 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5723 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5724 		u32 map_idx = ring->map_idx;
5725 		unsigned int vector;
5726 
5727 		vector = bp->irq_tbl[map_idx].vector;
5728 		disable_irq_nosync(vector);
5729 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5730 		if (rc) {
5731 			enable_irq(vector);
5732 			goto err_out;
5733 		}
5734 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5735 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5736 		enable_irq(vector);
5737 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5738 
5739 		if (!i) {
5740 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5741 			if (rc)
5742 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5743 		}
5744 	}
5745 
5746 	type = HWRM_RING_ALLOC_TX;
5747 	for (i = 0; i < bp->tx_nr_rings; i++) {
5748 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5749 		struct bnxt_ring_struct *ring;
5750 		u32 map_idx;
5751 
5752 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753 			struct bnxt_napi *bnapi = txr->bnapi;
5754 			struct bnxt_cp_ring_info *cpr, *cpr2;
5755 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5756 
5757 			cpr = &bnapi->cp_ring;
5758 			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5759 			ring = &cpr2->cp_ring_struct;
5760 			ring->handle = BNXT_TX_HDL;
5761 			map_idx = bnapi->index;
5762 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5763 			if (rc)
5764 				goto err_out;
5765 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5766 				    ring->fw_ring_id);
5767 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5768 		}
5769 		ring = &txr->tx_ring_struct;
5770 		map_idx = i;
5771 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5772 		if (rc)
5773 			goto err_out;
5774 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5775 	}
5776 
5777 	type = HWRM_RING_ALLOC_RX;
5778 	for (i = 0; i < bp->rx_nr_rings; i++) {
5779 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5780 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5781 		struct bnxt_napi *bnapi = rxr->bnapi;
5782 		u32 map_idx = bnapi->index;
5783 
5784 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5785 		if (rc)
5786 			goto err_out;
5787 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5788 		/* With agg rings, agg buffers are posted first in the loop below */
5789 		if (!agg_rings)
5790 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5791 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5792 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5793 			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5794 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5795 			struct bnxt_cp_ring_info *cpr2;
5796 
5797 			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5798 			ring = &cpr2->cp_ring_struct;
5799 			ring->handle = BNXT_RX_HDL;
5800 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5801 			if (rc)
5802 				goto err_out;
5803 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5804 				    ring->fw_ring_id);
5805 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5806 		}
5807 	}
5808 
5809 	if (agg_rings) {
5810 		type = HWRM_RING_ALLOC_AGG;
5811 		for (i = 0; i < bp->rx_nr_rings; i++) {
5812 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5813 			struct bnxt_ring_struct *ring =
5814 						&rxr->rx_agg_ring_struct;
5815 			u32 grp_idx = ring->grp_idx;
5816 			u32 map_idx = grp_idx + bp->rx_nr_rings;
5817 
5818 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5819 			if (rc)
5820 				goto err_out;
5821 
5822 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5823 				    ring->fw_ring_id);
5824 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5825 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5826 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5827 		}
5828 	}
5829 err_out:
5830 	return rc;
5831 }
5832 
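/* Return one ring to the firmware.  When close_path is set, the ring's
 * completion ring id is passed in the request so the free goes through
 * the normal completion path; INVALID_HW_RING_ID is used otherwise.
 */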
5833 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5834 				   struct bnxt_ring_struct *ring,
5835 				   u32 ring_type, int cmpl_ring_id)
5836 {
5837 	struct hwrm_ring_free_output *resp;
5838 	struct hwrm_ring_free_input *req;
5839 	u16 error_code = 0;
5840 	int rc;
5841 
5842 	if (BNXT_NO_FW_ACCESS(bp))
5843 		return 0;
5844 
5845 	rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5846 	if (rc)
5847 		goto exit;
5848 
5849 	req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5850 	req->ring_type = ring_type;
5851 	req->ring_id = cpu_to_le16(ring->fw_ring_id);
5852 
5853 	resp = hwrm_req_hold(bp, req);
5854 	rc = hwrm_req_send(bp, req);
5855 	error_code = le16_to_cpu(resp->error_code);
5856 	hwrm_req_drop(bp, req);
5857 exit:
5858 	if (rc || error_code) {
5859 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5860 			   ring_type, rc, error_code);
5861 		return -EIO;
5862 	}
5863 	return 0;
5864 }
5865 
5866 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5867 {
5868 	u32 type;
5869 	int i;
5870 
5871 	if (!bp->bnapi)
5872 		return;
5873 
5874 	for (i = 0; i < bp->tx_nr_rings; i++) {
5875 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5876 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5877 
5878 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5879 			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5880 
5881 			hwrm_ring_free_send_msg(bp, ring,
5882 						RING_FREE_REQ_RING_TYPE_TX,
5883 						close_path ? cmpl_ring_id :
5884 						INVALID_HW_RING_ID);
5885 			ring->fw_ring_id = INVALID_HW_RING_ID;
5886 		}
5887 	}
5888 
5889 	for (i = 0; i < bp->rx_nr_rings; i++) {
5890 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5891 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5892 		u32 grp_idx = rxr->bnapi->index;
5893 
5894 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5895 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5896 
5897 			hwrm_ring_free_send_msg(bp, ring,
5898 						RING_FREE_REQ_RING_TYPE_RX,
5899 						close_path ? cmpl_ring_id :
5900 						INVALID_HW_RING_ID);
5901 			ring->fw_ring_id = INVALID_HW_RING_ID;
5902 			bp->grp_info[grp_idx].rx_fw_ring_id =
5903 				INVALID_HW_RING_ID;
5904 		}
5905 	}
5906 
5907 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5908 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5909 	else
5910 		type = RING_FREE_REQ_RING_TYPE_RX;
5911 	for (i = 0; i < bp->rx_nr_rings; i++) {
5912 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5913 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5914 		u32 grp_idx = rxr->bnapi->index;
5915 
5916 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5917 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5918 
5919 			hwrm_ring_free_send_msg(bp, ring, type,
5920 						close_path ? cmpl_ring_id :
5921 						INVALID_HW_RING_ID);
5922 			ring->fw_ring_id = INVALID_HW_RING_ID;
5923 			bp->grp_info[grp_idx].agg_fw_ring_id =
5924 				INVALID_HW_RING_ID;
5925 		}
5926 	}
5927 
5928 	/* The completion rings are about to be freed.  After that, the
5929 	 * IRQ doorbells will no longer work, so interrupts must be
5930 	 * disabled here.
5931 	 */
5932 	bnxt_disable_int_sync(bp);
5933 
5934 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5935 		type = RING_FREE_REQ_RING_TYPE_NQ;
5936 	else
5937 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5938 	for (i = 0; i < bp->cp_nr_rings; i++) {
5939 		struct bnxt_napi *bnapi = bp->bnapi[i];
5940 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5941 		struct bnxt_ring_struct *ring;
5942 		int j;
5943 
5944 		for (j = 0; j < 2; j++) {
5945 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5946 
5947 			if (cpr2) {
5948 				ring = &cpr2->cp_ring_struct;
5949 				if (ring->fw_ring_id == INVALID_HW_RING_ID)
5950 					continue;
5951 				hwrm_ring_free_send_msg(bp, ring,
5952 					RING_FREE_REQ_RING_TYPE_L2_CMPL,
5953 					INVALID_HW_RING_ID);
5954 				ring->fw_ring_id = INVALID_HW_RING_ID;
5955 			}
5956 		}
5957 		ring = &cpr->cp_ring_struct;
5958 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5959 			hwrm_ring_free_send_msg(bp, ring, type,
5960 						INVALID_HW_RING_ID);
5961 			ring->fw_ring_id = INVALID_HW_RING_ID;
5962 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5963 		}
5964 	}
5965 }
5966 
5967 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5968 			   bool shared);
5969 
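/* Refresh the driver's cached view of the resources the firmware has
 * currently reserved for this function.  On P5 chips every RX and TX
 * ring needs its own completion ring, so the RX/TX counts are trimmed
 * here if fewer completion rings were granted.
 */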
5970 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5971 {
5972 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5973 	struct hwrm_func_qcfg_output *resp;
5974 	struct hwrm_func_qcfg_input *req;
5975 	int rc;
5976 
5977 	if (bp->hwrm_spec_code < 0x10601)
5978 		return 0;
5979 
5980 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5981 	if (rc)
5982 		return rc;
5983 
5984 	req->fid = cpu_to_le16(0xffff);
5985 	resp = hwrm_req_hold(bp, req);
5986 	rc = hwrm_req_send(bp, req);
5987 	if (rc) {
5988 		hwrm_req_drop(bp, req);
5989 		return rc;
5990 	}
5991 
5992 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5993 	if (BNXT_NEW_RM(bp)) {
5994 		u16 cp, stats;
5995 
5996 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5997 		hw_resc->resv_hw_ring_grps =
5998 			le32_to_cpu(resp->alloc_hw_ring_grps);
5999 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6000 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
6001 		stats = le16_to_cpu(resp->alloc_stat_ctx);
6002 		hw_resc->resv_irqs = cp;
6003 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6004 			int rx = hw_resc->resv_rx_rings;
6005 			int tx = hw_resc->resv_tx_rings;
6006 
6007 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
6008 				rx >>= 1;
6009 			if (cp < (rx + tx)) {
6010 				bnxt_trim_rings(bp, &rx, &tx, cp, false);
6011 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
6012 					rx <<= 1;
6013 				hw_resc->resv_rx_rings = rx;
6014 				hw_resc->resv_tx_rings = tx;
6015 			}
6016 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6017 			hw_resc->resv_hw_ring_grps = rx;
6018 		}
6019 		hw_resc->resv_cp_rings = cp;
6020 		hw_resc->resv_stat_ctxs = stats;
6021 	}
6022 	hwrm_req_drop(bp, req);
6023 	return 0;
6024 }
6025 
6026 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6027 {
6028 	struct hwrm_func_qcfg_output *resp;
6029 	struct hwrm_func_qcfg_input *req;
6030 	int rc;
6031 
6032 	if (bp->hwrm_spec_code < 0x10601)
6033 		return 0;
6034 
6035 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6036 	if (rc)
6037 		return rc;
6038 
6039 	req->fid = cpu_to_le16(fid);
6040 	resp = hwrm_req_hold(bp, req);
6041 	rc = hwrm_req_send(bp, req);
6042 	if (!rc)
6043 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6044 
6045 	hwrm_req_drop(bp, req);
6046 	return rc;
6047 }
6048 
6049 static bool bnxt_rfs_supported(struct bnxt *bp);
6050 
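/* Build (but do not send) a HWRM_FUNC_CFG request reserving the given
 * resource counts for the PF.  Only non-zero resources are enabled in
 * the request so the firmware leaves the other fields unchanged.  The
 * caller sends the request, optionally adding the *_ASSETS_TEST flags.
 */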
6051 static struct hwrm_func_cfg_input *
6052 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6053 			     int ring_grps, int cp_rings, int stats, int vnics)
6054 {
6055 	struct hwrm_func_cfg_input *req;
6056 	u32 enables = 0;
6057 
6058 	if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6059 		return NULL;
6060 
6061 	req->fid = cpu_to_le16(0xffff);
6062 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6063 	req->num_tx_rings = cpu_to_le16(tx_rings);
6064 	if (BNXT_NEW_RM(bp)) {
6065 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6066 		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6067 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6068 			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6069 			enables |= tx_rings + ring_grps ?
6070 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6071 			enables |= rx_rings ?
6072 				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6073 		} else {
6074 			enables |= cp_rings ?
6075 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6076 			enables |= ring_grps ?
6077 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6078 				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6079 		}
6080 		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6081 
6082 		req->num_rx_rings = cpu_to_le16(rx_rings);
6083 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6084 			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6085 			req->num_msix = cpu_to_le16(cp_rings);
6086 			req->num_rsscos_ctxs =
6087 				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6088 		} else {
6089 			req->num_cmpl_rings = cpu_to_le16(cp_rings);
6090 			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6091 			req->num_rsscos_ctxs = cpu_to_le16(1);
6092 			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6093 			    bnxt_rfs_supported(bp))
6094 				req->num_rsscos_ctxs =
6095 					cpu_to_le16(ring_grps + 1);
6096 		}
6097 		req->num_stat_ctxs = cpu_to_le16(stats);
6098 		req->num_vnics = cpu_to_le16(vnics);
6099 	}
6100 	req->enables = cpu_to_le32(enables);
6101 	return req;
6102 }
6103 
6104 static struct hwrm_func_vf_cfg_input *
6105 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6106 			     int ring_grps, int cp_rings, int stats, int vnics)
6107 {
6108 	struct hwrm_func_vf_cfg_input *req;
6109 	u32 enables = 0;
6110 
6111 	if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6112 		return NULL;
6113 
6114 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6115 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6116 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6117 	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6118 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6119 		enables |= tx_rings + ring_grps ?
6120 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6121 	} else {
6122 		enables |= cp_rings ?
6123 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6124 		enables |= ring_grps ?
6125 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6126 	}
6127 	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6128 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6129 
6130 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6131 	req->num_tx_rings = cpu_to_le16(tx_rings);
6132 	req->num_rx_rings = cpu_to_le16(rx_rings);
6133 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6134 		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6135 		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6136 	} else {
6137 		req->num_cmpl_rings = cpu_to_le16(cp_rings);
6138 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6139 		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6140 	}
6141 	req->num_stat_ctxs = cpu_to_le16(stats);
6142 	req->num_vnics = cpu_to_le16(vnics);
6143 
6144 	req->enables = cpu_to_le32(enables);
6145 	return req;
6146 }
6147 
6148 static int
6149 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6150 			   int ring_grps, int cp_rings, int stats, int vnics)
6151 {
6152 	struct hwrm_func_cfg_input *req;
6153 	int rc;
6154 
6155 	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6156 					   cp_rings, stats, vnics);
6157 	if (!req)
6158 		return -ENOMEM;
6159 
6160 	if (!req->enables) {
6161 		hwrm_req_drop(bp, req);
6162 		return 0;
6163 	}
6164 
6165 	rc = hwrm_req_send(bp, req);
6166 	if (rc)
6167 		return rc;
6168 
6169 	if (bp->hwrm_spec_code < 0x10601)
6170 		bp->hw_resc.resv_tx_rings = tx_rings;
6171 
6172 	return bnxt_hwrm_get_rings(bp);
6173 }
6174 
6175 static int
6176 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6177 			   int ring_grps, int cp_rings, int stats, int vnics)
6178 {
6179 	struct hwrm_func_vf_cfg_input *req;
6180 	int rc;
6181 
6182 	if (!BNXT_NEW_RM(bp)) {
6183 		bp->hw_resc.resv_tx_rings = tx_rings;
6184 		return 0;
6185 	}
6186 
6187 	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6188 					   cp_rings, stats, vnics);
6189 	if (!req)
6190 		return -ENOMEM;
6191 
6192 	rc = hwrm_req_send(bp, req);
6193 	if (rc)
6194 		return rc;
6195 
6196 	return bnxt_hwrm_get_rings(bp);
6197 }
6198 
6199 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6200 				   int cp, int stat, int vnic)
6201 {
6202 	if (BNXT_PF(bp))
6203 		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6204 						  vnic);
6205 	else
6206 		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6207 						  vnic);
6208 }
6209 
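/* MSI-X/NQ vectors in use, including any vectors set aside for the
 * RDMA ULP (which may sit at an offset past the L2 range).
 */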
6210 int bnxt_nq_rings_in_use(struct bnxt *bp)
6211 {
6212 	int cp = bp->cp_nr_rings;
6213 	int ulp_msix, ulp_base;
6214 
6215 	ulp_msix = bnxt_get_ulp_msix_num(bp);
6216 	if (ulp_msix) {
6217 		ulp_base = bnxt_get_ulp_msix_base(bp);
6218 		cp += ulp_msix;
6219 		if ((ulp_base + ulp_msix) > cp)
6220 			cp = ulp_base + ulp_msix;
6221 	}
6222 	return cp;
6223 }
6224 
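/* Completion rings in use.  On P5 chips each RX and each TX ring has
 * its own completion ring; on legacy chips the count simply equals the
 * vectors in use.
 */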
6225 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6226 {
6227 	int cp;
6228 
6229 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6230 		return bnxt_nq_rings_in_use(bp);
6231 
6232 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
6233 	return cp;
6234 }
6235 
6236 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6237 {
6238 	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6239 	int cp = bp->cp_nr_rings;
6240 
6241 	if (!ulp_stat)
6242 		return cp;
6243 
6244 	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6245 		return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6246 
6247 	return cp + ulp_stat;
6248 }
6249 
6250 /* Check if a default RSS map needs to be set up.  This function is only
6251  * used on older firmware that does not require reserving RX rings.
6252  */
6253 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6254 {
6255 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6256 
6257 	/* The RSS map was sized for resv_rx_rings; redo it if that changed */
6258 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6259 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
6260 		if (!netif_is_rxfh_configured(bp->dev))
6261 			bnxt_set_dflt_rss_indir_tbl(bp);
6262 	}
6263 }
6264 
6265 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6266 {
6267 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6268 	int cp = bnxt_cp_rings_in_use(bp);
6269 	int nq = bnxt_nq_rings_in_use(bp);
6270 	int rx = bp->rx_nr_rings, stat;
6271 	int vnic = 1, grp = rx;
6272 
6273 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6274 	    bp->hwrm_spec_code >= 0x10601)
6275 		return true;
6276 
6277 	/* Old firmware does not need RX ring reservations, but we may still
6278 	 * need to set up a default RSS map.  With new firmware we go through
6279 	 * RX ring reservations first and then set up the RSS map for the
6280 	 * successfully reserved RX rings when needed.
6281 	 */
6282 	if (!BNXT_NEW_RM(bp)) {
6283 		bnxt_check_rss_tbl_no_rmgr(bp);
6284 		return false;
6285 	}
6286 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6287 		vnic = rx + 1;
6288 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6289 		rx <<= 1;
6290 	stat = bnxt_get_func_stat_ctxs(bp);
6291 	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6292 	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6293 	    (hw_resc->resv_hw_ring_grps != grp &&
6294 	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
6295 		return true;
6296 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6297 	    hw_resc->resv_irqs != nq)
6298 		return true;
6299 	return false;
6300 }
6301 
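/* Reserve rings with the firmware, then shrink the driver's ring
 * counts to what was actually granted.  If fewer than 2 RX entries
 * remain while aggregation rings are enabled (RX is doubled in that
 * case), aggregation and LRO are turned off to run on a single ring.
 */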
6302 static int __bnxt_reserve_rings(struct bnxt *bp)
6303 {
6304 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6305 	int cp = bnxt_nq_rings_in_use(bp);
6306 	int tx = bp->tx_nr_rings;
6307 	int rx = bp->rx_nr_rings;
6308 	int grp, rx_rings, rc;
6309 	int vnic = 1, stat;
6310 	bool sh = false;
6311 
6312 	if (!bnxt_need_reserve_rings(bp))
6313 		return 0;
6314 
6315 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6316 		sh = true;
6317 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6318 		vnic = rx + 1;
6319 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6320 		rx <<= 1;
6321 	grp = bp->rx_nr_rings;
6322 	stat = bnxt_get_func_stat_ctxs(bp);
6323 
6324 	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6325 	if (rc)
6326 		return rc;
6327 
6328 	tx = hw_resc->resv_tx_rings;
6329 	if (BNXT_NEW_RM(bp)) {
6330 		rx = hw_resc->resv_rx_rings;
6331 		cp = hw_resc->resv_irqs;
6332 		grp = hw_resc->resv_hw_ring_grps;
6333 		vnic = hw_resc->resv_vnics;
6334 		stat = hw_resc->resv_stat_ctxs;
6335 	}
6336 
6337 	rx_rings = rx;
6338 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6339 		if (rx >= 2) {
6340 			rx_rings = rx >> 1;
6341 		} else {
6342 			if (netif_running(bp->dev))
6343 				return -ENOMEM;
6344 
6345 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6346 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6347 			bp->dev->hw_features &= ~NETIF_F_LRO;
6348 			bp->dev->features &= ~NETIF_F_LRO;
6349 			bnxt_set_ring_params(bp);
6350 		}
6351 	}
6352 	rx_rings = min_t(int, rx_rings, grp);
6353 	cp = min_t(int, cp, bp->cp_nr_rings);
6354 	if (stat > bnxt_get_ulp_stat_ctxs(bp))
6355 		stat -= bnxt_get_ulp_stat_ctxs(bp);
6356 	cp = min_t(int, cp, stat);
6357 	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6358 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6359 		rx = rx_rings << 1;
6360 	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6361 	bp->tx_nr_rings = tx;
6362 
6363 	/* If we cannot reserve all the RX rings, reset the RSS map only
6364 	 * if absolutely necessary.
6365 	 */
6366 	if (rx_rings != bp->rx_nr_rings) {
6367 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6368 			    rx_rings, bp->rx_nr_rings);
6369 		if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6370 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6371 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6372 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6373 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6374 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6375 		}
6376 	}
6377 	bp->rx_nr_rings = rx_rings;
6378 	bp->cp_nr_rings = cp;
6379 
6380 	if (!tx || !rx || !cp || !grp || !vnic || !stat)
6381 		return -ENOMEM;
6382 
6383 	if (!netif_is_rxfh_configured(bp->dev))
6384 		bnxt_set_dflt_rss_indir_tbl(bp);
6385 
6386 	return rc;
6387 }
6388 
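/* Dry-run a VF ring reservation.  The *_ASSETS_TEST flags make the
 * firmware check that the requested resources are available without
 * actually committing them.
 */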
6389 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6390 				    int ring_grps, int cp_rings, int stats,
6391 				    int vnics)
6392 {
6393 	struct hwrm_func_vf_cfg_input *req;
6394 	u32 flags;
6395 
6396 	if (!BNXT_NEW_RM(bp))
6397 		return 0;
6398 
6399 	req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6400 					   cp_rings, stats, vnics);
6401 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6402 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6403 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6404 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6405 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6406 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6407 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6408 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6409 
6410 	req->flags = cpu_to_le32(flags);
6411 	return hwrm_req_send_silent(bp, req);
6412 }
6413 
6414 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6415 				    int ring_grps, int cp_rings, int stats,
6416 				    int vnics)
6417 {
6418 	struct hwrm_func_cfg_input *req;
6419 	u32 flags;
6420 
6421 	req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6422 					   cp_rings, stats, vnics);
6423 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6424 	if (BNXT_NEW_RM(bp)) {
6425 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6426 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6427 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6428 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6429 		if (bp->flags & BNXT_FLAG_CHIP_P5)
6430 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6431 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6432 		else
6433 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6434 	}
6435 
6436 	req->flags = cpu_to_le32(flags);
6437 	return hwrm_req_send_silent(bp, req);
6438 }
6439 
6440 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6441 				 int ring_grps, int cp_rings, int stats,
6442 				 int vnics)
6443 {
6444 	if (bp->hwrm_spec_code < 0x10801)
6445 		return 0;
6446 
6447 	if (BNXT_PF(bp))
6448 		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6449 						ring_grps, cp_rings, stats,
6450 						vnics);
6451 
6452 	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6453 					cp_rings, stats, vnics);
6454 }
6455 
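/* Query interrupt coalescing capabilities.  Conservative defaults are
 * filled in first so that older firmware without
 * HWRM_RING_AGGINT_QCAPS (spec below 1.9.2) still gets working legacy
 * parameters.
 */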
6456 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6457 {
6458 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6459 	struct hwrm_ring_aggint_qcaps_output *resp;
6460 	struct hwrm_ring_aggint_qcaps_input *req;
6461 	int rc;
6462 
6463 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6464 	coal_cap->num_cmpl_dma_aggr_max = 63;
6465 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6466 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6467 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6468 	coal_cap->int_lat_tmr_min_max = 65535;
6469 	coal_cap->int_lat_tmr_max_max = 65535;
6470 	coal_cap->num_cmpl_aggr_int_max = 65535;
6471 	coal_cap->timer_units = 80;
6472 
6473 	if (bp->hwrm_spec_code < 0x10902)
6474 		return;
6475 
6476 	if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6477 		return;
6478 
6479 	resp = hwrm_req_hold(bp, req);
6480 	rc = hwrm_req_send_silent(bp, req);
6481 	if (!rc) {
6482 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6483 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6484 		coal_cap->num_cmpl_dma_aggr_max =
6485 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6486 		coal_cap->num_cmpl_dma_aggr_during_int_max =
6487 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6488 		coal_cap->cmpl_aggr_dma_tmr_max =
6489 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6490 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6491 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6492 		coal_cap->int_lat_tmr_min_max =
6493 			le16_to_cpu(resp->int_lat_tmr_min_max);
6494 		coal_cap->int_lat_tmr_max_max =
6495 			le16_to_cpu(resp->int_lat_tmr_max_max);
6496 		coal_cap->num_cmpl_aggr_int_max =
6497 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
6498 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6499 	}
6500 	hwrm_req_drop(bp, req);
6501 }
6502 
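/* Convert microseconds to hardware coalescing timer units.  timer_units
 * is in nanoseconds per unit (80 by default), so e.g. 25 usec maps to
 * 25000 / 80 = 312 units.
 */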
6503 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6504 {
6505 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6506 
6507 	return usec * 1000 / coal_cap->timer_units;
6508 }
6509 
6510 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6511 	struct bnxt_coal *hw_coal,
6512 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6513 {
6514 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6515 	u32 cmpl_params = coal_cap->cmpl_params;
6516 	u16 val, tmr, max, flags = 0;
6517 
6518 	max = hw_coal->bufs_per_record * 128;
6519 	if (hw_coal->budget)
6520 		max = hw_coal->bufs_per_record * hw_coal->budget;
6521 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6522 
6523 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6524 	req->num_cmpl_aggr_int = cpu_to_le16(val);
6525 
6526 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6527 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
6528 
6529 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6530 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
6531 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6532 
6533 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6534 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6535 	req->int_lat_tmr_max = cpu_to_le16(tmr);
6536 
6537 	/* min timer set to 1/2 of interrupt timer */
6538 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6539 		val = tmr / 2;
6540 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6541 		req->int_lat_tmr_min = cpu_to_le16(val);
6542 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6543 	}
6544 
6545 	/* buf timer set to 1/4 of interrupt timer */
6546 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6547 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6548 
6549 	if (cmpl_params &
6550 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6551 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6552 		val = clamp_t(u16, tmr, 1,
6553 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6554 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6555 		req->enables |=
6556 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6557 	}
6558 
6559 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6560 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6561 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6562 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6563 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6564 	req->flags = cpu_to_le16(flags);
6565 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6566 }
6567 
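/* NQs on P5 chips expose a single coalescing knob: the minimum
 * interrupt latency timer, programmed here to half the ring value.
 */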
6568 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6569 				   struct bnxt_coal *hw_coal)
6570 {
6571 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6572 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6573 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6574 	u32 nq_params = coal_cap->nq_params;
6575 	u16 tmr;
6576 	int rc;
6577 
6578 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6579 		return 0;
6580 
6581 	rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6582 	if (rc)
6583 		return rc;
6584 
6585 	req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6586 	req->flags =
6587 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6588 
6589 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6590 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6591 	req->int_lat_tmr_min = cpu_to_le16(tmr);
6592 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6593 	return hwrm_req_send(bp, req);
6594 }
6595 
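/* Apply ring-specific RX coalescing (cpr->rx_ring_coal) to one NAPI
 * instance's completion ring; the remaining parameters are taken from
 * the global rx_coal settings.
 */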
6596 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6597 {
6598 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6599 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6600 	struct bnxt_coal coal;
6601 	int rc;
6602 
6603 	/* Tick values are in microseconds.
6604 	 * 1 coal_buf x bufs_per_record = 1 completion record.
6605 	 */
6606 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6607 
6608 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6609 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6610 
6611 	if (!bnapi->rx_ring)
6612 		return -ENODEV;
6613 
6614 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6615 	if (rc)
6616 		return rc;
6617 
6618 	bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6619 
6620 	req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6621 
6622 	return hwrm_req_send(bp, req_rx);
6623 }
6624 
6625 int bnxt_hwrm_set_coal(struct bnxt *bp)
6626 {
6627 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6628 							   *req;
6629 	int i, rc;
6630 
6631 	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6632 	if (rc)
6633 		return rc;
6634 
6635 	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6636 	if (rc) {
6637 		hwrm_req_drop(bp, req_rx);
6638 		return rc;
6639 	}
6640 
6641 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6642 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6643 
6644 	hwrm_req_hold(bp, req_rx);
6645 	hwrm_req_hold(bp, req_tx);
6646 	for (i = 0; i < bp->cp_nr_rings; i++) {
6647 		struct bnxt_napi *bnapi = bp->bnapi[i];
6648 		struct bnxt_coal *hw_coal;
6649 		u16 ring_id;
6650 
6651 		req = req_rx;
6652 		if (!bnapi->rx_ring) {
6653 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6654 			req = req_tx;
6655 		} else {
6656 			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6657 		}
6658 		req->ring_id = cpu_to_le16(ring_id);
6659 
6660 		rc = hwrm_req_send(bp, req);
6661 		if (rc)
6662 			break;
6663 
6664 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6665 			continue;
6666 
6667 		if (bnapi->rx_ring && bnapi->tx_ring) {
6668 			req = req_tx;
6669 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6670 			req->ring_id = cpu_to_le16(ring_id);
6671 			rc = hwrm_req_send(bp, req);
6672 			if (rc)
6673 				break;
6674 		}
6675 		if (bnapi->rx_ring)
6676 			hw_coal = &bp->rx_coal;
6677 		else
6678 			hw_coal = &bp->tx_coal;
6679 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6680 	}
6681 	hwrm_req_drop(bp, req_rx);
6682 	hwrm_req_drop(bp, req_tx);
6683 	return rc;
6684 }
6685 
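/* Free all stats contexts.  On firmware with major version 20 or
 * older, each context is first cleared with HWRM_STAT_CTX_CLR_STATS
 * before being freed.
 */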
6686 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6687 {
6688 	struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6689 	struct hwrm_stat_ctx_free_input *req;
6690 	int i;
6691 
6692 	if (!bp->bnapi)
6693 		return;
6694 
6695 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6696 		return;
6697 
6698 	if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6699 		return;
6700 	if (BNXT_FW_MAJ(bp) <= 20) {
6701 		if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6702 			hwrm_req_drop(bp, req);
6703 			return;
6704 		}
6705 		hwrm_req_hold(bp, req0);
6706 	}
6707 	hwrm_req_hold(bp, req);
6708 	for (i = 0; i < bp->cp_nr_rings; i++) {
6709 		struct bnxt_napi *bnapi = bp->bnapi[i];
6710 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6711 
6712 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6713 			req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6714 			if (req0) {
6715 				req0->stat_ctx_id = req->stat_ctx_id;
6716 				hwrm_req_send(bp, req0);
6717 			}
6718 			hwrm_req_send(bp, req);
6719 
6720 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6721 		}
6722 	}
6723 	hwrm_req_drop(bp, req);
6724 	if (req0)
6725 		hwrm_req_drop(bp, req0);
6726 }
6727 
6728 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6729 {
6730 	struct hwrm_stat_ctx_alloc_output *resp;
6731 	struct hwrm_stat_ctx_alloc_input *req;
6732 	int rc, i;
6733 
6734 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6735 		return 0;
6736 
6737 	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6738 	if (rc)
6739 		return rc;
6740 
6741 	req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6742 	req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6743 
6744 	resp = hwrm_req_hold(bp, req);
6745 	for (i = 0; i < bp->cp_nr_rings; i++) {
6746 		struct bnxt_napi *bnapi = bp->bnapi[i];
6747 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6748 
6749 		req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6750 
6751 		rc = hwrm_req_send(bp, req);
6752 		if (rc)
6753 			break;
6754 
6755 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6756 
6757 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6758 	}
6759 	hwrm_req_drop(bp, req);
6760 	return rc;
6761 }
6762 
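/* Query the function's current configuration: VF VLAN, LLDP/DCBX agent
 * capabilities, NPAR partition type, bridge (EVB) mode, the maximum
 * configured MTU and the usable doorbell BAR size.
 */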
6763 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6764 {
6765 	struct hwrm_func_qcfg_output *resp;
6766 	struct hwrm_func_qcfg_input *req;
6767 	u32 min_db_offset = 0;
6768 	u16 flags;
6769 	int rc;
6770 
6771 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6772 	if (rc)
6773 		return rc;
6774 
6775 	req->fid = cpu_to_le16(0xffff);
6776 	resp = hwrm_req_hold(bp, req);
6777 	rc = hwrm_req_send(bp, req);
6778 	if (rc)
6779 		goto func_qcfg_exit;
6780 
6781 #ifdef CONFIG_BNXT_SRIOV
6782 	if (BNXT_VF(bp)) {
6783 		struct bnxt_vf_info *vf = &bp->vf;
6784 
6785 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6786 	} else {
6787 		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6788 	}
6789 #endif
6790 	flags = le16_to_cpu(resp->flags);
6791 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6792 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6793 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6794 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6795 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6796 	}
6797 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6798 		bp->flags |= BNXT_FLAG_MULTI_HOST;
6799 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6800 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6801 
6802 	switch (resp->port_partition_type) {
6803 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6804 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6805 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6806 		bp->port_partition_type = resp->port_partition_type;
6807 		break;
6808 	}
6809 	if (bp->hwrm_spec_code < 0x10707 ||
6810 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6811 		bp->br_mode = BRIDGE_MODE_VEB;
6812 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6813 		bp->br_mode = BRIDGE_MODE_VEPA;
6814 	else
6815 		bp->br_mode = BRIDGE_MODE_UNDEF;
6816 
6817 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6818 	if (!bp->max_mtu)
6819 		bp->max_mtu = BNXT_MAX_MTU;
6820 
6821 	if (bp->db_size)
6822 		goto func_qcfg_exit;
6823 
6824 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6825 		if (BNXT_PF(bp))
6826 			min_db_offset = DB_PF_OFFSET_P5;
6827 		else
6828 			min_db_offset = DB_VF_OFFSET_P5;
6829 	}
6830 	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6831 				 1024);
6832 	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6833 	    bp->db_size <= min_db_offset)
6834 		bp->db_size = pci_resource_len(bp->pdev, 2);
6835 
6836 func_qcfg_exit:
6837 	hwrm_req_drop(bp, req);
6838 	return rc;
6839 }
6840 
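/* Record the firmware's context-memory initializer: the byte value and
 * the per-type offsets (reported in 4-byte units) at which newly
 * allocated context pages must be pre-initialized.
 */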
6841 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6842 			struct hwrm_func_backing_store_qcaps_output *resp)
6843 {
6844 	struct bnxt_mem_init *mem_init;
6845 	u16 init_mask;
6846 	u8 init_val;
6847 	u8 *offset;
6848 	int i;
6849 
6850 	init_val = resp->ctx_kind_initializer;
6851 	init_mask = le16_to_cpu(resp->ctx_init_mask);
6852 	offset = &resp->qp_init_offset;
6853 	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6854 	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6855 		mem_init->init_val = init_val;
6856 		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6857 		if (!init_mask)
6858 			continue;
6859 		if (i == BNXT_CTX_MEM_INIT_STAT)
6860 			offset = &resp->stat_init_offset;
6861 		if (init_mask & (1 << i))
6862 			mem_init->offset = *offset * 4;
6863 		else
6864 			mem_init->init_val = 0;
6865 	}
6866 	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6867 	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6868 	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6869 	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6870 	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6871 	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6872 }
6873 
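/* Query the firmware's backing store (context memory) requirements and
 * cache them in a freshly allocated bp->ctx.  Runs at most once
 * (bp->ctx is set afterwards) and only on PFs with HWRM spec 1.9.2 or
 * newer; a failure is treated as "no backing store needed".
 */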
6874 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6875 {
6876 	struct hwrm_func_backing_store_qcaps_output *resp;
6877 	struct hwrm_func_backing_store_qcaps_input *req;
6878 	int rc;
6879 
6880 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6881 		return 0;
6882 
6883 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6884 	if (rc)
6885 		return rc;
6886 
6887 	resp = hwrm_req_hold(bp, req);
6888 	rc = hwrm_req_send_silent(bp, req);
6889 	if (!rc) {
6890 		struct bnxt_ctx_pg_info *ctx_pg;
6891 		struct bnxt_ctx_mem_info *ctx;
6892 		int i, tqm_rings;
6893 
6894 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6895 		if (!ctx) {
6896 			rc = -ENOMEM;
6897 			goto ctx_err;
6898 		}
6899 		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6900 		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6901 		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6902 		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6903 		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6904 		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6905 		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6906 		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6907 		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6908 		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6909 		ctx->vnic_max_vnic_entries =
6910 			le16_to_cpu(resp->vnic_max_vnic_entries);
6911 		ctx->vnic_max_ring_table_entries =
6912 			le16_to_cpu(resp->vnic_max_ring_table_entries);
6913 		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6914 		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6915 		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6916 		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6917 		ctx->tqm_min_entries_per_ring =
6918 			le32_to_cpu(resp->tqm_min_entries_per_ring);
6919 		ctx->tqm_max_entries_per_ring =
6920 			le32_to_cpu(resp->tqm_max_entries_per_ring);
6921 		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6922 		if (!ctx->tqm_entries_multiple)
6923 			ctx->tqm_entries_multiple = 1;
6924 		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6925 		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6926 		ctx->mrav_num_entries_units =
6927 			le16_to_cpu(resp->mrav_num_entries_units);
6928 		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6929 		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6930 
6931 		bnxt_init_ctx_initializer(ctx, resp);
6932 
6933 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6934 		if (!ctx->tqm_fp_rings_count)
6935 			ctx->tqm_fp_rings_count = bp->max_q;
6936 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6937 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6938 
6939 		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6940 		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6941 		if (!ctx_pg) {
6942 			kfree(ctx);
6943 			rc = -ENOMEM;
6944 			goto ctx_err;
6945 		}
6946 		for (i = 0; i < tqm_rings; i++, ctx_pg++)
6947 			ctx->tqm_mem[i] = ctx_pg;
6948 		bp->ctx = ctx;
6949 	} else {
6950 		rc = 0;
6951 	}
6952 ctx_err:
6953 	hwrm_req_drop(bp, req);
6954 	return rc;
6955 }
6956 
6957 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6958 				  __le64 *pg_dir)
6959 {
6960 	if (!rmem->nr_pages)
6961 		return;
6962 
6963 	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
6964 	if (rmem->depth >= 1) {
6965 		if (rmem->depth == 2)
6966 			*pg_attr |= 2;
6967 		else
6968 			*pg_attr |= 1;
6969 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6970 	} else {
6971 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6972 	}
6973 }
6974 
6975 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
6976 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
6977 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
6978 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
6979 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
6980 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6981 
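/* Program the backing store layout into the firmware.  The request is
 * sent at the shorter legacy length when the firmware cannot accept
 * the full extended request.
 */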
6982 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6983 {
6984 	struct hwrm_func_backing_store_cfg_input *req;
6985 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6986 	struct bnxt_ctx_pg_info *ctx_pg;
6987 	void **__req = (void **)&req;
6988 	u32 req_len = sizeof(*req);
6989 	__le32 *num_entries;
6990 	__le64 *pg_dir;
6991 	u32 flags = 0;
6992 	u8 *pg_attr;
6993 	u32 ena;
6994 	int rc;
6995 	int i;
6996 
6997 	if (!ctx)
6998 		return 0;
6999 
7000 	if (req_len > bp->hwrm_max_ext_req_len)
7001 		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7002 	rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7003 	if (rc)
7004 		return rc;
7005 
7006 	req->enables = cpu_to_le32(enables);
7007 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7008 		ctx_pg = &ctx->qp_mem;
7009 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7010 		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7011 		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7012 		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7013 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7014 				      &req->qpc_pg_size_qpc_lvl,
7015 				      &req->qpc_page_dir);
7016 	}
7017 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7018 		ctx_pg = &ctx->srq_mem;
7019 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7020 		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7021 		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7022 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7023 				      &req->srq_pg_size_srq_lvl,
7024 				      &req->srq_page_dir);
7025 	}
7026 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7027 		ctx_pg = &ctx->cq_mem;
7028 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7029 		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7030 		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7031 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7032 				      &req->cq_pg_size_cq_lvl,
7033 				      &req->cq_page_dir);
7034 	}
7035 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7036 		ctx_pg = &ctx->vnic_mem;
7037 		req->vnic_num_vnic_entries =
7038 			cpu_to_le16(ctx->vnic_max_vnic_entries);
7039 		req->vnic_num_ring_table_entries =
7040 			cpu_to_le16(ctx->vnic_max_ring_table_entries);
7041 		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7042 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7043 				      &req->vnic_pg_size_vnic_lvl,
7044 				      &req->vnic_page_dir);
7045 	}
7046 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7047 		ctx_pg = &ctx->stat_mem;
7048 		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7049 		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7050 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7051 				      &req->stat_pg_size_stat_lvl,
7052 				      &req->stat_page_dir);
7053 	}
7054 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7055 		ctx_pg = &ctx->mrav_mem;
7056 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7057 		if (ctx->mrav_num_entries_units)
7058 			flags |=
7059 			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7060 		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7061 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7062 				      &req->mrav_pg_size_mrav_lvl,
7063 				      &req->mrav_page_dir);
7064 	}
7065 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7066 		ctx_pg = &ctx->tim_mem;
7067 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7068 		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7069 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7070 				      &req->tim_pg_size_tim_lvl,
7071 				      &req->tim_page_dir);
7072 	}
7073 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
7074 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7075 	     pg_dir = &req->tqm_sp_page_dir,
7076 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7077 	     i < BNXT_MAX_TQM_RINGS;
7078 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7079 		if (!(enables & ena))
7080 			continue;
7081 
7082 		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7083 		ctx_pg = ctx->tqm_mem[i];
7084 		*num_entries = cpu_to_le32(ctx_pg->entries);
7085 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7086 	}
7087 	req->flags = cpu_to_le32(flags);
7088 	return hwrm_req_send(bp, req);
7089 }
7090 
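/* Context memory is handed to the firmware as rings of host pages.
 * bnxt_alloc_ctx_pg_tbls() below builds up to a two-level structure:
 * depth 0 is a single page, depth 1 one page table level, and depth 2
 * a directory of page tables for regions above MAX_CTX_PAGES pages.
 */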
7091 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7092 				  struct bnxt_ctx_pg_info *ctx_pg)
7093 {
7094 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7095 
7096 	rmem->page_size = BNXT_PAGE_SIZE;
7097 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
7098 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
7099 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7100 	if (rmem->depth >= 1)
7101 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7102 	return bnxt_alloc_ring(bp, rmem);
7103 }
7104 
7105 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7106 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7107 				  u8 depth, struct bnxt_mem_init *mem_init)
7108 {
7109 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7110 	int rc;
7111 
7112 	if (!mem_size)
7113 		return -EINVAL;
7114 
7115 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7117 		ctx_pg->nr_pages = 0;
7118 		return -EINVAL;
7119 	}
7120 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7121 		int nr_tbls, i;
7122 
7123 		rmem->depth = 2;
7124 		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7125 					     GFP_KERNEL);
7126 		if (!ctx_pg->ctx_pg_tbl)
7127 			return -ENOMEM;
7128 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7129 		rmem->nr_pages = nr_tbls;
7130 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7131 		if (rc)
7132 			return rc;
7133 		for (i = 0; i < nr_tbls; i++) {
7134 			struct bnxt_ctx_pg_info *pg_tbl;
7135 
7136 			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7137 			if (!pg_tbl)
7138 				return -ENOMEM;
7139 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7140 			rmem = &pg_tbl->ring_mem;
7141 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7142 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7143 			rmem->depth = 1;
7144 			rmem->nr_pages = MAX_CTX_PAGES;
7145 			rmem->mem_init = mem_init;
7146 			if (i == (nr_tbls - 1)) {
7147 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7148 
7149 				if (rem)
7150 					rmem->nr_pages = rem;
7151 			}
7152 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7153 			if (rc)
7154 				break;
7155 		}
7156 	} else {
7157 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7158 		if (rmem->nr_pages > 1 || depth)
7159 			rmem->depth = 1;
7160 		rmem->mem_init = mem_init;
7161 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7162 	}
7163 	return rc;
7164 }
7165 
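/* Free the pages allocated by bnxt_alloc_ctx_pg_tbls(), freeing the
 * lower-level page blocks before the top-level table when a two-level
 * structure was used.
 */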
7166 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7167 				  struct bnxt_ctx_pg_info *ctx_pg)
7168 {
7169 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7170 
7171 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7172 	    ctx_pg->ctx_pg_tbl) {
7173 		int i, nr_tbls = rmem->nr_pages;
7174 
7175 		for (i = 0; i < nr_tbls; i++) {
7176 			struct bnxt_ctx_pg_info *pg_tbl;
7177 			struct bnxt_ring_mem_info *rmem2;
7178 
7179 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
7180 			if (!pg_tbl)
7181 				continue;
7182 			rmem2 = &pg_tbl->ring_mem;
7183 			bnxt_free_ring(bp, rmem2);
7184 			ctx_pg->ctx_pg_arr[i] = NULL;
7185 			kfree(pg_tbl);
7186 			ctx_pg->ctx_pg_tbl[i] = NULL;
7187 		}
7188 		kfree(ctx_pg->ctx_pg_tbl);
7189 		ctx_pg->ctx_pg_tbl = NULL;
7190 	}
7191 	bnxt_free_ring(bp, rmem);
7192 	ctx_pg->nr_pages = 0;
7193 }
7194 
7195 static void bnxt_free_ctx_mem(struct bnxt *bp)
7196 {
7197 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
7198 	int i;
7199 
7200 	if (!ctx)
7201 		return;
7202 
7203 	if (ctx->tqm_mem[0]) {
7204 		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7205 			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7206 		kfree(ctx->tqm_mem[0]);
7207 		ctx->tqm_mem[0] = NULL;
7208 	}
7209 
7210 	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7211 	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7212 	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7213 	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7214 	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7215 	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7216 	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7217 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7218 }
7219 
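/* Query the firmware's backing store requirements and allocate host
 * memory for each context type (QP, SRQ, CQ, VNIC, stats, MRAV, TIM
 * and the TQM rings), then hand the page tables to the firmware.
 * Extra QP and SRQ entries are reserved when RoCE is supported (and
 * this is not a kdump kernel) to leave room for RDMA resources.
 */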
7220 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7221 {
7222 	struct bnxt_ctx_pg_info *ctx_pg;
7223 	struct bnxt_ctx_mem_info *ctx;
7224 	struct bnxt_mem_init *init;
7225 	u32 mem_size, ena, entries;
7226 	u32 entries_sp, min;
7227 	u32 num_mr, num_ah;
7228 	u32 extra_srqs = 0;
7229 	u32 extra_qps = 0;
7230 	u8 pg_lvl = 1;
7231 	int i, rc;
7232 
7233 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7234 	if (rc) {
7235 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7236 			   rc);
7237 		return rc;
7238 	}
7239 	ctx = bp->ctx;
7240 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7241 		return 0;
7242 
7243 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7244 		pg_lvl = 2;
7245 		extra_qps = 65536;
7246 		extra_srqs = 8192;
7247 	}
7248 
7249 	ctx_pg = &ctx->qp_mem;
7250 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7251 			  extra_qps;
7252 	if (ctx->qp_entry_size) {
7253 		mem_size = ctx->qp_entry_size * ctx_pg->entries;
7254 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7255 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7256 		if (rc)
7257 			return rc;
7258 	}
7259 
7260 	ctx_pg = &ctx->srq_mem;
7261 	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7262 	if (ctx->srq_entry_size) {
7263 		mem_size = ctx->srq_entry_size * ctx_pg->entries;
7264 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7265 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7266 		if (rc)
7267 			return rc;
7268 	}
7269 
7270 	ctx_pg = &ctx->cq_mem;
7271 	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7272 	if (ctx->cq_entry_size) {
7273 		mem_size = ctx->cq_entry_size * ctx_pg->entries;
7274 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7275 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7276 		if (rc)
7277 			return rc;
7278 	}
7279 
7280 	ctx_pg = &ctx->vnic_mem;
7281 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
7282 			  ctx->vnic_max_ring_table_entries;
7283 	if (ctx->vnic_entry_size) {
7284 		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7285 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7286 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7287 		if (rc)
7288 			return rc;
7289 	}
7290 
7291 	ctx_pg = &ctx->stat_mem;
7292 	ctx_pg->entries = ctx->stat_max_entries;
7293 	if (ctx->stat_entry_size) {
7294 		mem_size = ctx->stat_entry_size * ctx_pg->entries;
7295 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7296 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7297 		if (rc)
7298 			return rc;
7299 	}
7300 
7301 	ena = 0;
7302 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7303 		goto skip_rdma;
7304 
7305 	ctx_pg = &ctx->mrav_mem;
7306 	/* 128K extra is needed to accommodate static AH context
7307 	 * allocation by f/w.
7308 	 */
7309 	num_mr = 1024 * 256;
7310 	num_ah = 1024 * 128;
7311 	ctx_pg->entries = num_mr + num_ah;
7312 	if (ctx->mrav_entry_size) {
7313 		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7314 		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7315 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7316 		if (rc)
7317 			return rc;
7318 	}
7319 	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7320 	if (ctx->mrav_num_entries_units)
7321 		ctx_pg->entries =
7322 			((num_mr / ctx->mrav_num_entries_units) << 16) |
7323 			 (num_ah / ctx->mrav_num_entries_units);
7324 
7325 	ctx_pg = &ctx->tim_mem;
7326 	ctx_pg->entries = ctx->qp_mem.entries;
7327 	if (ctx->tim_entry_size) {
7328 		mem_size = ctx->tim_entry_size * ctx_pg->entries;
7329 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7330 		if (rc)
7331 			return rc;
7332 	}
7333 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7334 
7335 skip_rdma:
7336 	min = ctx->tqm_min_entries_per_ring;
7337 	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7338 		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7339 	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7340 	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7341 	entries = roundup(entries, ctx->tqm_entries_multiple);
7342 	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7343 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7344 		ctx_pg = ctx->tqm_mem[i];
7345 		ctx_pg->entries = i ? entries : entries_sp;
7346 		if (ctx->tqm_entry_size) {
7347 			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7348 			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7349 						    NULL);
7350 			if (rc)
7351 				return rc;
7352 		}
7353 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7354 	}
7355 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7356 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7357 	if (rc) {
7358 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7359 			   rc);
7360 		return rc;
7361 	}
7362 	ctx->flags |= BNXT_CTX_FLAG_INITED;
7363 	return 0;
7364 }
7365 
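/* Query the min/max resource ranges (rings, VNICs, stat contexts,
 * etc.) for this function.  When @all is false, only the TX scheduler
 * input limit is refreshed.  On P5 chips, the NQ limit comes from
 * max_msix and ring groups simply track the RX ring count.
 */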
7366 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7367 {
7368 	struct hwrm_func_resource_qcaps_output *resp;
7369 	struct hwrm_func_resource_qcaps_input *req;
7370 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7371 	int rc;
7372 
7373 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7374 	if (rc)
7375 		return rc;
7376 
7377 	req->fid = cpu_to_le16(0xffff);
7378 	resp = hwrm_req_hold(bp, req);
7379 	rc = hwrm_req_send_silent(bp, req);
7380 	if (rc)
7381 		goto hwrm_func_resc_qcaps_exit;
7382 
7383 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7384 	if (!all)
7385 		goto hwrm_func_resc_qcaps_exit;
7386 
7387 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7388 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7389 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7390 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7391 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7392 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7393 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7394 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7395 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7396 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7397 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7398 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7399 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7400 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7401 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7402 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7403 
7404 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
7405 		u16 max_msix = le16_to_cpu(resp->max_msix);
7406 
7407 		hw_resc->max_nqs = max_msix;
7408 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7409 	}
7410 
7411 	if (BNXT_PF(bp)) {
7412 		struct bnxt_pf_info *pf = &bp->pf;
7413 
7414 		pf->vf_resv_strategy =
7415 			le16_to_cpu(resp->vf_reservation_strategy);
7416 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7417 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7418 	}
7419 hwrm_func_resc_qcaps_exit:
7420 	hwrm_req_drop(bp, req);
7421 	return rc;
7422 }
7423 
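/* Query the PTP configuration for the port, allocating bp->ptp_cfg on
 * first use.  The PHC reference clock registers come from the response
 * when partial direct access is advertised; otherwise the fixed
 * TIMESYNC_TS0 registers are used on P5 chips.  Any failure tears the
 * PTP state back down.
 */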
7424 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7425 {
7426 	struct hwrm_port_mac_ptp_qcfg_output *resp;
7427 	struct hwrm_port_mac_ptp_qcfg_input *req;
7428 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7429 	u8 flags;
7430 	int rc;
7431 
7432 	if (bp->hwrm_spec_code < 0x10801) {
7433 		rc = -ENODEV;
7434 		goto no_ptp;
7435 	}
7436 
7437 	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7438 	if (rc)
7439 		goto no_ptp;
7440 
7441 	req->port_id = cpu_to_le16(bp->pf.port_id);
7442 	resp = hwrm_req_hold(bp, req);
7443 	rc = hwrm_req_send(bp, req);
7444 	if (rc)
7445 		goto exit;
7446 
7447 	flags = resp->flags;
7448 	if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7449 		rc = -ENODEV;
7450 		goto exit;
7451 	}
7452 	if (!ptp) {
7453 		ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7454 		if (!ptp) {
7455 			rc = -ENOMEM;
7456 			goto exit;
7457 		}
7458 		ptp->bp = bp;
7459 		bp->ptp_cfg = ptp;
7460 	}
7461 	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7462 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7463 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7464 	} else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7465 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7466 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7467 	} else {
7468 		rc = -ENODEV;
7469 		goto exit;
7470 	}
7471 	rc = bnxt_ptp_init(bp);
7472 	if (rc)
7473 		netdev_warn(bp->dev, "PTP initialization failed.\n");
7474 exit:
7475 	hwrm_req_drop(bp, req);
7476 	if (!rc)
7477 		return 0;
7478 
7479 no_ptp:
7480 	bnxt_ptp_clear(bp);
7481 	kfree(ptp);
7482 	bp->ptp_cfg = NULL;
7483 	return rc;
7484 }
7485 
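/* Cache the function capabilities: RoCE, stats and recovery related
 * fw_cap bits, the TX push threshold, the maximum resource counts and,
 * on the PF, the port identity and flow table limits.  PTP support
 * reported for the PF triggers __bnxt_hwrm_ptp_qcfg().
 */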
7486 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7487 {
7488 	struct hwrm_func_qcaps_output *resp;
7489 	struct hwrm_func_qcaps_input *req;
7490 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7491 	u32 flags, flags_ext;
7492 	int rc;
7493 
7494 	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7495 	if (rc)
7496 		return rc;
7497 
7498 	req->fid = cpu_to_le16(0xffff);
7499 	resp = hwrm_req_hold(bp, req);
7500 	rc = hwrm_req_send(bp, req);
7501 	if (rc)
7502 		goto hwrm_func_qcaps_exit;
7503 
7504 	flags = le32_to_cpu(resp->flags);
7505 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7506 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7507 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7508 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7509 	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7510 		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7511 	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7512 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7513 	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7514 		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7515 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7516 		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7517 	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7518 		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7519 	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7520 		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7521 
7522 	flags_ext = le32_to_cpu(resp->flags_ext);
7523 	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7524 		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7525 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7526 		bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7527 
7528 	bp->tx_push_thresh = 0;
7529 	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7530 	    BNXT_FW_MAJ(bp) > 217)
7531 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7532 
7533 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7534 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7535 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7536 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7537 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7538 	if (!hw_resc->max_hw_ring_grps)
7539 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7540 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7541 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7542 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7543 
7544 	if (BNXT_PF(bp)) {
7545 		struct bnxt_pf_info *pf = &bp->pf;
7546 
7547 		pf->fw_fid = le16_to_cpu(resp->fid);
7548 		pf->port_id = le16_to_cpu(resp->port_id);
7549 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7550 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7551 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
7552 		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7553 		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7554 		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7555 		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7556 		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7557 		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7558 		bp->flags &= ~BNXT_FLAG_WOL_CAP;
7559 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7560 			bp->flags |= BNXT_FLAG_WOL_CAP;
7561 		if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7562 			__bnxt_hwrm_ptp_qcfg(bp);
7563 		} else {
7564 			bnxt_ptp_clear(bp);
7565 			kfree(bp->ptp_cfg);
7566 			bp->ptp_cfg = NULL;
7567 		}
7568 	} else {
7569 #ifdef CONFIG_BNXT_SRIOV
7570 		struct bnxt_vf_info *vf = &bp->vf;
7571 
7572 		vf->fw_fid = le16_to_cpu(resp->fid);
7573 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7574 #endif
7575 	}
7576 
7577 hwrm_func_qcaps_exit:
7578 	hwrm_req_drop(bp, req);
7579 	return rc;
7580 }
7581 
7582 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7583 
7584 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7585 {
7586 	int rc;
7587 
7588 	rc = __bnxt_hwrm_func_qcaps(bp);
7589 	if (rc)
7590 		return rc;
7591 	rc = bnxt_hwrm_queue_qportcfg(bp);
7592 	if (rc) {
7593 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7594 		return rc;
7595 	}
7596 	if (bp->hwrm_spec_code >= 0x10803) {
7597 		rc = bnxt_alloc_ctx_mem(bp);
7598 		if (rc)
7599 			return rc;
7600 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7601 		if (!rc)
7602 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7603 	}
7604 	return 0;
7605 }
7606 
7607 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7608 {
7609 	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7610 	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7611 	u32 flags;
7612 	int rc;
7613 
7614 	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7615 		return 0;
7616 
7617 	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7618 	if (rc)
7619 		return rc;
7620 
7621 	resp = hwrm_req_hold(bp, req);
7622 	rc = hwrm_req_send(bp, req);
7623 	if (rc)
7624 		goto hwrm_cfa_adv_qcaps_exit;
7625 
7626 	flags = le32_to_cpu(resp->flags);
7627 	if (flags &
7628 	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7629 		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7630 
7631 hwrm_cfa_adv_qcaps_exit:
7632 	hwrm_req_drop(bp, req);
7633 	return rc;
7634 }
7635 
7636 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7637 {
7638 	if (bp->fw_health)
7639 		return 0;
7640 
7641 	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7642 	if (!bp->fw_health)
7643 		return -ENOMEM;
7644 
7645 	return 0;
7646 }
7647 
7648 static int bnxt_alloc_fw_health(struct bnxt *bp)
7649 {
7650 	int rc;
7651 
7652 	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7653 	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7654 		return 0;
7655 
7656 	rc = __bnxt_alloc_fw_health(bp);
7657 	if (rc) {
7658 		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7659 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7660 		return rc;
7661 	}
7662 
7663 	return 0;
7664 }
7665 
7666 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7667 {
7668 	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7669 					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7670 					 BNXT_FW_HEALTH_WIN_MAP_OFF);
7671 }
7672 
7673 bool bnxt_is_fw_healthy(struct bnxt *bp)
7674 {
7675 	if (bp->fw_health && bp->fw_health->status_reliable) {
7676 		u32 fw_status;
7677 
7678 		fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7679 		if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7680 			return false;
7681 	}
7682 
7683 	return true;
7684 }
7685 
7686 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7687 {
7688 	struct bnxt_fw_health *fw_health = bp->fw_health;
7689 	u32 reg_type;
7690 
7691 	if (!fw_health || !fw_health->status_reliable)
7692 		return;
7693 
7694 	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7695 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7696 		fw_health->status_reliable = false;
7697 }
7698 
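/* Locate the firmware health status register.  Newer firmware
 * publishes its location through the hcomm_status structure at a fixed
 * GRC address; failing that, fall back to the well-known BAR0 status
 * register on P5 chips.  On success the register is mapped and
 * status_reliable is set.
 */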
7699 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7700 {
7701 	void __iomem *hs;
7702 	u32 status_loc;
7703 	u32 reg_type;
7704 	u32 sig;
7705 
7706 	if (bp->fw_health)
7707 		bp->fw_health->status_reliable = false;
7708 
7709 	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7710 	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7711 
7712 	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7713 	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7714 		if (!bp->chip_num) {
7715 			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7716 			bp->chip_num = readl(bp->bar0 +
7717 					     BNXT_FW_HEALTH_WIN_BASE +
7718 					     BNXT_GRC_REG_CHIP_NUM);
7719 		}
7720 		if (!BNXT_CHIP_P5(bp))
7721 			return;
7722 
7723 		status_loc = BNXT_GRC_REG_STATUS_P5 |
7724 			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
7725 	} else {
7726 		status_loc = readl(hs + offsetof(struct hcomm_status,
7727 						 fw_status_loc));
7728 	}
7729 
7730 	if (__bnxt_alloc_fw_health(bp)) {
7731 		netdev_warn(bp->dev, "no memory for firmware status checks\n");
7732 		return;
7733 	}
7734 
7735 	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7736 	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7737 	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7738 		__bnxt_map_fw_health_reg(bp, status_loc);
7739 		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7740 			BNXT_FW_HEALTH_WIN_OFF(status_loc);
7741 	}
7742 
7743 	bp->fw_health->status_reliable = true;
7744 }
7745 
7746 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7747 {
7748 	struct bnxt_fw_health *fw_health = bp->fw_health;
7749 	u32 reg_base = 0xffffffff;
7750 	int i;
7751 
7752 	bp->fw_health->status_reliable = false;
7753 	/* Only pre-map the monitoring GRC registers using window 3 */
7754 	for (i = 0; i < 4; i++) {
7755 		u32 reg = fw_health->regs[i];
7756 
7757 		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7758 			continue;
7759 		if (reg_base == 0xffffffff)
7760 			reg_base = reg & BNXT_GRC_BASE_MASK;
7761 		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7762 			return -ERANGE;
7763 		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7764 	}
7765 	bp->fw_health->status_reliable = true;
7766 	if (reg_base == 0xffffffff)
7767 		return 0;
7768 
7769 	__bnxt_map_fw_health_reg(bp, reg_base);
7770 	return 0;
7771 }
7772 
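/* Fetch the error recovery parameters from firmware: the polling
 * frequency, the various wait periods, the health/heartbeat/reset
 * register locations and the register write sequence used to reset the
 * chip.  The ERROR_RECOVERY capability is dropped if any of it is
 * unusable.
 */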
7773 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7774 {
7775 	struct bnxt_fw_health *fw_health = bp->fw_health;
7776 	struct hwrm_error_recovery_qcfg_output *resp;
7777 	struct hwrm_error_recovery_qcfg_input *req;
7778 	int rc, i;
7779 
7780 	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7781 		return 0;
7782 
7783 	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7784 	if (rc)
7785 		return rc;
7786 
7787 	resp = hwrm_req_hold(bp, req);
7788 	rc = hwrm_req_send(bp, req);
7789 	if (rc)
7790 		goto err_recovery_out;
7791 	fw_health->flags = le32_to_cpu(resp->flags);
7792 	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7793 	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7794 		rc = -EINVAL;
7795 		goto err_recovery_out;
7796 	}
7797 	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7798 	fw_health->master_func_wait_dsecs =
7799 		le32_to_cpu(resp->master_func_wait_period);
7800 	fw_health->normal_func_wait_dsecs =
7801 		le32_to_cpu(resp->normal_func_wait_period);
7802 	fw_health->post_reset_wait_dsecs =
7803 		le32_to_cpu(resp->master_func_wait_period_after_reset);
7804 	fw_health->post_reset_max_wait_dsecs =
7805 		le32_to_cpu(resp->max_bailout_time_after_reset);
7806 	fw_health->regs[BNXT_FW_HEALTH_REG] =
7807 		le32_to_cpu(resp->fw_health_status_reg);
7808 	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7809 		le32_to_cpu(resp->fw_heartbeat_reg);
7810 	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7811 		le32_to_cpu(resp->fw_reset_cnt_reg);
7812 	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7813 		le32_to_cpu(resp->reset_inprogress_reg);
7814 	fw_health->fw_reset_inprog_reg_mask =
7815 		le32_to_cpu(resp->reset_inprogress_reg_mask);
7816 	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7817 	if (fw_health->fw_reset_seq_cnt >= 16) {
7818 		rc = -EINVAL;
7819 		goto err_recovery_out;
7820 	}
7821 	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7822 		fw_health->fw_reset_seq_regs[i] =
7823 			le32_to_cpu(resp->reset_reg[i]);
7824 		fw_health->fw_reset_seq_vals[i] =
7825 			le32_to_cpu(resp->reset_reg_val[i]);
7826 		fw_health->fw_reset_seq_delay_msec[i] =
7827 			resp->delay_after_reset[i];
7828 	}
7829 err_recovery_out:
7830 	hwrm_req_drop(bp, req);
7831 	if (!rc)
7832 		rc = bnxt_map_fw_health_regs(bp);
7833 	if (rc)
7834 		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7835 	return rc;
7836 }
7837 
7838 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7839 {
7840 	struct hwrm_func_reset_input *req;
7841 	int rc;
7842 
7843 	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7844 	if (rc)
7845 		return rc;
7846 
7847 	req->enables = 0;
7848 	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7849 	return hwrm_req_send(bp, req);
7850 }
7851 
7852 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7853 {
7854 	struct hwrm_nvm_get_dev_info_output nvm_info;
7855 
7856 	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7857 		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7858 			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7859 			 nvm_info.nvm_cfg_ver_upd);
7860 }
7861 
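/* Discover the CoS queue configuration for the port.  CNP queues are
 * left out of the usable TC count unless RoCE is unsupported (a PF can
 * then reclaim them), and max_tc is capped at the driver limit.
 */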
7862 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7863 {
7864 	struct hwrm_queue_qportcfg_output *resp;
7865 	struct hwrm_queue_qportcfg_input *req;
7866 	u8 i, j, *qptr;
7867 	bool no_rdma;
7868 	int rc = 0;
7869 
7870 	rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7871 	if (rc)
7872 		return rc;
7873 
7874 	resp = hwrm_req_hold(bp, req);
7875 	rc = hwrm_req_send(bp, req);
7876 	if (rc)
7877 		goto qportcfg_exit;
7878 
7879 	if (!resp->max_configurable_queues) {
7880 		rc = -EINVAL;
7881 		goto qportcfg_exit;
7882 	}
7883 	bp->max_tc = resp->max_configurable_queues;
7884 	bp->max_lltc = resp->max_configurable_lossless_queues;
7885 	if (bp->max_tc > BNXT_MAX_QUEUE)
7886 		bp->max_tc = BNXT_MAX_QUEUE;
7887 
7888 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7889 	qptr = &resp->queue_id0;
7890 	for (i = 0, j = 0; i < bp->max_tc; i++) {
7891 		bp->q_info[j].queue_id = *qptr;
7892 		bp->q_ids[i] = *qptr++;
7893 		bp->q_info[j].queue_profile = *qptr++;
7894 		bp->tc_to_qidx[j] = j;
7895 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7896 		    (no_rdma && BNXT_PF(bp)))
7897 			j++;
7898 	}
7899 	bp->max_q = bp->max_tc;
7900 	bp->max_tc = max_t(u8, j, 1);
7901 
7902 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7903 		bp->max_tc = 1;
7904 
7905 	if (bp->max_lltc > bp->max_tc)
7906 		bp->max_lltc = bp->max_tc;
7907 
7908 qportcfg_exit:
7909 	hwrm_req_drop(bp, req);
7910 	return rc;
7911 }
7912 
7913 static int bnxt_hwrm_poll(struct bnxt *bp)
7914 {
7915 	struct hwrm_ver_get_input *req;
7916 	int rc;
7917 
7918 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7919 	if (rc)
7920 		return rc;
7921 
7922 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7923 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
7924 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7925 
7926 	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7927 	rc = hwrm_req_send(bp, req);
7928 	return rc;
7929 }
7930 
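/* Perform the HWRM_VER_GET handshake with the firmware.  This records
 * the interface spec version, builds the firmware version strings,
 * picks up the command timeout and maximum request lengths, identifies
 * the chip and translates the dev_caps_cfg bits into fw_cap flags.
 */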
7931 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7932 {
7933 	struct hwrm_ver_get_output *resp;
7934 	struct hwrm_ver_get_input *req;
7935 	u16 fw_maj, fw_min, fw_bld, fw_rsv;
7936 	u32 dev_caps_cfg, hwrm_ver;
7937 	int rc, len;
7938 
7939 	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7940 	if (rc)
7941 		return rc;
7942 
7943 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
7944 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7945 	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7946 	req->hwrm_intf_min = HWRM_VERSION_MINOR;
7947 	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7948 
7949 	resp = hwrm_req_hold(bp, req);
7950 	rc = hwrm_req_send(bp, req);
7951 	if (rc)
7952 		goto hwrm_ver_get_exit;
7953 
7954 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7955 
7956 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7957 			     resp->hwrm_intf_min_8b << 8 |
7958 			     resp->hwrm_intf_upd_8b;
7959 	if (resp->hwrm_intf_maj_8b < 1) {
7960 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7961 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7962 			    resp->hwrm_intf_upd_8b);
7963 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7964 	}
7965 
7966 	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7967 			HWRM_VERSION_UPDATE;
7968 
7969 	if (bp->hwrm_spec_code > hwrm_ver)
7970 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7971 			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7972 			 HWRM_VERSION_UPDATE);
7973 	else
7974 		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7975 			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7976 			 resp->hwrm_intf_upd_8b);
7977 
7978 	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7979 	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7980 		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7981 		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7982 		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7983 		len = FW_VER_STR_LEN;
7984 	} else {
7985 		fw_maj = resp->hwrm_fw_maj_8b;
7986 		fw_min = resp->hwrm_fw_min_8b;
7987 		fw_bld = resp->hwrm_fw_bld_8b;
7988 		fw_rsv = resp->hwrm_fw_rsvd_8b;
7989 		len = BC_HWRM_STR_LEN;
7990 	}
7991 	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7992 	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7993 		 fw_rsv);
7994 
7995 	if (strlen(resp->active_pkg_name)) {
7996 		int fw_ver_len = strlen(bp->fw_ver_str);
7997 
7998 		snprintf(bp->fw_ver_str + fw_ver_len,
7999 			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8000 			 resp->active_pkg_name);
8001 		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8002 	}
8003 
8004 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8005 	if (!bp->hwrm_cmd_timeout)
8006 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8007 
8008 	if (resp->hwrm_intf_maj_8b >= 1) {
8009 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8010 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8011 	}
8012 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8013 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8014 
8015 	bp->chip_num = le16_to_cpu(resp->chip_num);
8016 	bp->chip_rev = resp->chip_rev;
8017 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8018 	    !resp->chip_metal)
8019 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8020 
8021 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8022 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8023 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8024 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8025 
8026 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8027 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8028 
8029 	if (dev_caps_cfg &
8030 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8031 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8032 
8033 	if (dev_caps_cfg &
8034 	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8035 		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8036 
8037 	if (dev_caps_cfg &
8038 	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8039 		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8040 
8041 hwrm_ver_get_exit:
8042 	hwrm_req_drop(bp, req);
8043 	return rc;
8044 }
8045 
8046 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8047 {
8048 	struct hwrm_fw_set_time_input *req;
8049 	struct tm tm;
8050 	time64_t now = ktime_get_real_seconds();
8051 	int rc;
8052 
8053 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8054 	    bp->hwrm_spec_code < 0x10400)
8055 		return -EOPNOTSUPP;
8056 
8057 	time64_to_tm(now, 0, &tm);
8058 	rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8059 	if (rc)
8060 		return rc;
8061 
8062 	req->year = cpu_to_le16(1900 + tm.tm_year);
8063 	req->month = 1 + tm.tm_mon;
8064 	req->day = tm.tm_mday;
8065 	req->hour = tm.tm_hour;
8066 	req->minute = tm.tm_min;
8067 	req->second = tm.tm_sec;
8068 	return hwrm_req_send(bp, req);
8069 }
8070 
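/* Fold a hardware counter that is narrower than 64 bits into its
 * 64-bit software mirror.  For example, with a 48-bit counter (mask ==
 * 0xffffffffffff), a hardware value below the low 48 bits of *sw means
 * the counter has wrapped, so one full period (mask + 1) is added to
 * the merged value.
 */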
8071 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8072 {
8073 	u64 sw_tmp;
8074 
8075 	hw &= mask;
8076 	sw_tmp = (*sw & ~mask) | hw;
8077 	if (hw < (*sw & mask))
8078 		sw_tmp += mask + 1;
8079 	WRITE_ONCE(*sw, sw_tmp);
8080 }
8081 
8082 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8083 				    int count, bool ignore_zero)
8084 {
8085 	int i;
8086 
8087 	for (i = 0; i < count; i++) {
8088 		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8089 
8090 		if (ignore_zero && !hw)
8091 			continue;
8092 
8093 		if (masks[i] == -1ULL)
8094 			sw_stats[i] = hw;
8095 		else
8096 			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8097 	}
8098 }
8099 
8100 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8101 {
8102 	if (!stats->hw_stats)
8103 		return;
8104 
8105 	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8106 				stats->hw_masks, stats->len / 8, false);
8107 }
8108 
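/* Accumulate every ring and port counter into its 64-bit software
 * mirror.  Ring 0's masks are reused for all rings, and on P5 chips
 * zero samples are skipped to work around the counter bug noted below.
 */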
8109 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8110 {
8111 	struct bnxt_stats_mem *ring0_stats;
8112 	bool ignore_zero = false;
8113 	int i;
8114 
8115 	/* Chip bug.  Counter intermittently becomes 0. */
8116 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8117 		ignore_zero = true;
8118 
8119 	for (i = 0; i < bp->cp_nr_rings; i++) {
8120 		struct bnxt_napi *bnapi = bp->bnapi[i];
8121 		struct bnxt_cp_ring_info *cpr;
8122 		struct bnxt_stats_mem *stats;
8123 
8124 		cpr = &bnapi->cp_ring;
8125 		stats = &cpr->stats;
8126 		if (!i)
8127 			ring0_stats = stats;
8128 		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8129 					ring0_stats->hw_masks,
8130 					ring0_stats->len / 8, ignore_zero);
8131 	}
8132 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
8133 		struct bnxt_stats_mem *stats = &bp->port_stats;
8134 		__le64 *hw_stats = stats->hw_stats;
8135 		u64 *sw_stats = stats->sw_stats;
8136 		u64 *masks = stats->hw_masks;
8137 		int cnt;
8138 
8139 		cnt = sizeof(struct rx_port_stats) / 8;
8140 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8141 
8142 		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8143 		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8144 		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8145 		cnt = sizeof(struct tx_port_stats) / 8;
8146 		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8147 	}
8148 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8149 		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8150 		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8151 	}
8152 }
8153 
8154 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8155 {
8156 	struct hwrm_port_qstats_input *req;
8157 	struct bnxt_pf_info *pf = &bp->pf;
8158 	int rc;
8159 
8160 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8161 		return 0;
8162 
8163 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8164 		return -EOPNOTSUPP;
8165 
8166 	rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8167 	if (rc)
8168 		return rc;
8169 
8170 	req->flags = flags;
8171 	req->port_id = cpu_to_le16(pf->port_id);
8172 	req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8173 					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
8174 	req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8175 	return hwrm_req_send(bp, req);
8176 }
8177 
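/* DMA the extended port statistics into the rx/tx ext stats buffers
 * and record how many 64-bit counters the firmware returned.  When the
 * TX block extends into the PFC duration counters, also query the
 * priority to CoS queue mapping so per-priority stats can be reported.
 */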
8178 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8179 {
8180 	struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8181 	struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8182 	struct hwrm_port_qstats_ext_output *resp_qs;
8183 	struct hwrm_port_qstats_ext_input *req_qs;
8184 	struct bnxt_pf_info *pf = &bp->pf;
8185 	u32 tx_stat_size;
8186 	int rc;
8187 
8188 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8189 		return 0;
8190 
8191 	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8192 		return -EOPNOTSUPP;
8193 
8194 	rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8195 	if (rc)
8196 		return rc;
8197 
8198 	req_qs->flags = flags;
8199 	req_qs->port_id = cpu_to_le16(pf->port_id);
8200 	req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8201 	req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8202 	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8203 		       sizeof(struct tx_port_stats_ext) : 0;
8204 	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8205 	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8206 	resp_qs = hwrm_req_hold(bp, req_qs);
8207 	rc = hwrm_req_send(bp, req_qs);
8208 	if (!rc) {
8209 		bp->fw_rx_stats_ext_size =
8210 			le16_to_cpu(resp_qs->rx_stat_size) / 8;
8211 		bp->fw_tx_stats_ext_size = tx_stat_size ?
8212 			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8213 	} else {
8214 		bp->fw_rx_stats_ext_size = 0;
8215 		bp->fw_tx_stats_ext_size = 0;
8216 	}
8217 	hwrm_req_drop(bp, req_qs);
8218 
8219 	if (flags)
8220 		return rc;
8221 
8222 	if (bp->fw_tx_stats_ext_size <=
8223 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8224 		bp->pri2cos_valid = 0;
8225 		return rc;
8226 	}
8227 
8228 	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8229 	if (rc)
8230 		return rc;
8231 
8232 	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8233 
8234 	resp_qc = hwrm_req_hold(bp, req_qc);
8235 	rc = hwrm_req_send(bp, req_qc);
8236 	if (!rc) {
8237 		u8 *pri2cos;
8238 		int i, j;
8239 
8240 		pri2cos = &resp_qc->pri0_cos_queue_id;
8241 		for (i = 0; i < 8; i++) {
8242 			u8 queue_id = pri2cos[i];
8243 			u8 queue_idx;
8244 
8245 			/* Per-port queue IDs start at 0, 10, 20, etc. */
8246 			queue_idx = queue_id % 10;
8247 			if (queue_idx > BNXT_MAX_QUEUE) {
8248 				bp->pri2cos_valid = false;
8249 				hwrm_req_drop(bp, req_qc);
8250 				return rc;
8251 			}
8252 			for (j = 0; j < bp->max_q; j++) {
8253 				if (bp->q_ids[j] == queue_id)
8254 					bp->pri2cos_idx[i] = queue_idx;
8255 			}
8256 		}
8257 		bp->pri2cos_valid = true;
8258 	}
8259 	hwrm_req_drop(bp, req_qc);
8260 
8261 	return rc;
8262 }
8263 
8264 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8265 {
8266 	bnxt_hwrm_tunnel_dst_port_free(bp,
8267 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8268 	bnxt_hwrm_tunnel_dst_port_free(bp,
8269 		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8270 }
8271 
8272 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8273 {
8274 	int rc, i;
8275 	u32 tpa_flags = 0;
8276 
8277 	if (set_tpa)
8278 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
8279 	else if (BNXT_NO_FW_ACCESS(bp))
8280 		return 0;
8281 	for (i = 0; i < bp->nr_vnics; i++) {
8282 		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8283 		if (rc) {
8284 			netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
8285 				   i, rc);
8286 			return rc;
8287 		}
8288 	}
8289 	return 0;
8290 }
8291 
8292 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8293 {
8294 	int i;
8295 
8296 	for (i = 0; i < bp->nr_vnics; i++)
8297 		bnxt_hwrm_vnic_set_rss(bp, i, false);
8298 }
8299 
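/* Tear down VNIC state in roughly the reverse order of setup: filters
 * first, then RSS and its contexts, then TPA, then the VNICs
 * themselves.  On P5 chips the RSS contexts are freed after the VNICs.
 */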
8300 static void bnxt_clear_vnic(struct bnxt *bp)
8301 {
8302 	if (!bp->vnic_info)
8303 		return;
8304 
8305 	bnxt_hwrm_clear_vnic_filter(bp);
8306 	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8307 		/* clear all RSS settings before freeing the vnic ctx */
8308 		bnxt_hwrm_clear_vnic_rss(bp);
8309 		bnxt_hwrm_vnic_ctx_free(bp);
8310 	}
8311 	/* before freeing the vnic, undo the vnic TPA settings */
8312 	if (bp->flags & BNXT_FLAG_TPA)
8313 		bnxt_set_tpa(bp, false);
8314 	bnxt_hwrm_vnic_free(bp);
8315 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8316 		bnxt_hwrm_vnic_ctx_free(bp);
8317 }
8318 
8319 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8320 				    bool irq_re_init)
8321 {
8322 	bnxt_clear_vnic(bp);
8323 	bnxt_hwrm_ring_free(bp, close_path);
8324 	bnxt_hwrm_ring_grp_free(bp);
8325 	if (irq_re_init) {
8326 		bnxt_hwrm_stat_ctx_free(bp);
8327 		bnxt_hwrm_free_tunnel_ports(bp);
8328 	}
8329 }
8330 
8331 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8332 {
8333 	struct hwrm_func_cfg_input *req;
8334 	u8 evb_mode;
8335 	int rc;
8336 
8337 	if (br_mode == BRIDGE_MODE_VEB)
8338 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8339 	else if (br_mode == BRIDGE_MODE_VEPA)
8340 		evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8341 	else
8342 		return -EINVAL;
8343 
8344 	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8345 	if (rc)
8346 		return rc;
8347 
8348 	req->fid = cpu_to_le16(0xffff);
8349 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8350 	req->evb_mode = evb_mode;
8351 	return hwrm_req_send(bp, req);
8352 }
8353 
8354 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8355 {
8356 	struct hwrm_func_cfg_input *req;
8357 	int rc;
8358 
8359 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8360 		return 0;
8361 
8362 	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8363 	if (rc)
8364 		return rc;
8365 
8366 	req->fid = cpu_to_le16(0xffff);
8367 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8368 	req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8369 	if (size == 128)
8370 		req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8371 
8372 	return hwrm_req_send(bp, req);
8373 }
8374 
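/* Bring up one VNIC on pre-P5 chips: allocate its RSS context(s)
 * (skipped for VNICs flagged BNXT_VNIC_RFS_NEW_RSS_FLAG), configure
 * the default ring group, enable RSS hashing and, when aggregation
 * rings are in use, header-data split.
 */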
8375 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8376 {
8377 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8378 	int rc;
8379 
8380 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8381 		goto skip_rss_ctx;
8382 
8383 	/* allocate context for vnic */
8384 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8385 	if (rc) {
8386 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8387 			   vnic_id, rc);
8388 		goto vnic_setup_err;
8389 	}
8390 	bp->rsscos_nr_ctxs++;
8391 
8392 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8393 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8394 		if (rc) {
8395 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8396 				   vnic_id, rc);
8397 			goto vnic_setup_err;
8398 		}
8399 		bp->rsscos_nr_ctxs++;
8400 	}
8401 
8402 skip_rss_ctx:
8403 	/* configure default vnic, ring grp */
8404 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8405 	if (rc) {
8406 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8407 			   vnic_id, rc);
8408 		goto vnic_setup_err;
8409 	}
8410 
8411 	/* Enable RSS hashing on vnic */
8412 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8413 	if (rc) {
8414 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8415 			   vnic_id, rc);
8416 		goto vnic_setup_err;
8417 	}
8418 
8419 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8420 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8421 		if (rc) {
8422 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8423 				   vnic_id, rc);
8424 		}
8425 	}
8426 
8427 vnic_setup_err:
8428 	return rc;
8429 }
8430 
8431 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8432 {
8433 	int rc, i, nr_ctxs;
8434 
8435 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8436 	for (i = 0; i < nr_ctxs; i++) {
8437 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8438 		if (rc) {
8439 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8440 				   vnic_id, i, rc);
8441 			break;
8442 		}
8443 		bp->rsscos_nr_ctxs++;
8444 	}
8445 	if (i < nr_ctxs)
8446 		return -ENOMEM;
8447 
8448 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8449 	if (rc) {
8450 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8451 			   vnic_id, rc);
8452 		return rc;
8453 	}
8454 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8455 	if (rc) {
8456 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8457 			   vnic_id, rc);
8458 		return rc;
8459 	}
8460 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8461 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8462 		if (rc) {
8463 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8464 				   vnic_id, rc);
8465 		}
8466 	}
8467 	return rc;
8468 }
8469 
8470 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8471 {
8472 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8473 		return __bnxt_setup_vnic_p5(bp, vnic_id);
8474 	else
8475 		return __bnxt_setup_vnic(bp, vnic_id);
8476 }
8477 
8478 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8479 {
8480 #ifdef CONFIG_RFS_ACCEL
8481 	int i, rc = 0;
8482 
8483 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8484 		return 0;
8485 
8486 	for (i = 0; i < bp->rx_nr_rings; i++) {
8487 		struct bnxt_vnic_info *vnic;
8488 		u16 vnic_id = i + 1;
8489 		u16 ring_id = i;
8490 
8491 		if (vnic_id >= bp->nr_vnics)
8492 			break;
8493 
8494 		vnic = &bp->vnic_info[vnic_id];
8495 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
8496 		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8497 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8498 		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8499 		if (rc) {
8500 			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8501 				   vnic_id, rc);
8502 			break;
8503 		}
8504 		rc = bnxt_setup_vnic(bp, vnic_id);
8505 		if (rc)
8506 			break;
8507 	}
8508 	return rc;
8509 #else
8510 	return 0;
8511 #endif
8512 }
8513 
8514 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8515 static bool bnxt_promisc_ok(struct bnxt *bp)
8516 {
8517 #ifdef CONFIG_BNXT_SRIOV
8518 	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8519 		return false;
8520 #endif
8521 	return true;
8522 }
8523 
8524 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8525 {
8526 	int rc;
8527 
8528 	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8529 	if (rc) {
8530 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8531 			   rc);
8532 		return rc;
8533 	}
8534 
8535 	rc = bnxt_hwrm_vnic_cfg(bp, 1);
8536 	if (rc) {
8537 		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8538 			   rc);
8539 		return rc;
8540 	}
8541 	return rc;
8542 }
8543 
8544 static int bnxt_cfg_rx_mode(struct bnxt *);
8545 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8546 
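/* Program the chip after ring and buffer allocation: stat contexts (on
 * IRQ re-init), rings and ring groups, the default VNIC 0 with its MAC
 * filter and RX mask, optional RFS VNICs and TPA, and interrupt
 * coalescing.  Everything is unwound on failure.
 */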
8547 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8548 {
8549 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8550 	int rc = 0;
8551 	unsigned int rx_nr_rings = bp->rx_nr_rings;
8552 
8553 	if (irq_re_init) {
8554 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
8555 		if (rc) {
8556 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8557 				   rc);
8558 			goto err_out;
8559 		}
8560 	}
8561 
8562 	rc = bnxt_hwrm_ring_alloc(bp);
8563 	if (rc) {
8564 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8565 		goto err_out;
8566 	}
8567 
8568 	rc = bnxt_hwrm_ring_grp_alloc(bp);
8569 	if (rc) {
8570 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8571 		goto err_out;
8572 	}
8573 
8574 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8575 		rx_nr_rings--;
8576 
8577 	/* default vnic 0 */
8578 	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8579 	if (rc) {
8580 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8581 		goto err_out;
8582 	}
8583 
8584 	rc = bnxt_setup_vnic(bp, 0);
8585 	if (rc)
8586 		goto err_out;
8587 
8588 	if (bp->flags & BNXT_FLAG_RFS) {
8589 		rc = bnxt_alloc_rfs_vnics(bp);
8590 		if (rc)
8591 			goto err_out;
8592 	}
8593 
8594 	if (bp->flags & BNXT_FLAG_TPA) {
8595 		rc = bnxt_set_tpa(bp, true);
8596 		if (rc)
8597 			goto err_out;
8598 	}
8599 
8600 	if (BNXT_VF(bp))
8601 		bnxt_update_vf_mac(bp);
8602 
8603 	/* Filter for default vnic 0 */
8604 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8605 	if (rc) {
8606 		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8607 		goto err_out;
8608 	}
8609 	vnic->uc_filter_count = 1;
8610 
8611 	vnic->rx_mask = 0;
8612 	if (bp->dev->flags & IFF_BROADCAST)
8613 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8614 
8615 	if (bp->dev->flags & IFF_PROMISC)
8616 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8617 
8618 	if (bp->dev->flags & IFF_ALLMULTI) {
8619 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8620 		vnic->mc_list_count = 0;
8621 	} else {
8622 		u32 mask = 0;
8623 
8624 		bnxt_mc_list_updated(bp, &mask);
8625 		vnic->rx_mask |= mask;
8626 	}
8627 
8628 	rc = bnxt_cfg_rx_mode(bp);
8629 	if (rc)
8630 		goto err_out;
8631 
8632 	rc = bnxt_hwrm_set_coal(bp);
8633 	if (rc)
8634 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8635 				rc);
8636 
8637 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8638 		rc = bnxt_setup_nitroa0_vnic(bp);
8639 		if (rc)
8640 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8641 				   rc);
8642 	}
8643 
8644 	if (BNXT_VF(bp)) {
8645 		bnxt_hwrm_func_qcfg(bp);
8646 		netdev_update_features(bp->dev);
8647 	}
8648 
8649 	return 0;
8650 
8651 err_out:
8652 	bnxt_hwrm_resource_free(bp, 0, true);
8653 
8654 	return rc;
8655 }
8656 
8657 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8658 {
8659 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8660 	return 0;
8661 }
8662 
8663 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8664 {
8665 	bnxt_init_cp_rings(bp);
8666 	bnxt_init_rx_rings(bp);
8667 	bnxt_init_tx_rings(bp);
8668 	bnxt_init_ring_grps(bp, irq_re_init);
8669 	bnxt_init_vnics(bp);
8670 
8671 	return bnxt_init_chip(bp, irq_re_init);
8672 }
8673 
8674 static int bnxt_set_real_num_queues(struct bnxt *bp)
8675 {
8676 	int rc;
8677 	struct net_device *dev = bp->dev;
8678 
8679 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8680 					  bp->tx_nr_rings_xdp);
8681 	if (rc)
8682 		return rc;
8683 
8684 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8685 	if (rc)
8686 		return rc;
8687 
8688 #ifdef CONFIG_RFS_ACCEL
8689 	if (bp->flags & BNXT_FLAG_RFS)
8690 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8691 #endif
8692 
8693 	return rc;
8694 }
8695 
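/* Trim the requested RX and TX ring counts to fit within @max.  With
 * shared rings each count is simply capped at @max; otherwise rings
 * are taken from the larger of the two counts until rx + tx fits,
 * always keeping at least one ring of each type.
 */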
8696 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8697 			   bool shared)
8698 {
8699 	int _rx = *rx, _tx = *tx;
8700 
8701 	if (shared) {
8702 		*rx = min_t(int, _rx, max);
8703 		*tx = min_t(int, _tx, max);
8704 	} else {
8705 		if (max < 2)
8706 			return -ENOMEM;
8707 
8708 		while (_rx + _tx > max) {
8709 			if (_rx > _tx && _rx > 1)
8710 				_rx--;
8711 			else if (_tx > 1)
8712 				_tx--;
8713 		}
8714 		*rx = _rx;
8715 		*tx = _tx;
8716 	}
8717 	return 0;
8718 }
8719 
8720 static void bnxt_setup_msix(struct bnxt *bp)
8721 {
8722 	const int len = sizeof(bp->irq_tbl[0].name);
8723 	struct net_device *dev = bp->dev;
8724 	int tcs, i;
8725 
8726 	tcs = netdev_get_num_tc(dev);
8727 	if (tcs) {
8728 		int off, count;
8729 
8730 		for (i = 0; i < tcs; i++) {
8731 			count = bp->tx_nr_rings_per_tc;
8732 			off = i * count;
8733 			netdev_set_tc_queue(dev, i, count, off);
8734 		}
8735 	}
8736 
8737 	for (i = 0; i < bp->cp_nr_rings; i++) {
8738 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8739 		char *attr;
8740 
8741 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8742 			attr = "TxRx";
8743 		else if (i < bp->rx_nr_rings)
8744 			attr = "rx";
8745 		else
8746 			attr = "tx";
8747 
8748 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8749 			 attr, i);
8750 		bp->irq_tbl[map_idx].handler = bnxt_msix;
8751 	}
8752 }
8753 
8754 static void bnxt_setup_inta(struct bnxt *bp)
8755 {
8756 	const int len = sizeof(bp->irq_tbl[0].name);
8757 
8758 	if (netdev_get_num_tc(bp->dev))
8759 		netdev_reset_tc(bp->dev);
8760 
8761 	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8762 		 0);
8763 	bp->irq_tbl[0].handler = bnxt_inta;
8764 }
8765 
8766 static int bnxt_init_int_mode(struct bnxt *bp);
8767 
8768 static int bnxt_setup_int_mode(struct bnxt *bp)
8769 {
8770 	int rc;
8771 
8772 	if (!bp->irq_tbl) {
8773 		rc = bnxt_init_int_mode(bp);
8774 		if (rc || !bp->irq_tbl)
8775 			return rc ?: -ENODEV;
8776 	}
8777 
8778 	if (bp->flags & BNXT_FLAG_USING_MSIX)
8779 		bnxt_setup_msix(bp);
8780 	else
8781 		bnxt_setup_inta(bp);
8782 
8783 	rc = bnxt_set_real_num_queues(bp);
8784 	return rc;
8785 }
8786 
8787 #ifdef CONFIG_RFS_ACCEL
8788 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8789 {
8790 	return bp->hw_resc.max_rsscos_ctxs;
8791 }
8792 
8793 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8794 {
8795 	return bp->hw_resc.max_vnics;
8796 }
8797 #endif
8798 
8799 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8800 {
8801 	return bp->hw_resc.max_stat_ctxs;
8802 }
8803 
8804 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8805 {
8806 	return bp->hw_resc.max_cp_rings;
8807 }
8808 
8809 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8810 {
8811 	unsigned int cp = bp->hw_resc.max_cp_rings;
8812 
8813 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8814 		cp -= bnxt_get_ulp_msix_num(bp);
8815 
8816 	return cp;
8817 }
8818 
8819 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8820 {
8821 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8822 
8823 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8824 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8825 
8826 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8827 }
8828 
8829 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8830 {
8831 	bp->hw_resc.max_irqs = max_irqs;
8832 }
8833 
8834 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8835 {
8836 	unsigned int cp;
8837 
8838 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
8839 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8840 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8841 	else
8842 		return cp - bp->cp_nr_rings;
8843 }
8844 
8845 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8846 {
8847 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8848 }
8849 
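/* Return how many MSI-X vectors can still be given to a ULP such as
 * the RoCE driver.  Without the new resource manager this is whatever
 * is left over from the vectors already enabled; with it, the request
 * is additionally bounded by the function's maximum IRQ count.
 */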
8850 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8851 {
8852 	int max_cp = bnxt_get_max_func_cp_rings(bp);
8853 	int max_irq = bnxt_get_max_func_irqs(bp);
8854 	int total_req = bp->cp_nr_rings + num;
8855 	int max_idx, avail_msix;
8856 
8857 	max_idx = bp->total_irqs;
8858 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8859 		max_idx = min_t(int, bp->total_irqs, max_cp);
8860 	avail_msix = max_idx - bp->cp_nr_rings;
8861 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8862 		return avail_msix;
8863 
8864 	if (max_irq < total_req) {
8865 		num = max_irq - bp->cp_nr_rings;
8866 		if (num <= 0)
8867 			return 0;
8868 	}
8869 	return num;
8870 }
8871 
8872 static int bnxt_get_num_msix(struct bnxt *bp)
8873 {
8874 	if (!BNXT_NEW_RM(bp))
8875 		return bnxt_get_max_func_irqs(bp);
8876 
8877 	return bnxt_nq_rings_in_use(bp);
8878 }
8879 
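/* Enable MSI-X: request one vector per completion ring in use
 * (including any reserved for ULPs), accept however many the PCI core
 * can grant, and trim the RX/TX ring counts to match the vectors
 * actually allocated.
 */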
8880 static int bnxt_init_msix(struct bnxt *bp)
8881 {
8882 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8883 	struct msix_entry *msix_ent;
8884 
8885 	total_vecs = bnxt_get_num_msix(bp);
8886 	max = bnxt_get_max_func_irqs(bp);
8887 	if (total_vecs > max)
8888 		total_vecs = max;
8889 
8890 	if (!total_vecs)
8891 		return 0;
8892 
8893 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8894 	if (!msix_ent)
8895 		return -ENOMEM;
8896 
8897 	for (i = 0; i < total_vecs; i++) {
8898 		msix_ent[i].entry = i;
8899 		msix_ent[i].vector = 0;
8900 	}
8901 
8902 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8903 		min = 2;
8904 
8905 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8906 	ulp_msix = bnxt_get_ulp_msix_num(bp);
8907 	if (total_vecs < 0 || total_vecs < ulp_msix) {
8908 		rc = -ENODEV;
8909 		goto msix_setup_exit;
8910 	}
8911 
8912 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8913 	if (bp->irq_tbl) {
8914 		for (i = 0; i < total_vecs; i++)
8915 			bp->irq_tbl[i].vector = msix_ent[i].vector;
8916 
8917 		bp->total_irqs = total_vecs;
8918 		/* Trim the rings based on the number of vectors allocated */
8919 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8920 				     total_vecs - ulp_msix, min == 1);
8921 		if (rc)
8922 			goto msix_setup_exit;
8923 
8924 		bp->cp_nr_rings = (min == 1) ?
8925 				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8926 				  bp->tx_nr_rings + bp->rx_nr_rings;
8927 
8928 	} else {
8929 		rc = -ENOMEM;
8930 		goto msix_setup_exit;
8931 	}
8932 	bp->flags |= BNXT_FLAG_USING_MSIX;
8933 	kfree(msix_ent);
8934 	return 0;
8935 
8936 msix_setup_exit:
8937 	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8938 	kfree(bp->irq_tbl);
8939 	bp->irq_tbl = NULL;
8940 	pci_disable_msix(bp->pdev);
8941 	kfree(msix_ent);
8942 	return rc;
8943 }
8944 
8945 static int bnxt_init_inta(struct bnxt *bp)
8946 {
8947 	bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8948 	if (!bp->irq_tbl)
8949 		return -ENOMEM;
8950 
8951 	bp->total_irqs = 1;
8952 	bp->rx_nr_rings = 1;
8953 	bp->tx_nr_rings = 1;
8954 	bp->cp_nr_rings = 1;
8955 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
8956 	bp->irq_tbl[0].vector = bp->pdev->irq;
8957 	return 0;
8958 }
8959 
8960 static int bnxt_init_int_mode(struct bnxt *bp)
8961 {
8962 	int rc = -ENODEV;
8963 
8964 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
8965 		rc = bnxt_init_msix(bp);
8966 
8967 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8968 		/* fallback to INTA */
8969 		rc = bnxt_init_inta(bp);
8970 	}
8971 	return rc;
8972 }
8973 
8974 static void bnxt_clear_int_mode(struct bnxt *bp)
8975 {
8976 	if (bp->flags & BNXT_FLAG_USING_MSIX)
8977 		pci_disable_msix(bp->pdev);
8978 
8979 	kfree(bp->irq_tbl);
8980 	bp->irq_tbl = NULL;
8981 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
8982 }
8983 
8984 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8985 {
8986 	int tcs = netdev_get_num_tc(bp->dev);
8987 	bool irq_cleared = false;
8988 	int rc;
8989 
8990 	if (!bnxt_need_reserve_rings(bp))
8991 		return 0;
8992 
8993 	if (irq_re_init && BNXT_NEW_RM(bp) &&
8994 	    bnxt_get_num_msix(bp) != bp->total_irqs) {
8995 		bnxt_ulp_irq_stop(bp);
8996 		bnxt_clear_int_mode(bp);
8997 		irq_cleared = true;
8998 	}
8999 	rc = __bnxt_reserve_rings(bp);
9000 	if (irq_cleared) {
9001 		if (!rc)
9002 			rc = bnxt_init_int_mode(bp);
9003 		bnxt_ulp_irq_restart(bp, rc);
9004 	}
9005 	if (rc) {
9006 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9007 		return rc;
9008 	}
9009 	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9010 		netdev_err(bp->dev, "tx ring reservation failure\n");
9011 		netdev_reset_tc(bp->dev);
9012 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9013 		return -ENOMEM;
9014 	}
9015 	return 0;
9016 }
9017 
9018 static void bnxt_free_irq(struct bnxt *bp)
9019 {
9020 	struct bnxt_irq *irq;
9021 	int i;
9022 
9023 #ifdef CONFIG_RFS_ACCEL
9024 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9025 	bp->dev->rx_cpu_rmap = NULL;
9026 #endif
9027 	if (!bp->irq_tbl || !bp->bnapi)
9028 		return;
9029 
9030 	for (i = 0; i < bp->cp_nr_rings; i++) {
9031 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9032 
9033 		irq = &bp->irq_tbl[map_idx];
9034 		if (irq->requested) {
9035 			if (irq->have_cpumask) {
9036 				irq_set_affinity_hint(irq->vector, NULL);
9037 				free_cpumask_var(irq->cpu_mask);
9038 				irq->have_cpumask = 0;
9039 			}
9040 			free_irq(irq->vector, bp->bnapi[i]);
9041 		}
9042 
9043 		irq->requested = 0;
9044 	}
9045 }
9046 
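/* Request one IRQ per completion ring, add RX ring vectors to the
 * aRFS CPU rmap, and spread the default affinity hints across the
 * device's local NUMA node.
 */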
9047 static int bnxt_request_irq(struct bnxt *bp)
9048 {
9049 	int i, j, rc = 0;
9050 	unsigned long flags = 0;
9051 #ifdef CONFIG_RFS_ACCEL
9052 	struct cpu_rmap *rmap;
9053 #endif
9054 
9055 	rc = bnxt_setup_int_mode(bp);
9056 	if (rc) {
9057 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9058 			   rc);
9059 		return rc;
9060 	}
9061 #ifdef CONFIG_RFS_ACCEL
9062 	rmap = bp->dev->rx_cpu_rmap;
9063 #endif
9064 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9065 		flags = IRQF_SHARED;
9066 
9067 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9068 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9069 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9070 
9071 #ifdef CONFIG_RFS_ACCEL
9072 		if (rmap && bp->bnapi[i]->rx_ring) {
9073 			rc = irq_cpu_rmap_add(rmap, irq->vector);
9074 			if (rc)
9075 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9076 					    j);
9077 			j++;
9078 		}
9079 #endif
9080 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9081 				 bp->bnapi[i]);
9082 		if (rc)
9083 			break;
9084 
9085 		irq->requested = 1;
9086 
9087 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9088 			int numa_node = dev_to_node(&bp->pdev->dev);
9089 
9090 			irq->have_cpumask = 1;
9091 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9092 					irq->cpu_mask);
9093 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9094 			if (rc) {
9095 				netdev_warn(bp->dev,
9096 					    "Set affinity failed, IRQ = %d\n",
9097 					    irq->vector);
9098 				break;
9099 			}
9100 		}
9101 	}
9102 	return rc;
9103 }
9104 
9105 static void bnxt_del_napi(struct bnxt *bp)
9106 {
9107 	int i;
9108 
9109 	if (!bp->bnapi)
9110 		return;
9111 
9112 	for (i = 0; i < bp->cp_nr_rings; i++) {
9113 		struct bnxt_napi *bnapi = bp->bnapi[i];
9114 
9115 		__netif_napi_del(&bnapi->napi);
9116 	}
9117 	/* Since we called __netif_napi_del(), we need to respect
9118 	 * an RCU grace period before freeing the napi structures.
9119 	 */
9120 	synchronize_net();
9121 }
9122 
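/* Register a NAPI instance for each completion ring, selecting the
 * poll handler that matches the chip type and interrupt mode.
 */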
9123 static void bnxt_init_napi(struct bnxt *bp)
9124 {
9125 	int i;
9126 	unsigned int cp_nr_rings = bp->cp_nr_rings;
9127 	struct bnxt_napi *bnapi;
9128 
9129 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
9130 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9131 
9132 		if (bp->flags & BNXT_FLAG_CHIP_P5)
9133 			poll_fn = bnxt_poll_p5;
9134 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9135 			cp_nr_rings--;
9136 		for (i = 0; i < cp_nr_rings; i++) {
9137 			bnapi = bp->bnapi[i];
9138 			netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9139 		}
9140 		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9141 			bnapi = bp->bnapi[cp_nr_rings];
9142 			netif_napi_add(bp->dev, &bnapi->napi,
9143 				       bnxt_poll_nitroa0, 64);
9144 		}
9145 	} else {
9146 		bnapi = bp->bnapi[0];
9147 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9148 	}
9149 }
9150 
9151 static void bnxt_disable_napi(struct bnxt *bp)
9152 {
9153 	int i;
9154 
9155 	if (!bp->bnapi ||
9156 	    test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9157 		return;
9158 
9159 	for (i = 0; i < bp->cp_nr_rings; i++) {
9160 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9161 
9162 		napi_disable(&bp->bnapi[i]->napi);
9163 		if (bp->bnapi[i]->rx_ring)
9164 			cancel_work_sync(&cpr->dim.work);
9165 	}
9166 }
9167 
9168 static void bnxt_enable_napi(struct bnxt *bp)
9169 {
9170 	int i;
9171 
9172 	clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9173 	for (i = 0; i < bp->cp_nr_rings; i++) {
9174 		struct bnxt_napi *bnapi = bp->bnapi[i];
9175 		struct bnxt_cp_ring_info *cpr;
9176 
9177 		cpr = &bnapi->cp_ring;
9178 		if (bnapi->in_reset)
9179 			cpr->sw_stats.rx.rx_resets++;
9180 		bnapi->in_reset = false;
9181 
9182 		if (bnapi->rx_ring) {
9183 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9184 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9185 		}
9186 		napi_enable(&bnapi->napi);
9187 	}
9188 }
9189 
9190 void bnxt_tx_disable(struct bnxt *bp)
9191 {
9192 	int i;
9193 	struct bnxt_tx_ring_info *txr;
9194 
9195 	if (bp->tx_ring) {
9196 		for (i = 0; i < bp->tx_nr_rings; i++) {
9197 			txr = &bp->tx_ring[i];
9198 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9199 		}
9200 	}
9201 	/* Make sure napi polls see @dev_state change */
9202 	synchronize_net();
9203 	/* Drop carrier first to prevent TX timeout */
9204 	netif_carrier_off(bp->dev);
9205 	/* Stop all TX queues */
9206 	netif_tx_disable(bp->dev);
9207 }
9208 
9209 void bnxt_tx_enable(struct bnxt *bp)
9210 {
9211 	int i;
9212 	struct bnxt_tx_ring_info *txr;
9213 
9214 	for (i = 0; i < bp->tx_nr_rings; i++) {
9215 		txr = &bp->tx_ring[i];
9216 		WRITE_ONCE(txr->dev_state, 0);
9217 	}
9218 	/* Make sure napi polls see @dev_state change */
9219 	synchronize_net();
9220 	netif_tx_wake_all_queues(bp->dev);
9221 	if (bp->link_info.link_up)
9222 		netif_carrier_on(bp->dev);
9223 }
9224 
9225 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9226 {
9227 	u8 active_fec = link_info->active_fec_sig_mode &
9228 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9229 
9230 	switch (active_fec) {
9231 	default:
9232 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9233 		return "None";
9234 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9235 		return "Clause 74 BaseR";
9236 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9237 		return "Clause 91 RS(528,514)";
9238 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9239 		return "Clause 91 RS544_1XN";
9240 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9241 		return "Clause 91 RS(544,514)";
9242 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9243 		return "Clause 91 RS272_1XN";
9244 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9245 		return "Clause 91 RS(272,257)";
9246 	}
9247 }
9248 
9249 static void bnxt_report_link(struct bnxt *bp)
9250 {
9251 	if (bp->link_info.link_up) {
9252 		const char *signal = "";
9253 		const char *flow_ctrl;
9254 		const char *duplex;
9255 		u32 speed;
9256 		u16 fec;
9257 
9258 		netif_carrier_on(bp->dev);
9259 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9260 		if (speed == SPEED_UNKNOWN) {
9261 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9262 			return;
9263 		}
9264 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9265 			duplex = "full";
9266 		else
9267 			duplex = "half";
9268 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9269 			flow_ctrl = "ON - receive & transmit";
9270 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9271 			flow_ctrl = "ON - transmit";
9272 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9273 			flow_ctrl = "ON - receive";
9274 		else
9275 			flow_ctrl = "none";
9276 		if (bp->link_info.phy_qcfg_resp.option_flags &
9277 		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9278 			u8 sig_mode = bp->link_info.active_fec_sig_mode &
9279 				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9280 			switch (sig_mode) {
9281 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9282 				signal = "(NRZ) ";
9283 				break;
9284 			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9285 				signal = "(PAM4) ";
9286 				break;
9287 			default:
9288 				break;
9289 			}
9290 		}
9291 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9292 			    speed, signal, duplex, flow_ctrl);
9293 		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9294 			netdev_info(bp->dev, "EEE is %s\n",
9295 				    bp->eee.eee_active ? "active" :
9296 							 "not active");
9297 		fec = bp->link_info.fec_cfg;
9298 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9299 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9300 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9301 				    bnxt_report_fec(&bp->link_info));
9302 	} else {
9303 		netif_carrier_off(bp->dev);
9304 		netdev_err(bp->dev, "NIC Link is Down\n");
9305 	}
9306 }
9307 
9308 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9309 {
9310 	if (!resp->supported_speeds_auto_mode &&
9311 	    !resp->supported_speeds_force_mode &&
9312 	    !resp->supported_pam4_speeds_auto_mode &&
9313 	    !resp->supported_pam4_speeds_force_mode)
9314 		return true;
9315 	return false;
9316 }
9317 
9318 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9319 {
9320 	struct bnxt_link_info *link_info = &bp->link_info;
9321 	struct hwrm_port_phy_qcaps_output *resp;
9322 	struct hwrm_port_phy_qcaps_input *req;
9323 	int rc = 0;
9324 
9325 	if (bp->hwrm_spec_code < 0x10201)
9326 		return 0;
9327 
9328 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9329 	if (rc)
9330 		return rc;
9331 
9332 	resp = hwrm_req_hold(bp, req);
9333 	rc = hwrm_req_send(bp, req);
9334 	if (rc)
9335 		goto hwrm_phy_qcaps_exit;
9336 
9337 	bp->phy_flags = resp->flags;
9338 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9339 		struct ethtool_eee *eee = &bp->eee;
9340 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9341 
9342 		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9343 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9344 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9345 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9346 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9347 	}
9348 
9349 	if (bp->hwrm_spec_code >= 0x10a01) {
9350 		if (bnxt_phy_qcaps_no_speed(resp)) {
9351 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9352 			netdev_warn(bp->dev, "Ethernet link disabled\n");
9353 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9354 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9355 			netdev_info(bp->dev, "Ethernet link enabled\n");
9356 			/* Phy re-enabled, reprobe the speeds */
9357 			link_info->support_auto_speeds = 0;
9358 			link_info->support_pam4_auto_speeds = 0;
9359 		}
9360 	}
9361 	if (resp->supported_speeds_auto_mode)
9362 		link_info->support_auto_speeds =
9363 			le16_to_cpu(resp->supported_speeds_auto_mode);
9364 	if (resp->supported_pam4_speeds_auto_mode)
9365 		link_info->support_pam4_auto_speeds =
9366 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9367 
9368 	bp->port_count = resp->port_cnt;
9369 
9370 hwrm_phy_qcaps_exit:
9371 	hwrm_req_drop(bp, req);
9372 	return rc;
9373 }
9374 
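/* Return true if @advertising contains any speed bits that are no
 * longer set in @supported.
 */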
9375 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9376 {
9377 	u16 diff = advertising ^ supported;
9378 
9379 	return ((supported | diff) != supported);
9380 }
9381 
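/* Query the current PHY configuration from the firmware and cache it
 * in the link_info structure.  If @chng_link_state is set, update the
 * link state and report any change; also re-validate the advertised
 * speeds against the (possibly changed) supported speeds.
 */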
9382 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9383 {
9384 	struct bnxt_link_info *link_info = &bp->link_info;
9385 	struct hwrm_port_phy_qcfg_output *resp;
9386 	struct hwrm_port_phy_qcfg_input *req;
9387 	u8 link_up = link_info->link_up;
9388 	bool support_changed = false;
9389 	int rc;
9390 
9391 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9392 	if (rc)
9393 		return rc;
9394 
9395 	resp = hwrm_req_hold(bp, req);
9396 	rc = hwrm_req_send(bp, req);
9397 	if (rc) {
9398 		hwrm_req_drop(bp, req);
9399 		return rc;
9400 	}
9401 
9402 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9403 	link_info->phy_link_status = resp->link;
9404 	link_info->duplex = resp->duplex_cfg;
9405 	if (bp->hwrm_spec_code >= 0x10800)
9406 		link_info->duplex = resp->duplex_state;
9407 	link_info->pause = resp->pause;
9408 	link_info->auto_mode = resp->auto_mode;
9409 	link_info->auto_pause_setting = resp->auto_pause;
9410 	link_info->lp_pause = resp->link_partner_adv_pause;
9411 	link_info->force_pause_setting = resp->force_pause;
9412 	link_info->duplex_setting = resp->duplex_cfg;
9413 	if (link_info->phy_link_status == BNXT_LINK_LINK)
9414 		link_info->link_speed = le16_to_cpu(resp->link_speed);
9415 	else
9416 		link_info->link_speed = 0;
9417 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9418 	link_info->force_pam4_link_speed =
9419 		le16_to_cpu(resp->force_pam4_link_speed);
9420 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9421 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9422 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9423 	link_info->auto_pam4_link_speeds =
9424 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
9425 	link_info->lp_auto_link_speeds =
9426 		le16_to_cpu(resp->link_partner_adv_speeds);
9427 	link_info->lp_auto_pam4_link_speeds =
9428 		resp->link_partner_pam4_adv_speeds;
9429 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9430 	link_info->phy_ver[0] = resp->phy_maj;
9431 	link_info->phy_ver[1] = resp->phy_min;
9432 	link_info->phy_ver[2] = resp->phy_bld;
9433 	link_info->media_type = resp->media_type;
9434 	link_info->phy_type = resp->phy_type;
9435 	link_info->transceiver = resp->xcvr_pkg_type;
9436 	link_info->phy_addr = resp->eee_config_phy_addr &
9437 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9438 	link_info->module_status = resp->module_status;
9439 
9440 	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9441 		struct ethtool_eee *eee = &bp->eee;
9442 		u16 fw_speeds;
9443 
9444 		eee->eee_active = 0;
9445 		if (resp->eee_config_phy_addr &
9446 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9447 			eee->eee_active = 1;
9448 			fw_speeds = le16_to_cpu(
9449 				resp->link_partner_adv_eee_link_speed_mask);
9450 			eee->lp_advertised =
9451 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9452 		}
9453 
9454 		/* Pull initial EEE config */
9455 		if (!chng_link_state) {
9456 			if (resp->eee_config_phy_addr &
9457 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9458 				eee->eee_enabled = 1;
9459 
9460 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9461 			eee->advertised =
9462 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9463 
9464 			if (resp->eee_config_phy_addr &
9465 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9466 				__le32 tmr;
9467 
9468 				eee->tx_lpi_enabled = 1;
9469 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9470 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
9471 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9472 			}
9473 		}
9474 	}
9475 
9476 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9477 	if (bp->hwrm_spec_code >= 0x10504) {
9478 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9479 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9480 	}
9481 	/* TODO: need to add more logic to report VF link */
9482 	if (chng_link_state) {
9483 		if (link_info->phy_link_status == BNXT_LINK_LINK)
9484 			link_info->link_up = 1;
9485 		else
9486 			link_info->link_up = 0;
9487 		if (link_up != link_info->link_up)
9488 			bnxt_report_link(bp);
9489 	} else {
9490 		/* always link down if not required to update link state */
9491 		link_info->link_up = 0;
9492 	}
9493 	hwrm_req_drop(bp, req);
9494 
9495 	if (!BNXT_PHY_CFG_ABLE(bp))
9496 		return 0;
9497 
9498 	/* Check if any advertised speeds are no longer supported. The caller
9499 	 * holds the link_lock mutex, so we can modify link_info settings.
9500 	 */
9501 	if (bnxt_support_dropped(link_info->advertising,
9502 				 link_info->support_auto_speeds)) {
9503 		link_info->advertising = link_info->support_auto_speeds;
9504 		support_changed = true;
9505 	}
9506 	if (bnxt_support_dropped(link_info->advertising_pam4,
9507 				 link_info->support_pam4_auto_speeds)) {
9508 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9509 		support_changed = true;
9510 	}
9511 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9512 		bnxt_hwrm_set_link_setting(bp, true, false);
9513 	return 0;
9514 }
9515 
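/* Refresh the link state and warn if an unqualified SFP+ module is
 * detected, including whether TX has been disabled or the module has
 * been shut down as a result.
 */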
9516 static void bnxt_get_port_module_status(struct bnxt *bp)
9517 {
9518 	struct bnxt_link_info *link_info = &bp->link_info;
9519 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9520 	u8 module_status;
9521 
9522 	if (bnxt_update_link(bp, true))
9523 		return;
9524 
9525 	module_status = link_info->module_status;
9526 	switch (module_status) {
9527 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9528 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9529 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9530 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9531 			    bp->pf.port_id);
9532 		if (bp->hwrm_spec_code >= 0x10201) {
9533 			netdev_warn(bp->dev, "Module part number %s\n",
9534 				    resp->phy_vendor_partnumber);
9535 		}
9536 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9537 			netdev_warn(bp->dev, "TX is disabled\n");
9538 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9539 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9540 	}
9541 }
9542 
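/* Fill in the pause/flow-control fields of a PORT_PHY_CFG request,
 * using autonegotiated pause when enabled and forced pause otherwise.
 */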
9543 static void
9544 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9545 {
9546 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9547 		if (bp->hwrm_spec_code >= 0x10201)
9548 			req->auto_pause =
9549 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9550 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9551 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9552 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9553 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9554 		req->enables |=
9555 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9556 	} else {
9557 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9558 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9559 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9560 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9561 		req->enables |=
9562 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9563 		if (bp->hwrm_spec_code >= 0x10201) {
9564 			req->auto_pause = req->force_pause;
9565 			req->enables |= cpu_to_le32(
9566 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9567 		}
9568 	}
9569 }
9570 
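/* Fill in the link speed fields of a PORT_PHY_CFG request: advertise
 * the configured NRZ and PAM4 speed masks when autoneg is enabled,
 * otherwise force the requested link speed.
 */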
9571 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9572 {
9573 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9574 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9575 		if (bp->link_info.advertising) {
9576 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9577 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9578 		}
9579 		if (bp->link_info.advertising_pam4) {
9580 			req->enables |=
9581 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9582 			req->auto_link_pam4_speed_mask =
9583 				cpu_to_le16(bp->link_info.advertising_pam4);
9584 		}
9585 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9586 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9587 	} else {
9588 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9589 		if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9590 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9591 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9592 		} else {
9593 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9594 		}
9595 	}
9596 
9597 	/* tell chimp that the setting takes effect immediately */
9598 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9599 }
9600 
9601 int bnxt_hwrm_set_pause(struct bnxt *bp)
9602 {
9603 	struct hwrm_port_phy_cfg_input *req;
9604 	int rc;
9605 
9606 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9607 	if (rc)
9608 		return rc;
9609 
9610 	bnxt_hwrm_set_pause_common(bp, req);
9611 
9612 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9613 	    bp->link_info.force_link_chng)
9614 		bnxt_hwrm_set_link_common(bp, req);
9615 
9616 	rc = hwrm_req_send(bp, req);
9617 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9618 		/* since changing the pause setting doesn't trigger any link
9619 		 * change event, the driver needs to update the current pause
9620 		 * result upon successful return of the phy_cfg command
9621 		 */
9622 		bp->link_info.pause =
9623 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9624 		bp->link_info.auto_pause_setting = 0;
9625 		if (!bp->link_info.force_link_chng)
9626 			bnxt_report_link(bp);
9627 	}
9628 	bp->link_info.force_link_chng = false;
9629 	return rc;
9630 }
9631 
9632 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9633 			      struct hwrm_port_phy_cfg_input *req)
9634 {
9635 	struct ethtool_eee *eee = &bp->eee;
9636 
9637 	if (eee->eee_enabled) {
9638 		u16 eee_speeds;
9639 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9640 
9641 		if (eee->tx_lpi_enabled)
9642 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9643 		else
9644 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9645 
9646 		req->flags |= cpu_to_le32(flags);
9647 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9648 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9649 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9650 	} else {
9651 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9652 	}
9653 }
9654 
9655 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9656 {
9657 	struct hwrm_port_phy_cfg_input *req;
9658 	int rc;
9659 
9660 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9661 	if (rc)
9662 		return rc;
9663 
9664 	if (set_pause)
9665 		bnxt_hwrm_set_pause_common(bp, req);
9666 
9667 	bnxt_hwrm_set_link_common(bp, req);
9668 
9669 	if (set_eee)
9670 		bnxt_hwrm_set_eee(bp, req);
9671 	return hwrm_req_send(bp, req);
9672 }
9673 
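/* Force the link down on close.  Only done when this PF solely owns
 * the port, and skipped while VFs are active unless the firmware
 * manages the link-down itself.
 */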
9674 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9675 {
9676 	struct hwrm_port_phy_cfg_input *req;
9677 	int rc;
9678 
9679 	if (!BNXT_SINGLE_PF(bp))
9680 		return 0;
9681 
9682 	if (pci_num_vf(bp->pdev) &&
9683 	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9684 		return 0;
9685 
9686 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9687 	if (rc)
9688 		return rc;
9689 
9690 	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9691 	return hwrm_req_send(bp, req);
9692 }
9693 
9694 static int bnxt_fw_init_one(struct bnxt *bp);
9695 
9696 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9697 {
9698 #ifdef CONFIG_TEE_BNXT_FW
9699 	int rc = tee_bnxt_fw_load();
9700 
9701 	if (rc)
9702 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9703 
9704 	return rc;
9705 #else
9706 	netdev_err(bp->dev, "OP-TEE not supported\n");
9707 	return -ENODEV;
9708 #endif
9709 }
9710 
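/* Poll the firmware health register until the firmware is no longer
 * booting or recovering.  If it crashed without a recovery master,
 * attempt a reset via OP-TEE.
 */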
9711 static int bnxt_try_recover_fw(struct bnxt *bp)
9712 {
9713 	if (bp->fw_health && bp->fw_health->status_reliable) {
9714 		int retry = 0, rc;
9715 		u32 sts;
9716 
9717 		do {
9718 			sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9719 			rc = bnxt_hwrm_poll(bp);
9720 			if (!BNXT_FW_IS_BOOTING(sts) &&
9721 			    !BNXT_FW_IS_RECOVERING(sts))
9722 				break;
9723 			retry++;
9724 		} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9725 
9726 		if (!BNXT_FW_IS_HEALTHY(sts)) {
9727 			netdev_err(bp->dev,
9728 				   "Firmware not responding, status: 0x%x\n",
9729 				   sts);
9730 			rc = -ENODEV;
9731 		}
9732 		if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9733 			netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9734 			return bnxt_fw_reset_via_optee(bp);
9735 		}
9736 		return rc;
9737 	}
9738 
9739 	return -ENODEV;
9740 }
9741 
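/* Tell the firmware that the driver interface is going up or down.
 * On the way up, detect and recover from a firmware reset or resource
 * change that may have happened while the interface was down.
 */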
9742 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9743 {
9744 	struct hwrm_func_drv_if_change_output *resp;
9745 	struct hwrm_func_drv_if_change_input *req;
9746 	bool fw_reset = !bp->irq_tbl;
9747 	bool resc_reinit = false;
9748 	int rc, retry = 0;
9749 	u32 flags = 0;
9750 
9751 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9752 		return 0;
9753 
9754 	rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9755 	if (rc)
9756 		return rc;
9757 
9758 	if (up)
9759 		req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9760 	resp = hwrm_req_hold(bp, req);
9761 
9762 	hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
9763 	while (retry < BNXT_FW_IF_RETRY) {
9764 		rc = hwrm_req_send(bp, req);
9765 		if (rc != -EAGAIN)
9766 			break;
9767 
9768 		msleep(50);
9769 		retry++;
9770 	}
9771 
9772 	if (rc == -EAGAIN) {
9773 		hwrm_req_drop(bp, req);
9774 		return rc;
9775 	} else if (!rc) {
9776 		flags = le32_to_cpu(resp->flags);
9777 	} else if (up) {
9778 		rc = bnxt_try_recover_fw(bp);
9779 		fw_reset = true;
9780 	}
9781 	hwrm_req_drop(bp, req);
9782 	if (rc)
9783 		return rc;
9784 
9785 	if (!up) {
9786 		bnxt_inv_fw_health_reg(bp);
9787 		return 0;
9788 	}
9789 
9790 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9791 		resc_reinit = true;
9792 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9793 		fw_reset = true;
9794 	else if (bp->fw_health && !bp->fw_health->status_reliable)
9795 		bnxt_try_map_fw_health_reg(bp);
9796 
9797 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9798 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9799 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9800 		return -ENODEV;
9801 	}
9802 	if (resc_reinit || fw_reset) {
9803 		if (fw_reset) {
9804 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9805 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9806 				bnxt_ulp_stop(bp);
9807 			bnxt_free_ctx_mem(bp);
9808 			kfree(bp->ctx);
9809 			bp->ctx = NULL;
9810 			bnxt_dcb_free(bp);
9811 			rc = bnxt_fw_init_one(bp);
9812 			if (rc) {
9813 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9814 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9815 				return rc;
9816 			}
9817 			bnxt_clear_int_mode(bp);
9818 			rc = bnxt_init_int_mode(bp);
9819 			if (rc) {
9820 				clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9821 				netdev_err(bp->dev, "init int mode failed\n");
9822 				return rc;
9823 			}
9824 		}
9825 		if (BNXT_NEW_RM(bp)) {
9826 			struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9827 
9828 			rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9829 			if (rc)
9830 				netdev_err(bp->dev, "resc_qcaps failed\n");
9831 
9832 			hw_resc->resv_cp_rings = 0;
9833 			hw_resc->resv_stat_ctxs = 0;
9834 			hw_resc->resv_irqs = 0;
9835 			hw_resc->resv_tx_rings = 0;
9836 			hw_resc->resv_rx_rings = 0;
9837 			hw_resc->resv_hw_ring_grps = 0;
9838 			hw_resc->resv_vnics = 0;
9839 			if (!fw_reset) {
9840 				bp->tx_nr_rings = 0;
9841 				bp->rx_nr_rings = 0;
9842 			}
9843 		}
9844 	}
9845 	return rc;
9846 }
9847 
9848 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9849 {
9850 	struct hwrm_port_led_qcaps_output *resp;
9851 	struct hwrm_port_led_qcaps_input *req;
9852 	struct bnxt_pf_info *pf = &bp->pf;
9853 	int rc;
9854 
9855 	bp->num_leds = 0;
9856 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9857 		return 0;
9858 
9859 	rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9860 	if (rc)
9861 		return rc;
9862 
9863 	req->port_id = cpu_to_le16(pf->port_id);
9864 	resp = hwrm_req_hold(bp, req);
9865 	rc = hwrm_req_send(bp, req);
9866 	if (rc) {
9867 		hwrm_req_drop(bp, req);
9868 		return rc;
9869 	}
9870 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9871 		int i;
9872 
9873 		bp->num_leds = resp->num_leds;
9874 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9875 						 bp->num_leds);
9876 		for (i = 0; i < bp->num_leds; i++) {
9877 			struct bnxt_led_info *led = &bp->leds[i];
9878 			__le16 caps = led->led_state_caps;
9879 
9880 			if (!led->led_group_id ||
9881 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
9882 				bp->num_leds = 0;
9883 				break;
9884 			}
9885 		}
9886 	}
9887 	hwrm_req_drop(bp, req);
9888 	return 0;
9889 }
9890 
9891 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9892 {
9893 	struct hwrm_wol_filter_alloc_output *resp;
9894 	struct hwrm_wol_filter_alloc_input *req;
9895 	int rc;
9896 
9897 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9898 	if (rc)
9899 		return rc;
9900 
9901 	req->port_id = cpu_to_le16(bp->pf.port_id);
9902 	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9903 	req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9904 	memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9905 
9906 	resp = hwrm_req_hold(bp, req);
9907 	rc = hwrm_req_send(bp, req);
9908 	if (!rc)
9909 		bp->wol_filter_id = resp->wol_filter_id;
9910 	hwrm_req_drop(bp, req);
9911 	return rc;
9912 }
9913 
9914 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9915 {
9916 	struct hwrm_wol_filter_free_input *req;
9917 	int rc;
9918 
9919 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9920 	if (rc)
9921 		return rc;
9922 
9923 	req->port_id = cpu_to_le16(bp->pf.port_id);
9924 	req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9925 	req->wol_filter_id = bp->wol_filter_id;
9926 
9927 	return hwrm_req_send(bp, req);
9928 }
9929 
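/* Query the WoL filter at @handle and record the filter ID if it is a
 * magic-packet filter.  Returns the next filter handle to query.
 */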
9930 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9931 {
9932 	struct hwrm_wol_filter_qcfg_output *resp;
9933 	struct hwrm_wol_filter_qcfg_input *req;
9934 	u16 next_handle = 0;
9935 	int rc;
9936 
9937 	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9938 	if (rc)
9939 		return rc;
9940 
9941 	req->port_id = cpu_to_le16(bp->pf.port_id);
9942 	req->handle = cpu_to_le16(handle);
9943 	resp = hwrm_req_hold(bp, req);
9944 	rc = hwrm_req_send(bp, req);
9945 	if (!rc) {
9946 		next_handle = le16_to_cpu(resp->next_handle);
9947 		if (next_handle != 0) {
9948 			if (resp->wol_type ==
9949 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9950 				bp->wol = 1;
9951 				bp->wol_filter_id = resp->wol_filter_id;
9952 			}
9953 		}
9954 	}
9955 	hwrm_req_drop(bp, req);
9956 	return next_handle;
9957 }
9958 
9959 static void bnxt_get_wol_settings(struct bnxt *bp)
9960 {
9961 	u16 handle = 0;
9962 
9963 	bp->wol = 0;
9964 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9965 		return;
9966 
9967 	do {
9968 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9969 	} while (handle && handle != 0xffff);
9970 }
9971 
9972 #ifdef CONFIG_BNXT_HWMON
9973 static ssize_t bnxt_show_temp(struct device *dev,
9974 			      struct device_attribute *devattr, char *buf)
9975 {
9976 	struct hwrm_temp_monitor_query_output *resp;
9977 	struct hwrm_temp_monitor_query_input *req;
9978 	struct bnxt *bp = dev_get_drvdata(dev);
9979 	u32 len = 0;
9980 	int rc;
9981 
9982 	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9983 	if (rc)
9984 		return rc;
9985 	resp = hwrm_req_hold(bp, req);
9986 	rc = hwrm_req_send(bp, req);
9987 	if (!rc)
9988 		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9989 	hwrm_req_drop(bp, req);
9990 	if (rc)
9991 		return rc;
9992 	return len;
9993 }
9994 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9995 
9996 static struct attribute *bnxt_attrs[] = {
9997 	&sensor_dev_attr_temp1_input.dev_attr.attr,
9998 	NULL
9999 };
10000 ATTRIBUTE_GROUPS(bnxt);
10001 
10002 static void bnxt_hwmon_close(struct bnxt *bp)
10003 {
10004 	if (bp->hwmon_dev) {
10005 		hwmon_device_unregister(bp->hwmon_dev);
10006 		bp->hwmon_dev = NULL;
10007 	}
10008 }
10009 
10010 static void bnxt_hwmon_open(struct bnxt *bp)
10011 {
10012 	struct hwrm_temp_monitor_query_input *req;
10013 	struct pci_dev *pdev = bp->pdev;
10014 	int rc;
10015 
10016 	rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10017 	if (!rc)
10018 		rc = hwrm_req_send_silent(bp, req);
10019 	if (rc == -EACCES || rc == -EOPNOTSUPP) {
10020 		bnxt_hwmon_close(bp);
10021 		return;
10022 	}
10023 
10024 	if (bp->hwmon_dev)
10025 		return;
10026 
10027 	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10028 							  DRV_MODULE_NAME, bp,
10029 							  bnxt_groups);
10030 	if (IS_ERR(bp->hwmon_dev)) {
10031 		bp->hwmon_dev = NULL;
10032 		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10033 	}
10034 }
10035 #else
10036 static void bnxt_hwmon_close(struct bnxt *bp)
10037 {
10038 }
10039 
10040 static void bnxt_hwmon_open(struct bnxt *bp)
10041 {
10042 }
10043 #endif
10044 
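/* Check the EEE configuration against the current autoneg settings,
 * fixing it up if necessary.  Returns false if it had to be changed.
 */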
10045 static bool bnxt_eee_config_ok(struct bnxt *bp)
10046 {
10047 	struct ethtool_eee *eee = &bp->eee;
10048 	struct bnxt_link_info *link_info = &bp->link_info;
10049 
10050 	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10051 		return true;
10052 
10053 	if (eee->eee_enabled) {
10054 		u32 advertising =
10055 			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10056 
10057 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10058 			eee->eee_enabled = 0;
10059 			return false;
10060 		}
10061 		if (eee->advertised & ~advertising) {
10062 			eee->advertised = advertising & eee->supported;
10063 			return false;
10064 		}
10065 	}
10066 	return true;
10067 }
10068 
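/* Refresh the link state from the firmware and re-apply the requested
 * speed, pause and EEE settings if they differ from the current ones.
 */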
10069 static int bnxt_update_phy_setting(struct bnxt *bp)
10070 {
10071 	int rc;
10072 	bool update_link = false;
10073 	bool update_pause = false;
10074 	bool update_eee = false;
10075 	struct bnxt_link_info *link_info = &bp->link_info;
10076 
10077 	rc = bnxt_update_link(bp, true);
10078 	if (rc) {
10079 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10080 			   rc);
10081 		return rc;
10082 	}
10083 	if (!BNXT_SINGLE_PF(bp))
10084 		return 0;
10085 
10086 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10087 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10088 	    link_info->req_flow_ctrl)
10089 		update_pause = true;
10090 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10091 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
10092 		update_pause = true;
10093 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10094 		if (BNXT_AUTO_MODE(link_info->auto_mode))
10095 			update_link = true;
10096 		if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10097 		    link_info->req_link_speed != link_info->force_link_speed)
10098 			update_link = true;
10099 		else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10100 			 link_info->req_link_speed != link_info->force_pam4_link_speed)
10101 			update_link = true;
10102 		if (link_info->req_duplex != link_info->duplex_setting)
10103 			update_link = true;
10104 	} else {
10105 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10106 			update_link = true;
10107 		if (link_info->advertising != link_info->auto_link_speeds ||
10108 		    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10109 			update_link = true;
10110 	}
10111 
10112 	/* The last close may have shut down the link, so we need to call
10113 	 * PHY_CFG to bring it back up.
10114 	 */
10115 	if (!bp->link_info.link_up)
10116 		update_link = true;
10117 
10118 	if (!bnxt_eee_config_ok(bp))
10119 		update_eee = true;
10120 
10121 	if (update_link)
10122 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10123 	else if (update_pause)
10124 		rc = bnxt_hwrm_set_pause(bp);
10125 	if (rc) {
10126 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10127 			   rc);
10128 		return rc;
10129 	}
10130 
10131 	return rc;
10132 }
10133 
10134 /* Common routine to pre-map certain register blocks to different GRC windows.
10135  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
10136  * in the PF and 3 windows in the VF can be customized to map to different
10137  * register blocks.
10138  */
10139 static void bnxt_preset_reg_win(struct bnxt *bp)
10140 {
10141 	if (BNXT_PF(bp)) {
10142 		/* CAG registers map to GRC window #4 */
10143 		writel(BNXT_CAG_REG_BASE,
10144 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10145 	}
10146 }
10147 
10148 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10149 
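/* Retry full firmware initialization after a previous fatal error so
 * that the next open can proceed.
 */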
10150 static int bnxt_reinit_after_abort(struct bnxt *bp)
10151 {
10152 	int rc;
10153 
10154 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10155 		return -EBUSY;
10156 
10157 	if (bp->dev->reg_state == NETREG_UNREGISTERED)
10158 		return -ENODEV;
10159 
10160 	rc = bnxt_fw_init_one(bp);
10161 	if (!rc) {
10162 		bnxt_clear_int_mode(bp);
10163 		rc = bnxt_init_int_mode(bp);
10164 		if (!rc) {
10165 			clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10166 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10167 		}
10168 	}
10169 	return rc;
10170 }
10171 
10172 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10173 {
10174 	int rc = 0;
10175 
10176 	bnxt_preset_reg_win(bp);
10177 	netif_carrier_off(bp->dev);
10178 	if (irq_re_init) {
10179 		/* Reserve rings now if none were reserved at driver probe. */
10180 		rc = bnxt_init_dflt_ring_mode(bp);
10181 		if (rc) {
10182 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10183 			return rc;
10184 		}
10185 	}
10186 	rc = bnxt_reserve_rings(bp, irq_re_init);
10187 	if (rc)
10188 		return rc;
10189 	if ((bp->flags & BNXT_FLAG_RFS) &&
10190 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10191 		/* disable RFS if falling back to INTA */
10192 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10193 		bp->flags &= ~BNXT_FLAG_RFS;
10194 	}
10195 
10196 	rc = bnxt_alloc_mem(bp, irq_re_init);
10197 	if (rc) {
10198 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10199 		goto open_err_free_mem;
10200 	}
10201 
10202 	if (irq_re_init) {
10203 		bnxt_init_napi(bp);
10204 		rc = bnxt_request_irq(bp);
10205 		if (rc) {
10206 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10207 			goto open_err_irq;
10208 		}
10209 	}
10210 
10211 	rc = bnxt_init_nic(bp, irq_re_init);
10212 	if (rc) {
10213 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10214 		goto open_err_irq;
10215 	}
10216 
10217 	bnxt_enable_napi(bp);
10218 	bnxt_debug_dev_init(bp);
10219 
10220 	if (link_re_init) {
10221 		mutex_lock(&bp->link_lock);
10222 		rc = bnxt_update_phy_setting(bp);
10223 		mutex_unlock(&bp->link_lock);
10224 		if (rc) {
10225 			netdev_warn(bp->dev, "failed to update phy settings\n");
10226 			if (BNXT_SINGLE_PF(bp)) {
10227 				bp->link_info.phy_retry = true;
10228 				bp->link_info.phy_retry_expires =
10229 					jiffies + 5 * HZ;
10230 			}
10231 		}
10232 	}
10233 
10234 	if (irq_re_init)
10235 		udp_tunnel_nic_reset_ntf(bp->dev);
10236 
10237 	set_bit(BNXT_STATE_OPEN, &bp->state);
10238 	bnxt_enable_int(bp);
10239 	/* Enable TX queues */
10240 	bnxt_tx_enable(bp);
10241 	mod_timer(&bp->timer, jiffies + bp->current_interval);
10242 	/* Poll link status and check for SFP+ module status */
10243 	mutex_lock(&bp->link_lock);
10244 	bnxt_get_port_module_status(bp);
10245 	mutex_unlock(&bp->link_lock);
10246 
10247 	/* VF-reps may need to be re-opened after the PF is re-opened */
10248 	if (BNXT_PF(bp))
10249 		bnxt_vf_reps_open(bp);
10250 	return 0;
10251 
10252 open_err_irq:
10253 	bnxt_del_napi(bp);
10254 
10255 open_err_free_mem:
10256 	bnxt_free_skbs(bp);
10257 	bnxt_free_irq(bp);
10258 	bnxt_free_mem(bp, true);
10259 	return rc;
10260 }
10261 
10262 /* rtnl_lock held */
10263 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10264 {
10265 	int rc = 0;
10266 
10267 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10268 		rc = -EIO;
10269 	if (!rc)
10270 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10271 	if (rc) {
10272 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10273 		dev_close(bp->dev);
10274 	}
10275 	return rc;
10276 }
10277 
10278 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10279  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10280  * self tests.
10281  */
10282 int bnxt_half_open_nic(struct bnxt *bp)
10283 {
10284 	int rc = 0;
10285 
10286 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10287 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10288 		rc = -ENODEV;
10289 		goto half_open_err;
10290 	}
10291 
10292 	rc = bnxt_alloc_mem(bp, false);
10293 	if (rc) {
10294 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10295 		goto half_open_err;
10296 	}
10297 	rc = bnxt_init_nic(bp, false);
10298 	if (rc) {
10299 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10300 		goto half_open_err;
10301 	}
10302 	return 0;
10303 
10304 half_open_err:
10305 	bnxt_free_skbs(bp);
10306 	bnxt_free_mem(bp, false);
10307 	dev_close(bp->dev);
10308 	return rc;
10309 }
10310 
10311 /* rtnl_lock held, this call can only be made after a previous successful
10312  * call to bnxt_half_open_nic().
10313  */
10314 void bnxt_half_close_nic(struct bnxt *bp)
10315 {
10316 	bnxt_hwrm_resource_free(bp, false, false);
10317 	bnxt_free_skbs(bp);
10318 	bnxt_free_mem(bp, false);
10319 }
10320 
10321 static void bnxt_reenable_sriov(struct bnxt *bp)
10322 {
10323 	if (BNXT_PF(bp)) {
10324 		struct bnxt_pf_info *pf = &bp->pf;
10325 		int n = pf->active_vfs;
10326 
10327 		if (n)
10328 			bnxt_cfg_hw_sriov(bp, &n, true);
10329 	}
10330 }
10331 
10332 static int bnxt_open(struct net_device *dev)
10333 {
10334 	struct bnxt *bp = netdev_priv(dev);
10335 	int rc;
10336 
10337 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10338 		rc = bnxt_reinit_after_abort(bp);
10339 		if (rc) {
10340 			if (rc == -EBUSY)
10341 				netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10342 			else
10343 				netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10344 			return -ENODEV;
10345 		}
10346 	}
10347 
10348 	rc = bnxt_hwrm_if_change(bp, true);
10349 	if (rc)
10350 		return rc;
10351 
10352 	rc = __bnxt_open_nic(bp, true, true);
10353 	if (rc) {
10354 		bnxt_hwrm_if_change(bp, false);
10355 	} else {
10356 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10357 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10358 				bnxt_ulp_start(bp, 0);
10359 				bnxt_reenable_sriov(bp);
10360 			}
10361 		}
10362 		bnxt_hwmon_open(bp);
10363 	}
10364 
10365 	return rc;
10366 }
10367 
10368 static bool bnxt_drv_busy(struct bnxt *bp)
10369 {
10370 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10371 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
10372 }
10373 
10374 static void bnxt_get_ring_stats(struct bnxt *bp,
10375 				struct rtnl_link_stats64 *stats);
10376 
10377 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10378 			     bool link_re_init)
10379 {
10380 	/* Close the VF-reps before closing PF */
10381 	if (BNXT_PF(bp))
10382 		bnxt_vf_reps_close(bp);
10383 
10384 	/* Change device state to avoid TX queue wake-ups */
10385 	bnxt_tx_disable(bp);
10386 
10387 	clear_bit(BNXT_STATE_OPEN, &bp->state);
10388 	smp_mb__after_atomic();
10389 	while (bnxt_drv_busy(bp))
10390 		msleep(20);
10391 
10392 	/* Flush rings and disable interrupts */
10393 	bnxt_shutdown_nic(bp, irq_re_init);
10394 
10395 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10396 
10397 	bnxt_debug_dev_exit(bp);
10398 	bnxt_disable_napi(bp);
10399 	del_timer_sync(&bp->timer);
10400 	bnxt_free_skbs(bp);
10401 
10402 	/* Save ring stats before shutdown */
10403 	if (bp->bnapi && irq_re_init)
10404 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10405 	if (irq_re_init) {
10406 		bnxt_free_irq(bp);
10407 		bnxt_del_napi(bp);
10408 	}
10409 	bnxt_free_mem(bp, irq_re_init);
10410 }
10411 
10412 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10413 {
10414 	int rc = 0;
10415 
10416 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10417 		/* If we get here, it means firmware reset is in progress
10418 		 * while we are trying to close.  We can safely proceed with
10419 		 * the close because we are holding rtnl_lock().  Some firmware
10420 		 * messages may fail as we proceed to close.  We set the
10421 		 * ABORT_ERR flag here so that the FW reset thread will later
10422 		 * abort when it gets the rtnl_lock() and sees the flag.
10423 		 */
10424 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10425 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10426 	}
10427 
10428 #ifdef CONFIG_BNXT_SRIOV
10429 	if (bp->sriov_cfg) {
10430 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10431 						      !bp->sriov_cfg,
10432 						      BNXT_SRIOV_CFG_WAIT_TMO);
10433 		if (!rc)
10434 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10435 	}
10436 #endif
10437 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
10438 	return rc;
10439 }
10440 
10441 static int bnxt_close(struct net_device *dev)
10442 {
10443 	struct bnxt *bp = netdev_priv(dev);
10444 
10445 	bnxt_hwmon_close(bp);
10446 	bnxt_close_nic(bp, true, true);
10447 	bnxt_hwrm_shutdown_link(bp);
10448 	bnxt_hwrm_if_change(bp, false);
10449 	return 0;
10450 }
10451 
10452 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10453 				   u16 *val)
10454 {
10455 	struct hwrm_port_phy_mdio_read_output *resp;
10456 	struct hwrm_port_phy_mdio_read_input *req;
10457 	int rc;
10458 
10459 	if (bp->hwrm_spec_code < 0x10a00)
10460 		return -EOPNOTSUPP;
10461 
10462 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10463 	if (rc)
10464 		return rc;
10465 
10466 	req->port_id = cpu_to_le16(bp->pf.port_id);
10467 	req->phy_addr = phy_addr;
10468 	req->reg_addr = cpu_to_le16(reg & 0x1f);
10469 	if (mdio_phy_id_is_c45(phy_addr)) {
10470 		req->cl45_mdio = 1;
10471 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
10472 		req->dev_addr = mdio_phy_id_devad(phy_addr);
10473 		req->reg_addr = cpu_to_le16(reg);
10474 	}
10475 
10476 	resp = hwrm_req_hold(bp, req);
10477 	rc = hwrm_req_send(bp, req);
10478 	if (!rc)
10479 		*val = le16_to_cpu(resp->reg_data);
10480 	hwrm_req_drop(bp, req);
10481 	return rc;
10482 }
10483 
10484 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10485 				    u16 val)
10486 {
10487 	struct hwrm_port_phy_mdio_write_input *req;
10488 	int rc;
10489 
10490 	if (bp->hwrm_spec_code < 0x10a00)
10491 		return -EOPNOTSUPP;
10492 
10493 	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10494 	if (rc)
10495 		return rc;
10496 
10497 	req->port_id = cpu_to_le16(bp->pf.port_id);
10498 	req->phy_addr = phy_addr;
10499 	req->reg_addr = cpu_to_le16(reg & 0x1f);
10500 	if (mdio_phy_id_is_c45(phy_addr)) {
10501 		req->cl45_mdio = 1;
10502 		req->phy_addr = mdio_phy_id_prtad(phy_addr);
10503 		req->dev_addr = mdio_phy_id_devad(phy_addr);
10504 		req->reg_addr = cpu_to_le16(reg);
10505 	}
10506 	req->reg_data = cpu_to_le16(val);
10507 
10508 	return hwrm_req_send(bp, req);
10509 }
10510 
10511 /* rtnl_lock held */
10512 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10513 {
10514 	struct mii_ioctl_data *mdio = if_mii(ifr);
10515 	struct bnxt *bp = netdev_priv(dev);
10516 	int rc;
10517 
10518 	switch (cmd) {
10519 	case SIOCGMIIPHY:
10520 		mdio->phy_id = bp->link_info.phy_addr;
10521 
10522 		fallthrough;
10523 	case SIOCGMIIREG: {
10524 		u16 mii_regval = 0;
10525 
10526 		if (!netif_running(dev))
10527 			return -EAGAIN;
10528 
10529 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10530 					     &mii_regval);
10531 		mdio->val_out = mii_regval;
10532 		return rc;
10533 	}
10534 
10535 	case SIOCSMIIREG:
10536 		if (!netif_running(dev))
10537 			return -EAGAIN;
10538 
10539 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10540 						mdio->val_in);
10541 
10542 	case SIOCSHWTSTAMP:
10543 		return bnxt_hwtstamp_set(dev, ifr);
10544 
10545 	case SIOCGHWTSTAMP:
10546 		return bnxt_hwtstamp_get(dev, ifr);
10547 
10548 	default:
10549 		/* do nothing */
10550 		break;
10551 	}
10552 	return -EOPNOTSUPP;
10553 }
10554 
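/* Aggregate the per-completion-ring software counters into the
 * standard rtnl_link_stats64 counters.
 */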
10555 static void bnxt_get_ring_stats(struct bnxt *bp,
10556 				struct rtnl_link_stats64 *stats)
10557 {
10558 	int i;
10559 
10560 	for (i = 0; i < bp->cp_nr_rings; i++) {
10561 		struct bnxt_napi *bnapi = bp->bnapi[i];
10562 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10563 		u64 *sw = cpr->stats.sw_stats;
10564 
10565 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10566 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10567 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10568 
10569 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10570 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10571 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10572 
10573 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10574 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10575 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10576 
10577 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10578 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10579 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10580 
10581 		stats->rx_missed_errors +=
10582 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10583 
10584 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10585 
10586 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10587 
10588 		stats->rx_dropped +=
10589 			cpr->sw_stats.rx.rx_netpoll_discards +
10590 			cpr->sw_stats.rx.rx_oom_discards;
10591 	}
10592 }
10593 
10594 static void bnxt_add_prev_stats(struct bnxt *bp,
10595 				struct rtnl_link_stats64 *stats)
10596 {
10597 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10598 
10599 	stats->rx_packets += prev_stats->rx_packets;
10600 	stats->tx_packets += prev_stats->tx_packets;
10601 	stats->rx_bytes += prev_stats->rx_bytes;
10602 	stats->tx_bytes += prev_stats->tx_bytes;
10603 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
10604 	stats->multicast += prev_stats->multicast;
10605 	stats->rx_dropped += prev_stats->rx_dropped;
10606 	stats->tx_dropped += prev_stats->tx_dropped;
10607 }
10608 
10609 static void
10610 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10611 {
10612 	struct bnxt *bp = netdev_priv(dev);
10613 
10614 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
10615 	/* Make sure bnxt_close_nic() sees that we are reading stats before
10616 	 * we check the BNXT_STATE_OPEN flag.
10617 	 */
10618 	smp_mb__after_atomic();
10619 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10620 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10621 		*stats = bp->net_stats_prev;
10622 		return;
10623 	}
10624 
10625 	bnxt_get_ring_stats(bp, stats);
10626 	bnxt_add_prev_stats(bp, stats);
10627 
10628 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
10629 		u64 *rx = bp->port_stats.sw_stats;
10630 		u64 *tx = bp->port_stats.sw_stats +
10631 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10632 
10633 		stats->rx_crc_errors =
10634 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10635 		stats->rx_frame_errors =
10636 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10637 		stats->rx_length_errors =
10638 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10639 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10640 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10641 		stats->rx_errors =
10642 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10643 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10644 		stats->collisions =
10645 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10646 		stats->tx_fifo_errors =
10647 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10648 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10649 	}
10650 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10651 }
10652 
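/* Copy the netdev multicast list into the default VNIC, switching to
 * all-multicast mode if the list is too long.  Returns true if the
 * list has changed.
 */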
10653 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10654 {
10655 	struct net_device *dev = bp->dev;
10656 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10657 	struct netdev_hw_addr *ha;
10658 	u8 *haddr;
10659 	int mc_count = 0;
10660 	bool update = false;
10661 	int off = 0;
10662 
10663 	netdev_for_each_mc_addr(ha, dev) {
10664 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
10665 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10666 			vnic->mc_list_count = 0;
10667 			return false;
10668 		}
10669 		haddr = ha->addr;
10670 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10671 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10672 			update = true;
10673 		}
10674 		off += ETH_ALEN;
10675 		mc_count++;
10676 	}
10677 	if (mc_count)
10678 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10679 
10680 	if (mc_count != vnic->mc_list_count) {
10681 		vnic->mc_list_count = mc_count;
10682 		update = true;
10683 	}
10684 	return update;
10685 }
10686 
10687 static bool bnxt_uc_list_updated(struct bnxt *bp)
10688 {
10689 	struct net_device *dev = bp->dev;
10690 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10691 	struct netdev_hw_addr *ha;
10692 	int off = 0;
10693 
10694 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10695 		return true;
10696 
10697 	netdev_for_each_uc_addr(ha, dev) {
10698 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10699 			return true;
10700 
10701 		off += ETH_ALEN;
10702 	}
10703 	return false;
10704 }
10705 
10706 static void bnxt_set_rx_mode(struct net_device *dev)
10707 {
10708 	struct bnxt *bp = netdev_priv(dev);
10709 	struct bnxt_vnic_info *vnic;
10710 	bool mc_update = false;
10711 	bool uc_update;
10712 	u32 mask;
10713 
10714 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10715 		return;
10716 
10717 	vnic = &bp->vnic_info[0];
10718 	mask = vnic->rx_mask;
10719 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10720 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10721 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10722 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10723 
10724 	if (dev->flags & IFF_PROMISC)
10725 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10726 
10727 	uc_update = bnxt_uc_list_updated(bp);
10728 
10729 	if (dev->flags & IFF_BROADCAST)
10730 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10731 	if (dev->flags & IFF_ALLMULTI) {
10732 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10733 		vnic->mc_list_count = 0;
10734 	} else {
10735 		mc_update = bnxt_mc_list_updated(bp, &mask);
10736 	}
10737 
10738 	if (mask != vnic->rx_mask || uc_update || mc_update) {
10739 		vnic->rx_mask = mask;
10740 
10741 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10742 		bnxt_queue_sp_work(bp);
10743 	}
10744 }
10745 
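/* Program the unicast filter list into the firmware if it has changed,
 * then update the RX mask, falling back to promiscuous or all-multicast
 * mode if resources are exhausted.
 */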
10746 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10747 {
10748 	struct net_device *dev = bp->dev;
10749 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10750 	struct hwrm_cfa_l2_filter_free_input *req;
10751 	struct netdev_hw_addr *ha;
10752 	int i, off = 0, rc;
10753 	bool uc_update;
10754 
10755 	netif_addr_lock_bh(dev);
10756 	uc_update = bnxt_uc_list_updated(bp);
10757 	netif_addr_unlock_bh(dev);
10758 
10759 	if (!uc_update)
10760 		goto skip_uc;
10761 
10762 	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10763 	if (rc)
10764 		return rc;
10765 	hwrm_req_hold(bp, req);
10766 	for (i = 1; i < vnic->uc_filter_count; i++) {
10767 		req->l2_filter_id = vnic->fw_l2_filter_id[i];
10768 
10769 		rc = hwrm_req_send(bp, req);
10770 	}
10771 	hwrm_req_drop(bp, req);
10772 
10773 	vnic->uc_filter_count = 1;
10774 
10775 	netif_addr_lock_bh(dev);
10776 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10777 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10778 	} else {
10779 		netdev_for_each_uc_addr(ha, dev) {
10780 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10781 			off += ETH_ALEN;
10782 			vnic->uc_filter_count++;
10783 		}
10784 	}
10785 	netif_addr_unlock_bh(dev);
10786 
10787 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10788 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10789 		if (rc) {
10790 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10791 				   rc);
10792 			vnic->uc_filter_count = i;
10793 			return rc;
10794 		}
10795 	}
10796 
10797 skip_uc:
10798 	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10799 	    !bnxt_promisc_ok(bp))
10800 		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10801 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10802 	if (rc && vnic->mc_list_count) {
10803 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10804 			    rc);
10805 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10806 		vnic->mc_list_count = 0;
10807 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10808 	}
10809 	if (rc)
10810 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10811 			   rc);
10812 
10813 	return rc;
10814 }
10815 
10816 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10817 {
10818 #ifdef CONFIG_BNXT_SRIOV
10819 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10820 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10821 
10822 		/* No minimum rings were provisioned by the PF.  Don't
10823 		 * reserve rings by default when the device is down.
10824 		 */
10825 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10826 			return true;
10827 
10828 		if (!netif_running(bp->dev))
10829 			return false;
10830 	}
10831 #endif
10832 	return true;
10833 }
10834 
10835 /* If the chip and firmware support RFS */
10836 static bool bnxt_rfs_supported(struct bnxt *bp)
10837 {
10838 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
10839 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10840 			return true;
10841 		return false;
10842 	}
10843 	/* 212 firmware is broken for aRFS */
10844 	if (BNXT_FW_MAJ(bp) == 212)
10845 		return false;
10846 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10847 		return true;
10848 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10849 		return true;
10850 	return false;
10851 }
10852 
10853 /* If runtime conditions support RFS */
10854 static bool bnxt_rfs_capable(struct bnxt *bp)
10855 {
10856 #ifdef CONFIG_RFS_ACCEL
10857 	int vnics, max_vnics, max_rss_ctxs;
10858 
10859 	if (bp->flags & BNXT_FLAG_CHIP_P5)
10860 		return bnxt_rfs_supported(bp);
10861 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10862 		return false;
10863 
10864 	vnics = 1 + bp->rx_nr_rings;
10865 	max_vnics = bnxt_get_max_func_vnics(bp);
10866 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10867 
10868 	/* RSS contexts not a limiting factor */
10869 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10870 		max_rss_ctxs = max_vnics;
10871 	if (vnics > max_vnics || vnics > max_rss_ctxs) {
10872 		if (bp->rx_nr_rings > 1)
10873 			netdev_warn(bp->dev,
10874 				    "Not enough resources to support NTUPLE filters; only enough for up to %d RX rings\n",
10875 				    min(max_rss_ctxs - 1, max_vnics - 1));
10876 		return false;
10877 	}
10878 
10879 	if (!BNXT_NEW_RM(bp))
10880 		return true;
10881 
10882 	if (vnics == bp->hw_resc.resv_vnics)
10883 		return true;
10884 
10885 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10886 	if (vnics <= bp->hw_resc.resv_vnics)
10887 		return true;
10888 
10889 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10890 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10891 	return false;
10892 #else
10893 	return false;
10894 #endif
10895 }
10896 
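/* .ndo_fix_features handler.  Resolve feature dependencies: NTUPLE needs
 * RFS resources, hardware GRO and LRO are mutually exclusive, and
 * CTAG/STAG RX VLAN acceleration must be toggled together.
 */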
10897 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10898 					   netdev_features_t features)
10899 {
10900 	struct bnxt *bp = netdev_priv(dev);
10901 	netdev_features_t vlan_features;
10902 
10903 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10904 		features &= ~NETIF_F_NTUPLE;
10905 
10906 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10907 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10908 
10909 	if (!(features & NETIF_F_GRO))
10910 		features &= ~NETIF_F_GRO_HW;
10911 
10912 	if (features & NETIF_F_GRO_HW)
10913 		features &= ~NETIF_F_LRO;
10914 
10915 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
10916 	 * turned on or off together.
10917 	 */
10918 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10919 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10920 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10921 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10922 		else if (vlan_features)
10923 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10924 	}
10925 #ifdef CONFIG_BNXT_SRIOV
10926 	if (BNXT_VF(bp) && bp->vf.vlan)
10927 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10928 #endif
10929 	return features;
10930 }
10931 
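/* .ndo_set_features handler.  Most feature changes require a full
 * close/open cycle; switching between hardware GRO and LRO with TPA
 * staying enabled on pre-P5 chips can be applied with bnxt_set_tpa()
 * while the NIC remains up.
 */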
10932 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10933 {
10934 	struct bnxt *bp = netdev_priv(dev);
10935 	u32 flags = bp->flags;
10936 	u32 changes;
10937 	int rc = 0;
10938 	bool re_init = false;
10939 	bool update_tpa = false;
10940 
10941 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10942 	if (features & NETIF_F_GRO_HW)
10943 		flags |= BNXT_FLAG_GRO;
10944 	else if (features & NETIF_F_LRO)
10945 		flags |= BNXT_FLAG_LRO;
10946 
10947 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10948 		flags &= ~BNXT_FLAG_TPA;
10949 
10950 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10951 		flags |= BNXT_FLAG_STRIP_VLAN;
10952 
10953 	if (features & NETIF_F_NTUPLE)
10954 		flags |= BNXT_FLAG_RFS;
10955 
10956 	changes = flags ^ bp->flags;
10957 	if (changes & BNXT_FLAG_TPA) {
10958 		update_tpa = true;
10959 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10960 		    (flags & BNXT_FLAG_TPA) == 0 ||
10961 		    (bp->flags & BNXT_FLAG_CHIP_P5))
10962 			re_init = true;
10963 	}
10964 
10965 	if (changes & ~BNXT_FLAG_TPA)
10966 		re_init = true;
10967 
10968 	if (flags != bp->flags) {
10969 		u32 old_flags = bp->flags;
10970 
10971 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10972 			bp->flags = flags;
10973 			if (update_tpa)
10974 				bnxt_set_ring_params(bp);
10975 			return rc;
10976 		}
10977 
10978 		if (re_init) {
10979 			bnxt_close_nic(bp, false, false);
10980 			bp->flags = flags;
10981 			if (update_tpa)
10982 				bnxt_set_ring_params(bp);
10983 
10984 			return bnxt_open_nic(bp, false, false);
10985 		}
10986 		if (update_tpa) {
10987 			bp->flags = flags;
10988 			rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
10991 			if (rc)
10992 				bp->flags = old_flags;
10993 		}
10994 	}
10995 	return rc;
10996 }
10997 
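/* Validate that the IPv6 header at nw_off only carries extension headers
 * that the hardware can parse for offload.  On success, *nextp (if
 * provided) points to the next-header field of an encapsulated packet so
 * the caller can check the inner protocol.
 */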
10998 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10999 			      u8 **nextp)
11000 {
11001 	struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11002 	int hdr_count = 0;
11003 	u8 *nexthdr;
11004 	int start;
11005 
11006 	/* Check that there are at most 3 IPv6 extension headers, no
11007 	 * fragment header, and each is <= 64 bytes.
11008 	 */
11009 	start = nw_off + sizeof(*ip6h);
11010 	nexthdr = &ip6h->nexthdr;
11011 	while (ipv6_ext_hdr(*nexthdr)) {
11012 		struct ipv6_opt_hdr *hp;
11013 		int hdrlen;
11014 
11015 		if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11016 		    *nexthdr == NEXTHDR_FRAGMENT)
11017 			return false;
11018 		hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11019 					  skb_headlen(skb), NULL);
11020 		if (!hp)
11021 			return false;
11022 		if (*nexthdr == NEXTHDR_AUTH)
11023 			hdrlen = ipv6_authlen(hp);
11024 		else
11025 			hdrlen = ipv6_optlen(hp);
11026 
11027 		if (hdrlen > 64)
11028 			return false;
11029 		nexthdr = &hp->nexthdr;
11030 		start += hdrlen;
11031 		hdr_count++;
11032 	}
11033 	if (nextp) {
11034 		/* Caller will check inner protocol */
11035 		if (skb->encapsulation) {
11036 			*nextp = nexthdr;
11037 			return true;
11038 		}
11039 		*nextp = NULL;
11040 	}
11041 	/* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11042 	return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11043 }
11044 
11045 /* For UDP, we can only handle one VXLAN port and one Geneve port. */
11046 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11047 {
11048 	struct udphdr *uh = udp_hdr(skb);
11049 	__be16 udp_port = uh->dest;
11050 
11051 	if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11052 		return false;
11053 	if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11054 		struct ethhdr *eh = inner_eth_hdr(skb);
11055 
11056 		switch (eh->h_proto) {
11057 		case htons(ETH_P_IP):
11058 			return true;
11059 		case htons(ETH_P_IPV6):
11060 			return bnxt_exthdr_check(bp, skb,
11061 						 skb_inner_network_offset(skb),
11062 						 NULL);
11063 		}
11064 	}
11065 	return false;
11066 }
11067 
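/* Return true if this is a tunnel type whose inner headers the hardware
 * can parse for offload.  l4_proto is taken from the outer IP header.
 */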
11068 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11069 {
11070 	switch (l4_proto) {
11071 	case IPPROTO_UDP:
11072 		return bnxt_udp_tunl_check(bp, skb);
11073 	case IPPROTO_IPIP:
11074 		return true;
11075 	case IPPROTO_GRE: {
11076 		switch (skb->inner_protocol) {
11077 		default:
11078 			return false;
11079 		case htons(ETH_P_IP):
11080 			return true;
11081 		case htons(ETH_P_IPV6):
11082 			fallthrough;
11083 		}
11084 	}
		fallthrough;
11085 	case IPPROTO_IPV6:
11086 		/* Check ext headers of inner ipv6 */
11087 		return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11088 					 NULL);
11089 	}
11090 	return false;
11091 }
11092 
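/* .ndo_features_check handler.  Disable checksum and GSO offloads for
 * any packet whose headers the hardware cannot parse.
 */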
11093 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11094 					     struct net_device *dev,
11095 					     netdev_features_t features)
11096 {
11097 	struct bnxt *bp = netdev_priv(dev);
11098 	u8 *l4_proto;
11099 
11100 	features = vlan_features_check(skb, features);
11101 	switch (vlan_get_protocol(skb)) {
11102 	case htons(ETH_P_IP):
11103 		if (!skb->encapsulation)
11104 			return features;
11105 		l4_proto = &ip_hdr(skb)->protocol;
11106 		if (bnxt_tunl_check(bp, skb, *l4_proto))
11107 			return features;
11108 		break;
11109 	case htons(ETH_P_IPV6):
11110 		if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11111 				       &l4_proto))
11112 			break;
11113 		if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11114 			return features;
11115 		break;
11116 	}
11117 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11118 }
11119 
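/* Read num_words 32-bit words starting at reg_off using the
 * HWRM_DBG_READ_DIRECT firmware command; the data is returned through a
 * DMA buffer and copied into reg_buf in CPU byte order.
 */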
11120 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11121 			 u32 *reg_buf)
11122 {
11123 	struct hwrm_dbg_read_direct_output *resp;
11124 	struct hwrm_dbg_read_direct_input *req;
11125 	__le32 *dbg_reg_buf;
11126 	dma_addr_t mapping;
11127 	int rc, i;
11128 
11129 	rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11130 	if (rc)
11131 		return rc;
11132 
11133 	dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11134 					 &mapping);
11135 	if (!dbg_reg_buf) {
11136 		rc = -ENOMEM;
11137 		goto dbg_rd_reg_exit;
11138 	}
11139 
11140 	req->host_dest_addr = cpu_to_le64(mapping);
11141 
11142 	resp = hwrm_req_hold(bp, req);
11143 	req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11144 	req->read_len32 = cpu_to_le32(num_words);
11145 
11146 	rc = hwrm_req_send(bp, req);
11147 	if (rc || resp->error_code) {
11148 		rc = -EIO;
11149 		goto dbg_rd_reg_exit;
11150 	}
11151 	for (i = 0; i < num_words; i++)
11152 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11153 
11154 dbg_rd_reg_exit:
11155 	hwrm_req_drop(bp, req);
11156 	return rc;
11157 }
11158 
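/* Query firmware for the current producer and consumer index of the
 * given ring.
 */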
11159 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11160 				       u32 ring_id, u32 *prod, u32 *cons)
11161 {
11162 	struct hwrm_dbg_ring_info_get_output *resp;
11163 	struct hwrm_dbg_ring_info_get_input *req;
11164 	int rc;
11165 
11166 	rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11167 	if (rc)
11168 		return rc;
11169 
11170 	req->ring_type = ring_type;
11171 	req->fw_ring_id = cpu_to_le32(ring_id);
11172 	resp = hwrm_req_hold(bp, req);
11173 	rc = hwrm_req_send(bp, req);
11174 	if (!rc) {
11175 		*prod = le32_to_cpu(resp->producer_index);
11176 		*cons = le32_to_cpu(resp->consumer_index);
11177 	}
11178 	hwrm_req_drop(bp, req);
11179 	return rc;
11180 }
11181 
11182 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11183 {
11184 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11185 	int i = bnapi->index;
11186 
11187 	if (!txr)
11188 		return;
11189 
11190 	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11191 		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11192 		    txr->tx_cons);
11193 }
11194 
11195 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11196 {
11197 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11198 	int i = bnapi->index;
11199 
11200 	if (!rxr)
11201 		return;
11202 
11203 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11204 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11205 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11206 		    rxr->rx_sw_agg_prod);
11207 }
11208 
11209 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11210 {
11211 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11212 	int i = bnapi->index;
11213 
11214 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11215 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11216 }
11217 
11218 static void bnxt_dbg_dump_states(struct bnxt *bp)
11219 {
11220 	int i;
11221 	struct bnxt_napi *bnapi;
11222 
11223 	for (i = 0; i < bp->cp_nr_rings; i++) {
11224 		bnapi = bp->bnapi[i];
11225 		if (netif_msg_drv(bp)) {
11226 			bnxt_dump_tx_sw_state(bnapi);
11227 			bnxt_dump_rx_sw_state(bnapi);
11228 			bnxt_dump_cp_sw_state(bnapi);
11229 		}
11230 	}
11231 }
11232 
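/* Ask firmware to reset the RX ring group for ring_nr.  Sent silently
 * because older firmware may not support the command.
 */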
11233 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11234 {
11235 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11236 	struct hwrm_ring_reset_input *req;
11237 	struct bnxt_napi *bnapi = rxr->bnapi;
11238 	struct bnxt_cp_ring_info *cpr;
11239 	u16 cp_ring_id;
11240 	int rc;
11241 
11242 	rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11243 	if (rc)
11244 		return rc;
11245 
11246 	cpr = &bnapi->cp_ring;
11247 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11248 	req->cmpl_ring = cpu_to_le16(cp_ring_id);
11249 	req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11250 	req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11251 	return hwrm_req_send_silent(bp, req);
11252 }
11253 
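/* Close and reopen the NIC to recover from an error.  A silent reset
 * skips the ring state dump and keeps IRQs and the ULP driver running;
 * a full reset stops the ULP driver and reinitializes IRQs.
 */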
11254 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11255 {
11256 	if (!silent)
11257 		bnxt_dbg_dump_states(bp);
11258 	if (netif_running(bp->dev)) {
11259 		int rc;
11260 
11261 		if (silent) {
11262 			bnxt_close_nic(bp, false, false);
11263 			bnxt_open_nic(bp, false, false);
11264 		} else {
11265 			bnxt_ulp_stop(bp);
11266 			bnxt_close_nic(bp, true, false);
11267 			rc = bnxt_open_nic(bp, true, false);
11268 			bnxt_ulp_start(bp, rc);
11269 		}
11270 	}
11271 }
11272 
11273 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11274 {
11275 	struct bnxt *bp = netdev_priv(dev);
11276 
11277 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11278 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11279 	bnxt_queue_sp_work(bp);
11280 }
11281 
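/* Periodic firmware health poll, called from bnxt_timer().  Once every
 * tmr_multiplier timer ticks, check the heartbeat and reset count
 * registers and schedule firmware recovery if the heartbeat has stopped
 * or the reset count has changed.
 */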
11282 static void bnxt_fw_health_check(struct bnxt *bp)
11283 {
11284 	struct bnxt_fw_health *fw_health = bp->fw_health;
11285 	u32 val;
11286 
11287 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11288 		return;
11289 
11290 	/* Make sure it is enabled before checking the tmr_counter. */
11291 	smp_rmb();
11292 	if (fw_health->tmr_counter) {
11293 		fw_health->tmr_counter--;
11294 		return;
11295 	}
11296 
11297 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11298 	if (val == fw_health->last_fw_heartbeat)
11299 		goto fw_reset;
11300 
11301 	fw_health->last_fw_heartbeat = val;
11302 
11303 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11304 	if (val != fw_health->last_fw_reset_cnt)
11305 		goto fw_reset;
11306 
11307 	fw_health->tmr_counter = fw_health->tmr_multiplier;
11308 	return;
11309 
11310 fw_reset:
11311 	set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11312 	bnxt_queue_sp_work(bp);
11313 }
11314 
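/* Periodic timer.  Schedules deferred work (stats, firmware health,
 * PHY retries, flow stats) on the sp_task workqueue and re-arms itself.
 */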
11315 static void bnxt_timer(struct timer_list *t)
11316 {
11317 	struct bnxt *bp = from_timer(bp, t, timer);
11318 	struct net_device *dev = bp->dev;
11319 
11320 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11321 		return;
11322 
11323 	if (atomic_read(&bp->intr_sem) != 0)
11324 		goto bnxt_restart_timer;
11325 
11326 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11327 		bnxt_fw_health_check(bp);
11328 
11329 	if (bp->link_info.link_up && bp->stats_coal_ticks) {
11330 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11331 		bnxt_queue_sp_work(bp);
11332 	}
11333 
11334 	if (bnxt_tc_flower_enabled(bp)) {
11335 		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11336 		bnxt_queue_sp_work(bp);
11337 	}
11338 
11339 #ifdef CONFIG_RFS_ACCEL
11340 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11341 		set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11342 		bnxt_queue_sp_work(bp);
11343 	}
11344 #endif /*CONFIG_RFS_ACCEL*/
11345 
11346 	if (bp->link_info.phy_retry) {
11347 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11348 			bp->link_info.phy_retry = false;
11349 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11350 		} else {
11351 			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11352 			bnxt_queue_sp_work(bp);
11353 		}
11354 	}
11355 
11356 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11357 	    netif_carrier_ok(dev)) {
11358 		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11359 		bnxt_queue_sp_work(bp);
11360 	}
11361 bnxt_restart_timer:
11362 	mod_timer(&bp->timer, jiffies + bp->current_interval);
11363 }
11364 
11365 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11366 {
11367 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11368 	 * set.  If the device is being closed, bnxt_close() may be holding
11369 	 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
11370 	 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11371 	 */
11372 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11373 	rtnl_lock();
11374 }
11375 
11376 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11377 {
11378 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11379 	rtnl_unlock();
11380 }
11381 
11382 /* Only called from bnxt_sp_task() */
11383 static void bnxt_reset(struct bnxt *bp, bool silent)
11384 {
11385 	bnxt_rtnl_lock_sp(bp);
11386 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
11387 		bnxt_reset_task(bp, silent);
11388 	bnxt_rtnl_unlock_sp(bp);
11389 }
11390 
11391 /* Only called from bnxt_sp_task() */
11392 static void bnxt_rx_ring_reset(struct bnxt *bp)
11393 {
11394 	int i;
11395 
11396 	bnxt_rtnl_lock_sp(bp);
11397 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11398 		bnxt_rtnl_unlock_sp(bp);
11399 		return;
11400 	}
11401 	/* Disable and flush TPA before resetting the RX ring */
11402 	if (bp->flags & BNXT_FLAG_TPA)
11403 		bnxt_set_tpa(bp, false);
11404 	for (i = 0; i < bp->rx_nr_rings; i++) {
11405 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11406 		struct bnxt_cp_ring_info *cpr;
11407 		int rc;
11408 
11409 		if (!rxr->bnapi->in_reset)
11410 			continue;
11411 
11412 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
11413 		if (rc) {
11414 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
11415 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11416 			else
11417 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11418 					    rc);
11419 			bnxt_reset_task(bp, true);
11420 			break;
11421 		}
11422 		bnxt_free_one_rx_ring_skbs(bp, i);
11423 		rxr->rx_prod = 0;
11424 		rxr->rx_agg_prod = 0;
11425 		rxr->rx_sw_agg_prod = 0;
11426 		rxr->rx_next_cons = 0;
11427 		rxr->bnapi->in_reset = false;
11428 		bnxt_alloc_one_rx_ring(bp, i);
11429 		cpr = &rxr->bnapi->cp_ring;
11430 		cpr->sw_stats.rx.rx_resets++;
11431 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
11432 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11433 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11434 	}
11435 	if (bp->flags & BNXT_FLAG_TPA)
11436 		bnxt_set_tpa(bp, true);
11437 	bnxt_rtnl_unlock_sp(bp);
11438 }
11439 
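/* Quiesce and close the device in preparation for a firmware reset. */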
11440 static void bnxt_fw_reset_close(struct bnxt *bp)
11441 {
11442 	bnxt_ulp_stop(bp);
11443 	/* When firmware is in fatal state, quiesce device and disable
11444 	 * bus master to prevent any potential bad DMAs before freeing
11445 	 * kernel memory.
11446 	 */
11447 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11448 		u16 val = 0;
11449 
11450 		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11451 		if (val == 0xffff)
11452 			bp->fw_reset_min_dsecs = 0;
11453 		bnxt_tx_disable(bp);
11454 		bnxt_disable_napi(bp);
11455 		bnxt_disable_int_sync(bp);
11456 		bnxt_free_irq(bp);
11457 		bnxt_clear_int_mode(bp);
11458 		pci_disable_device(bp->pdev);
11459 	}
11460 	__bnxt_close_nic(bp, true, false);
11461 	bnxt_vf_reps_free(bp);
11462 	bnxt_clear_int_mode(bp);
11463 	bnxt_hwrm_func_drv_unrgtr(bp);
11464 	if (pci_is_enabled(bp->pdev))
11465 		pci_disable_device(bp->pdev);
11466 	bnxt_free_ctx_mem(bp);
11467 	kfree(bp->ctx);
11468 	bp->ctx = NULL;
11469 }
11470 
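/* Return true if firmware has recovered on its own: the heartbeat is
 * advancing again and the reset counter has changed, indicating a
 * completed self-reset.
 */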
11471 static bool is_bnxt_fw_ok(struct bnxt *bp)
11472 {
11473 	struct bnxt_fw_health *fw_health = bp->fw_health;
11474 	bool no_heartbeat = false, has_reset = false;
11475 	u32 val;
11476 
11477 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11478 	if (val == fw_health->last_fw_heartbeat)
11479 		no_heartbeat = true;
11480 
11481 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11482 	if (val != fw_health->last_fw_reset_cnt)
11483 		has_reset = true;
11484 
11485 	if (!no_heartbeat && has_reset)
11486 		return true;
11487 
11488 	return false;
11489 }
11490 
11491 /* rtnl_lock is acquired before calling this function */
11492 static void bnxt_force_fw_reset(struct bnxt *bp)
11493 {
11494 	struct bnxt_fw_health *fw_health = bp->fw_health;
11495 	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11496 	u32 wait_dsecs;
11497 
11498 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11499 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11500 		return;
11501 
11502 	if (ptp) {
11503 		spin_lock_bh(&ptp->ptp_lock);
11504 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11505 		spin_unlock_bh(&ptp->ptp_lock);
11506 	} else {
11507 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11508 	}
11509 	bnxt_fw_reset_close(bp);
11510 	wait_dsecs = fw_health->master_func_wait_dsecs;
11511 	if (fw_health->master) {
11512 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11513 			wait_dsecs = 0;
11514 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11515 	} else {
11516 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11517 		wait_dsecs = fw_health->normal_func_wait_dsecs;
11518 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11519 	}
11520 
11521 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11522 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11523 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11524 }
11525 
11526 void bnxt_fw_exception(struct bnxt *bp)
11527 {
11528 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11529 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11530 	bnxt_rtnl_lock_sp(bp);
11531 	bnxt_force_fw_reset(bp);
11532 	bnxt_rtnl_unlock_sp(bp);
11533 }
11534 
11535 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11536  * < 0 on error.
11537  */
11538 static int bnxt_get_registered_vfs(struct bnxt *bp)
11539 {
11540 #ifdef CONFIG_BNXT_SRIOV
11541 	int rc;
11542 
11543 	if (!BNXT_PF(bp))
11544 		return 0;
11545 
11546 	rc = bnxt_hwrm_func_qcfg(bp);
11547 	if (rc) {
11548 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11549 		return rc;
11550 	}
11551 	if (bp->pf.registered_vfs)
11552 		return bp->pf.registered_vfs;
11553 	if (bp->sriov_cfg)
11554 		return 1;
11555 #endif
11556 	return 0;
11557 }
11558 
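/* Start an orderly, firmware-coordinated reset.  If VFs are registered,
 * wait for them to unload first; then close the device and kick off the
 * bnxt_fw_reset_task() state machine.
 */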
11559 void bnxt_fw_reset(struct bnxt *bp)
11560 {
11561 	bnxt_rtnl_lock_sp(bp);
11562 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11563 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11564 		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11565 		int n = 0, tmo;
11566 
11567 		if (ptp) {
11568 			spin_lock_bh(&ptp->ptp_lock);
11569 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11570 			spin_unlock_bh(&ptp->ptp_lock);
11571 		} else {
11572 			set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11573 		}
11574 		if (bp->pf.active_vfs &&
11575 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11576 			n = bnxt_get_registered_vfs(bp);
11577 		if (n < 0) {
11578 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11579 				   n);
11580 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11581 			dev_close(bp->dev);
11582 			goto fw_reset_exit;
11583 		} else if (n > 0) {
11584 			u16 vf_tmo_dsecs = n * 10;
11585 
11586 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11587 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11588 			bp->fw_reset_state =
11589 				BNXT_FW_RESET_STATE_POLL_VF;
11590 			bnxt_queue_fw_reset_work(bp, HZ / 10);
11591 			goto fw_reset_exit;
11592 		}
11593 		bnxt_fw_reset_close(bp);
11594 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11595 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11596 			tmo = HZ / 10;
11597 		} else {
11598 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11599 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
11600 		}
11601 		bnxt_queue_fw_reset_work(bp, tmo);
11602 	}
11603 fw_reset_exit:
11604 	bnxt_rtnl_unlock_sp(bp);
11605 }
11606 
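/* P5 chips only: look for completion rings that have pending work but
 * whose raw consumer index has not advanced since the last check,
 * suggesting a missed interrupt.  Query the firmware ring indices and
 * count the event in missed_irqs.
 */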
11607 static void bnxt_chk_missed_irq(struct bnxt *bp)
11608 {
11609 	int i;
11610 
11611 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11612 		return;
11613 
11614 	for (i = 0; i < bp->cp_nr_rings; i++) {
11615 		struct bnxt_napi *bnapi = bp->bnapi[i];
11616 		struct bnxt_cp_ring_info *cpr;
11617 		u32 fw_ring_id;
11618 		int j;
11619 
11620 		if (!bnapi)
11621 			continue;
11622 
11623 		cpr = &bnapi->cp_ring;
11624 		for (j = 0; j < 2; j++) {
11625 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11626 			u32 val[2];
11627 
11628 			if (!cpr2 || cpr2->has_more_work ||
11629 			    !bnxt_has_work(bp, cpr2))
11630 				continue;
11631 
11632 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11633 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11634 				continue;
11635 			}
11636 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11637 			bnxt_dbg_hwrm_ring_info_get(bp,
11638 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11639 				fw_ring_id, &val[0], &val[1]);
11640 			cpr->sw_stats.cmn.missed_irqs++;
11641 		}
11642 	}
11643 }
11644 
11645 static void bnxt_cfg_ntp_filters(struct bnxt *);
11646 
11647 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11648 {
11649 	struct bnxt_link_info *link_info = &bp->link_info;
11650 
11651 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11652 		link_info->autoneg = BNXT_AUTONEG_SPEED;
11653 		if (bp->hwrm_spec_code >= 0x10201) {
11654 			if (link_info->auto_pause_setting &
11655 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11656 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11657 		} else {
11658 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11659 		}
11660 		link_info->advertising = link_info->auto_link_speeds;
11661 		link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11662 	} else {
11663 		link_info->req_link_speed = link_info->force_link_speed;
11664 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11665 		if (link_info->force_pam4_link_speed) {
11666 			link_info->req_link_speed =
11667 				link_info->force_pam4_link_speed;
11668 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11669 		}
11670 		link_info->req_duplex = link_info->duplex_setting;
11671 	}
11672 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11673 		link_info->req_flow_ctrl =
11674 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11675 	else
11676 		link_info->req_flow_ctrl = link_info->force_pause_setting;
11677 }
11678 
11679 static void bnxt_fw_echo_reply(struct bnxt *bp)
11680 {
11681 	struct bnxt_fw_health *fw_health = bp->fw_health;
11682 	struct hwrm_func_echo_response_input *req;
11683 	int rc;
11684 
11685 	rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11686 	if (rc)
11687 		return;
11688 	req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11689 	req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11690 	hwrm_req_send(bp, req);
11691 }
11692 
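/* Workqueue handler for all BNXT_*_SP_EVENT bits.  The reset handlers
 * must run last because they temporarily clear BNXT_STATE_IN_SP_TASK.
 */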
11693 static void bnxt_sp_task(struct work_struct *work)
11694 {
11695 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11696 
11697 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11698 	smp_mb__after_atomic();
11699 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11700 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11701 		return;
11702 	}
11703 
11704 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11705 		bnxt_cfg_rx_mode(bp);
11706 
11707 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11708 		bnxt_cfg_ntp_filters(bp);
11709 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11710 		bnxt_hwrm_exec_fwd_req(bp);
11711 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11712 		bnxt_hwrm_port_qstats(bp, 0);
11713 		bnxt_hwrm_port_qstats_ext(bp, 0);
11714 		bnxt_accumulate_all_stats(bp);
11715 	}
11716 
11717 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11718 		int rc;
11719 
11720 		mutex_lock(&bp->link_lock);
11721 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11722 				       &bp->sp_event))
11723 			bnxt_hwrm_phy_qcaps(bp);
11724 
11725 		rc = bnxt_update_link(bp, true);
11726 		if (rc)
11727 			netdev_err(bp->dev, "SP task can't update link (rc: %d)\n",
11728 				   rc);
11729 
11730 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11731 				       &bp->sp_event))
11732 			bnxt_init_ethtool_link_settings(bp);
11733 		mutex_unlock(&bp->link_lock);
11734 	}
11735 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11736 		int rc;
11737 
11738 		mutex_lock(&bp->link_lock);
11739 		rc = bnxt_update_phy_setting(bp);
11740 		mutex_unlock(&bp->link_lock);
11741 		if (rc) {
11742 			netdev_warn(bp->dev, "update phy settings retry failed\n");
11743 		} else {
11744 			bp->link_info.phy_retry = false;
11745 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
11746 		}
11747 	}
11748 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11749 		mutex_lock(&bp->link_lock);
11750 		bnxt_get_port_module_status(bp);
11751 		mutex_unlock(&bp->link_lock);
11752 	}
11753 
11754 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11755 		bnxt_tc_flow_stats_work(bp);
11756 
11757 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11758 		bnxt_chk_missed_irq(bp);
11759 
11760 	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11761 		bnxt_fw_echo_reply(bp);
11762 
11763 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
11764 	 * must be the last functions to be called before exiting.
11765 	 */
11766 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11767 		bnxt_reset(bp, false);
11768 
11769 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11770 		bnxt_reset(bp, true);
11771 
11772 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11773 		bnxt_rx_ring_reset(bp);
11774 
11775 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11776 		bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11777 
11778 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11779 		if (!is_bnxt_fw_ok(bp))
11780 			bnxt_devlink_health_report(bp,
11781 						   BNXT_FW_EXCEPTION_SP_EVENT);
11782 	}
11783 
11784 	smp_mb__before_atomic();
11785 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11786 }
11787 
11788 /* Under rtnl_lock */
11789 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11790 		     int tx_xdp)
11791 {
11792 	int max_rx, max_tx, tx_sets = 1;
11793 	int tx_rings_needed, stats;
11794 	int rx_rings = rx;
11795 	int cp, vnics, rc;
11796 
11797 	if (tcs)
11798 		tx_sets = tcs;
11799 
11800 	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11801 	if (rc)
11802 		return rc;
11803 
11804 	if (max_rx < rx)
11805 		return -ENOMEM;
11806 
11807 	tx_rings_needed = tx * tx_sets + tx_xdp;
11808 	if (max_tx < tx_rings_needed)
11809 		return -ENOMEM;
11810 
11811 	vnics = 1;
11812 	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11813 		vnics += rx_rings;
11814 
11815 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
11816 		rx_rings <<= 1;
11817 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11818 	stats = cp;
11819 	if (BNXT_NEW_RM(bp)) {
11820 		cp += bnxt_get_ulp_msix_num(bp);
11821 		stats += bnxt_get_ulp_stat_ctxs(bp);
11822 	}
11823 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11824 				     stats, vnics);
11825 }
11826 
11827 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11828 {
11829 	if (bp->bar2) {
11830 		pci_iounmap(pdev, bp->bar2);
11831 		bp->bar2 = NULL;
11832 	}
11833 
11834 	if (bp->bar1) {
11835 		pci_iounmap(pdev, bp->bar1);
11836 		bp->bar1 = NULL;
11837 	}
11838 
11839 	if (bp->bar0) {
11840 		pci_iounmap(pdev, bp->bar0);
11841 		bp->bar0 = NULL;
11842 	}
11843 }
11844 
11845 static void bnxt_cleanup_pci(struct bnxt *bp)
11846 {
11847 	bnxt_unmap_bars(bp, bp->pdev);
11848 	pci_release_regions(bp->pdev);
11849 	if (pci_is_enabled(bp->pdev))
11850 		pci_disable_device(bp->pdev);
11851 }
11852 
11853 static void bnxt_init_dflt_coal(struct bnxt *bp)
11854 {
11855 	struct bnxt_coal *coal;
11856 
11857 	/* Tick values in microseconds.
11858 	 * 1 coal_buf x bufs_per_record = 1 completion record.
11859 	 */
11860 	coal = &bp->rx_coal;
11861 	coal->coal_ticks = 10;
11862 	coal->coal_bufs = 30;
11863 	coal->coal_ticks_irq = 1;
11864 	coal->coal_bufs_irq = 2;
11865 	coal->idle_thresh = 50;
11866 	coal->bufs_per_record = 2;
11867 	coal->budget = 64;		/* NAPI budget */
11868 
11869 	coal = &bp->tx_coal;
11870 	coal->coal_ticks = 28;
11871 	coal->coal_bufs = 30;
11872 	coal->coal_ticks_irq = 2;
11873 	coal->coal_bufs_irq = 2;
11874 	coal->bufs_per_record = 1;
11875 
11876 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11877 }
11878 
11879 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11880 {
11881 	int rc;
11882 
11883 	bp->fw_cap = 0;
11884 	rc = bnxt_hwrm_ver_get(bp);
11885 	bnxt_try_map_fw_health_reg(bp);
11886 	if (rc) {
11887 		rc = bnxt_try_recover_fw(bp);
11888 		if (rc)
11889 			return rc;
11890 		rc = bnxt_hwrm_ver_get(bp);
11891 		if (rc)
11892 			return rc;
11893 	}
11894 
11895 	bnxt_nvm_cfg_ver_get(bp);
11896 
11897 	rc = bnxt_hwrm_func_reset(bp);
11898 	if (rc)
11899 		return -ENODEV;
11900 
11901 	bnxt_hwrm_fw_set_time(bp);
11902 	return 0;
11903 }
11904 
11905 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11906 {
11907 	int rc;
11908 
11909 	/* Get the MAX capabilities for this function */
11910 	rc = bnxt_hwrm_func_qcaps(bp);
11911 	if (rc) {
11912 		netdev_err(bp->dev, "hwrm query capability failure rc: %d\n",
11913 			   rc);
11914 		return -ENODEV;
11915 	}
11916 
11917 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11918 	if (rc)
11919 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11920 			    rc);
11921 
11922 	if (bnxt_alloc_fw_health(bp)) {
11923 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11924 	} else {
11925 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
11926 		if (rc)
11927 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11928 				    rc);
11929 	}
11930 
11931 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11932 	if (rc)
11933 		return -ENODEV;
11934 
11935 	bnxt_hwrm_func_qcfg(bp);
11936 	bnxt_hwrm_vnic_qcaps(bp);
11937 	bnxt_hwrm_port_led_qcaps(bp);
11938 	bnxt_ethtool_init(bp);
11939 	bnxt_dcb_init(bp);
11940 	return 0;
11941 }
11942 
11943 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11944 {
11945 	bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11946 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11947 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11948 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11949 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11950 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11951 		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11952 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11953 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11954 	}
11955 }
11956 
11957 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11958 {
11959 	struct net_device *dev = bp->dev;
11960 
11961 	dev->hw_features &= ~NETIF_F_NTUPLE;
11962 	dev->features &= ~NETIF_F_NTUPLE;
11963 	bp->flags &= ~BNXT_FLAG_RFS;
11964 	if (bnxt_rfs_supported(bp)) {
11965 		dev->hw_features |= NETIF_F_NTUPLE;
11966 		if (bnxt_rfs_capable(bp)) {
11967 			bp->flags |= BNXT_FLAG_RFS;
11968 			dev->features |= NETIF_F_NTUPLE;
11969 		}
11970 	}
11971 }
11972 
11973 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11974 {
11975 	struct pci_dev *pdev = bp->pdev;
11976 
11977 	bnxt_set_dflt_rss_hash_type(bp);
11978 	bnxt_set_dflt_rfs(bp);
11979 
11980 	bnxt_get_wol_settings(bp);
11981 	if (bp->flags & BNXT_FLAG_WOL_CAP)
11982 		device_set_wakeup_enable(&pdev->dev, bp->wol);
11983 	else
11984 		device_set_wakeup_capable(&pdev->dev, false);
11985 
11986 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11987 	bnxt_hwrm_coal_params_qcaps(bp);
11988 }
11989 
11990 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11991 
11992 static int bnxt_fw_init_one(struct bnxt *bp)
11993 {
11994 	int rc;
11995 
11996 	rc = bnxt_fw_init_one_p1(bp);
11997 	if (rc) {
11998 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11999 		return rc;
12000 	}
12001 	rc = bnxt_fw_init_one_p2(bp);
12002 	if (rc) {
12003 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12004 		return rc;
12005 	}
12006 	rc = bnxt_probe_phy(bp, false);
12007 	if (rc)
12008 		return rc;
12009 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12010 	if (rc)
12011 		return rc;
12012 
12013 	/* In case fw capabilities have changed, destroy the unneeded
12014 	 * reporters and create newly capable ones.
12015 	 */
12016 	bnxt_dl_fw_reporters_destroy(bp, false);
12017 	bnxt_dl_fw_reporters_create(bp);
12018 	bnxt_fw_init_one_p3(bp);
12019 	return 0;
12020 }
12021 
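/* Execute one step of the firmware-supplied reset sequence: write the
 * value to a config space, GRC window, or BAR register, then apply the
 * prescribed delay.
 */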
12022 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12023 {
12024 	struct bnxt_fw_health *fw_health = bp->fw_health;
12025 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12026 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12027 	u32 reg_type, reg_off, delay_msecs;
12028 
12029 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12030 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12031 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12032 	switch (reg_type) {
12033 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
12034 		pci_write_config_dword(bp->pdev, reg_off, val);
12035 		break;
12036 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
12037 		writel(reg_off & BNXT_GRC_BASE_MASK,
12038 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12039 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12040 		fallthrough;
12041 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12042 		writel(val, bp->bar0 + reg_off);
12043 		break;
12044 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12045 		writel(val, bp->bar1 + reg_off);
12046 		break;
12047 	}
12048 	if (delay_msecs) {
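		/* Dummy config space read, presumably to flush the
		 * preceding register write before the delay.
		 */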
12049 		pci_read_config_dword(bp->pdev, 0, &val);
12050 		msleep(delay_msecs);
12051 	}
12052 }
12053 
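/* Reset firmware using whichever mechanism its error recovery
 * capabilities advertise: an OP-TEE initiated reload, a host register
 * write sequence, or an HWRM_FW_RESET command to the co-processor.
 */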
12054 static void bnxt_reset_all(struct bnxt *bp)
12055 {
12056 	struct bnxt_fw_health *fw_health = bp->fw_health;
12057 	int i, rc;
12058 
12059 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12060 		bnxt_fw_reset_via_optee(bp);
12061 		bp->fw_reset_timestamp = jiffies;
12062 		return;
12063 	}
12064 
12065 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12066 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12067 			bnxt_fw_reset_writel(bp, i);
12068 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12069 		struct hwrm_fw_reset_input *req;
12070 
12071 		rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12072 		if (!rc) {
12073 			req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12074 			req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12075 			req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12076 			req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12077 			rc = hwrm_req_send(bp, req);
12078 		}
12079 		if (rc != -ENODEV)
12080 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12081 	}
12082 	bp->fw_reset_timestamp = jiffies;
12083 }
12084 
12085 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12086 {
12087 	return time_after(jiffies, bp->fw_reset_timestamp +
12088 			  (bp->fw_reset_max_dsecs * HZ / 10));
12089 }
12090 
12091 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12092 {
12093 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12094 	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12095 		bnxt_ulp_start(bp, rc);
12096 		bnxt_dl_health_status_update(bp, false);
12097 	}
12098 	bp->fw_reset_state = 0;
12099 	dev_close(bp->dev);
12100 }
12101 
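/* Delayed-work state machine driving a firmware reset: wait for VFs to
 * unload, close the device, reset firmware, re-enable the PCI device,
 * poll until firmware responds, then reopen the NIC.
 */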
12102 static void bnxt_fw_reset_task(struct work_struct *work)
12103 {
12104 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12105 	int rc = 0;
12106 
12107 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12108 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12109 		return;
12110 	}
12111 
12112 	switch (bp->fw_reset_state) {
12113 	case BNXT_FW_RESET_STATE_POLL_VF: {
12114 		int n = bnxt_get_registered_vfs(bp);
12115 		int tmo;
12116 
12117 		if (n < 0) {
12118 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12119 				   n, jiffies_to_msecs(jiffies -
12120 				   bp->fw_reset_timestamp));
12121 			goto fw_reset_abort;
12122 		} else if (n > 0) {
12123 			if (bnxt_fw_reset_timeout(bp)) {
12124 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12125 				bp->fw_reset_state = 0;
12126 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12127 					   n);
12128 				return;
12129 			}
12130 			bnxt_queue_fw_reset_work(bp, HZ / 10);
12131 			return;
12132 		}
12133 		bp->fw_reset_timestamp = jiffies;
12134 		rtnl_lock();
12135 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12136 			bnxt_fw_reset_abort(bp, rc);
12137 			rtnl_unlock();
12138 			return;
12139 		}
12140 		bnxt_fw_reset_close(bp);
12141 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12142 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12143 			tmo = HZ / 10;
12144 		} else {
12145 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12146 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
12147 		}
12148 		rtnl_unlock();
12149 		bnxt_queue_fw_reset_work(bp, tmo);
12150 		return;
12151 	}
12152 	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12153 		u32 val;
12154 
12155 		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12156 		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12157 		    !bnxt_fw_reset_timeout(bp)) {
12158 			bnxt_queue_fw_reset_work(bp, HZ / 5);
12159 			return;
12160 		}
12161 
12162 		if (!bp->fw_health->master) {
12163 			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12164 
12165 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12166 			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12167 			return;
12168 		}
12169 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12170 	}
12171 		fallthrough;
12172 	case BNXT_FW_RESET_STATE_RESET_FW:
12173 		bnxt_reset_all(bp);
12174 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12175 		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12176 		return;
12177 	case BNXT_FW_RESET_STATE_ENABLE_DEV:
12178 		bnxt_inv_fw_health_reg(bp);
12179 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12180 		    !bp->fw_reset_min_dsecs) {
12181 			u16 val;
12182 
12183 			pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12184 			if (val == 0xffff) {
12185 				if (bnxt_fw_reset_timeout(bp)) {
12186 					netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12187 					rc = -ETIMEDOUT;
12188 					goto fw_reset_abort;
12189 				}
12190 				bnxt_queue_fw_reset_work(bp, HZ / 1000);
12191 				return;
12192 			}
12193 		}
12194 		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12195 		if (pci_enable_device(bp->pdev)) {
12196 			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12197 			rc = -ENODEV;
12198 			goto fw_reset_abort;
12199 		}
12200 		pci_set_master(bp->pdev);
12201 		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12202 		fallthrough;
12203 	case BNXT_FW_RESET_STATE_POLL_FW:
12204 		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12205 		rc = bnxt_hwrm_poll(bp);
12206 		if (rc) {
12207 			if (bnxt_fw_reset_timeout(bp)) {
12208 				netdev_err(bp->dev, "Firmware reset aborted\n");
12209 				goto fw_reset_abort_status;
12210 			}
12211 			bnxt_queue_fw_reset_work(bp, HZ / 5);
12212 			return;
12213 		}
12214 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12215 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12216 		fallthrough;
12217 	case BNXT_FW_RESET_STATE_OPENING:
12218 		while (!rtnl_trylock()) {
12219 			bnxt_queue_fw_reset_work(bp, HZ / 10);
12220 			return;
12221 		}
12222 		rc = bnxt_open(bp->dev);
12223 		if (rc) {
12224 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12225 			bnxt_fw_reset_abort(bp, rc);
12226 			rtnl_unlock();
12227 			return;
12228 		}
12229 
12230 		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12231 		    bp->fw_health->enabled) {
12232 			bp->fw_health->last_fw_reset_cnt =
12233 				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12234 		}
12235 		bp->fw_reset_state = 0;
12236 		/* Make sure fw_reset_state is 0 before clearing the flag */
12237 		smp_mb__before_atomic();
12238 		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12239 		bnxt_ulp_start(bp, 0);
12240 		bnxt_reenable_sriov(bp);
12241 		bnxt_vf_reps_alloc(bp);
12242 		bnxt_vf_reps_open(bp);
12243 		bnxt_ptp_reapply_pps(bp);
12244 		bnxt_dl_health_recovery_done(bp);
12245 		bnxt_dl_health_status_update(bp, true);
12246 		rtnl_unlock();
12247 		break;
12248 	}
12249 	return;
12250 
12251 fw_reset_abort_status:
12252 	if (bp->fw_health->status_reliable ||
12253 	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12254 		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12255 
12256 		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12257 	}
12258 fw_reset_abort:
12259 	rtnl_lock();
12260 	bnxt_fw_reset_abort(bp, rc);
12261 	rtnl_unlock();
12262 }
12263 
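/* One-time PCI setup at probe time: enable the device, map BAR 0 and
 * BAR 4, set the DMA mask, and initialize timers, locks, and work items.
 */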
12264 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12265 {
12266 	int rc;
12267 	struct bnxt *bp = netdev_priv(dev);
12268 
12269 	SET_NETDEV_DEV(dev, &pdev->dev);
12270 
12271 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
12272 	rc = pci_enable_device(pdev);
12273 	if (rc) {
12274 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12275 		goto init_err;
12276 	}
12277 
12278 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12279 		dev_err(&pdev->dev,
12280 			"Cannot find PCI device base address, aborting\n");
12281 		rc = -ENODEV;
12282 		goto init_err_disable;
12283 	}
12284 
12285 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12286 	if (rc) {
12287 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12288 		goto init_err_disable;
12289 	}
12290 
12291 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12292 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12293 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12294 		rc = -EIO;
12295 		goto init_err_release;
12296 	}
12297 
12298 	pci_set_master(pdev);
12299 
12300 	bp->dev = dev;
12301 	bp->pdev = pdev;
12302 
12303 	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12304 	 * determines the BAR size.
12305 	 */
12306 	bp->bar0 = pci_ioremap_bar(pdev, 0);
12307 	if (!bp->bar0) {
12308 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12309 		rc = -ENOMEM;
12310 		goto init_err_release;
12311 	}
12312 
12313 	bp->bar2 = pci_ioremap_bar(pdev, 4);
12314 	if (!bp->bar2) {
12315 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12316 		rc = -ENOMEM;
12317 		goto init_err_release;
12318 	}
12319 
12320 	pci_enable_pcie_error_reporting(pdev);
12321 
12322 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
12323 	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12324 
12325 	spin_lock_init(&bp->ntp_fltr_lock);
12326 #if BITS_PER_LONG == 32
12327 	spin_lock_init(&bp->db_lock);
12328 #endif
12329 
12330 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12331 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12332 
12333 	bnxt_init_dflt_coal(bp);
12334 
12335 	timer_setup(&bp->timer, bnxt_timer, 0);
12336 	bp->current_interval = BNXT_TIMER_INTERVAL;
12337 
12338 	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12339 	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12340 
12341 	clear_bit(BNXT_STATE_OPEN, &bp->state);
12342 	return 0;
12343 
12344 init_err_release:
12345 	bnxt_unmap_bars(bp, pdev);
12346 	pci_release_regions(pdev);
12347 
12348 init_err_disable:
12349 	pci_disable_device(pdev);
12350 
12351 init_err:
12352 	return rc;
12353 }
12354 
12355 /* rtnl_lock held */
12356 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12357 {
12358 	struct sockaddr *addr = p;
12359 	struct bnxt *bp = netdev_priv(dev);
12360 	int rc = 0;
12361 
12362 	if (!is_valid_ether_addr(addr->sa_data))
12363 		return -EADDRNOTAVAIL;
12364 
12365 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12366 		return 0;
12367 
12368 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
12369 	if (rc)
12370 		return rc;
12371 
12372 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12373 	if (netif_running(dev)) {
12374 		bnxt_close_nic(bp, false, false);
12375 		rc = bnxt_open_nic(bp, false, false);
12376 	}
12377 
12378 	return rc;
12379 }
12380 
12381 /* rtnl_lock held */
12382 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12383 {
12384 	struct bnxt *bp = netdev_priv(dev);
12385 
12386 	if (netif_running(dev))
12387 		bnxt_close_nic(bp, true, false);
12388 
12389 	dev->mtu = new_mtu;
12390 	bnxt_set_ring_params(bp);
12391 
12392 	if (netif_running(dev))
12393 		return bnxt_open_nic(bp, true, false);
12394 
12395 	return 0;
12396 }
12397 
12398 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12399 {
12400 	struct bnxt *bp = netdev_priv(dev);
12401 	bool sh = false;
12402 	int rc;
12403 
12404 	if (tc > bp->max_tc) {
12405 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12406 			   tc, bp->max_tc);
12407 		return -EINVAL;
12408 	}
12409 
12410 	if (netdev_get_num_tc(dev) == tc)
12411 		return 0;
12412 
12413 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12414 		sh = true;
12415 
12416 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12417 			      sh, tc, bp->tx_nr_rings_xdp);
12418 	if (rc)
12419 		return rc;
12420 
12421 	/* Needs to close the device and do hw resource re-allocations */
12422 	if (netif_running(bp->dev))
12423 		bnxt_close_nic(bp, true, false);
12424 
12425 	if (tc) {
12426 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12427 		netdev_set_num_tc(dev, tc);
12428 	} else {
12429 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12430 		netdev_reset_tc(dev);
12431 	}
12432 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12433 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12434 			       bp->tx_nr_rings + bp->rx_nr_rings;
12435 
12436 	if (netif_running(bp->dev))
12437 		return bnxt_open_nic(bp, true, false);
12438 
12439 	return 0;
12440 }
12441 
12442 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12443 				  void *cb_priv)
12444 {
12445 	struct bnxt *bp = cb_priv;
12446 
12447 	if (!bnxt_tc_flower_enabled(bp) ||
12448 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12449 		return -EOPNOTSUPP;
12450 
12451 	switch (type) {
12452 	case TC_SETUP_CLSFLOWER:
12453 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12454 	default:
12455 		return -EOPNOTSUPP;
12456 	}
12457 }
12458 
12459 LIST_HEAD(bnxt_block_cb_list);
12460 
12461 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12462 			 void *type_data)
12463 {
12464 	struct bnxt *bp = netdev_priv(dev);
12465 
12466 	switch (type) {
12467 	case TC_SETUP_BLOCK:
12468 		return flow_block_cb_setup_simple(type_data,
12469 						  &bnxt_block_cb_list,
12470 						  bnxt_setup_tc_block_cb,
12471 						  bp, bp, true);
12472 	case TC_SETUP_QDISC_MQPRIO: {
12473 		struct tc_mqprio_qopt *mqprio = type_data;
12474 
12475 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12476 
12477 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12478 	}
12479 	default:
12480 		return -EOPNOTSUPP;
12481 	}
12482 }
12483 
12484 #ifdef CONFIG_RFS_ACCEL
12485 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12486 			    struct bnxt_ntuple_filter *f2)
12487 {
12488 	struct flow_keys *keys1 = &f1->fkeys;
12489 	struct flow_keys *keys2 = &f2->fkeys;
12490 
12491 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
12492 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
12493 		return false;
12494 
12495 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12496 		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12497 		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12498 			return false;
12499 	} else {
12500 		if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12501 			   sizeof(keys1->addrs.v6addrs.src)) ||
12502 		    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12503 			   sizeof(keys1->addrs.v6addrs.dst)))
12504 			return false;
12505 	}
12506 
12507 	if (keys1->ports.ports == keys2->ports.ports &&
12508 	    keys1->control.flags == keys2->control.flags &&
12509 	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12510 	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12511 		return true;
12512 
12513 	return false;
12514 }
12515 
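/* .ndo_rx_flow_steer handler (aRFS).  Build an ntuple filter from the
 * skb's flow keys and queue it for bnxt_cfg_ntp_filters() to program
 * into hardware.  Returns the filter's sw_id on success.
 */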
12516 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12517 			      u16 rxq_index, u32 flow_id)
12518 {
12519 	struct bnxt *bp = netdev_priv(dev);
12520 	struct bnxt_ntuple_filter *fltr, *new_fltr;
12521 	struct flow_keys *fkeys;
12522 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12523 	int rc = 0, idx, bit_id, l2_idx = 0;
12524 	struct hlist_head *head;
12525 	u32 flags;
12526 
12527 	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12528 		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12529 		int off = 0, j;
12530 
12531 		netif_addr_lock_bh(dev);
12532 		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12533 			if (ether_addr_equal(eth->h_dest,
12534 					     vnic->uc_list + off)) {
12535 				l2_idx = j + 1;
12536 				break;
12537 			}
12538 		}
12539 		netif_addr_unlock_bh(dev);
12540 		if (!l2_idx)
12541 			return -EINVAL;
12542 	}
12543 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12544 	if (!new_fltr)
12545 		return -ENOMEM;
12546 
12547 	fkeys = &new_fltr->fkeys;
12548 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12549 		rc = -EPROTONOSUPPORT;
12550 		goto err_free;
12551 	}
12552 
12553 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12554 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12555 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12556 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12557 		rc = -EPROTONOSUPPORT;
12558 		goto err_free;
12559 	}
12560 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12561 	    bp->hwrm_spec_code < 0x10601) {
12562 		rc = -EPROTONOSUPPORT;
12563 		goto err_free;
12564 	}
12565 	flags = fkeys->control.flags;
12566 	if (((flags & FLOW_DIS_ENCAPSULATION) &&
12567 	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12568 		rc = -EPROTONOSUPPORT;
12569 		goto err_free;
12570 	}
12571 
12572 	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12573 	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12574 
12575 	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12576 	head = &bp->ntp_fltr_hash_tbl[idx];
12577 	rcu_read_lock();
12578 	hlist_for_each_entry_rcu(fltr, head, hash) {
12579 		if (bnxt_fltr_match(fltr, new_fltr)) {
12580 			rcu_read_unlock();
12581 			rc = 0;
12582 			goto err_free;
12583 		}
12584 	}
12585 	rcu_read_unlock();
12586 
12587 	spin_lock_bh(&bp->ntp_fltr_lock);
12588 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12589 					 BNXT_NTP_FLTR_MAX_FLTR, 0);
12590 	if (bit_id < 0) {
12591 		spin_unlock_bh(&bp->ntp_fltr_lock);
12592 		rc = -ENOMEM;
12593 		goto err_free;
12594 	}
12595 
12596 	new_fltr->sw_id = (u16)bit_id;
12597 	new_fltr->flow_id = flow_id;
12598 	new_fltr->l2_fltr_idx = l2_idx;
12599 	new_fltr->rxq = rxq_index;
12600 	hlist_add_head_rcu(&new_fltr->hash, head);
12601 	bp->ntp_fltr_count++;
12602 	spin_unlock_bh(&bp->ntp_fltr_lock);
12603 
12604 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12605 	bnxt_queue_sp_work(bp);
12606 
12607 	return new_fltr->sw_id;
12608 
12609 err_free:
12610 	kfree(new_fltr);
12611 	return rc;
12612 }
12613 
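/* Worker for BNXT_RX_NTP_FLTR_SP_EVENT.  Walks the ntuple filter hash
 * table, programming newly added filters into the firmware and freeing
 * filters whose flows have expired per rps_may_expire_flow().
 */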
12614 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12615 {
12616 	int i;
12617 
12618 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12619 		struct hlist_head *head;
12620 		struct hlist_node *tmp;
12621 		struct bnxt_ntuple_filter *fltr;
12622 		int rc;
12623 
12624 		head = &bp->ntp_fltr_hash_tbl[i];
12625 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12626 			bool del = false;
12627 
12628 			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12629 				if (rps_may_expire_flow(bp->dev, fltr->rxq,
12630 							fltr->flow_id,
12631 							fltr->sw_id)) {
12632 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
12633 									 fltr);
12634 					del = true;
12635 				}
12636 			} else {
12637 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12638 								       fltr);
12639 				if (rc)
12640 					del = true;
12641 				else
12642 					set_bit(BNXT_FLTR_VALID, &fltr->state);
12643 			}
12644 
12645 			if (del) {
12646 				spin_lock_bh(&bp->ntp_fltr_lock);
12647 				hlist_del_rcu(&fltr->hash);
12648 				bp->ntp_fltr_count--;
12649 				spin_unlock_bh(&bp->ntp_fltr_lock);
12650 				synchronize_rcu();
12651 				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12652 				kfree(fltr);
12653 			}
12654 		}
12655 	}
12656 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Received PF driver unload event\n");
12658 }
12659 
12660 #else
12661 
12662 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12663 {
12664 }
12665 
12666 #endif /* CONFIG_RFS_ACCEL */
12667 
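/* udp_tunnel_nic sync callback.  Per bnxt_udp_tunnels below, table 0
 * holds the VXLAN port and table 1 the GENEVE port.  A zero port means
 * the entry was cleared, so the tunnel destination port is freed in the
 * firmware; otherwise it is allocated.  The FREE_REQ tunnel type values
 * are used for both calls on the assumption that they match the
 * corresponding ALLOC_REQ values.
 */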
12668 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12669 {
12670 	struct bnxt *bp = netdev_priv(netdev);
12671 	struct udp_tunnel_info ti;
12672 	unsigned int cmd;
12673 
12674 	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12675 	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12676 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12677 	else
12678 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12679 
12680 	if (ti.port)
12681 		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12682 
12683 	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12684 }
12685 
12686 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12687 	.sync_table	= bnxt_udp_tunnel_sync,
12688 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12689 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12690 	.tables		= {
12691 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
12692 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12693 	},
12694 };
12695 
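/* ndo_bridge_getlink() handler: report the current bridge mode (VEB or
 * VEPA) using the default netlink dump helper.
 */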
12696 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12697 			       struct net_device *dev, u32 filter_mask,
12698 			       int nlflags)
12699 {
12700 	struct bnxt *bp = netdev_priv(dev);
12701 
12702 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12703 				       nlflags, filter_mask, NULL);
12704 }
12705 
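/* ndo_bridge_setlink() handler: switch between VEB and VEPA bridge modes
 * via HWRM.  Requires firmware spec 0x10708 or newer and a single-PF
 * configuration.
 */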
12706 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12707 			       u16 flags, struct netlink_ext_ack *extack)
12708 {
12709 	struct bnxt *bp = netdev_priv(dev);
12710 	struct nlattr *attr, *br_spec;
12711 	int rem, rc = 0;
12712 
12713 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12714 		return -EOPNOTSUPP;
12715 
12716 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12717 	if (!br_spec)
12718 		return -EINVAL;
12719 
12720 	nla_for_each_nested(attr, br_spec, rem) {
12721 		u16 mode;
12722 
12723 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
12724 			continue;
12725 
12726 		if (nla_len(attr) < sizeof(mode))
12727 			return -EINVAL;
12728 
12729 		mode = nla_get_u16(attr);
12730 		if (mode == bp->br_mode)
12731 			break;
12732 
12733 		rc = bnxt_hwrm_set_br_mode(bp, mode);
12734 		if (!rc)
12735 			bp->br_mode = mode;
12736 		break;
12737 	}
12738 	return rc;
12739 }
12740 
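/* Report the adapter's DSN as the port parent ID so that all ports and
 * VF-reps of the same adapter share one switch ID in switchdev mode.
 */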
12741 int bnxt_get_port_parent_id(struct net_device *dev,
12742 			    struct netdev_phys_item_id *ppid)
12743 {
12744 	struct bnxt *bp = netdev_priv(dev);
12745 
12746 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12747 		return -EOPNOTSUPP;
12748 
	/* The PF and its VF-reps only support the switchdev framework */
12750 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12751 		return -EOPNOTSUPP;
12752 
12753 	ppid->id_len = sizeof(bp->dsn);
12754 	memcpy(ppid->id, bp->dsn, ppid->id_len);
12755 
12756 	return 0;
12757 }
12758 
12759 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12760 {
12761 	struct bnxt *bp = netdev_priv(dev);
12762 
12763 	return &bp->dl_port;
12764 }
12765 
12766 static const struct net_device_ops bnxt_netdev_ops = {
12767 	.ndo_open		= bnxt_open,
12768 	.ndo_start_xmit		= bnxt_start_xmit,
12769 	.ndo_stop		= bnxt_close,
12770 	.ndo_get_stats64	= bnxt_get_stats64,
12771 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
12772 	.ndo_eth_ioctl		= bnxt_ioctl,
12773 	.ndo_validate_addr	= eth_validate_addr,
12774 	.ndo_set_mac_address	= bnxt_change_mac_addr,
12775 	.ndo_change_mtu		= bnxt_change_mtu,
12776 	.ndo_fix_features	= bnxt_fix_features,
12777 	.ndo_set_features	= bnxt_set_features,
12778 	.ndo_features_check	= bnxt_features_check,
12779 	.ndo_tx_timeout		= bnxt_tx_timeout,
12780 #ifdef CONFIG_BNXT_SRIOV
12781 	.ndo_get_vf_config	= bnxt_get_vf_config,
12782 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
12783 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
12784 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
12785 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
12786 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
12787 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
12788 #endif
12789 	.ndo_setup_tc           = bnxt_setup_tc,
12790 #ifdef CONFIG_RFS_ACCEL
12791 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
12792 #endif
12793 	.ndo_bpf		= bnxt_xdp,
12794 	.ndo_xdp_xmit		= bnxt_xdp_xmit,
12795 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
12796 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
12797 	.ndo_get_devlink_port	= bnxt_get_devlink_port,
12798 };
12799 
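/* PCI remove handler.  Teardown is roughly the reverse of probe: disable
 * SR-IOV, unregister the netdev, flush deferred work, then release
 * devlink, TC, interrupt, HWRM and memory resources.
 */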
12800 static void bnxt_remove_one(struct pci_dev *pdev)
12801 {
12802 	struct net_device *dev = pci_get_drvdata(pdev);
12803 	struct bnxt *bp = netdev_priv(dev);
12804 
	if (BNXT_PF(bp)) {
		bnxt_sriov_disable(bp);
		devlink_port_type_clear(&bp->dl_port);
	}
12810 
12811 	bnxt_ptp_clear(bp);
12812 	pci_disable_pcie_error_reporting(pdev);
12813 	unregister_netdev(dev);
12814 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12815 	/* Flush any pending tasks */
12816 	cancel_work_sync(&bp->sp_task);
12817 	cancel_delayed_work_sync(&bp->fw_reset_task);
12818 	bp->sp_event = 0;
12819 
12820 	bnxt_dl_fw_reporters_destroy(bp, true);
12821 	bnxt_dl_unregister(bp);
12822 	bnxt_shutdown_tc(bp);
12823 
12824 	bnxt_clear_int_mode(bp);
12825 	bnxt_hwrm_func_drv_unrgtr(bp);
12826 	bnxt_free_hwrm_resources(bp);
12827 	bnxt_ethtool_free(bp);
12828 	bnxt_dcb_free(bp);
12829 	kfree(bp->edev);
12830 	bp->edev = NULL;
12831 	kfree(bp->ptp_cfg);
12832 	bp->ptp_cfg = NULL;
12833 	kfree(bp->fw_health);
12834 	bp->fw_health = NULL;
12835 	bnxt_cleanup_pci(bp);
12836 	bnxt_free_ctx_mem(bp);
12837 	kfree(bp->ctx);
12838 	bp->ctx = NULL;
12839 	kfree(bp->rss_indir_tbl);
12840 	bp->rss_indir_tbl = NULL;
12841 	bnxt_free_port_stats(bp);
12842 	free_netdev(dev);
12843 }
12844 
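/* Query the PHY capabilities from firmware.  When fw_dflt is true, also
 * read the current link state and seed the initial ethtool link settings
 * from the firmware defaults.
 */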
12845 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12846 {
12847 	int rc = 0;
12848 	struct bnxt_link_info *link_info = &bp->link_info;
12849 
12850 	bp->phy_flags = 0;
12851 	rc = bnxt_hwrm_phy_qcaps(bp);
12852 	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %d)\n",
			   rc);
12855 		return rc;
12856 	}
12857 	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12858 		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12859 	else
12860 		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12861 	if (!fw_dflt)
12862 		return 0;
12863 
12864 	mutex_lock(&bp->link_lock);
12865 	rc = bnxt_update_link(bp, false);
12866 	if (rc) {
12867 		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %d)\n",
			   rc);
12870 		return rc;
12871 	}
12872 
12873 	/* Older firmware does not have supported_auto_speeds, so assume
12874 	 * that all supported speeds can be autonegotiated.
12875 	 */
12876 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12877 		link_info->support_auto_speeds = link_info->support_speeds;
12878 
12879 	bnxt_init_ethtool_link_settings(bp);
12880 	mutex_unlock(&bp->link_lock);
12881 	return 0;
12882 }
12883 
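/* Return the MSI-X table size from config space (the QSIZE field encodes
 * the table size minus one), or 1 if MSI-X is not supported.
 */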
12884 static int bnxt_get_max_irq(struct pci_dev *pdev)
12885 {
12886 	u16 ctrl;
12887 
12888 	if (!pdev->msix_cap)
12889 		return 1;
12890 
12891 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12892 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12893 }
12894 
12895 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12896 				int *max_cp)
12897 {
12898 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12899 	int max_ring_grps = 0, max_irq;
12900 
12901 	*max_tx = hw_resc->max_tx_rings;
12902 	*max_rx = hw_resc->max_rx_rings;
12903 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12904 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12905 			bnxt_get_ulp_msix_num(bp),
12906 			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12907 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12908 		*max_cp = min_t(int, *max_cp, max_irq);
12909 	max_ring_grps = hw_resc->max_hw_ring_grps;
12910 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12911 		*max_cp -= 1;
12912 		*max_rx -= 2;
12913 	}
12914 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
12915 		*max_rx >>= 1;
12916 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
12917 		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, the max_cp output parameter is the number of available NQs */
12919 		*max_cp = max_irq;
12920 	}
12921 	*max_rx = min_t(int, *max_rx, max_ring_grps);
12922 }
12923 
12924 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12925 {
12926 	int rx, tx, cp;
12927 
12928 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
12929 	*max_rx = rx;
12930 	*max_tx = tx;
12931 	if (!rx || !tx || !cp)
12932 		return -ENOMEM;
12933 
12934 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12935 }
12936 
12937 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12938 			       bool shared)
12939 {
12940 	int rc;
12941 
12942 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12943 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12944 		/* Not enough rings, try disabling agg rings. */
12945 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12946 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12947 		if (rc) {
12948 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
12949 			bp->flags |= BNXT_FLAG_AGG_RINGS;
12950 			return rc;
12951 		}
12952 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12953 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12954 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12955 		bnxt_set_ring_params(bp);
12956 	}
12957 
12958 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12959 		int max_cp, max_stat, max_irq;
12960 
12961 		/* Reserve minimum resources for RoCE */
12962 		max_cp = bnxt_get_max_func_cp_rings(bp);
12963 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
12964 		max_irq = bnxt_get_max_func_irqs(bp);
12965 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12966 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12967 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12968 			return 0;
12969 
12970 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12971 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12972 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12973 		max_cp = min_t(int, max_cp, max_irq);
12974 		max_cp = min_t(int, max_cp, max_stat);
12975 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12976 		if (rc)
12977 			rc = 0;
12978 	}
12979 	return rc;
12980 }
12981 
/* In the initial default shared-ring configuration, each shared ring
 * must have an RX/TX ring pair.
 */
12985 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12986 {
12987 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12988 	bp->rx_nr_rings = bp->cp_nr_rings;
12989 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12990 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12991 }
12992 
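/* Choose and reserve the default ring counts, bounded by the hardware
 * resources, the default RSS queue count and, on multi-port cards, the
 * number of online CPUs per port.
 */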
12993 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12994 {
12995 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
12996 
12997 	if (!bnxt_can_reserve_rings(bp))
12998 		return 0;
12999 
13000 	if (sh)
13001 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
13002 	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that the total number
	 * of default rings does not exceed the CPU count.
	 */
13006 	if (bp->port_count > 1) {
13007 		int max_rings =
13008 			max_t(int, num_online_cpus() / bp->port_count, 1);
13009 
13010 		dflt_rings = min_t(int, dflt_rings, max_rings);
13011 	}
13012 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13013 	if (rc)
13014 		return rc;
13015 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13016 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13017 	if (sh)
13018 		bnxt_trim_dflt_sh_rings(bp);
13019 	else
13020 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13021 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13022 
13023 	rc = __bnxt_reserve_rings(bp);
13024 	if (rc)
13025 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13026 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13027 	if (sh)
13028 		bnxt_trim_dflt_sh_rings(bp);
13029 
13030 	/* Rings may have been trimmed, re-reserve the trimmed rings. */
13031 	if (bnxt_need_reserve_rings(bp)) {
13032 		rc = __bnxt_reserve_rings(bp);
13033 		if (rc)
			netdev_warn(bp->dev, "Second ring reservation failed.\n");
13035 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13036 	}
13037 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13038 		bp->rx_nr_rings++;
13039 		bp->cp_nr_rings++;
13040 	}
13041 	if (rc) {
13042 		bp->tx_nr_rings = 0;
13043 		bp->rx_nr_rings = 0;
13044 	}
13045 	return rc;
13046 }
13047 
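/* Re-establish the default shared-ring configuration and interrupt mode
 * if no rings are currently configured (bp->tx_nr_rings == 0), e.g. after
 * an earlier reservation failure.
 */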
13048 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13049 {
13050 	int rc;
13051 
13052 	if (bp->tx_nr_rings)
13053 		return 0;
13054 
13055 	bnxt_ulp_irq_stop(bp);
13056 	bnxt_clear_int_mode(bp);
13057 	rc = bnxt_set_dflt_rings(bp, true);
13058 	if (rc) {
13059 		netdev_err(bp->dev, "Not enough rings available.\n");
13060 		goto init_dflt_ring_err;
13061 	}
13062 	rc = bnxt_init_int_mode(bp);
13063 	if (rc)
13064 		goto init_dflt_ring_err;
13065 
13066 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13067 	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13068 		bp->flags |= BNXT_FLAG_RFS;
13069 		bp->dev->features |= NETIF_F_NTUPLE;
13070 	}
13071 init_dflt_ring_err:
13072 	bnxt_ulp_irq_restart(bp, rc);
13073 	return rc;
13074 }
13075 
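/* Re-query firmware resources and re-initialize the interrupt mode,
 * restarting the NIC if it was running.  The caller must hold RTNL; this
 * is typically needed after SR-IOV configuration changes the resources
 * available to the PF.
 */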
13076 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13077 {
13078 	int rc;
13079 
13080 	ASSERT_RTNL();
13081 	bnxt_hwrm_func_qcaps(bp);
13082 
13083 	if (netif_running(bp->dev))
13084 		__bnxt_close_nic(bp, true, false);
13085 
13086 	bnxt_ulp_irq_stop(bp);
13087 	bnxt_clear_int_mode(bp);
13088 	rc = bnxt_init_int_mode(bp);
13089 	bnxt_ulp_irq_restart(bp, rc);
13090 
13091 	if (netif_running(bp->dev)) {
13092 		if (rc)
13093 			dev_close(bp->dev);
13094 		else
13095 			rc = bnxt_open_nic(bp, true, false);
13096 	}
13097 
13098 	return rc;
13099 }
13100 
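/* Set the initial netdev MAC address: the firmware-provided address on
 * the PF; on a VF, the admin-assigned address if one is valid, otherwise
 * a random address that the PF is then asked to approve.
 */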
13101 static int bnxt_init_mac_addr(struct bnxt *bp)
13102 {
13103 	int rc = 0;
13104 
13105 	if (BNXT_PF(bp)) {
13106 		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13107 	} else {
13108 #ifdef CONFIG_BNXT_SRIOV
13109 		struct bnxt_vf_info *vf = &bp->vf;
13110 		bool strict_approval = true;
13111 
13112 		if (is_valid_ether_addr(vf->mac_addr)) {
13113 			/* overwrite netdev dev_addr with admin VF MAC */
13114 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* An older PF driver or firmware may not approve
			 * this MAC correctly.
			 */
13118 			strict_approval = false;
13119 		} else {
13120 			eth_hw_addr_random(bp->dev);
13121 		}
13122 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13123 #endif
13124 	}
13125 	return rc;
13126 }
13127 
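/* Read the board part number and serial number from the read-only
 * section of the PCI VPD, truncating each to BNXT_VPD_FLD_LEN - 1 bytes.
 */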
13128 static void bnxt_vpd_read_info(struct bnxt *bp)
13129 {
13130 	struct pci_dev *pdev = bp->pdev;
13131 	unsigned int vpd_size, kw_len;
13132 	int pos, size;
13133 	u8 *vpd_data;
13134 
13135 	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13136 	if (IS_ERR(vpd_data)) {
13137 		pci_warn(pdev, "Unable to read VPD\n");
13138 		return;
13139 	}
13140 
13141 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13142 					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13143 	if (pos < 0)
13144 		goto read_sn;
13145 
13146 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13147 	memcpy(bp->board_partno, &vpd_data[pos], size);
13148 
13149 read_sn:
13150 	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13151 					   PCI_VPD_RO_KEYWORD_SERIALNO,
13152 					   &kw_len);
13153 	if (pos < 0)
13154 		goto exit;
13155 
13156 	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13157 	memcpy(bp->board_serialno, &vpd_data[pos], size);
13158 exit:
13159 	kfree(vpd_data);
13160 }
13161 
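/* Read the PCIe Device Serial Number capability into dsn[] in
 * little-endian byte order and mark the DSN valid.
 */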
13162 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13163 {
13164 	struct pci_dev *pdev = bp->pdev;
13165 	u64 qword;
13166 
13167 	qword = pci_get_dsn(pdev);
13168 	if (!qword) {
13169 		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13170 		return -EOPNOTSUPP;
13171 	}
13172 
13173 	put_unaligned_le64(qword, dsn);
13174 
13175 	bp->flags |= BNXT_FLAG_DSN_VALID;
13176 	return 0;
13177 }
13178 
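/* Map the doorbell BAR (BAR 2); bp->db_size must already be known. */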
13179 static int bnxt_map_db_bar(struct bnxt *bp)
13180 {
13181 	if (!bp->db_size)
13182 		return -ENODEV;
13183 	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13184 	if (!bp->bar1)
13185 		return -ENOMEM;
13186 	return 0;
13187 }
13188 
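/* PCI probe handler: allocate the netdev, bring up firmware and hardware
 * resources, set up the feature flags and default rings, and register
 * the device.
 */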
13189 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13190 {
13191 	struct net_device *dev;
13192 	struct bnxt *bp;
13193 	int rc, max_irqs;
13194 
13195 	if (pci_is_bridge(pdev))
13196 		return -ENODEV;
13197 
	/* Clear any DMA transactions left pending by the crashed kernel
	 * before loading the driver in the kdump capture kernel.
	 */
13201 	if (is_kdump_kernel()) {
13202 		pci_clear_master(pdev);
13203 		pcie_flr(pdev);
13204 	}
13205 
13206 	max_irqs = bnxt_get_max_irq(pdev);
13207 	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13208 	if (!dev)
13209 		return -ENOMEM;
13210 
13211 	bp = netdev_priv(dev);
13212 	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13213 	bnxt_set_max_func_irqs(bp, max_irqs);
13214 
13215 	if (bnxt_vf_pciid(ent->driver_data))
13216 		bp->flags |= BNXT_FLAG_VF;
13217 
13218 	if (pdev->msix_cap)
13219 		bp->flags |= BNXT_FLAG_MSIX_CAP;
13220 
13221 	rc = bnxt_init_board(pdev, dev);
13222 	if (rc < 0)
13223 		goto init_err_free;
13224 
13225 	dev->netdev_ops = &bnxt_netdev_ops;
13226 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13227 	dev->ethtool_ops = &bnxt_ethtool_ops;
13228 	pci_set_drvdata(pdev, dev);
13229 
13230 	rc = bnxt_alloc_hwrm_resources(bp);
13231 	if (rc)
13232 		goto init_err_pci_clean;
13233 
13234 	mutex_init(&bp->hwrm_cmd_lock);
13235 	mutex_init(&bp->link_lock);
13236 
13237 	rc = bnxt_fw_init_one_p1(bp);
13238 	if (rc)
13239 		goto init_err_pci_clean;
13240 
13241 	if (BNXT_PF(bp))
13242 		bnxt_vpd_read_info(bp);
13243 
13244 	if (BNXT_CHIP_P5(bp)) {
13245 		bp->flags |= BNXT_FLAG_CHIP_P5;
13246 		if (BNXT_CHIP_SR2(bp))
13247 			bp->flags |= BNXT_FLAG_CHIP_SR2;
13248 	}
13249 
13250 	rc = bnxt_alloc_rss_indir_tbl(bp);
13251 	if (rc)
13252 		goto init_err_pci_clean;
13253 
13254 	rc = bnxt_fw_init_one_p2(bp);
13255 	if (rc)
13256 		goto init_err_pci_clean;
13257 
13258 	rc = bnxt_map_db_bar(bp);
13259 	if (rc) {
13260 		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13261 			rc);
13262 		goto init_err_pci_clean;
13263 	}
13264 
13265 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13266 			   NETIF_F_TSO | NETIF_F_TSO6 |
13267 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13268 			   NETIF_F_GSO_IPXIP4 |
13269 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13270 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13271 			   NETIF_F_RXCSUM | NETIF_F_GRO;
13272 
13273 	if (BNXT_SUPPORTS_TPA(bp))
13274 		dev->hw_features |= NETIF_F_LRO;
13275 
13276 	dev->hw_enc_features =
13277 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13278 			NETIF_F_TSO | NETIF_F_TSO6 |
13279 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13280 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13281 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13282 	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13283 
13284 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13285 				    NETIF_F_GSO_GRE_CSUM;
13286 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13287 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13288 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13289 	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13290 		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13291 	if (BNXT_SUPPORTS_TPA(bp))
13292 		dev->hw_features |= NETIF_F_GRO_HW;
13293 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13294 	if (dev->features & NETIF_F_GRO_HW)
13295 		dev->features &= ~NETIF_F_LRO;
13296 	dev->priv_flags |= IFF_UNICAST_FLT;
13297 
13298 #ifdef CONFIG_BNXT_SRIOV
13299 	init_waitqueue_head(&bp->sriov_cfg_wait);
13300 	mutex_init(&bp->sriov_lock);
13301 #endif
13302 	if (BNXT_SUPPORTS_TPA(bp)) {
13303 		bp->gro_func = bnxt_gro_func_5730x;
13304 		if (BNXT_CHIP_P4(bp))
13305 			bp->gro_func = bnxt_gro_func_5731x;
13306 		else if (BNXT_CHIP_P5(bp))
13307 			bp->gro_func = bnxt_gro_func_5750x;
13308 	}
13309 	if (!BNXT_CHIP_P4_PLUS(bp))
13310 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
13311 
13312 	rc = bnxt_init_mac_addr(bp);
13313 	if (rc) {
13314 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13315 		rc = -EADDRNOTAVAIL;
13316 		goto init_err_pci_clean;
13317 	}
13318 
13319 	if (BNXT_PF(bp)) {
13320 		/* Read the adapter's DSN to use as the eswitch switch_id */
13321 		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13322 	}
13323 
13324 	/* MTU range: 60 - FW defined max */
13325 	dev->min_mtu = ETH_ZLEN;
13326 	dev->max_mtu = bp->max_mtu;
13327 
13328 	rc = bnxt_probe_phy(bp, true);
13329 	if (rc)
13330 		goto init_err_pci_clean;
13331 
13332 	bnxt_set_rx_skb_mode(bp, false);
13333 	bnxt_set_tpa_flags(bp);
13334 	bnxt_set_ring_params(bp);
13335 	rc = bnxt_set_dflt_rings(bp, true);
13336 	if (rc) {
13337 		netdev_err(bp->dev, "Not enough rings available.\n");
13338 		rc = -ENOMEM;
13339 		goto init_err_pci_clean;
13340 	}
13341 
13342 	bnxt_fw_init_one_p3(bp);
13343 
13344 	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13345 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
13346 
13347 	rc = bnxt_init_int_mode(bp);
13348 	if (rc)
13349 		goto init_err_pci_clean;
13350 
	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSI-X, so we re-initialize the TX rings per TC.
	 */
13354 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13355 
13356 	if (BNXT_PF(bp)) {
13357 		if (!bnxt_pf_wq) {
13358 			bnxt_pf_wq =
13359 				create_singlethread_workqueue("bnxt_pf_wq");
13360 			if (!bnxt_pf_wq) {
13361 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
13362 				rc = -ENOMEM;
13363 				goto init_err_pci_clean;
13364 			}
13365 		}
13366 		rc = bnxt_init_tc(bp);
13367 		if (rc)
13368 			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13369 				   rc);
13370 	}
13371 
13372 	bnxt_inv_fw_health_reg(bp);
13373 	bnxt_dl_register(bp);
13374 
13375 	rc = register_netdev(dev);
13376 	if (rc)
13377 		goto init_err_cleanup;
13378 
13379 	if (BNXT_PF(bp))
13380 		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13381 	bnxt_dl_fw_reporters_create(bp);
13382 
13383 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13384 		    board_info[ent->driver_data].name,
13385 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
13386 	pcie_print_link_status(pdev);
13387 
13388 	pci_save_state(pdev);
13389 	return 0;
13390 
13391 init_err_cleanup:
13392 	bnxt_dl_unregister(bp);
13393 	bnxt_shutdown_tc(bp);
13394 	bnxt_clear_int_mode(bp);
13395 
13396 init_err_pci_clean:
13397 	bnxt_hwrm_func_drv_unrgtr(bp);
13398 	bnxt_free_hwrm_resources(bp);
13399 	bnxt_ethtool_free(bp);
13400 	bnxt_ptp_clear(bp);
13401 	kfree(bp->ptp_cfg);
13402 	bp->ptp_cfg = NULL;
13403 	kfree(bp->fw_health);
13404 	bp->fw_health = NULL;
13405 	bnxt_cleanup_pci(bp);
13406 	bnxt_free_ctx_mem(bp);
13407 	kfree(bp->ctx);
13408 	bp->ctx = NULL;
13409 	kfree(bp->rss_indir_tbl);
13410 	bp->rss_indir_tbl = NULL;
13411 
13412 init_err_free:
13413 	free_netdev(dev);
13414 	return rc;
13415 }
13416 
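/* PCI shutdown handler: close the device and, on system power-off, arm
 * wake-on-LAN and put the device into D3hot.
 */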
13417 static void bnxt_shutdown(struct pci_dev *pdev)
13418 {
13419 	struct net_device *dev = pci_get_drvdata(pdev);
13420 	struct bnxt *bp;
13421 
13422 	if (!dev)
13423 		return;
13424 
13425 	rtnl_lock();
13426 	bp = netdev_priv(dev);
13427 	if (!bp)
13428 		goto shutdown_exit;
13429 
13430 	if (netif_running(dev))
13431 		dev_close(dev);
13432 
13433 	bnxt_ulp_shutdown(bp);
13434 	bnxt_clear_int_mode(bp);
13435 	pci_disable_device(pdev);
13436 
13437 	if (system_state == SYSTEM_POWER_OFF) {
13438 		pci_wake_from_d3(pdev, bp->wol);
13439 		pci_set_power_state(pdev, PCI_D3hot);
13440 	}
13441 
13442 shutdown_exit:
13443 	rtnl_unlock();
13444 }
13445 
13446 #ifdef CONFIG_PM_SLEEP
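/* System suspend handler: detach and close the netdev, unregister the
 * driver from firmware, and free the context memory so that it can be
 * reallocated on resume.
 */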
13447 static int bnxt_suspend(struct device *device)
13448 {
13449 	struct net_device *dev = dev_get_drvdata(device);
13450 	struct bnxt *bp = netdev_priv(dev);
13451 	int rc = 0;
13452 
13453 	rtnl_lock();
13454 	bnxt_ulp_stop(bp);
13455 	if (netif_running(dev)) {
13456 		netif_device_detach(dev);
13457 		rc = bnxt_close(dev);
13458 	}
13459 	bnxt_hwrm_func_drv_unrgtr(bp);
13460 	pci_disable_device(bp->pdev);
13461 	bnxt_free_ctx_mem(bp);
13462 	kfree(bp->ctx);
13463 	bp->ctx = NULL;
13464 	rtnl_unlock();
13465 	return rc;
13466 }
13467 
13468 static int bnxt_resume(struct device *device)
13469 {
13470 	struct net_device *dev = dev_get_drvdata(device);
13471 	struct bnxt *bp = netdev_priv(dev);
13472 	int rc = 0;
13473 
13474 	rtnl_lock();
13475 	rc = pci_enable_device(bp->pdev);
13476 	if (rc) {
13477 		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13478 			   rc);
13479 		goto resume_exit;
13480 	}
13481 	pci_set_master(bp->pdev);
13482 	if (bnxt_hwrm_ver_get(bp)) {
13483 		rc = -ENODEV;
13484 		goto resume_exit;
13485 	}
13486 	rc = bnxt_hwrm_func_reset(bp);
13487 	if (rc) {
13488 		rc = -EBUSY;
13489 		goto resume_exit;
13490 	}
13491 
13492 	rc = bnxt_hwrm_func_qcaps(bp);
13493 	if (rc)
13494 		goto resume_exit;
13495 
13496 	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13497 		rc = -ENODEV;
13498 		goto resume_exit;
13499 	}
13500 
13501 	bnxt_get_wol_settings(bp);
13502 	if (netif_running(dev)) {
13503 		rc = bnxt_open(dev);
13504 		if (!rc)
13505 			netif_device_attach(dev);
13506 	}
13507 
13508 resume_exit:
13509 	bnxt_ulp_start(bp, rc);
13510 	if (!rc)
13511 		bnxt_reenable_sriov(bp);
13512 	rtnl_unlock();
13513 	return rc;
13514 }
13515 
13516 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13517 #define BNXT_PM_OPS (&bnxt_pm_ops)
13518 
13519 #else
13520 
13521 #define BNXT_PM_OPS NULL
13522 
13523 #endif /* CONFIG_PM_SLEEP */
13524 
13525 /**
13526  * bnxt_io_error_detected - called when PCI error is detected
13527  * @pdev: Pointer to PCI device
13528  * @state: The current pci connection state
13529  *
13530  * This function is called after a PCI bus error affecting
13531  * this device has been detected.
13532  */
13533 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13534 					       pci_channel_state_t state)
13535 {
13536 	struct net_device *netdev = pci_get_drvdata(pdev);
13537 	struct bnxt *bp = netdev_priv(netdev);
13538 
13539 	netdev_info(netdev, "PCI I/O error detected\n");
13540 
13541 	rtnl_lock();
13542 	netif_device_detach(netdev);
13543 
13544 	bnxt_ulp_stop(bp);
13545 
13546 	if (state == pci_channel_io_perm_failure) {
13547 		rtnl_unlock();
13548 		return PCI_ERS_RESULT_DISCONNECT;
13549 	}
13550 
13551 	if (state == pci_channel_io_frozen)
13552 		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13553 
13554 	if (netif_running(netdev))
13555 		bnxt_close(netdev);
13556 
13557 	if (pci_is_enabled(pdev))
13558 		pci_disable_device(pdev);
13559 	bnxt_free_ctx_mem(bp);
13560 	kfree(bp->ctx);
13561 	bp->ctx = NULL;
13562 	rtnl_unlock();
13563 
	/* Request a slot reset. */
13565 	return PCI_ERS_RESULT_NEED_RESET;
13566 }
13567 
13568 /**
13569  * bnxt_io_slot_reset - called after the pci bus has been reset.
13570  * @pdev: Pointer to PCI device
13571  *
13572  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
13574  * followed by fixups by BIOS, and has its config space
13575  * set up identically to what it was at cold boot.
13576  */
13577 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13578 {
13579 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13580 	struct net_device *netdev = pci_get_drvdata(pdev);
13581 	struct bnxt *bp = netdev_priv(netdev);
13582 	int err = 0, off;
13583 
13584 	netdev_info(bp->dev, "PCI Slot Reset\n");
13585 
13586 	rtnl_lock();
13587 
13588 	if (pci_enable_device(pdev)) {
13589 		dev_err(&pdev->dev,
13590 			"Cannot re-enable PCI device after reset.\n");
13591 	} else {
13592 		pci_set_master(pdev);
		/* Upon a fatal error, the device's internal logic that
		 * latches the BAR values is reset and is restored only by
		 * rewriting the BARs.
		 *
		 * Since pci_restore_state() does not rewrite a BAR whose
		 * value matches the previously saved value, the driver must
		 * write the BARs to 0 to force a restore after a fatal error.
		 */
13601 		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13602 				       &bp->state)) {
13603 			for (off = PCI_BASE_ADDRESS_0;
13604 			     off <= PCI_BASE_ADDRESS_5; off += 4)
13605 				pci_write_config_dword(bp->pdev, off, 0);
13606 		}
13607 		pci_restore_state(pdev);
13608 		pci_save_state(pdev);
13609 
13610 		err = bnxt_hwrm_func_reset(bp);
13611 		if (!err)
13612 			result = PCI_ERS_RESULT_RECOVERED;
13613 	}
13614 
13615 	rtnl_unlock();
13616 
13617 	return result;
13618 }
13619 
13620 /**
13621  * bnxt_io_resume - called when traffic can start flowing again.
13622  * @pdev: Pointer to PCI device
13623  *
13624  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
13626  */
13627 static void bnxt_io_resume(struct pci_dev *pdev)
13628 {
13629 	struct net_device *netdev = pci_get_drvdata(pdev);
13630 	struct bnxt *bp = netdev_priv(netdev);
13631 	int err;
13632 
13633 	netdev_info(bp->dev, "PCI Slot Resume\n");
13634 	rtnl_lock();
13635 
13636 	err = bnxt_hwrm_func_qcaps(bp);
13637 	if (!err && netif_running(netdev))
13638 		err = bnxt_open(netdev);
13639 
13640 	bnxt_ulp_start(bp, err);
13641 	if (!err) {
13642 		bnxt_reenable_sriov(bp);
13643 		netif_device_attach(netdev);
13644 	}
13645 
13646 	rtnl_unlock();
13647 }
13648 
13649 static const struct pci_error_handlers bnxt_err_handler = {
13650 	.error_detected	= bnxt_io_error_detected,
13651 	.slot_reset	= bnxt_io_slot_reset,
13652 	.resume		= bnxt_io_resume
13653 };
13654 
13655 static struct pci_driver bnxt_pci_driver = {
13656 	.name		= DRV_MODULE_NAME,
13657 	.id_table	= bnxt_pci_tbl,
13658 	.probe		= bnxt_init_one,
13659 	.remove		= bnxt_remove_one,
13660 	.shutdown	= bnxt_shutdown,
13661 	.driver.pm	= BNXT_PM_OPS,
13662 	.err_handler	= &bnxt_err_handler,
13663 #if defined(CONFIG_BNXT_SRIOV)
13664 	.sriov_configure = bnxt_sriov_configure,
13665 #endif
13666 };
13667 
13668 static int __init bnxt_init(void)
13669 {
13670 	bnxt_debug_init();
13671 	return pci_register_driver(&bnxt_pci_driver);
13672 }
13673 
13674 static void __exit bnxt_exit(void)
13675 {
13676 	pci_unregister_driver(&bnxt_pci_driver);
13677 	if (bnxt_pf_wq)
13678 		destroy_workqueue(bnxt_pf_wq);
13679 	bnxt_debug_exit();
13680 }
13681 
13682 module_init(bnxt_init);
13683 module_exit(bnxt_exit);
13684