1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54 #include <linux/hwmon.h>
55 #include <linux/hwmon-sysfs.h>
56 
57 #include "bnxt_hsi.h"
58 #include "bnxt.h"
59 #include "bnxt_ulp.h"
60 #include "bnxt_sriov.h"
61 #include "bnxt_ethtool.h"
62 #include "bnxt_dcb.h"
63 #include "bnxt_xdp.h"
64 #include "bnxt_vfr.h"
65 #include "bnxt_tc.h"
66 #include "bnxt_devlink.h"
67 #include "bnxt_debugfs.h"
68 
69 #define BNXT_TX_TIMEOUT		(5 * HZ)
70 
71 static const char version[] =
72 	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
73 
74 MODULE_LICENSE("GPL");
75 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
76 MODULE_VERSION(DRV_MODULE_VERSION);
77 
78 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
79 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
80 #define BNXT_RX_COPY_THRESH 256
81 
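/* Frames no longer than this many bytes are candidates for the inline TX
 * push path in bnxt_start_xmit() below (bp->tx_push_thresh is derived from
 * this value when the device supports push mode).
 */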
82 #define BNXT_TX_PUSH_THRESH 164
83 
84 enum board_idx {
85 	BCM57301,
86 	BCM57302,
87 	BCM57304,
88 	BCM57417_NPAR,
89 	BCM58700,
90 	BCM57311,
91 	BCM57312,
92 	BCM57402,
93 	BCM57404,
94 	BCM57406,
95 	BCM57402_NPAR,
96 	BCM57407,
97 	BCM57412,
98 	BCM57414,
99 	BCM57416,
100 	BCM57417,
101 	BCM57412_NPAR,
102 	BCM57314,
103 	BCM57417_SFP,
104 	BCM57416_SFP,
105 	BCM57404_NPAR,
106 	BCM57406_NPAR,
107 	BCM57407_SFP,
108 	BCM57407_NPAR,
109 	BCM57414_NPAR,
110 	BCM57416_NPAR,
111 	BCM57452,
112 	BCM57454,
113 	BCM5745x_NPAR,
114 	BCM57508,
115 	BCM58802,
116 	BCM58804,
117 	BCM58808,
118 	NETXTREME_E_VF,
119 	NETXTREME_C_VF,
120 	NETXTREME_S_VF,
121 	NETXTREME_E_P5_VF,
122 };
123 
124 /* indexed by enum above */
125 static const struct {
126 	char *name;
127 } board_info[] = {
128 	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
129 	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
130 	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
131 	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
132 	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
133 	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
134 	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
135 	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
136 	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
137 	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
138 	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
139 	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
140 	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
141 	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
142 	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
143 	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
144 	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
145 	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
146 	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
147 	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
148 	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
149 	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
150 	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
151 	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
152 	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
153 	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
154 	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
155 	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
156 	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
157 	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
158 	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
159 	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
160 	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
161 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
162 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
163 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
164 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
165 };
166 
167 static const struct pci_device_id bnxt_pci_tbl[] = {
168 	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
169 	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
170 	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
171 	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
172 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
173 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
174 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
175 	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
176 	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
177 	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
178 	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
179 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
180 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
181 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
182 	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
183 	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
184 	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
185 	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
186 	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
187 	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
188 	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
189 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
190 	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
191 	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
192 	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
193 	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
194 	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
195 	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
196 	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
197 	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
198 	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
199 	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
200 	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
201 	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
202 	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
203 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
204 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
205 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
206 #ifdef CONFIG_BNXT_SRIOV
207 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
208 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
209 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
210 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
211 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
212 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
213 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
214 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
215 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
216 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
217 #endif
218 	{ 0 }
219 };
220 
221 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
222 
223 static const u16 bnxt_vf_req_snif[] = {
224 	HWRM_FUNC_CFG,
225 	HWRM_FUNC_VF_CFG,
226 	HWRM_PORT_PHY_QCFG,
227 	HWRM_CFA_L2_FILTER_ALLOC,
228 };
229 
230 static const u16 bnxt_async_events_arr[] = {
231 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
232 	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
233 	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
234 	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
235 	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
236 };
237 
238 static struct workqueue_struct *bnxt_pf_wq;
239 
240 static bool bnxt_vf_pciid(enum board_idx idx)
241 {
242 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
243 		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
244 }
245 
246 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
247 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
248 #define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
249 
250 #define BNXT_CP_DB_IRQ_DIS(db)						\
251 		writel(DB_CP_IRQ_DIS_FLAGS, db)
252 
253 #define BNXT_DB_CQ(db, idx)						\
254 	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
255 
256 #define BNXT_DB_NQ_P5(db, idx)						\
257 	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
258 
259 #define BNXT_DB_CQ_ARM(db, idx)						\
260 	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
261 
262 #define BNXT_DB_NQ_ARM_P5(db, idx)					\
263 	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
264 
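/* Doorbell helpers: on 57500 (P5) chips the NQ/CQ doorbells are single 64-bit
 * writes built from db_key64, while older chips use the 32-bit completion
 * ring doorbell formats defined above.
 */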
265 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
266 {
267 	if (bp->flags & BNXT_FLAG_CHIP_P5)
268 		BNXT_DB_NQ_P5(db, idx);
269 	else
270 		BNXT_DB_CQ(db, idx);
271 }
272 
273 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
274 {
275 	if (bp->flags & BNXT_FLAG_CHIP_P5)
276 		BNXT_DB_NQ_ARM_P5(db, idx);
277 	else
278 		BNXT_DB_CQ_ARM(db, idx);
279 }
280 
281 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
282 {
283 	if (bp->flags & BNXT_FLAG_CHIP_P5)
284 		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
285 		       db->doorbell);
286 	else
287 		BNXT_DB_CQ(db, idx);
288 }
289 
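/* TX length hint table.  bnxt_start_xmit() indexes it with the frame length
 * (or MSS plus header length for GSO frames) divided by 512, i.e. length >> 9;
 * e.g. a 1400-byte frame maps to entry 2, TX_BD_FLAGS_LHINT_1024_TO_2047.
 * The 19 entries cover lengths up to just under 9728 bytes, so every
 * supported frame size has an entry.
 */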
290 const u16 bnxt_lhint_arr[] = {
291 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
292 	TX_BD_FLAGS_LHINT_512_TO_1023,
293 	TX_BD_FLAGS_LHINT_1024_TO_2047,
294 	TX_BD_FLAGS_LHINT_1024_TO_2047,
295 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
296 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
297 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
298 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
299 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
300 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310 };
311 
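/* Return the CFA action/port id carried in the skb's HW_PORT_MUX metadata
 * dst, if any.  This is typically attached by the VF representor TX path (see
 * bnxt_vfr.c) so that the hardware switches the frame to the corresponding
 * VF; 0 means a normal, non-switched transmit.
 */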
312 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
313 {
314 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
315 
316 	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
317 		return 0;
318 
319 	return md_dst->u.port_info.port_id;
320 }
321 
322 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
323 {
324 	struct bnxt *bp = netdev_priv(dev);
325 	struct tx_bd *txbd;
326 	struct tx_bd_ext *txbd1;
327 	struct netdev_queue *txq;
328 	int i;
329 	dma_addr_t mapping;
330 	unsigned int length, pad = 0;
331 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
332 	u16 prod, last_frag;
333 	struct pci_dev *pdev = bp->pdev;
334 	struct bnxt_tx_ring_info *txr;
335 	struct bnxt_sw_tx_bd *tx_buf;
336 
337 	i = skb_get_queue_mapping(skb);
338 	if (unlikely(i >= bp->tx_nr_rings)) {
339 		dev_kfree_skb_any(skb);
340 		return NETDEV_TX_OK;
341 	}
342 
343 	txq = netdev_get_tx_queue(dev, i);
344 	txr = &bp->tx_ring[bp->tx_ring_map[i]];
345 	prod = txr->tx_prod;
346 
347 	free_size = bnxt_tx_avail(bp, txr);
348 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
349 		netif_tx_stop_queue(txq);
350 		return NETDEV_TX_BUSY;
351 	}
352 
353 	length = skb->len;
354 	len = skb_headlen(skb);
355 	last_frag = skb_shinfo(skb)->nr_frags;
356 
357 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
358 
359 	txbd->tx_bd_opaque = prod;
360 
361 	tx_buf = &txr->tx_buf_ring[prod];
362 	tx_buf->skb = skb;
363 	tx_buf->nr_frags = last_frag;
364 
365 	vlan_tag_flags = 0;
366 	cfa_action = bnxt_xmit_get_cfa_action(skb);
367 	if (skb_vlan_tag_present(skb)) {
368 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
369 				 skb_vlan_tag_get(skb);
370 		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
371 		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
372 		 */
373 		if (skb->vlan_proto == htons(ETH_P_8021Q))
374 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
375 	}
376 
377 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
378 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
379 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
380 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
381 		void __iomem *db = txr->tx_db.doorbell;
382 		void *pdata = tx_push_buf->data;
383 		u64 *end;
384 		int j, push_len;
385 
386 		/* Set COAL_NOW to be ready quickly for the next push */
387 		tx_push->tx_bd_len_flags_type =
388 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
389 					TX_BD_TYPE_LONG_TX_BD |
390 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
391 					TX_BD_FLAGS_COAL_NOW |
392 					TX_BD_FLAGS_PACKET_END |
393 					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
394 
395 		if (skb->ip_summed == CHECKSUM_PARTIAL)
396 			tx_push1->tx_bd_hsize_lflags =
397 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
398 		else
399 			tx_push1->tx_bd_hsize_lflags = 0;
400 
401 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
402 		tx_push1->tx_bd_cfa_action =
403 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
404 
405 		end = pdata + length;
406 		end = PTR_ALIGN(end, 8) - 1;
407 		*end = 0;
408 
409 		skb_copy_from_linear_data(skb, pdata, len);
410 		pdata += len;
411 		for (j = 0; j < last_frag; j++) {
412 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
413 			void *fptr;
414 
415 			fptr = skb_frag_address_safe(frag);
416 			if (!fptr)
417 				goto normal_tx;
418 
419 			memcpy(pdata, fptr, skb_frag_size(frag));
420 			pdata += skb_frag_size(frag);
421 		}
422 
423 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
424 		txbd->tx_bd_haddr = txr->data_mapping;
425 		prod = NEXT_TX(prod);
426 		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
427 		memcpy(txbd, tx_push1, sizeof(*txbd));
428 		prod = NEXT_TX(prod);
429 		tx_push->doorbell =
430 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
431 		txr->tx_prod = prod;
432 
433 		tx_buf->is_push = 1;
434 		netdev_tx_sent_queue(txq, skb->len);
435 		wmb();	/* Sync is_push and byte queue before pushing data */
436 
437 		push_len = (length + sizeof(*tx_push) + 7) / 8;
438 		if (push_len > 16) {
439 			__iowrite64_copy(db, tx_push_buf, 16);
440 			__iowrite32_copy(db + 4, tx_push_buf + 1,
441 					 (push_len - 16) << 1);
442 		} else {
443 			__iowrite64_copy(db, tx_push_buf, push_len);
444 		}
445 
446 		goto tx_done;
447 	}
448 
449 normal_tx:
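	/* Frames shorter than BNXT_MIN_PKT_SIZE are zero-padded so the
	 * hardware never sees a runt frame.  Note that skb_pad() frees the
	 * skb itself on failure, which is why only tx_buf->skb is cleared in
	 * that case.
	 */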
450 	if (length < BNXT_MIN_PKT_SIZE) {
451 		pad = BNXT_MIN_PKT_SIZE - length;
452 		if (skb_pad(skb, pad)) {
453 			/* SKB already freed. */
454 			tx_buf->skb = NULL;
455 			return NETDEV_TX_OK;
456 		}
457 		length = BNXT_MIN_PKT_SIZE;
458 	}
459 
460 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
461 
462 	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
463 		dev_kfree_skb_any(skb);
464 		tx_buf->skb = NULL;
465 		return NETDEV_TX_OK;
466 	}
467 
468 	dma_unmap_addr_set(tx_buf, mapping, mapping);
469 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
470 		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
471 
472 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
473 
474 	prod = NEXT_TX(prod);
475 	txbd1 = (struct tx_bd_ext *)
476 		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
477 
478 	txbd1->tx_bd_hsize_lflags = 0;
479 	if (skb_is_gso(skb)) {
480 		u32 hdr_len;
481 
482 		if (skb->encapsulation)
483 			hdr_len = skb_inner_network_offset(skb) +
484 				skb_inner_network_header_len(skb) +
485 				inner_tcp_hdrlen(skb);
486 		else
487 			hdr_len = skb_transport_offset(skb) +
488 				tcp_hdrlen(skb);
489 
490 		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
491 					TX_BD_FLAGS_T_IPID |
492 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
493 		length = skb_shinfo(skb)->gso_size;
494 		txbd1->tx_bd_mss = cpu_to_le32(length);
495 		length += hdr_len;
496 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
497 		txbd1->tx_bd_hsize_lflags =
498 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
499 		txbd1->tx_bd_mss = 0;
500 	}
501 
502 	length >>= 9;
503 	flags |= bnxt_lhint_arr[length];
504 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
505 
506 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
507 	txbd1->tx_bd_cfa_action =
508 			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
509 	for (i = 0; i < last_frag; i++) {
510 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
511 
512 		prod = NEXT_TX(prod);
513 		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
514 
515 		len = skb_frag_size(frag);
516 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
517 					   DMA_TO_DEVICE);
518 
519 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
520 			goto tx_dma_error;
521 
522 		tx_buf = &txr->tx_buf_ring[prod];
523 		dma_unmap_addr_set(tx_buf, mapping, mapping);
524 
525 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
526 
527 		flags = len << TX_BD_LEN_SHIFT;
528 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
529 	}
530 
531 	flags &= ~TX_BD_LEN;
532 	txbd->tx_bd_len_flags_type =
533 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
534 			    TX_BD_FLAGS_PACKET_END);
535 
536 	netdev_tx_sent_queue(txq, skb->len);
537 
538 	/* Sync BD data before updating doorbell */
539 	wmb();
540 
541 	prod = NEXT_TX(prod);
542 	txr->tx_prod = prod;
543 
544 	if (!skb->xmit_more || netif_xmit_stopped(txq))
545 		bnxt_db_write(bp, &txr->tx_db, prod);
546 
547 tx_done:
548 
549 	mmiowb();
550 
551 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
552 		if (skb->xmit_more && !tx_buf->is_push)
553 			bnxt_db_write(bp, &txr->tx_db, prod);
554 
555 		netif_tx_stop_queue(txq);
556 
557 		/* netif_tx_stop_queue() must be done before checking
558 		 * tx index in bnxt_tx_avail() below, because in
559 		 * bnxt_tx_int(), we update tx index before checking for
560 		 * netif_tx_queue_stopped().
561 		 */
562 		smp_mb();
563 		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
564 			netif_tx_wake_queue(txq);
565 	}
566 	return NETDEV_TX_OK;
567 
568 tx_dma_error:
569 	last_frag = i;
570 
571 	/* start back at beginning and unmap skb */
572 	prod = txr->tx_prod;
573 	tx_buf = &txr->tx_buf_ring[prod];
574 	tx_buf->skb = NULL;
575 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
576 			 skb_headlen(skb), PCI_DMA_TODEVICE);
577 	prod = NEXT_TX(prod);
578 
579 	/* unmap remaining mapped pages */
580 	for (i = 0; i < last_frag; i++) {
581 		prod = NEXT_TX(prod);
582 		tx_buf = &txr->tx_buf_ring[prod];
583 		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
584 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
585 			       PCI_DMA_TODEVICE);
586 	}
587 
588 	dev_kfree_skb_any(skb);
589 	return NETDEV_TX_OK;
590 }
591 
592 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
593 {
594 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
595 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
596 	u16 cons = txr->tx_cons;
597 	struct pci_dev *pdev = bp->pdev;
598 	int i;
599 	unsigned int tx_bytes = 0;
600 
601 	for (i = 0; i < nr_pkts; i++) {
602 		struct bnxt_sw_tx_bd *tx_buf;
603 		struct sk_buff *skb;
604 		int j, last;
605 
606 		tx_buf = &txr->tx_buf_ring[cons];
607 		cons = NEXT_TX(cons);
608 		skb = tx_buf->skb;
609 		tx_buf->skb = NULL;
610 
611 		if (tx_buf->is_push) {
612 			tx_buf->is_push = 0;
613 			goto next_tx_int;
614 		}
615 
616 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
617 				 skb_headlen(skb), PCI_DMA_TODEVICE);
618 		last = tx_buf->nr_frags;
619 
620 		for (j = 0; j < last; j++) {
621 			cons = NEXT_TX(cons);
622 			tx_buf = &txr->tx_buf_ring[cons];
623 			dma_unmap_page(&pdev->dev,
624 				       dma_unmap_addr(tx_buf, mapping),
625 				       skb_frag_size(&skb_shinfo(skb)->frags[j]),
626 				       PCI_DMA_TODEVICE);
628 		}
629 
630 next_tx_int:
631 		cons = NEXT_TX(cons);
632 
633 		tx_bytes += skb->len;
634 		dev_kfree_skb_any(skb);
635 	}
636 
637 	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
638 	txr->tx_cons = cons;
639 
640 	/* Need to make the tx_cons update visible to bnxt_start_xmit()
641 	 * before checking for netif_tx_queue_stopped().  Without the
642 	 * memory barrier, there is a small possibility that bnxt_start_xmit()
643 	 * will miss it and cause the queue to be stopped forever.
644 	 */
645 	smp_mb();
646 
647 	if (unlikely(netif_tx_queue_stopped(txq)) &&
648 	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
649 		__netif_tx_lock(txq, smp_processor_id());
650 		if (netif_tx_queue_stopped(txq) &&
651 		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
652 		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
653 			netif_tx_wake_queue(txq);
654 		__netif_tx_unlock(txq);
655 	}
656 }
657 
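/* Allocate and DMA-map a full page for the RX ring.  The returned mapping has
 * bp->rx_dma_offset added to it, so consumers that unmap the page (e.g.
 * bnxt_rx_page_skb()) subtract the offset again before calling the DMA API.
 */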
658 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
659 					 gfp_t gfp)
660 {
661 	struct device *dev = &bp->pdev->dev;
662 	struct page *page;
663 
664 	page = alloc_page(gfp);
665 	if (!page)
666 		return NULL;
667 
668 	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
669 				      DMA_ATTR_WEAK_ORDERING);
670 	if (dma_mapping_error(dev, *mapping)) {
671 		__free_page(page);
672 		return NULL;
673 	}
674 	*mapping += bp->rx_dma_offset;
675 	return page;
676 }
677 
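/* Allocate an RX buffer for the non page-mode path.  Only the region starting
 * at data + bp->rx_dma_offset (bp->rx_buf_use_size bytes) is DMA-mapped; the
 * leading bytes are left as headroom for build_skb()/skb_reserve() in
 * bnxt_rx_skb().
 */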
678 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
679 				       gfp_t gfp)
680 {
681 	u8 *data;
682 	struct pci_dev *pdev = bp->pdev;
683 
684 	data = kmalloc(bp->rx_buf_size, gfp);
685 	if (!data)
686 		return NULL;
687 
688 	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
689 					bp->rx_buf_use_size, bp->rx_dir,
690 					DMA_ATTR_WEAK_ORDERING);
691 
692 	if (dma_mapping_error(&pdev->dev, *mapping)) {
693 		kfree(data);
694 		data = NULL;
695 	}
696 	return data;
697 }
698 
699 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
700 		       u16 prod, gfp_t gfp)
701 {
702 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
703 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
704 	dma_addr_t mapping;
705 
706 	if (BNXT_RX_PAGE_MODE(bp)) {
707 		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
708 
709 		if (!page)
710 			return -ENOMEM;
711 
712 		rx_buf->data = page;
713 		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
714 	} else {
715 		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
716 
717 		if (!data)
718 			return -ENOMEM;
719 
720 		rx_buf->data = data;
721 		rx_buf->data_ptr = data + bp->rx_offset;
722 	}
723 	rx_buf->mapping = mapping;
724 
725 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
726 	return 0;
727 }
728 
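/* Recycle a just-consumed RX buffer by re-posting it at the current producer
 * index.  Used on error and copy paths so that no new allocation is needed to
 * keep the ring full.
 */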
729 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
730 {
731 	u16 prod = rxr->rx_prod;
732 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
733 	struct rx_bd *cons_bd, *prod_bd;
734 
735 	prod_rx_buf = &rxr->rx_buf_ring[prod];
736 	cons_rx_buf = &rxr->rx_buf_ring[cons];
737 
738 	prod_rx_buf->data = data;
739 	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
740 
741 	prod_rx_buf->mapping = cons_rx_buf->mapping;
742 
743 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
744 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
745 
746 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
747 }
748 
749 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
750 {
751 	u16 next, max = rxr->rx_agg_bmap_size;
752 
753 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
754 	if (next >= max)
755 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
756 	return next;
757 }
758 
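/* Allocate one aggregation ring buffer.  On systems where PAGE_SIZE is larger
 * than BNXT_RX_PAGE_SIZE, a single page is carved into multiple aggregation
 * buffers: each chunk takes an extra page reference except the last, which
 * inherits the original reference when rxr->rx_page is dropped.
 */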
759 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
760 				     struct bnxt_rx_ring_info *rxr,
761 				     u16 prod, gfp_t gfp)
762 {
763 	struct rx_bd *rxbd =
764 		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
765 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
766 	struct pci_dev *pdev = bp->pdev;
767 	struct page *page;
768 	dma_addr_t mapping;
769 	u16 sw_prod = rxr->rx_sw_agg_prod;
770 	unsigned int offset = 0;
771 
772 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
773 		page = rxr->rx_page;
774 		if (!page) {
775 			page = alloc_page(gfp);
776 			if (!page)
777 				return -ENOMEM;
778 			rxr->rx_page = page;
779 			rxr->rx_page_offset = 0;
780 		}
781 		offset = rxr->rx_page_offset;
782 		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
783 		if (rxr->rx_page_offset == PAGE_SIZE)
784 			rxr->rx_page = NULL;
785 		else
786 			get_page(page);
787 	} else {
788 		page = alloc_page(gfp);
789 		if (!page)
790 			return -ENOMEM;
791 	}
792 
793 	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
794 				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
795 				     DMA_ATTR_WEAK_ORDERING);
796 	if (dma_mapping_error(&pdev->dev, mapping)) {
797 		__free_page(page);
798 		return -EIO;
799 	}
800 
801 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
802 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
803 
804 	__set_bit(sw_prod, rxr->rx_agg_bmap);
805 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
806 	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
807 
808 	rx_agg_buf->page = page;
809 	rx_agg_buf->offset = offset;
810 	rx_agg_buf->mapping = mapping;
811 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
812 	rxbd->rx_bd_opaque = sw_prod;
813 	return 0;
814 }
815 
816 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
817 				   u32 agg_bufs)
818 {
819 	struct bnxt_napi *bnapi = cpr->bnapi;
820 	struct bnxt *bp = bnapi->bp;
821 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
822 	u16 prod = rxr->rx_agg_prod;
823 	u16 sw_prod = rxr->rx_sw_agg_prod;
824 	u32 i;
825 
826 	for (i = 0; i < agg_bufs; i++) {
827 		u16 cons;
828 		struct rx_agg_cmp *agg;
829 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
830 		struct rx_bd *prod_bd;
831 		struct page *page;
832 
833 		agg = (struct rx_agg_cmp *)
834 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
835 		cons = agg->rx_agg_cmp_opaque;
836 		__clear_bit(cons, rxr->rx_agg_bmap);
837 
838 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
839 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
840 
841 		__set_bit(sw_prod, rxr->rx_agg_bmap);
842 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
843 		cons_rx_buf = &rxr->rx_agg_ring[cons];
844 
845 		/* It is possible for sw_prod to be equal to cons, so
846 		 * set cons_rx_buf->page to NULL first.
847 		 */
848 		page = cons_rx_buf->page;
849 		cons_rx_buf->page = NULL;
850 		prod_rx_buf->page = page;
851 		prod_rx_buf->offset = cons_rx_buf->offset;
852 
853 		prod_rx_buf->mapping = cons_rx_buf->mapping;
854 
855 		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
856 
857 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
858 		prod_bd->rx_bd_opaque = sw_prod;
859 
860 		prod = NEXT_RX_AGG(prod);
861 		sw_prod = NEXT_RX_AGG(sw_prod);
862 		cp_cons = NEXT_CMP(cp_cons);
863 	}
864 	rxr->rx_agg_prod = prod;
865 	rxr->rx_sw_agg_prod = sw_prod;
866 }
867 
868 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
869 					struct bnxt_rx_ring_info *rxr,
870 					u16 cons, void *data, u8 *data_ptr,
871 					dma_addr_t dma_addr,
872 					unsigned int offset_and_len)
873 {
874 	unsigned int payload = offset_and_len >> 16;
875 	unsigned int len = offset_and_len & 0xffff;
876 	struct skb_frag_struct *frag;
877 	struct page *page = data;
878 	u16 prod = rxr->rx_prod;
879 	struct sk_buff *skb;
880 	int off, err;
881 
882 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
883 	if (unlikely(err)) {
884 		bnxt_reuse_rx_data(rxr, cons, data);
885 		return NULL;
886 	}
887 	dma_addr -= bp->rx_dma_offset;
888 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
889 			     DMA_ATTR_WEAK_ORDERING);
890 
891 	if (unlikely(!payload))
892 		payload = eth_get_headlen(data_ptr, len);
893 
894 	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
895 	if (!skb) {
896 		__free_page(page);
897 		return NULL;
898 	}
899 
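	/* Pull the protocol headers ("payload" bytes, from the hardware hint
	 * or eth_get_headlen() above) into the skb linear area, then trim the
	 * same number of bytes from the head of the page fragment.
	 */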
900 	off = (void *)data_ptr - page_address(page);
901 	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
902 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
903 	       payload + NET_IP_ALIGN);
904 
905 	frag = &skb_shinfo(skb)->frags[0];
906 	skb_frag_size_sub(frag, payload);
907 	frag->page_offset += payload;
908 	skb->data_len -= payload;
909 	skb->tail += payload;
910 
911 	return skb;
912 }
913 
914 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
915 				   struct bnxt_rx_ring_info *rxr, u16 cons,
916 				   void *data, u8 *data_ptr,
917 				   dma_addr_t dma_addr,
918 				   unsigned int offset_and_len)
919 {
920 	u16 prod = rxr->rx_prod;
921 	struct sk_buff *skb;
922 	int err;
923 
924 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
925 	if (unlikely(err)) {
926 		bnxt_reuse_rx_data(rxr, cons, data);
927 		return NULL;
928 	}
929 
930 	skb = build_skb(data, 0);
931 	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
932 			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
933 	if (!skb) {
934 		kfree(data);
935 		return NULL;
936 	}
937 
938 	skb_reserve(skb, bp->rx_offset);
939 	skb_put(skb, offset_and_len & 0xffff);
940 	return skb;
941 }
942 
943 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
944 				     struct bnxt_cp_ring_info *cpr,
945 				     struct sk_buff *skb, u16 cp_cons,
946 				     u32 agg_bufs)
947 {
948 	struct bnxt_napi *bnapi = cpr->bnapi;
949 	struct pci_dev *pdev = bp->pdev;
950 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
951 	u16 prod = rxr->rx_agg_prod;
952 	u32 i;
953 
954 	for (i = 0; i < agg_bufs; i++) {
955 		u16 cons, frag_len;
956 		struct rx_agg_cmp *agg;
957 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
958 		struct page *page;
959 		dma_addr_t mapping;
960 
961 		agg = (struct rx_agg_cmp *)
962 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
963 		cons = agg->rx_agg_cmp_opaque;
964 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
965 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
966 
967 		cons_rx_buf = &rxr->rx_agg_ring[cons];
968 		skb_fill_page_desc(skb, i, cons_rx_buf->page,
969 				   cons_rx_buf->offset, frag_len);
970 		__clear_bit(cons, rxr->rx_agg_bmap);
971 
972 		/* It is possible for bnxt_alloc_rx_page() to allocate
973 		 * a sw_prod index that equals the cons index, so we
974 		 * need to clear the cons entry now.
975 		 */
976 		mapping = cons_rx_buf->mapping;
977 		page = cons_rx_buf->page;
978 		cons_rx_buf->page = NULL;
979 
980 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
981 			struct skb_shared_info *shinfo;
982 			unsigned int nr_frags;
983 
984 			shinfo = skb_shinfo(skb);
985 			nr_frags = --shinfo->nr_frags;
986 			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
987 
988 			dev_kfree_skb(skb);
989 
990 			cons_rx_buf->page = page;
991 
992 			/* Update prod since possibly some pages have been
993 			 * allocated already.
994 			 */
995 			rxr->rx_agg_prod = prod;
996 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
997 			return NULL;
998 		}
999 
1000 		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1001 				     PCI_DMA_FROMDEVICE,
1002 				     DMA_ATTR_WEAK_ORDERING);
1003 
1004 		skb->data_len += frag_len;
1005 		skb->len += frag_len;
1006 		skb->truesize += PAGE_SIZE;
1007 
1008 		prod = NEXT_RX_AGG(prod);
1009 		cp_cons = NEXT_CMP(cp_cons);
1010 	}
1011 	rxr->rx_agg_prod = prod;
1012 	return skb;
1013 }
1014 
1015 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1016 			       u8 agg_bufs, u32 *raw_cons)
1017 {
1018 	u16 last;
1019 	struct rx_agg_cmp *agg;
1020 
1021 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1022 	last = RING_CMP(*raw_cons);
1023 	agg = (struct rx_agg_cmp *)
1024 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1025 	return RX_AGG_CMP_VALID(agg, *raw_cons);
1026 }
1027 
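/* Copy a small packet (len <= bp->rx_copy_thresh, i.e. BNXT_RX_COPY_THRESH
 * bytes) into a freshly allocated skb.  The original buffer stays mapped so
 * the caller can simply recycle it, avoiding an unmap/realloc per packet.
 */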
1028 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1029 					    unsigned int len,
1030 					    dma_addr_t mapping)
1031 {
1032 	struct bnxt *bp = bnapi->bp;
1033 	struct pci_dev *pdev = bp->pdev;
1034 	struct sk_buff *skb;
1035 
1036 	skb = napi_alloc_skb(&bnapi->napi, len);
1037 	if (!skb)
1038 		return NULL;
1039 
1040 	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1041 				bp->rx_dir);
1042 
1043 	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1044 	       len + NET_IP_ALIGN);
1045 
1046 	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1047 				   bp->rx_dir);
1048 
1049 	skb_put(skb, len);
1050 	return skb;
1051 }
1052 
1053 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1054 			   u32 *raw_cons, void *cmp)
1055 {
1056 	struct rx_cmp *rxcmp = cmp;
1057 	u32 tmp_raw_cons = *raw_cons;
1058 	u8 cmp_type, agg_bufs = 0;
1059 
1060 	cmp_type = RX_CMP_TYPE(rxcmp);
1061 
1062 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1063 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1064 			    RX_CMP_AGG_BUFS) >>
1065 			   RX_CMP_AGG_BUFS_SHIFT;
1066 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1067 		struct rx_tpa_end_cmp *tpa_end = cmp;
1068 
1069 		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1070 			    RX_TPA_END_CMP_AGG_BUFS) >>
1071 			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1072 	}
1073 
1074 	if (agg_bufs) {
1075 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1076 			return -EBUSY;
1077 	}
1078 	*raw_cons = tmp_raw_cons;
1079 	return 0;
1080 }
1081 
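/* Queue the slow-path task.  The PF uses the dedicated bnxt_pf_wq workqueue
 * because its slow-path work (e.g. servicing forwarded VF HWRM requests) can
 * be long-running; VFs simply use the system workqueue.
 */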
1082 static void bnxt_queue_sp_work(struct bnxt *bp)
1083 {
1084 	if (BNXT_PF(bp))
1085 		queue_work(bnxt_pf_wq, &bp->sp_task);
1086 	else
1087 		schedule_work(&bp->sp_task);
1088 }
1089 
1090 static void bnxt_cancel_sp_work(struct bnxt *bp)
1091 {
1092 	if (BNXT_PF(bp))
1093 		flush_workqueue(bnxt_pf_wq);
1094 	else
1095 		cancel_work_sync(&bp->sp_task);
1096 }
1097 
1098 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1099 {
1100 	if (!rxr->bnapi->in_reset) {
1101 		rxr->bnapi->in_reset = true;
1102 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1103 		bnxt_queue_sp_work(bp);
1104 	}
1105 	rxr->rx_next_cons = 0xffff;
1106 }
1107 
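/* Handle a TPA_START completion: the buffer currently on the RX ring is
 * parked in rxr->rx_tpa[agg_id] while the hardware aggregates the flow, and
 * the tpa_info's spare buffer is placed on the ring in its stead.  The
 * aggregated packet is completed later in bnxt_tpa_end().
 */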
1108 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1109 			   struct rx_tpa_start_cmp *tpa_start,
1110 			   struct rx_tpa_start_cmp_ext *tpa_start1)
1111 {
1112 	u8 agg_id = TPA_START_AGG_ID(tpa_start);
1113 	u16 cons, prod;
1114 	struct bnxt_tpa_info *tpa_info;
1115 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1116 	struct rx_bd *prod_bd;
1117 	dma_addr_t mapping;
1118 
1119 	cons = tpa_start->rx_tpa_start_cmp_opaque;
1120 	prod = rxr->rx_prod;
1121 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1122 	prod_rx_buf = &rxr->rx_buf_ring[prod];
1123 	tpa_info = &rxr->rx_tpa[agg_id];
1124 
1125 	if (unlikely(cons != rxr->rx_next_cons)) {
1126 		bnxt_sched_reset(bp, rxr);
1127 		return;
1128 	}
1129 	/* Store cfa_code in tpa_info to use in tpa_end
1130 	 * completion processing.
1131 	 */
1132 	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1133 	prod_rx_buf->data = tpa_info->data;
1134 	prod_rx_buf->data_ptr = tpa_info->data_ptr;
1135 
1136 	mapping = tpa_info->mapping;
1137 	prod_rx_buf->mapping = mapping;
1138 
1139 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1140 
1141 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1142 
1143 	tpa_info->data = cons_rx_buf->data;
1144 	tpa_info->data_ptr = cons_rx_buf->data_ptr;
1145 	cons_rx_buf->data = NULL;
1146 	tpa_info->mapping = cons_rx_buf->mapping;
1147 
1148 	tpa_info->len =
1149 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1150 				RX_TPA_START_CMP_LEN_SHIFT;
1151 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
1152 		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1153 
1154 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
1155 		tpa_info->gso_type = SKB_GSO_TCPV4;
1156 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1157 		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1158 			tpa_info->gso_type = SKB_GSO_TCPV6;
1159 		tpa_info->rss_hash =
1160 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1161 	} else {
1162 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1163 		tpa_info->gso_type = 0;
1164 		if (netif_msg_rx_err(bp))
1165 			netdev_warn(bp->dev, "TPA packet without valid hash\n");
1166 	}
1167 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1168 	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1169 	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1170 
1171 	rxr->rx_prod = NEXT_RX(prod);
1172 	cons = NEXT_RX(cons);
1173 	rxr->rx_next_cons = NEXT_RX(cons);
1174 	cons_rx_buf = &rxr->rx_buf_ring[cons];
1175 
1176 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1177 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1178 	cons_rx_buf->data = NULL;
1179 }
1180 
1181 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
1182 			   u32 agg_bufs)
1183 {
1184 	if (agg_bufs)
1185 		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1186 }
1187 
1188 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1189 					   int payload_off, int tcp_ts,
1190 					   struct sk_buff *skb)
1191 {
1192 #ifdef CONFIG_INET
1193 	struct tcphdr *th;
1194 	int len, nw_off;
1195 	u16 outer_ip_off, inner_ip_off, inner_mac_off;
1196 	u32 hdr_info = tpa_info->hdr_info;
1197 	bool loopback = false;
1198 
1199 	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1200 	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1201 	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1202 
1203 	/* If the packet is an internal loopback packet, the offsets will
1204 	 * have an extra 4 bytes.
1205 	 */
1206 	if (inner_mac_off == 4) {
1207 		loopback = true;
1208 	} else if (inner_mac_off > 4) {
1209 		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1210 					    ETH_HLEN - 2));
1211 
1212 		/* We only support inner IPv4/IPv6.  If we don't see the
1213 		 * correct protocol ID, it must be a loopback packet where
1214 		 * the offsets are off by 4.
1215 		 */
1216 		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1217 			loopback = true;
1218 	}
1219 	if (loopback) {
1220 		/* internal loopback packet; subtract 4 from all offsets */
1221 		inner_ip_off -= 4;
1222 		inner_mac_off -= 4;
1223 		outer_ip_off -= 4;
1224 	}
1225 
1226 	nw_off = inner_ip_off - ETH_HLEN;
1227 	skb_set_network_header(skb, nw_off);
1228 	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1229 		struct ipv6hdr *iph = ipv6_hdr(skb);
1230 
1231 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1232 		len = skb->len - skb_transport_offset(skb);
1233 		th = tcp_hdr(skb);
1234 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1235 	} else {
1236 		struct iphdr *iph = ip_hdr(skb);
1237 
1238 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1239 		len = skb->len - skb_transport_offset(skb);
1240 		th = tcp_hdr(skb);
1241 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1242 	}
1243 
1244 	if (inner_mac_off) { /* tunnel */
1245 		struct udphdr *uh = NULL;
1246 		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1247 					    ETH_HLEN - 2));
1248 
1249 		if (proto == htons(ETH_P_IP)) {
1250 			struct iphdr *iph = (struct iphdr *)skb->data;
1251 
1252 			if (iph->protocol == IPPROTO_UDP)
1253 				uh = (struct udphdr *)(iph + 1);
1254 		} else {
1255 			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1256 
1257 			if (iph->nexthdr == IPPROTO_UDP)
1258 				uh = (struct udphdr *)(iph + 1);
1259 		}
1260 		if (uh) {
1261 			if (uh->check)
1262 				skb_shinfo(skb)->gso_type |=
1263 					SKB_GSO_UDP_TUNNEL_CSUM;
1264 			else
1265 				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1266 		}
1267 	}
1268 #endif
1269 	return skb;
1270 }
1271 
1272 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
1273 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1274 
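/* Fix up the inner TCP/IP headers of an aggregated packet on 5730x chips.
 * payload_off is roughly the offset of the TCP payload from the start of the
 * frame; e.g. for an untunneled IPv4/TCP flow without TCP timestamps,
 * payload_off = ETH_HLEN + 20 + 20 = 54 and nw_off works out to 0 (the MAC
 * header has already been pulled by eth_type_trans()).  A non-zero nw_off
 * therefore indicates a tunneled packet.
 */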
1275 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1276 					   int payload_off, int tcp_ts,
1277 					   struct sk_buff *skb)
1278 {
1279 #ifdef CONFIG_INET
1280 	struct tcphdr *th;
1281 	int len, nw_off, tcp_opt_len = 0;
1282 
1283 	if (tcp_ts)
1284 		tcp_opt_len = 12;
1285 
1286 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1287 		struct iphdr *iph;
1288 
1289 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1290 			 ETH_HLEN;
1291 		skb_set_network_header(skb, nw_off);
1292 		iph = ip_hdr(skb);
1293 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1294 		len = skb->len - skb_transport_offset(skb);
1295 		th = tcp_hdr(skb);
1296 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1297 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1298 		struct ipv6hdr *iph;
1299 
1300 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1301 			 ETH_HLEN;
1302 		skb_set_network_header(skb, nw_off);
1303 		iph = ipv6_hdr(skb);
1304 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1305 		len = skb->len - skb_transport_offset(skb);
1306 		th = tcp_hdr(skb);
1307 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1308 	} else {
1309 		dev_kfree_skb_any(skb);
1310 		return NULL;
1311 	}
1312 
1313 	if (nw_off) { /* tunnel */
1314 		struct udphdr *uh = NULL;
1315 
1316 		if (skb->protocol == htons(ETH_P_IP)) {
1317 			struct iphdr *iph = (struct iphdr *)skb->data;
1318 
1319 			if (iph->protocol == IPPROTO_UDP)
1320 				uh = (struct udphdr *)(iph + 1);
1321 		} else {
1322 			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1323 
1324 			if (iph->nexthdr == IPPROTO_UDP)
1325 				uh = (struct udphdr *)(iph + 1);
1326 		}
1327 		if (uh) {
1328 			if (uh->check)
1329 				skb_shinfo(skb)->gso_type |=
1330 					SKB_GSO_UDP_TUNNEL_CSUM;
1331 			else
1332 				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1333 		}
1334 	}
1335 #endif
1336 	return skb;
1337 }
1338 
1339 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1340 					   struct bnxt_tpa_info *tpa_info,
1341 					   struct rx_tpa_end_cmp *tpa_end,
1342 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1343 					   struct sk_buff *skb)
1344 {
1345 #ifdef CONFIG_INET
1346 	int payload_off;
1347 	u16 segs;
1348 
1349 	segs = TPA_END_TPA_SEGS(tpa_end);
1350 	if (segs == 1)
1351 		return skb;
1352 
1353 	NAPI_GRO_CB(skb)->count = segs;
1354 	skb_shinfo(skb)->gso_size =
1355 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1356 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1357 	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1358 		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1359 		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1360 	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1361 	if (likely(skb))
1362 		tcp_gro_complete(skb);
1363 #endif
1364 	return skb;
1365 }
1366 
1367 /* Given the cfa_code of a received packet, determine which
1368  * netdev (vf-rep or PF) the packet is destined for.
1369  */
1370 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1371 {
1372 	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1373 
1374 	/* if the vf-rep dev is NULL, the packet must belong to the PF */
1375 	return dev ? dev : bp->dev;
1376 }
1377 
1378 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1379 					   struct bnxt_cp_ring_info *cpr,
1380 					   u32 *raw_cons,
1381 					   struct rx_tpa_end_cmp *tpa_end,
1382 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1383 					   u8 *event)
1384 {
1385 	struct bnxt_napi *bnapi = cpr->bnapi;
1386 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1387 	u8 agg_id = TPA_END_AGG_ID(tpa_end);
1388 	u8 *data_ptr, agg_bufs;
1389 	u16 cp_cons = RING_CMP(*raw_cons);
1390 	unsigned int len;
1391 	struct bnxt_tpa_info *tpa_info;
1392 	dma_addr_t mapping;
1393 	struct sk_buff *skb;
1394 	void *data;
1395 
1396 	if (unlikely(bnapi->in_reset)) {
1397 		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1398 
1399 		if (rc < 0)
1400 			return ERR_PTR(-EBUSY);
1401 		return NULL;
1402 	}
1403 
1404 	tpa_info = &rxr->rx_tpa[agg_id];
1405 	data = tpa_info->data;
1406 	data_ptr = tpa_info->data_ptr;
1407 	prefetch(data_ptr);
1408 	len = tpa_info->len;
1409 	mapping = tpa_info->mapping;
1410 
1411 	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1412 		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1413 
1414 	if (agg_bufs) {
1415 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1416 			return ERR_PTR(-EBUSY);
1417 
1418 		*event |= BNXT_AGG_EVENT;
1419 		cp_cons = NEXT_CMP(cp_cons);
1420 	}
1421 
1422 	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1423 		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1424 		if (agg_bufs > MAX_SKB_FRAGS)
1425 			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1426 				    agg_bufs, (int)MAX_SKB_FRAGS);
1427 		return NULL;
1428 	}
1429 
1430 	if (len <= bp->rx_copy_thresh) {
1431 		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1432 		if (!skb) {
1433 			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1434 			return NULL;
1435 		}
1436 	} else {
1437 		u8 *new_data;
1438 		dma_addr_t new_mapping;
1439 
1440 		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1441 		if (!new_data) {
1442 			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1443 			return NULL;
1444 		}
1445 
1446 		tpa_info->data = new_data;
1447 		tpa_info->data_ptr = new_data + bp->rx_offset;
1448 		tpa_info->mapping = new_mapping;
1449 
1450 		skb = build_skb(data, 0);
1451 		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1452 				       bp->rx_buf_use_size, bp->rx_dir,
1453 				       DMA_ATTR_WEAK_ORDERING);
1454 
1455 		if (!skb) {
1456 			kfree(data);
1457 			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1458 			return NULL;
1459 		}
1460 		skb_reserve(skb, bp->rx_offset);
1461 		skb_put(skb, len);
1462 	}
1463 
1464 	if (agg_bufs) {
1465 		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1466 		if (!skb) {
1467 			/* Page reuse already handled by bnxt_rx_pages(). */
1468 			return NULL;
1469 		}
1470 	}
1471 
1472 	skb->protocol =
1473 		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1474 
1475 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1476 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1477 
1478 	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1479 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1480 		u16 vlan_proto = tpa_info->metadata >>
1481 			RX_CMP_FLAGS2_METADATA_TPID_SFT;
1482 		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1483 
1484 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1485 	}
1486 
1487 	skb_checksum_none_assert(skb);
1488 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1489 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1490 		skb->csum_level =
1491 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1492 	}
1493 
1494 	if (TPA_END_GRO(tpa_end))
1495 		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1496 
1497 	return skb;
1498 }
1499 
1500 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1501 			     struct sk_buff *skb)
1502 {
1503 	if (skb->dev != bp->dev) {
1504 		/* this packet belongs to a vf-rep */
1505 		bnxt_vf_rep_rx(bp, skb);
1506 		return;
1507 	}
1508 	skb_record_rx_queue(skb, bnapi->index);
1509 	napi_gro_receive(&bnapi->napi, skb);
1510 }
1511 
1512 /* returns the following:
1513  * 1       - 1 packet successfully received
1514  * 0       - successful TPA_START, packet not completed yet
1515  * -EBUSY  - completion ring does not have all the agg buffers yet
1516  * -ENOMEM - packet aborted due to out of memory
1517  * -EIO    - packet aborted due to hw error indicated in BD
1518  */
1519 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1520 		       u32 *raw_cons, u8 *event)
1521 {
1522 	struct bnxt_napi *bnapi = cpr->bnapi;
1523 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1524 	struct net_device *dev = bp->dev;
1525 	struct rx_cmp *rxcmp;
1526 	struct rx_cmp_ext *rxcmp1;
1527 	u32 tmp_raw_cons = *raw_cons;
1528 	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1529 	struct bnxt_sw_rx_bd *rx_buf;
1530 	unsigned int len;
1531 	u8 *data_ptr, agg_bufs, cmp_type;
1532 	dma_addr_t dma_addr;
1533 	struct sk_buff *skb;
1534 	void *data;
1535 	int rc = 0;
1536 	u32 misc;
1537 
1538 	rxcmp = (struct rx_cmp *)
1539 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1540 
1541 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1542 	cp_cons = RING_CMP(tmp_raw_cons);
1543 	rxcmp1 = (struct rx_cmp_ext *)
1544 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1545 
1546 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1547 		return -EBUSY;
1548 
1549 	cmp_type = RX_CMP_TYPE(rxcmp);
1550 
1551 	prod = rxr->rx_prod;
1552 
1553 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1554 		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1555 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
1556 
1557 		*event |= BNXT_RX_EVENT;
1558 		goto next_rx_no_prod_no_len;
1559 
1560 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1561 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1562 				   (struct rx_tpa_end_cmp *)rxcmp,
1563 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1564 
1565 		if (IS_ERR(skb))
1566 			return -EBUSY;
1567 
1568 		rc = -ENOMEM;
1569 		if (likely(skb)) {
1570 			bnxt_deliver_skb(bp, bnapi, skb);
1571 			rc = 1;
1572 		}
1573 		*event |= BNXT_RX_EVENT;
1574 		goto next_rx_no_prod_no_len;
1575 	}
1576 
1577 	cons = rxcmp->rx_cmp_opaque;
1578 	rx_buf = &rxr->rx_buf_ring[cons];
1579 	data = rx_buf->data;
1580 	data_ptr = rx_buf->data_ptr;
1581 	if (unlikely(cons != rxr->rx_next_cons)) {
1582 		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1583 
1584 		bnxt_sched_reset(bp, rxr);
1585 		return rc1;
1586 	}
1587 	prefetch(data_ptr);
1588 
1589 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1590 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1591 
1592 	if (agg_bufs) {
1593 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1594 			return -EBUSY;
1595 
1596 		cp_cons = NEXT_CMP(cp_cons);
1597 		*event |= BNXT_AGG_EVENT;
1598 	}
1599 	*event |= BNXT_RX_EVENT;
1600 
1601 	rx_buf->data = NULL;
1602 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1603 		bnxt_reuse_rx_data(rxr, cons, data);
1604 		if (agg_bufs)
1605 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1606 
1607 		rc = -EIO;
1608 		goto next_rx;
1609 	}
1610 
1611 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1612 	dma_addr = rx_buf->mapping;
1613 
1614 	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1615 		rc = 1;
1616 		goto next_rx;
1617 	}
1618 
1619 	if (len <= bp->rx_copy_thresh) {
1620 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1621 		bnxt_reuse_rx_data(rxr, cons, data);
1622 		if (!skb) {
1623 			rc = -ENOMEM;
1624 			goto next_rx;
1625 		}
1626 	} else {
1627 		u32 payload;
1628 
1629 		if (rx_buf->data_ptr == data_ptr)
1630 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
1631 		else
1632 			payload = 0;
1633 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1634 				      payload | len);
1635 		if (!skb) {
1636 			rc = -ENOMEM;
1637 			goto next_rx;
1638 		}
1639 	}
1640 
1641 	if (agg_bufs) {
1642 		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1643 		if (!skb) {
1644 			rc = -ENOMEM;
1645 			goto next_rx;
1646 		}
1647 	}
1648 
1649 	if (RX_CMP_HASH_VALID(rxcmp)) {
1650 		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1651 		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1652 
1653 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1654 		if (hash_type != 1 && hash_type != 3)
1655 			type = PKT_HASH_TYPE_L3;
1656 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1657 	}
1658 
1659 	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1660 	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1661 
1662 	if ((rxcmp1->rx_cmp_flags2 &
1663 	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1664 	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1665 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1666 		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1667 		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1668 
1669 		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1670 	}
1671 
1672 	skb_checksum_none_assert(skb);
1673 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
1674 		if (dev->features & NETIF_F_RXCSUM) {
1675 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1676 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1677 		}
1678 	} else {
1679 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1680 			if (dev->features & NETIF_F_RXCSUM)
1681 				bnapi->cp_ring.rx_l4_csum_errors++;
1682 		}
1683 	}
1684 
1685 	bnxt_deliver_skb(bp, bnapi, skb);
1686 	rc = 1;
1687 
1688 next_rx:
1689 	rxr->rx_prod = NEXT_RX(prod);
1690 	rxr->rx_next_cons = NEXT_RX(cons);
1691 
1692 	cpr->rx_packets += 1;
1693 	cpr->rx_bytes += len;
1694 
1695 next_rx_no_prod_no_len:
1696 	*raw_cons = tmp_raw_cons;
1697 
1698 	return rc;
1699 }
1700 
1701 /* In netpoll mode, if we are using a combined completion ring, we need to
1702  * discard the rx packets and recycle the buffers.
1703  */
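/* Forcing RX_CMPL_ERRORS_CRC_ERROR (or RX_TPA_END_CMP_ERRORS for TPA) into
 * the completion makes bnxt_rx_pkt() take its error path, which reuses the
 * buffers instead of building an skb.
 */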
1704 static int bnxt_force_rx_discard(struct bnxt *bp,
1705 				 struct bnxt_cp_ring_info *cpr,
1706 				 u32 *raw_cons, u8 *event)
1707 {
1708 	u32 tmp_raw_cons = *raw_cons;
1709 	struct rx_cmp_ext *rxcmp1;
1710 	struct rx_cmp *rxcmp;
1711 	u16 cp_cons;
1712 	u8 cmp_type;
1713 
1714 	cp_cons = RING_CMP(tmp_raw_cons);
1715 	rxcmp = (struct rx_cmp *)
1716 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1717 
1718 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1719 	cp_cons = RING_CMP(tmp_raw_cons);
1720 	rxcmp1 = (struct rx_cmp_ext *)
1721 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1722 
1723 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1724 		return -EBUSY;
1725 
1726 	cmp_type = RX_CMP_TYPE(rxcmp);
1727 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1728 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1729 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1730 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1731 		struct rx_tpa_end_cmp_ext *tpa_end1;
1732 
1733 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1734 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1735 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1736 	}
1737 	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1738 }
1739 
1740 #define BNXT_GET_EVENT_PORT(data)	\
1741 	((data) &			\
1742 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1743 
1744 static int bnxt_async_event_process(struct bnxt *bp,
1745 				    struct hwrm_async_event_cmpl *cmpl)
1746 {
1747 	u16 event_id = le16_to_cpu(cmpl->event_id);
1748 
1749 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
1750 	switch (event_id) {
1751 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1752 		u32 data1 = le32_to_cpu(cmpl->event_data1);
1753 		struct bnxt_link_info *link_info = &bp->link_info;
1754 
1755 		if (BNXT_VF(bp))
1756 			goto async_event_process_exit;
1757 
1758 		/* print unsupported speed warning in forced speed mode only */
1759 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1760 		    (data1 & 0x20000)) {
1761 			u16 fw_speed = link_info->force_link_speed;
1762 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1763 
1764 			if (speed != SPEED_UNKNOWN)
1765 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1766 					    speed);
1767 		}
1768 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1769 	}
1770 	/* fall through */
1771 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1772 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1773 		break;
1774 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1775 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1776 		break;
1777 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1778 		u32 data1 = le32_to_cpu(cmpl->event_data1);
1779 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
1780 
1781 		if (BNXT_VF(bp))
1782 			break;
1783 
1784 		if (bp->pf.port_id != port_id)
1785 			break;
1786 
1787 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1788 		break;
1789 	}
1790 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1791 		if (BNXT_PF(bp))
1792 			goto async_event_process_exit;
1793 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1794 		break;
1795 	default:
1796 		goto async_event_process_exit;
1797 	}
1798 	bnxt_queue_sp_work(bp);
1799 async_event_process_exit:
1800 	bnxt_ulp_async_events(bp, cmpl);
1801 	return 0;
1802 }
1803 
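/* Handle a non-TX/RX completion on the completion ring: HWRM_DONE acks an
 * interrupt-driven HWRM command by matching the sequence id, HWRM_FWD_REQ
 * records a forwarded VF request and kicks the slow-path workqueue, and
 * HWRM_ASYNC_EVENT is passed to bnxt_async_event_process().
 */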
1804 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1805 {
1806 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1807 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1808 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1809 				(struct hwrm_fwd_req_cmpl *)txcmp;
1810 
1811 	switch (cmpl_type) {
1812 	case CMPL_BASE_TYPE_HWRM_DONE:
1813 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
1814 		if (seq_id == bp->hwrm_intr_seq_id)
1815 			bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1816 		else
1817 			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1818 		break;
1819 
1820 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1821 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1822 
1823 		if ((vf_id < bp->pf.first_vf_id) ||
1824 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1825 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1826 				   vf_id);
1827 			return -EINVAL;
1828 		}
1829 
1830 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1831 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1832 		bnxt_queue_sp_work(bp);
1833 		break;
1834 
	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

1839 	default:
1840 		break;
1841 	}
1842 
1843 	return 0;
1844 }
1845 
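/* MSI-X interrupt handler.  All real work is deferred to NAPI; the handler
 * only counts the event, prefetches the next completion descriptor and
 * schedules the poll routine.
 */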
1846 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1847 {
1848 	struct bnxt_napi *bnapi = dev_instance;
1849 	struct bnxt *bp = bnapi->bp;
1850 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1851 	u32 cons = RING_CMP(cpr->cp_raw_cons);
1852 
1853 	cpr->event_ctr++;
1854 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1855 	napi_schedule(&bnapi->napi);
1856 	return IRQ_HANDLED;
1857 }
1858 
1859 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1860 {
1861 	u32 raw_cons = cpr->cp_raw_cons;
1862 	u16 cons = RING_CMP(raw_cons);
1863 	struct tx_cmp *txcmp;
1864 
1865 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1866 
1867 	return TX_CMP_VALID(txcmp, raw_cons);
1868 }
1869 
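/* Legacy INTA interrupt handler.  The line may be shared, so if no work is
 * pending, the legacy interrupt status register is checked to filter out
 * interrupts that belong to another device.  The ring IRQ is then disabled
 * through the doorbell and processing is handed off to NAPI.
 */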
1870 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1871 {
1872 	struct bnxt_napi *bnapi = dev_instance;
1873 	struct bnxt *bp = bnapi->bp;
1874 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1875 	u32 cons = RING_CMP(cpr->cp_raw_cons);
1876 	u32 int_status;
1877 
1878 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1879 
1880 	if (!bnxt_has_work(bp, cpr)) {
1881 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1882 		/* return if erroneous interrupt */
1883 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1884 			return IRQ_NONE;
1885 	}
1886 
1887 	/* disable ring IRQ */
1888 	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
1889 
1890 	/* Return here if interrupt is shared and is disabled. */
1891 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
1892 		return IRQ_HANDLED;
1893 
1894 	napi_schedule(&bnapi->napi);
1895 	return IRQ_HANDLED;
1896 }
1897 
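/* Process one completion ring, up to @budget RX packets.  TX completions
 * are only counted here and reaped later in __bnxt_poll_work_done().  RX
 * completions are passed to bnxt_rx_pkt(), or discarded via
 * bnxt_force_rx_discard() when budget is 0 (netpoll).  has_more_work is
 * set if the budget is exhausted before the ring is drained.
 */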
1898 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1899 			    int budget)
1900 {
1901 	struct bnxt_napi *bnapi = cpr->bnapi;
1902 	u32 raw_cons = cpr->cp_raw_cons;
1903 	u32 cons;
1904 	int tx_pkts = 0;
1905 	int rx_pkts = 0;
1906 	u8 event = 0;
1907 	struct tx_cmp *txcmp;
1908 
1909 	cpr->has_more_work = 0;
1910 	while (1) {
1911 		int rc;
1912 
1913 		cons = RING_CMP(raw_cons);
1914 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1915 
1916 		if (!TX_CMP_VALID(txcmp, raw_cons))
1917 			break;
1918 
1919 		/* The valid test of the entry must be done first before
1920 		 * reading any further.
1921 		 */
1922 		dma_rmb();
1923 		cpr->had_work_done = 1;
1924 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1925 			tx_pkts++;
1926 			/* return full budget so NAPI will complete. */
1927 			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1928 				rx_pkts = budget;
1929 				raw_cons = NEXT_RAW_CMP(raw_cons);
1930 				if (budget)
1931 					cpr->has_more_work = 1;
1932 				break;
1933 			}
1934 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1935 			if (likely(budget))
1936 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
1937 			else
1938 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
1939 							   &event);
1940 			if (likely(rc >= 0))
1941 				rx_pkts += rc;
1942 			/* Increment rx_pkts when rc is -ENOMEM to count towards
1943 			 * the NAPI budget.  Otherwise, we may potentially loop
1944 			 * here forever if we consistently cannot allocate
1945 			 * buffers.
1946 			 */
1947 			else if (rc == -ENOMEM && budget)
1948 				rx_pkts++;
1949 			else if (rc == -EBUSY)	/* partial completion */
1950 				break;
1951 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
1952 				     CMPL_BASE_TYPE_HWRM_DONE) ||
1953 				    (TX_CMP_TYPE(txcmp) ==
1954 				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1955 				    (TX_CMP_TYPE(txcmp) ==
1956 				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1957 			bnxt_hwrm_handler(bp, txcmp);
1958 		}
1959 		raw_cons = NEXT_RAW_CMP(raw_cons);
1960 
1961 		if (rx_pkts && rx_pkts == budget) {
1962 			cpr->has_more_work = 1;
1963 			break;
1964 		}
1965 	}
1966 
1967 	if (event & BNXT_TX_EVENT) {
1968 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1969 		u16 prod = txr->tx_prod;
1970 
1971 		/* Sync BD data before updating doorbell */
1972 		wmb();
1973 
1974 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
1975 	}
1976 
1977 	cpr->cp_raw_cons = raw_cons;
1978 	bnapi->tx_pkts += tx_pkts;
1979 	bnapi->events |= event;
1980 	return rx_pkts;
1981 }
1982 
1983 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1984 {
1985 	if (bnapi->tx_pkts) {
1986 		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1987 		bnapi->tx_pkts = 0;
1988 	}
1989 
1990 	if (bnapi->events & BNXT_RX_EVENT) {
1991 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1992 
1993 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
1994 		if (bnapi->events & BNXT_AGG_EVENT)
1995 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
1996 	}
1997 	bnapi->events = 0;
1998 }
1999 
2000 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2001 			  int budget)
2002 {
2003 	struct bnxt_napi *bnapi = cpr->bnapi;
2004 	int rx_pkts;
2005 
2006 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2007 
2008 	/* ACK completion ring before freeing tx ring and producing new
2009 	 * buffers in rx/agg rings to prevent overflowing the completion
2010 	 * ring.
2011 	 */
2012 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2013 
2014 	__bnxt_poll_work_done(bp, bnapi);
2015 	return rx_pkts;
2016 }
2017 
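/* NAPI poll for the Nitro A0 special completion ring.  RX packets seen here
 * are never delivered to the stack; a CRC error is forced into the
 * completion so that bnxt_rx_pkt() simply recycles the buffer.
 */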
2018 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2019 {
2020 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2021 	struct bnxt *bp = bnapi->bp;
2022 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2023 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2024 	struct tx_cmp *txcmp;
2025 	struct rx_cmp_ext *rxcmp1;
2026 	u32 cp_cons, tmp_raw_cons;
2027 	u32 raw_cons = cpr->cp_raw_cons;
2028 	u32 rx_pkts = 0;
2029 	u8 event = 0;
2030 
2031 	while (1) {
2032 		int rc;
2033 
2034 		cp_cons = RING_CMP(raw_cons);
2035 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2036 
2037 		if (!TX_CMP_VALID(txcmp, raw_cons))
2038 			break;
2039 
2040 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2041 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2042 			cp_cons = RING_CMP(tmp_raw_cons);
2043 			rxcmp1 = (struct rx_cmp_ext *)
2044 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2045 
2046 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2047 				break;
2048 
2049 			/* force an error to recycle the buffer */
2050 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2051 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2052 
2053 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2054 			if (likely(rc == -EIO) && budget)
2055 				rx_pkts++;
2056 			else if (rc == -EBUSY)	/* partial completion */
2057 				break;
2058 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
2059 				    CMPL_BASE_TYPE_HWRM_DONE)) {
2060 			bnxt_hwrm_handler(bp, txcmp);
2061 		} else {
2062 			netdev_err(bp->dev,
2063 				   "Invalid completion received on special ring\n");
2064 		}
2065 		raw_cons = NEXT_RAW_CMP(raw_cons);
2066 
2067 		if (rx_pkts == budget)
2068 			break;
2069 	}
2070 
2071 	cpr->cp_raw_cons = raw_cons;
2072 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2073 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2074 
2075 	if (event & BNXT_AGG_EVENT)
2076 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2077 
2078 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2079 		napi_complete_done(napi, rx_pkts);
2080 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2081 	}
2082 	return rx_pkts;
2083 }
2084 
2085 static int bnxt_poll(struct napi_struct *napi, int budget)
2086 {
2087 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2088 	struct bnxt *bp = bnapi->bp;
2089 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2090 	int work_done = 0;
2091 
2092 	while (1) {
2093 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2094 
2095 		if (work_done >= budget) {
2096 			if (!budget)
2097 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2098 			break;
2099 		}
2100 
2101 		if (!bnxt_has_work(bp, cpr)) {
2102 			if (napi_complete_done(napi, work_done))
2103 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2104 			break;
2105 		}
2106 	}
2107 	if (bp->flags & BNXT_FLAG_DIM) {
2108 		struct net_dim_sample dim_sample;
2109 
2110 		net_dim_sample(cpr->event_ctr,
2111 			       cpr->rx_packets,
2112 			       cpr->rx_bytes,
2113 			       &dim_sample);
2114 		net_dim(&cpr->dim, dim_sample);
2115 	}
2116 	mmiowb();
2117 	return work_done;
2118 }
2119 
2120 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2121 {
2122 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2123 	int i, work_done = 0;
2124 
2125 	for (i = 0; i < 2; i++) {
2126 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2127 
2128 		if (cpr2) {
2129 			work_done += __bnxt_poll_work(bp, cpr2,
2130 						      budget - work_done);
2131 			cpr->has_more_work |= cpr2->has_more_work;
2132 		}
2133 	}
2134 	return work_done;
2135 }
2136 
2137 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2138 				 u64 dbr_type, bool all)
2139 {
2140 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2141 	int i;
2142 
2143 	for (i = 0; i < 2; i++) {
2144 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2145 		struct bnxt_db_info *db;
2146 
2147 		if (cpr2 && (all || cpr2->had_work_done)) {
2148 			db = &cpr2->cp_db;
2149 			writeq(db->db_key64 | dbr_type |
2150 			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2151 			cpr2->had_work_done = 0;
2152 		}
2153 	}
2154 	__bnxt_poll_work_done(bp, bnapi);
2155 }
2156 
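/* NAPI poll for P5 chips.  The vector is bound to a notification queue
 * (NQ); each NQ entry points at a child RX or TX completion ring, which is
 * then polled with __bnxt_poll_work().  The child CQ doorbells and finally
 * the NQ itself are re-armed once all outstanding work is done.
 */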
2157 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2158 {
2159 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2160 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2161 	u32 raw_cons = cpr->cp_raw_cons;
2162 	struct bnxt *bp = bnapi->bp;
2163 	struct nqe_cn *nqcmp;
2164 	int work_done = 0;
2165 	u32 cons;
2166 
2167 	if (cpr->has_more_work) {
2168 		cpr->has_more_work = 0;
2169 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2170 		if (cpr->has_more_work) {
2171 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2172 			return work_done;
2173 		}
2174 		__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2175 		if (napi_complete_done(napi, work_done))
2176 			BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2177 		return work_done;
2178 	}
2179 	while (1) {
2180 		cons = RING_CMP(raw_cons);
2181 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2182 
2183 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2184 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2185 					     false);
2186 			cpr->cp_raw_cons = raw_cons;
2187 			if (napi_complete_done(napi, work_done))
2188 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2189 						  cpr->cp_raw_cons);
2190 			return work_done;
2191 		}
2192 
2193 		/* The valid test of the entry must be done first before
2194 		 * reading any further.
2195 		 */
2196 		dma_rmb();
2197 
2198 		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2199 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2200 			struct bnxt_cp_ring_info *cpr2;
2201 
2202 			cpr2 = cpr->cp_ring_arr[idx];
2203 			work_done += __bnxt_poll_work(bp, cpr2,
2204 						      budget - work_done);
2205 			cpr->has_more_work = cpr2->has_more_work;
2206 		} else {
2207 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2208 		}
2209 		raw_cons = NEXT_RAW_CMP(raw_cons);
2210 		if (cpr->has_more_work)
2211 			break;
2212 	}
2213 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2214 	cpr->cp_raw_cons = raw_cons;
2215 	return work_done;
2216 }
2217 
2218 static void bnxt_free_tx_skbs(struct bnxt *bp)
2219 {
2220 	int i, max_idx;
2221 	struct pci_dev *pdev = bp->pdev;
2222 
2223 	if (!bp->tx_ring)
2224 		return;
2225 
2226 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2227 	for (i = 0; i < bp->tx_nr_rings; i++) {
2228 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2229 		int j;
2230 
2231 		for (j = 0; j < max_idx;) {
2232 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2233 			struct sk_buff *skb = tx_buf->skb;
2234 			int k, last;
2235 
2236 			if (!skb) {
2237 				j++;
2238 				continue;
2239 			}
2240 
2241 			tx_buf->skb = NULL;
2242 
2243 			if (tx_buf->is_push) {
2244 				dev_kfree_skb(skb);
2245 				j += 2;
2246 				continue;
2247 			}
2248 
			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 DMA_TO_DEVICE);
2253 
2254 			last = tx_buf->nr_frags;
2255 			j += 2;
2256 			for (k = 0; k < last; k++, j++) {
2257 				int ring_idx = j & bp->tx_ring_mask;
2258 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2259 
2260 				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(&pdev->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
2265 			}
2266 			dev_kfree_skb(skb);
2267 		}
2268 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2269 	}
2270 }
2271 
2272 static void bnxt_free_rx_skbs(struct bnxt *bp)
2273 {
2274 	int i, max_idx, max_agg_idx;
2275 	struct pci_dev *pdev = bp->pdev;
2276 
2277 	if (!bp->rx_ring)
2278 		return;
2279 
2280 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2281 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2282 	for (i = 0; i < bp->rx_nr_rings; i++) {
2283 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2284 		int j;
2285 
2286 		if (rxr->rx_tpa) {
2287 			for (j = 0; j < MAX_TPA; j++) {
2288 				struct bnxt_tpa_info *tpa_info =
2289 							&rxr->rx_tpa[j];
2290 				u8 *data = tpa_info->data;
2291 
2292 				if (!data)
2293 					continue;
2294 
2295 				dma_unmap_single_attrs(&pdev->dev,
2296 						       tpa_info->mapping,
2297 						       bp->rx_buf_use_size,
2298 						       bp->rx_dir,
2299 						       DMA_ATTR_WEAK_ORDERING);
2300 
2301 				tpa_info->data = NULL;
2302 
2303 				kfree(data);
2304 			}
2305 		}
2306 
2307 		for (j = 0; j < max_idx; j++) {
2308 			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2309 			dma_addr_t mapping = rx_buf->mapping;
2310 			void *data = rx_buf->data;
2311 
2312 			if (!data)
2313 				continue;
2314 
2315 			rx_buf->data = NULL;
2316 
2317 			if (BNXT_RX_PAGE_MODE(bp)) {
2318 				mapping -= bp->rx_dma_offset;
2319 				dma_unmap_page_attrs(&pdev->dev, mapping,
2320 						     PAGE_SIZE, bp->rx_dir,
2321 						     DMA_ATTR_WEAK_ORDERING);
2322 				__free_page(data);
2323 			} else {
2324 				dma_unmap_single_attrs(&pdev->dev, mapping,
2325 						       bp->rx_buf_use_size,
2326 						       bp->rx_dir,
2327 						       DMA_ATTR_WEAK_ORDERING);
2328 				kfree(data);
2329 			}
2330 		}
2331 
2332 		for (j = 0; j < max_agg_idx; j++) {
2333 			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2334 				&rxr->rx_agg_ring[j];
2335 			struct page *page = rx_agg_buf->page;
2336 
2337 			if (!page)
2338 				continue;
2339 
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
2344 
2345 			rx_agg_buf->page = NULL;
2346 			__clear_bit(j, rxr->rx_agg_bmap);
2347 
2348 			__free_page(page);
2349 		}
2350 		if (rxr->rx_page) {
2351 			__free_page(rxr->rx_page);
2352 			rxr->rx_page = NULL;
2353 		}
2354 	}
2355 }
2356 
2357 static void bnxt_free_skbs(struct bnxt *bp)
2358 {
2359 	bnxt_free_tx_skbs(bp);
2360 	bnxt_free_rx_skbs(bp);
2361 }
2362 
2363 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2364 {
2365 	struct pci_dev *pdev = bp->pdev;
2366 	int i;
2367 
2368 	for (i = 0; i < rmem->nr_pages; i++) {
2369 		if (!rmem->pg_arr[i])
2370 			continue;
2371 
2372 		dma_free_coherent(&pdev->dev, rmem->page_size,
2373 				  rmem->pg_arr[i], rmem->dma_arr[i]);
2374 
2375 		rmem->pg_arr[i] = NULL;
2376 	}
2377 	if (rmem->pg_tbl) {
2378 		size_t pg_tbl_size = rmem->nr_pages * 8;
2379 
2380 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2381 			pg_tbl_size = rmem->page_size;
2382 		dma_free_coherent(&pdev->dev, pg_tbl_size,
2383 				  rmem->pg_tbl, rmem->pg_tbl_map);
2384 		rmem->pg_tbl = NULL;
2385 	}
2386 	if (rmem->vmem_size && *rmem->vmem) {
2387 		vfree(*rmem->vmem);
2388 		*rmem->vmem = NULL;
2389 	}
2390 }
2391 
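/* Allocate the coherent DMA pages described by @rmem.  When more than one
 * page (or a multi-level structure) is needed, a page table is allocated
 * as well and each entry is tagged with the PTE valid/last bits expected
 * by the hardware.  An optional vmalloc'ed area holds the software ring
 * state.
 */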
2392 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2393 {
2394 	struct pci_dev *pdev = bp->pdev;
2395 	u64 valid_bit = 0;
2396 	int i;
2397 
2398 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2399 		valid_bit = PTU_PTE_VALID;
2400 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2401 		size_t pg_tbl_size = rmem->nr_pages * 8;
2402 
2403 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2404 			pg_tbl_size = rmem->page_size;
2405 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2406 						  &rmem->pg_tbl_map,
2407 						  GFP_KERNEL);
2408 		if (!rmem->pg_tbl)
2409 			return -ENOMEM;
2410 	}
2411 
2412 	for (i = 0; i < rmem->nr_pages; i++) {
2413 		u64 extra_bits = valid_bit;
2414 
2415 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2416 						     rmem->page_size,
2417 						     &rmem->dma_arr[i],
2418 						     GFP_KERNEL);
2419 		if (!rmem->pg_arr[i])
2420 			return -ENOMEM;
2421 
2422 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
2423 			if (i == rmem->nr_pages - 2 &&
2424 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2425 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
2426 			else if (i == rmem->nr_pages - 1 &&
2427 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2428 				extra_bits |= PTU_PTE_LAST;
2429 			rmem->pg_tbl[i] =
2430 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2431 		}
2432 	}
2433 
2434 	if (rmem->vmem_size) {
2435 		*rmem->vmem = vzalloc(rmem->vmem_size);
2436 		if (!(*rmem->vmem))
2437 			return -ENOMEM;
2438 	}
2439 	return 0;
2440 }
2441 
2442 static void bnxt_free_rx_rings(struct bnxt *bp)
2443 {
2444 	int i;
2445 
2446 	if (!bp->rx_ring)
2447 		return;
2448 
2449 	for (i = 0; i < bp->rx_nr_rings; i++) {
2450 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2451 		struct bnxt_ring_struct *ring;
2452 
2453 		if (rxr->xdp_prog)
2454 			bpf_prog_put(rxr->xdp_prog);
2455 
2456 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2457 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
2458 
2459 		kfree(rxr->rx_tpa);
2460 		rxr->rx_tpa = NULL;
2461 
2462 		kfree(rxr->rx_agg_bmap);
2463 		rxr->rx_agg_bmap = NULL;
2464 
2465 		ring = &rxr->rx_ring_struct;
2466 		bnxt_free_ring(bp, &ring->ring_mem);
2467 
2468 		ring = &rxr->rx_agg_ring_struct;
2469 		bnxt_free_ring(bp, &ring->ring_mem);
2470 	}
2471 }
2472 
2473 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2474 {
2475 	int i, rc, agg_rings = 0, tpa_rings = 0;
2476 
2477 	if (!bp->rx_ring)
2478 		return -ENOMEM;
2479 
2480 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
2481 		agg_rings = 1;
2482 
2483 	if (bp->flags & BNXT_FLAG_TPA)
2484 		tpa_rings = 1;
2485 
2486 	for (i = 0; i < bp->rx_nr_rings; i++) {
2487 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2488 		struct bnxt_ring_struct *ring;
2489 
2490 		ring = &rxr->rx_ring_struct;
2491 
2492 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2493 		if (rc < 0)
2494 			return rc;
2495 
2496 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2497 		if (rc)
2498 			return rc;
2499 
2500 		ring->grp_idx = i;
2501 		if (agg_rings) {
2502 			u16 mem_size;
2503 
2504 			ring = &rxr->rx_agg_ring_struct;
2505 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2506 			if (rc)
2507 				return rc;
2508 
2509 			ring->grp_idx = i;
2510 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2511 			mem_size = rxr->rx_agg_bmap_size / 8;
2512 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2513 			if (!rxr->rx_agg_bmap)
2514 				return -ENOMEM;
2515 
2516 			if (tpa_rings) {
2517 				rxr->rx_tpa = kcalloc(MAX_TPA,
2518 						sizeof(struct bnxt_tpa_info),
2519 						GFP_KERNEL);
2520 				if (!rxr->rx_tpa)
2521 					return -ENOMEM;
2522 			}
2523 		}
2524 	}
2525 	return 0;
2526 }
2527 
2528 static void bnxt_free_tx_rings(struct bnxt *bp)
2529 {
2530 	int i;
2531 	struct pci_dev *pdev = bp->pdev;
2532 
2533 	if (!bp->tx_ring)
2534 		return;
2535 
2536 	for (i = 0; i < bp->tx_nr_rings; i++) {
2537 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2538 		struct bnxt_ring_struct *ring;
2539 
2540 		if (txr->tx_push) {
2541 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
2542 					  txr->tx_push, txr->tx_push_mapping);
2543 			txr->tx_push = NULL;
2544 		}
2545 
2546 		ring = &txr->tx_ring_struct;
2547 
2548 		bnxt_free_ring(bp, &ring->ring_mem);
2549 	}
2550 }
2551 
2552 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2553 {
2554 	int i, j, rc;
2555 	struct pci_dev *pdev = bp->pdev;
2556 
2557 	bp->tx_push_size = 0;
2558 	if (bp->tx_push_thresh) {
2559 		int push_size;
2560 
2561 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2562 					bp->tx_push_thresh);
2563 
2564 		if (push_size > 256) {
2565 			push_size = 0;
2566 			bp->tx_push_thresh = 0;
2567 		}
2568 
2569 		bp->tx_push_size = push_size;
2570 	}
2571 
2572 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2573 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2574 		struct bnxt_ring_struct *ring;
2575 		u8 qidx;
2576 
2577 		ring = &txr->tx_ring_struct;
2578 
2579 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2580 		if (rc)
2581 			return rc;
2582 
2583 		ring->grp_idx = txr->bnapi->index;
2584 		if (bp->tx_push_size) {
2585 			dma_addr_t mapping;
2586 
			/* One pre-allocated DMA buffer to back up the
			 * TX push operation
			 */
2590 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
2591 						bp->tx_push_size,
2592 						&txr->tx_push_mapping,
2593 						GFP_KERNEL);
2594 
2595 			if (!txr->tx_push)
2596 				return -ENOMEM;
2597 
2598 			mapping = txr->tx_push_mapping +
2599 				sizeof(struct tx_push_bd);
2600 			txr->data_mapping = cpu_to_le64(mapping);
2601 
2602 			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2603 		}
2604 		qidx = bp->tc_to_qidx[j];
2605 		ring->queue_id = bp->q_info[qidx].queue_id;
2606 		if (i < bp->tx_nr_rings_xdp)
2607 			continue;
2608 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2609 			j++;
2610 	}
2611 	return 0;
2612 }
2613 
2614 static void bnxt_free_cp_rings(struct bnxt *bp)
2615 {
2616 	int i;
2617 
2618 	if (!bp->bnapi)
2619 		return;
2620 
2621 	for (i = 0; i < bp->cp_nr_rings; i++) {
2622 		struct bnxt_napi *bnapi = bp->bnapi[i];
2623 		struct bnxt_cp_ring_info *cpr;
2624 		struct bnxt_ring_struct *ring;
2625 		int j;
2626 
2627 		if (!bnapi)
2628 			continue;
2629 
2630 		cpr = &bnapi->cp_ring;
2631 		ring = &cpr->cp_ring_struct;
2632 
2633 		bnxt_free_ring(bp, &ring->ring_mem);
2634 
2635 		for (j = 0; j < 2; j++) {
2636 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2637 
2638 			if (cpr2) {
2639 				ring = &cpr2->cp_ring_struct;
2640 				bnxt_free_ring(bp, &ring->ring_mem);
2641 				kfree(cpr2);
2642 				cpr->cp_ring_arr[j] = NULL;
2643 			}
2644 		}
2645 	}
2646 }
2647 
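/* Allocate one child completion ring for P5 chips.  The parent ring in
 * bnxt_cp_ring_info can own up to two of these, one for RX and one for TX
 * completions.
 */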
2648 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2649 {
2650 	struct bnxt_ring_mem_info *rmem;
2651 	struct bnxt_ring_struct *ring;
2652 	struct bnxt_cp_ring_info *cpr;
2653 	int rc;
2654 
2655 	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2656 	if (!cpr)
2657 		return NULL;
2658 
2659 	ring = &cpr->cp_ring_struct;
2660 	rmem = &ring->ring_mem;
2661 	rmem->nr_pages = bp->cp_nr_pages;
2662 	rmem->page_size = HW_CMPD_RING_SIZE;
2663 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
2664 	rmem->dma_arr = cpr->cp_desc_mapping;
2665 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2666 	rc = bnxt_alloc_ring(bp, rmem);
2667 	if (rc) {
2668 		bnxt_free_ring(bp, rmem);
2669 		kfree(cpr);
2670 		cpr = NULL;
2671 	}
2672 	return cpr;
2673 }
2674 
2675 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2676 {
2677 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2678 	int i, rc, ulp_base_vec, ulp_msix;
2679 
2680 	ulp_msix = bnxt_get_ulp_msix_num(bp);
2681 	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2682 	for (i = 0; i < bp->cp_nr_rings; i++) {
2683 		struct bnxt_napi *bnapi = bp->bnapi[i];
2684 		struct bnxt_cp_ring_info *cpr;
2685 		struct bnxt_ring_struct *ring;
2686 
2687 		if (!bnapi)
2688 			continue;
2689 
2690 		cpr = &bnapi->cp_ring;
2691 		cpr->bnapi = bnapi;
2692 		ring = &cpr->cp_ring_struct;
2693 
2694 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2695 		if (rc)
2696 			return rc;
2697 
2698 		if (ulp_msix && i >= ulp_base_vec)
2699 			ring->map_idx = i + ulp_msix;
2700 		else
2701 			ring->map_idx = i;
2702 
2703 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2704 			continue;
2705 
2706 		if (i < bp->rx_nr_rings) {
2707 			struct bnxt_cp_ring_info *cpr2 =
2708 				bnxt_alloc_cp_sub_ring(bp);
2709 
2710 			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2711 			if (!cpr2)
2712 				return -ENOMEM;
2713 			cpr2->bnapi = bnapi;
2714 		}
2715 		if ((sh && i < bp->tx_nr_rings) ||
2716 		    (!sh && i >= bp->rx_nr_rings)) {
2717 			struct bnxt_cp_ring_info *cpr2 =
2718 				bnxt_alloc_cp_sub_ring(bp);
2719 
2720 			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2721 			if (!cpr2)
2722 				return -ENOMEM;
2723 			cpr2->bnapi = bnapi;
2724 		}
2725 	}
2726 	return 0;
2727 }
2728 
2729 static void bnxt_init_ring_struct(struct bnxt *bp)
2730 {
2731 	int i;
2732 
2733 	for (i = 0; i < bp->cp_nr_rings; i++) {
2734 		struct bnxt_napi *bnapi = bp->bnapi[i];
2735 		struct bnxt_ring_mem_info *rmem;
2736 		struct bnxt_cp_ring_info *cpr;
2737 		struct bnxt_rx_ring_info *rxr;
2738 		struct bnxt_tx_ring_info *txr;
2739 		struct bnxt_ring_struct *ring;
2740 
2741 		if (!bnapi)
2742 			continue;
2743 
2744 		cpr = &bnapi->cp_ring;
2745 		ring = &cpr->cp_ring_struct;
2746 		rmem = &ring->ring_mem;
2747 		rmem->nr_pages = bp->cp_nr_pages;
2748 		rmem->page_size = HW_CMPD_RING_SIZE;
2749 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
2750 		rmem->dma_arr = cpr->cp_desc_mapping;
2751 		rmem->vmem_size = 0;
2752 
2753 		rxr = bnapi->rx_ring;
2754 		if (!rxr)
2755 			goto skip_rx;
2756 
2757 		ring = &rxr->rx_ring_struct;
2758 		rmem = &ring->ring_mem;
2759 		rmem->nr_pages = bp->rx_nr_pages;
2760 		rmem->page_size = HW_RXBD_RING_SIZE;
2761 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
2762 		rmem->dma_arr = rxr->rx_desc_mapping;
2763 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2764 		rmem->vmem = (void **)&rxr->rx_buf_ring;
2765 
2766 		ring = &rxr->rx_agg_ring_struct;
2767 		rmem = &ring->ring_mem;
2768 		rmem->nr_pages = bp->rx_agg_nr_pages;
2769 		rmem->page_size = HW_RXBD_RING_SIZE;
2770 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2771 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
2772 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2773 		rmem->vmem = (void **)&rxr->rx_agg_ring;
2774 
2775 skip_rx:
2776 		txr = bnapi->tx_ring;
2777 		if (!txr)
2778 			continue;
2779 
2780 		ring = &txr->tx_ring_struct;
2781 		rmem = &ring->ring_mem;
2782 		rmem->nr_pages = bp->tx_nr_pages;
2783 		rmem->page_size = HW_RXBD_RING_SIZE;
2784 		rmem->pg_arr = (void **)txr->tx_desc_ring;
2785 		rmem->dma_arr = txr->tx_desc_mapping;
2786 		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2787 		rmem->vmem = (void **)&txr->tx_buf_ring;
2788 	}
2789 }
2790 
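/* Write the buffer type and flags into every RX BD of the ring and store
 * the producer index in rx_bd_opaque so completions can be matched back to
 * the software ring slot.
 */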
2791 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2792 {
2793 	int i;
2794 	u32 prod;
2795 	struct rx_bd **rx_buf_ring;
2796 
2797 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2798 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
2799 		int j;
2800 		struct rx_bd *rxbd;
2801 
2802 		rxbd = rx_buf_ring[i];
2803 		if (!rxbd)
2804 			continue;
2805 
2806 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2807 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2808 			rxbd->rx_bd_opaque = prod;
2809 		}
2810 	}
2811 }
2812 
2813 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2814 {
2815 	struct net_device *dev = bp->dev;
2816 	struct bnxt_rx_ring_info *rxr;
2817 	struct bnxt_ring_struct *ring;
2818 	u32 prod, type;
2819 	int i;
2820 
2821 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2822 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2823 
2824 	if (NET_IP_ALIGN == 2)
2825 		type |= RX_BD_FLAGS_SOP;
2826 
2827 	rxr = &bp->rx_ring[ring_nr];
2828 	ring = &rxr->rx_ring_struct;
2829 	bnxt_init_rxbd_pages(ring, type);
2830 
2831 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2832 		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2833 		if (IS_ERR(rxr->xdp_prog)) {
2834 			int rc = PTR_ERR(rxr->xdp_prog);
2835 
2836 			rxr->xdp_prog = NULL;
2837 			return rc;
2838 		}
2839 	}
2840 	prod = rxr->rx_prod;
2841 	for (i = 0; i < bp->rx_ring_size; i++) {
2842 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2843 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2844 				    ring_nr, i, bp->rx_ring_size);
2845 			break;
2846 		}
2847 		prod = NEXT_RX(prod);
2848 	}
2849 	rxr->rx_prod = prod;
2850 	ring->fw_ring_id = INVALID_HW_RING_ID;
2851 
2852 	ring = &rxr->rx_agg_ring_struct;
2853 	ring->fw_ring_id = INVALID_HW_RING_ID;
2854 
2855 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2856 		return 0;
2857 
2858 	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2859 		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2860 
2861 	bnxt_init_rxbd_pages(ring, type);
2862 
2863 	prod = rxr->rx_agg_prod;
2864 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
2865 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
2868 			break;
2869 		}
2870 		prod = NEXT_RX_AGG(prod);
2871 	}
2872 	rxr->rx_agg_prod = prod;
2873 
2874 	if (bp->flags & BNXT_FLAG_TPA) {
2875 		if (rxr->rx_tpa) {
2876 			u8 *data;
2877 			dma_addr_t mapping;
2878 
2879 			for (i = 0; i < MAX_TPA; i++) {
2880 				data = __bnxt_alloc_rx_data(bp, &mapping,
2881 							    GFP_KERNEL);
2882 				if (!data)
2883 					return -ENOMEM;
2884 
2885 				rxr->rx_tpa[i].data = data;
2886 				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2887 				rxr->rx_tpa[i].mapping = mapping;
2888 			}
2889 		} else {
2890 			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2891 			return -ENOMEM;
2892 		}
2893 	}
2894 
2895 	return 0;
2896 }
2897 
2898 static void bnxt_init_cp_rings(struct bnxt *bp)
2899 {
2900 	int i, j;
2901 
2902 	for (i = 0; i < bp->cp_nr_rings; i++) {
2903 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2904 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2905 
2906 		ring->fw_ring_id = INVALID_HW_RING_ID;
2907 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2908 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2909 		for (j = 0; j < 2; j++) {
2910 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2911 
2912 			if (!cpr2)
2913 				continue;
2914 
2915 			ring = &cpr2->cp_ring_struct;
2916 			ring->fw_ring_id = INVALID_HW_RING_ID;
2917 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2918 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2919 		}
2920 	}
2921 }
2922 
2923 static int bnxt_init_rx_rings(struct bnxt *bp)
2924 {
2925 	int i, rc = 0;
2926 
2927 	if (BNXT_RX_PAGE_MODE(bp)) {
2928 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2929 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2930 	} else {
2931 		bp->rx_offset = BNXT_RX_OFFSET;
2932 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2933 	}
2934 
2935 	for (i = 0; i < bp->rx_nr_rings; i++) {
2936 		rc = bnxt_init_one_rx_ring(bp, i);
2937 		if (rc)
2938 			break;
2939 	}
2940 
2941 	return rc;
2942 }
2943 
2944 static int bnxt_init_tx_rings(struct bnxt *bp)
2945 {
2946 	u16 i;
2947 
2948 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2949 				   MAX_SKB_FRAGS + 1);
2950 
2951 	for (i = 0; i < bp->tx_nr_rings; i++) {
2952 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2953 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2954 
2955 		ring->fw_ring_id = INVALID_HW_RING_ID;
2956 	}
2957 
2958 	return 0;
2959 }
2960 
2961 static void bnxt_free_ring_grps(struct bnxt *bp)
2962 {
2963 	kfree(bp->grp_info);
2964 	bp->grp_info = NULL;
2965 }
2966 
2967 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2968 {
2969 	int i;
2970 
2971 	if (irq_re_init) {
2972 		bp->grp_info = kcalloc(bp->cp_nr_rings,
2973 				       sizeof(struct bnxt_ring_grp_info),
2974 				       GFP_KERNEL);
2975 		if (!bp->grp_info)
2976 			return -ENOMEM;
2977 	}
2978 	for (i = 0; i < bp->cp_nr_rings; i++) {
2979 		if (irq_re_init)
2980 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2981 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2982 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2983 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2984 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2985 	}
2986 	return 0;
2987 }
2988 
2989 static void bnxt_free_vnics(struct bnxt *bp)
2990 {
2991 	kfree(bp->vnic_info);
2992 	bp->vnic_info = NULL;
2993 	bp->nr_vnics = 0;
2994 }
2995 
2996 static int bnxt_alloc_vnics(struct bnxt *bp)
2997 {
2998 	int num_vnics = 1;
2999 
3000 #ifdef CONFIG_RFS_ACCEL
3001 	if (bp->flags & BNXT_FLAG_RFS)
3002 		num_vnics += bp->rx_nr_rings;
3003 #endif
3004 
3005 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3006 		num_vnics++;
3007 
3008 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3009 				GFP_KERNEL);
3010 	if (!bp->vnic_info)
3011 		return -ENOMEM;
3012 
3013 	bp->nr_vnics = num_vnics;
3014 	return 0;
3015 }
3016 
3017 static void bnxt_init_vnics(struct bnxt *bp)
3018 {
3019 	int i;
3020 
3021 	for (i = 0; i < bp->nr_vnics; i++) {
3022 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3023 		int j;
3024 
3025 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
3026 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3027 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3028 
3029 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3030 
3031 		if (bp->vnic_info[i].rss_hash_key) {
3032 			if (i == 0)
3033 				prandom_bytes(vnic->rss_hash_key,
3034 					      HW_HASH_KEY_SIZE);
3035 			else
3036 				memcpy(vnic->rss_hash_key,
3037 				       bp->vnic_info[0].rss_hash_key,
3038 				       HW_HASH_KEY_SIZE);
3039 		}
3040 	}
3041 }
3042 
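/* Return the number of ring pages needed for @ring_size descriptors,
 * rounded up to a power of 2.  One extra page is always added, e.g. a
 * 128-entry ring with 64 descriptors per page yields 4 pages rather
 * than 2.
 */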
3043 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3044 {
3045 	int pages;
3046 
3047 	pages = ring_size / desc_per_pg;
3048 
3049 	if (!pages)
3050 		return 1;
3051 
3052 	pages++;
3053 
3054 	while (pages & (pages - 1))
3055 		pages++;
3056 
3057 	return pages;
3058 }
3059 
3060 void bnxt_set_tpa_flags(struct bnxt *bp)
3061 {
3062 	bp->flags &= ~BNXT_FLAG_TPA;
3063 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3064 		return;
3065 	if (bp->dev->features & NETIF_F_LRO)
3066 		bp->flags |= BNXT_FLAG_LRO;
3067 	else if (bp->dev->features & NETIF_F_GRO_HW)
3068 		bp->flags |= BNXT_FLAG_GRO;
3069 }
3070 
3071 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3072  * be set on entry.
3073  */
3074 void bnxt_set_ring_params(struct bnxt *bp)
3075 {
3076 	u32 ring_size, rx_size, rx_space;
3077 	u32 agg_factor = 0, agg_ring_size = 0;
3078 
3079 	/* 8 for CRC and VLAN */
3080 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3081 
3082 	rx_space = rx_size + NET_SKB_PAD +
3083 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3084 
3085 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3086 	ring_size = bp->rx_ring_size;
3087 	bp->rx_agg_ring_size = 0;
3088 	bp->rx_agg_nr_pages = 0;
3089 
3090 	if (bp->flags & BNXT_FLAG_TPA)
3091 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3092 
3093 	bp->flags &= ~BNXT_FLAG_JUMBO;
3094 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3095 		u32 jumbo_factor;
3096 
3097 		bp->flags |= BNXT_FLAG_JUMBO;
3098 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3099 		if (jumbo_factor > agg_factor)
3100 			agg_factor = jumbo_factor;
3101 	}
3102 	agg_ring_size = ring_size * agg_factor;
3103 
3104 	if (agg_ring_size) {
3105 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3106 							RX_DESC_CNT);
3107 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3108 			u32 tmp = agg_ring_size;
3109 
3110 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3111 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3112 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3113 				    tmp, agg_ring_size);
3114 		}
3115 		bp->rx_agg_ring_size = agg_ring_size;
3116 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3117 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3118 		rx_space = rx_size + NET_SKB_PAD +
3119 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3120 	}
3121 
3122 	bp->rx_buf_use_size = rx_size;
3123 	bp->rx_buf_size = rx_space;
3124 
3125 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3126 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3127 
3128 	ring_size = bp->tx_ring_size;
3129 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3130 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3131 
3132 	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3133 	bp->cp_ring_size = ring_size;
3134 
3135 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3136 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
3137 		bp->cp_nr_pages = MAX_CP_PAGES;
3138 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3139 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3140 			    ring_size, bp->cp_ring_size);
3141 	}
3142 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3143 	bp->cp_ring_mask = bp->cp_bit - 1;
3144 }
3145 
3146 /* Changing allocation mode of RX rings.
3147  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3148  */
3149 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3150 {
3151 	if (page_mode) {
3152 		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3153 			return -EOPNOTSUPP;
3154 		bp->dev->max_mtu =
3155 			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3156 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3157 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3158 		bp->rx_dir = DMA_BIDIRECTIONAL;
3159 		bp->rx_skb_func = bnxt_rx_page_skb;
3160 		/* Disable LRO or GRO_HW */
3161 		netdev_update_features(bp->dev);
3162 	} else {
3163 		bp->dev->max_mtu = bp->max_mtu;
3164 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3165 		bp->rx_dir = DMA_FROM_DEVICE;
3166 		bp->rx_skb_func = bnxt_rx_skb;
3167 	}
3168 	return 0;
3169 }
3170 
3171 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3172 {
3173 	int i;
3174 	struct bnxt_vnic_info *vnic;
3175 	struct pci_dev *pdev = bp->pdev;
3176 
3177 	if (!bp->vnic_info)
3178 		return;
3179 
3180 	for (i = 0; i < bp->nr_vnics; i++) {
3181 		vnic = &bp->vnic_info[i];
3182 
3183 		kfree(vnic->fw_grp_ids);
3184 		vnic->fw_grp_ids = NULL;
3185 
3186 		kfree(vnic->uc_list);
3187 		vnic->uc_list = NULL;
3188 
3189 		if (vnic->mc_list) {
3190 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3191 					  vnic->mc_list, vnic->mc_list_mapping);
3192 			vnic->mc_list = NULL;
3193 		}
3194 
3195 		if (vnic->rss_table) {
3196 			dma_free_coherent(&pdev->dev, PAGE_SIZE,
3197 					  vnic->rss_table,
3198 					  vnic->rss_table_dma_addr);
3199 			vnic->rss_table = NULL;
3200 		}
3201 
3202 		vnic->rss_hash_key = NULL;
3203 		vnic->flags = 0;
3204 	}
3205 }
3206 
3207 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3208 {
3209 	int i, rc = 0, size;
3210 	struct bnxt_vnic_info *vnic;
3211 	struct pci_dev *pdev = bp->pdev;
3212 	int max_rings;
3213 
3214 	for (i = 0; i < bp->nr_vnics; i++) {
3215 		vnic = &bp->vnic_info[i];
3216 
3217 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3218 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3219 
3220 			if (mem_size > 0) {
3221 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3222 				if (!vnic->uc_list) {
3223 					rc = -ENOMEM;
3224 					goto out;
3225 				}
3226 			}
3227 		}
3228 
3229 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3230 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3231 			vnic->mc_list =
3232 				dma_alloc_coherent(&pdev->dev,
3233 						   vnic->mc_list_size,
3234 						   &vnic->mc_list_mapping,
3235 						   GFP_KERNEL);
3236 			if (!vnic->mc_list) {
3237 				rc = -ENOMEM;
3238 				goto out;
3239 			}
3240 		}
3241 
3242 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3243 			goto vnic_skip_grps;
3244 
3245 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3246 			max_rings = bp->rx_nr_rings;
3247 		else
3248 			max_rings = 1;
3249 
3250 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3251 		if (!vnic->fw_grp_ids) {
3252 			rc = -ENOMEM;
3253 			goto out;
3254 		}
3255 vnic_skip_grps:
3256 		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3257 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3258 			continue;
3259 
3260 		/* Allocate rss table and hash key */
3261 		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3262 						     &vnic->rss_table_dma_addr,
3263 						     GFP_KERNEL);
3264 		if (!vnic->rss_table) {
3265 			rc = -ENOMEM;
3266 			goto out;
3267 		}
3268 
3269 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3270 
3271 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3272 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3273 	}
3274 	return 0;
3275 
3276 out:
3277 	return rc;
3278 }
3279 
3280 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3281 {
3282 	struct pci_dev *pdev = bp->pdev;
3283 
3284 	if (bp->hwrm_cmd_resp_addr) {
3285 		dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3286 				  bp->hwrm_cmd_resp_dma_addr);
3287 		bp->hwrm_cmd_resp_addr = NULL;
3288 	}
3289 
3290 	if (bp->hwrm_cmd_kong_resp_addr) {
3291 		dma_free_coherent(&pdev->dev, PAGE_SIZE,
3292 				  bp->hwrm_cmd_kong_resp_addr,
3293 				  bp->hwrm_cmd_kong_resp_dma_addr);
3294 		bp->hwrm_cmd_kong_resp_addr = NULL;
3295 	}
3296 }
3297 
3298 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3299 {
3300 	struct pci_dev *pdev = bp->pdev;
3301 
3302 	bp->hwrm_cmd_kong_resp_addr =
3303 		dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3304 				   &bp->hwrm_cmd_kong_resp_dma_addr,
3305 				   GFP_KERNEL);
3306 	if (!bp->hwrm_cmd_kong_resp_addr)
3307 		return -ENOMEM;
3308 
3309 	return 0;
3310 }
3311 
3312 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3313 {
3314 	struct pci_dev *pdev = bp->pdev;
3315 
3316 	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3317 						   &bp->hwrm_cmd_resp_dma_addr,
3318 						   GFP_KERNEL);
3319 	if (!bp->hwrm_cmd_resp_addr)
3320 		return -ENOMEM;
3321 
3322 	return 0;
3323 }
3324 
3325 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3326 {
3327 	if (bp->hwrm_short_cmd_req_addr) {
3328 		struct pci_dev *pdev = bp->pdev;
3329 
3330 		dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3331 				  bp->hwrm_short_cmd_req_addr,
3332 				  bp->hwrm_short_cmd_req_dma_addr);
3333 		bp->hwrm_short_cmd_req_addr = NULL;
3334 	}
3335 }
3336 
3337 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3338 {
3339 	struct pci_dev *pdev = bp->pdev;
3340 
3341 	bp->hwrm_short_cmd_req_addr =
3342 		dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3343 				   &bp->hwrm_short_cmd_req_dma_addr,
3344 				   GFP_KERNEL);
3345 	if (!bp->hwrm_short_cmd_req_addr)
3346 		return -ENOMEM;
3347 
3348 	return 0;
3349 }
3350 
3351 static void bnxt_free_port_stats(struct bnxt *bp)
3352 {
3353 	struct pci_dev *pdev = bp->pdev;
3354 
3355 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
3356 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3357 
3358 	if (bp->hw_rx_port_stats) {
3359 		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3360 				  bp->hw_rx_port_stats,
3361 				  bp->hw_rx_port_stats_map);
3362 		bp->hw_rx_port_stats = NULL;
3363 	}
3364 
3365 	if (bp->hw_tx_port_stats_ext) {
3366 		dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3367 				  bp->hw_tx_port_stats_ext,
3368 				  bp->hw_tx_port_stats_ext_map);
3369 		bp->hw_tx_port_stats_ext = NULL;
3370 	}
3371 
3372 	if (bp->hw_rx_port_stats_ext) {
3373 		dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3374 				  bp->hw_rx_port_stats_ext,
3375 				  bp->hw_rx_port_stats_ext_map);
3376 		bp->hw_rx_port_stats_ext = NULL;
3377 	}
3378 }
3379 
3380 static void bnxt_free_ring_stats(struct bnxt *bp)
3381 {
3382 	struct pci_dev *pdev = bp->pdev;
3383 	int size, i;
3384 
3385 	if (!bp->bnapi)
3386 		return;
3387 
3388 	size = sizeof(struct ctx_hw_stats);
3389 
3390 	for (i = 0; i < bp->cp_nr_rings; i++) {
3391 		struct bnxt_napi *bnapi = bp->bnapi[i];
3392 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3393 
3394 		if (cpr->hw_stats) {
3395 			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3396 					  cpr->hw_stats_map);
3397 			cpr->hw_stats = NULL;
3398 		}
3399 	}
3400 }
3401 
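/* Allocate one hardware stats context per completion ring and, on the PF,
 * the DMA buffers for the port statistics.  Extended RX/TX port statistics
 * are allocated only when the firmware spec revision supports them.
 */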
3402 static int bnxt_alloc_stats(struct bnxt *bp)
3403 {
3404 	u32 size, i;
3405 	struct pci_dev *pdev = bp->pdev;
3406 
3407 	size = sizeof(struct ctx_hw_stats);
3408 
3409 	for (i = 0; i < bp->cp_nr_rings; i++) {
3410 		struct bnxt_napi *bnapi = bp->bnapi[i];
3411 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3412 
3413 		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3414 						   &cpr->hw_stats_map,
3415 						   GFP_KERNEL);
3416 		if (!cpr->hw_stats)
3417 			return -ENOMEM;
3418 
3419 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3420 	}
3421 
3422 	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3423 		if (bp->hw_rx_port_stats)
3424 			goto alloc_ext_stats;
3425 
3426 		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3427 					 sizeof(struct tx_port_stats) + 1024;
3428 
3429 		bp->hw_rx_port_stats =
3430 			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3431 					   &bp->hw_rx_port_stats_map,
3432 					   GFP_KERNEL);
3433 		if (!bp->hw_rx_port_stats)
3434 			return -ENOMEM;
3435 
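		/* The TX port stats block follows the RX block; each block
		 * is followed by 512 bytes of padding, which is what the
		 * extra 1024 bytes in hw_port_stats_size cover.
		 */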
3436 		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3437 				       512;
3438 		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3439 					   sizeof(struct rx_port_stats) + 512;
3440 		bp->flags |= BNXT_FLAG_PORT_STATS;
3441 
3442 alloc_ext_stats:
3443 		/* Display extended statistics only if FW supports it */
3444 		if (bp->hwrm_spec_code < 0x10804 ||
3445 		    bp->hwrm_spec_code == 0x10900)
3446 			return 0;
3447 
3448 		if (bp->hw_rx_port_stats_ext)
3449 			goto alloc_tx_ext_stats;
3450 
3451 		bp->hw_rx_port_stats_ext =
3452 			dma_alloc_coherent(&pdev->dev,
3453 					   sizeof(struct rx_port_stats_ext),
3454 					   &bp->hw_rx_port_stats_ext_map,
3455 					   GFP_KERNEL);
3456 		if (!bp->hw_rx_port_stats_ext)
3457 			return 0;
3458 
3459 alloc_tx_ext_stats:
3460 		if (bp->hw_tx_port_stats_ext)
3461 			return 0;
3462 
3463 		if (bp->hwrm_spec_code >= 0x10902) {
3464 			bp->hw_tx_port_stats_ext =
3465 				dma_alloc_coherent(&pdev->dev,
3466 						   sizeof(struct tx_port_stats_ext),
3467 						   &bp->hw_tx_port_stats_ext_map,
3468 						   GFP_KERNEL);
3469 		}
3470 		bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3471 	}
3472 	return 0;
3473 }
3474 
3475 static void bnxt_clear_ring_indices(struct bnxt *bp)
3476 {
3477 	int i;
3478 
3479 	if (!bp->bnapi)
3480 		return;
3481 
3482 	for (i = 0; i < bp->cp_nr_rings; i++) {
3483 		struct bnxt_napi *bnapi = bp->bnapi[i];
3484 		struct bnxt_cp_ring_info *cpr;
3485 		struct bnxt_rx_ring_info *rxr;
3486 		struct bnxt_tx_ring_info *txr;
3487 
3488 		if (!bnapi)
3489 			continue;
3490 
3491 		cpr = &bnapi->cp_ring;
3492 		cpr->cp_raw_cons = 0;
3493 
3494 		txr = bnapi->tx_ring;
3495 		if (txr) {
3496 			txr->tx_prod = 0;
3497 			txr->tx_cons = 0;
3498 		}
3499 
3500 		rxr = bnapi->rx_ring;
3501 		if (rxr) {
3502 			rxr->rx_prod = 0;
3503 			rxr->rx_agg_prod = 0;
3504 			rxr->rx_sw_agg_prod = 0;
3505 			rxr->rx_next_cons = 0;
3506 		}
3507 	}
3508 }
3509 
3510 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3511 {
3512 #ifdef CONFIG_RFS_ACCEL
3513 	int i;
3514 
	/* We are under rtnl_lock and all our NAPIs have been disabled,
	 * so it is safe to delete the hash table.
	 */
3518 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3519 		struct hlist_head *head;
3520 		struct hlist_node *tmp;
3521 		struct bnxt_ntuple_filter *fltr;
3522 
3523 		head = &bp->ntp_fltr_hash_tbl[i];
3524 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3525 			hlist_del(&fltr->hash);
3526 			kfree(fltr);
3527 		}
3528 	}
3529 	if (irq_reinit) {
3530 		kfree(bp->ntp_fltr_bmap);
3531 		bp->ntp_fltr_bmap = NULL;
3532 	}
3533 	bp->ntp_fltr_count = 0;
3534 #endif
3535 }
3536 
3537 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3538 {
3539 #ifdef CONFIG_RFS_ACCEL
3540 	int i, rc = 0;
3541 
3542 	if (!(bp->flags & BNXT_FLAG_RFS))
3543 		return 0;
3544 
3545 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3546 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3547 
3548 	bp->ntp_fltr_count = 0;
3549 	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3550 				    sizeof(long),
3551 				    GFP_KERNEL);
3552 
3553 	if (!bp->ntp_fltr_bmap)
3554 		rc = -ENOMEM;
3555 
3556 	return rc;
3557 #else
3558 	return 0;
3559 #endif
3560 }
3561 
3562 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3563 {
3564 	bnxt_free_vnic_attributes(bp);
3565 	bnxt_free_tx_rings(bp);
3566 	bnxt_free_rx_rings(bp);
3567 	bnxt_free_cp_rings(bp);
3568 	bnxt_free_ntp_fltrs(bp, irq_re_init);
3569 	if (irq_re_init) {
3570 		bnxt_free_ring_stats(bp);
3571 		bnxt_free_ring_grps(bp);
3572 		bnxt_free_vnics(bp);
3573 		kfree(bp->tx_ring_map);
3574 		bp->tx_ring_map = NULL;
3575 		kfree(bp->tx_ring);
3576 		bp->tx_ring = NULL;
3577 		kfree(bp->rx_ring);
3578 		bp->rx_ring = NULL;
3579 		kfree(bp->bnapi);
3580 		bp->bnapi = NULL;
3581 	} else {
3582 		bnxt_clear_ring_indices(bp);
3583 	}
3584 }
3585 
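/* Allocate all driver memory.  With irq_re_init the per-ring bnxt_napi
 * structures, RX/TX ring arrays, stats contexts, ntuple filter table and
 * VNICs are (re)allocated as well; the descriptor rings, completion rings
 * and VNIC attributes are always set up.  Everything allocated so far is
 * freed with bnxt_free_mem() on failure.
 */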
3586 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3587 {
3588 	int i, j, rc, size, arr_size;
3589 	void *bnapi;
3590 
3591 	if (irq_re_init) {
3592 		/* Allocate bnapi mem pointer array and mem block for
3593 		 * all queues
3594 		 */
3595 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3596 				bp->cp_nr_rings);
3597 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3598 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3599 		if (!bnapi)
3600 			return -ENOMEM;
3601 
3602 		bp->bnapi = bnapi;
3603 		bnapi += arr_size;
3604 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3605 			bp->bnapi[i] = bnapi;
3606 			bp->bnapi[i]->index = i;
3607 			bp->bnapi[i]->bp = bp;
3608 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
3609 				struct bnxt_cp_ring_info *cpr =
3610 					&bp->bnapi[i]->cp_ring;
3611 
3612 				cpr->cp_ring_struct.ring_mem.flags =
3613 					BNXT_RMEM_RING_PTE_FLAG;
3614 			}
3615 		}
3616 
3617 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
3618 				      sizeof(struct bnxt_rx_ring_info),
3619 				      GFP_KERNEL);
3620 		if (!bp->rx_ring)
3621 			return -ENOMEM;
3622 
3623 		for (i = 0; i < bp->rx_nr_rings; i++) {
3624 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3625 
3626 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
3627 				rxr->rx_ring_struct.ring_mem.flags =
3628 					BNXT_RMEM_RING_PTE_FLAG;
3629 				rxr->rx_agg_ring_struct.ring_mem.flags =
3630 					BNXT_RMEM_RING_PTE_FLAG;
3631 			}
3632 			rxr->bnapi = bp->bnapi[i];
3633 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3634 		}
3635 
3636 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
3637 				      sizeof(struct bnxt_tx_ring_info),
3638 				      GFP_KERNEL);
3639 		if (!bp->tx_ring)
3640 			return -ENOMEM;
3641 
3642 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3643 					  GFP_KERNEL);
3644 
3645 		if (!bp->tx_ring_map)
3646 			return -ENOMEM;
3647 
3648 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3649 			j = 0;
3650 		else
3651 			j = bp->rx_nr_rings;
3652 
3653 		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3654 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3655 
3656 			if (bp->flags & BNXT_FLAG_CHIP_P5)
3657 				txr->tx_ring_struct.ring_mem.flags =
3658 					BNXT_RMEM_RING_PTE_FLAG;
3659 			txr->bnapi = bp->bnapi[j];
3660 			bp->bnapi[j]->tx_ring = txr;
3661 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3662 			if (i >= bp->tx_nr_rings_xdp) {
3663 				txr->txq_index = i - bp->tx_nr_rings_xdp;
3664 				bp->bnapi[j]->tx_int = bnxt_tx_int;
3665 			} else {
3666 				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3667 				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3668 			}
3669 		}
3670 
3671 		rc = bnxt_alloc_stats(bp);
3672 		if (rc)
3673 			goto alloc_mem_err;
3674 
3675 		rc = bnxt_alloc_ntp_fltrs(bp);
3676 		if (rc)
3677 			goto alloc_mem_err;
3678 
3679 		rc = bnxt_alloc_vnics(bp);
3680 		if (rc)
3681 			goto alloc_mem_err;
3682 	}
3683 
3684 	bnxt_init_ring_struct(bp);
3685 
3686 	rc = bnxt_alloc_rx_rings(bp);
3687 	if (rc)
3688 		goto alloc_mem_err;
3689 
3690 	rc = bnxt_alloc_tx_rings(bp);
3691 	if (rc)
3692 		goto alloc_mem_err;
3693 
3694 	rc = bnxt_alloc_cp_rings(bp);
3695 	if (rc)
3696 		goto alloc_mem_err;
3697 
3698 	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3699 				  BNXT_VNIC_UCAST_FLAG;
3700 	rc = bnxt_alloc_vnic_attributes(bp);
3701 	if (rc)
3702 		goto alloc_mem_err;
3703 	return 0;
3704 
3705 alloc_mem_err:
3706 	bnxt_free_mem(bp, true);
3707 	return rc;
3708 }
3709 
3710 static void bnxt_disable_int(struct bnxt *bp)
3711 {
3712 	int i;
3713 
3714 	if (!bp->bnapi)
3715 		return;
3716 
3717 	for (i = 0; i < bp->cp_nr_rings; i++) {
3718 		struct bnxt_napi *bnapi = bp->bnapi[i];
3719 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3720 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3721 
3722 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
3723 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3724 	}
3725 }
3726 
3727 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3728 {
3729 	struct bnxt_napi *bnapi = bp->bnapi[n];
3730 	struct bnxt_cp_ring_info *cpr;
3731 
3732 	cpr = &bnapi->cp_ring;
3733 	return cpr->cp_ring_struct.map_idx;
3734 }
3735 
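/* Disable interrupts and wait for any in-flight interrupt handlers on the
 * completion ring vectors to finish before returning.
 */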
3736 static void bnxt_disable_int_sync(struct bnxt *bp)
3737 {
3738 	int i;
3739 
3740 	atomic_inc(&bp->intr_sem);
3741 
3742 	bnxt_disable_int(bp);
3743 	for (i = 0; i < bp->cp_nr_rings; i++) {
3744 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3745 
3746 		synchronize_irq(bp->irq_tbl[map_idx].vector);
3747 	}
3748 }
3749 
3750 static void bnxt_enable_int(struct bnxt *bp)
3751 {
3752 	int i;
3753 
3754 	atomic_set(&bp->intr_sem, 0);
3755 	for (i = 0; i < bp->cp_nr_rings; i++) {
3756 		struct bnxt_napi *bnapi = bp->bnapi[i];
3757 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3758 
3759 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
3760 	}
3761 }
3762 
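/* Initialize the common HWRM request header: request type, completion ring,
 * target function, and the DMA address where firmware writes the response.
 * Requests routed to the Kong channel use a separate response buffer.
 */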
3763 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3764 			    u16 cmpl_ring, u16 target_id)
3765 {
3766 	struct input *req = request;
3767 
3768 	req->req_type = cpu_to_le16(req_type);
3769 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
3770 	req->target_id = cpu_to_le16(target_id);
3771 	if (bnxt_kong_hwrm_message(bp, req))
3772 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
3773 	else
3774 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3775 }
3776 
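/* Send one HWRM request and wait for the response.  Requests larger than
 * BNXT_HWRM_MAX_REQ_LEN, or any request when the firmware requires it, are
 * sent using the short command format that passes the DMA address of the
 * full request.  Completion is detected either via the response completion
 * interrupt or by polling the response length and valid byte.
 */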
3777 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3778 				 int timeout, bool silent)
3779 {
3780 	int i, intr_process, rc, tmo_count;
3781 	struct input *req = msg;
3782 	u32 *data = msg;
3783 	__le32 *resp_len;
3784 	u8 *valid;
3785 	u16 cp_ring_id, len = 0;
3786 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3787 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3788 	struct hwrm_short_input short_input = {0};
3789 	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
3790 	u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
3791 	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
3792 	u16 dst = BNXT_HWRM_CHNL_CHIMP;
3793 
3794 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3795 		if (msg_len > bp->hwrm_max_ext_req_len ||
3796 		    !bp->hwrm_short_cmd_req_addr)
3797 			return -EINVAL;
3798 	}
3799 
3800 	if (bnxt_hwrm_kong_chnl(bp, req)) {
3801 		dst = BNXT_HWRM_CHNL_KONG;
3802 		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3803 		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3804 		resp = bp->hwrm_cmd_kong_resp_addr;
3805 		resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3806 	}
3807 
3808 	memset(resp, 0, PAGE_SIZE);
3809 	cp_ring_id = le16_to_cpu(req->cmpl_ring);
3810 	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3811 
3812 	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
3813 	/* currently supports only one outstanding message */
3814 	if (intr_process)
3815 		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3816 
3817 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3818 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3819 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3820 		u16 max_msg_len;
3821 
		/* Set the boundary for the maximum extended request length
		 * for the short cmd format.  If passed up from the device,
		 * use the max supported internal req length.
		 */
3826 		max_msg_len = bp->hwrm_max_ext_req_len;
3827 
3828 		memcpy(short_cmd_req, req, msg_len);
3829 		if (msg_len < max_msg_len)
3830 			memset(short_cmd_req + msg_len, 0,
3831 			       max_msg_len - msg_len);
3832 
3833 		short_input.req_type = req->req_type;
3834 		short_input.signature =
3835 				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3836 		short_input.size = cpu_to_le16(msg_len);
3837 		short_input.req_addr =
3838 			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3839 
3840 		data = (u32 *)&short_input;
3841 		msg_len = sizeof(short_input);
3842 
3843 		/* Sync memory write before updating doorbell */
3844 		wmb();
3845 
3846 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3847 	}
3848 
3849 	/* Write request msg to hwrm channel */
3850 	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
3851 
3852 	for (i = msg_len; i < max_req_len; i += 4)
3853 		writel(0, bp->bar0 + bar_offset + i);
3854 
3855 	/* Ring channel doorbell */
3856 	writel(1, bp->bar0 + doorbell_offset);
3857 
3858 	if (!timeout)
3859 		timeout = DFLT_HWRM_CMD_TIMEOUT;
3860 	/* convert timeout to usec */
3861 	timeout *= 1000;
3862 
3863 	i = 0;
3864 	/* Short timeout for the first few iterations:
3865 	 * number of loops = number of loops for short timeout +
3866 	 * number of loops for standard timeout.
3867 	 */
3868 	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3869 	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3870 	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3871 	resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3872 
3873 	if (intr_process) {
3874 		u16 seq_id = bp->hwrm_intr_seq_id;
3875 
3876 		/* Wait until hwrm response cmpl interrupt is processed */
3877 		while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
3878 		       i++ < tmo_count) {
3879 			/* on first few passes, just barely sleep */
3880 			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3881 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3882 					     HWRM_SHORT_MAX_TIMEOUT);
3883 			else
3884 				usleep_range(HWRM_MIN_TIMEOUT,
3885 					     HWRM_MAX_TIMEOUT);
3886 		}
3887 
3888 		if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
3889 			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3890 				   le16_to_cpu(req->req_type));
3891 			return -1;
3892 		}
3893 		len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3894 		      HWRM_RESP_LEN_SFT;
3895 		valid = resp_addr + len - 1;
3896 	} else {
3897 		int j;
3898 
3899 		/* Check if response len is updated */
3900 		for (i = 0; i < tmo_count; i++) {
3901 			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3902 			      HWRM_RESP_LEN_SFT;
3903 			if (len)
3904 				break;
3905 			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3907 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3908 					     HWRM_SHORT_MAX_TIMEOUT);
3909 			else
3910 				usleep_range(HWRM_MIN_TIMEOUT,
3911 					     HWRM_MAX_TIMEOUT);
3912 		}
3913 
3914 		if (i >= tmo_count) {
3915 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3916 				   HWRM_TOTAL_TIMEOUT(i),
3917 				   le16_to_cpu(req->req_type),
3918 				   le16_to_cpu(req->seq_id), len);
3919 			return -1;
3920 		}
3921 
3922 		/* Last byte of resp contains valid bit */
3923 		valid = resp_addr + len - 1;
3924 		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3925 			/* make sure we read from updated DMA memory */
3926 			dma_rmb();
3927 			if (*valid)
3928 				break;
3929 			udelay(1);
3930 		}
3931 
3932 		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
3933 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3934 				   HWRM_TOTAL_TIMEOUT(i),
3935 				   le16_to_cpu(req->req_type),
3936 				   le16_to_cpu(req->seq_id), len, *valid);
3937 			return -1;
3938 		}
3939 	}
3940 
	/* Zero the valid bit for compatibility.  The valid bit in an older
	 * spec may become a new field in a newer spec.  We must make sure
	 * that a new field not implemented by the old spec reads as zero.
	 */
3945 	*valid = 0;
3946 	rc = le16_to_cpu(resp->error_code);
3947 	if (rc && !silent)
3948 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3949 			   le16_to_cpu(resp->req_type),
3950 			   le16_to_cpu(resp->seq_id), rc);
3951 	return rc;
3952 }
3953 
3954 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3955 {
3956 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3957 }
3958 
3959 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3960 			      int timeout)
3961 {
3962 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3963 }
3964 
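/* Locked variants of the send helpers.  The firmware channel supports only
 * one outstanding request, so bp->hwrm_cmd_lock serializes all senders.
 */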
3965 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3966 {
3967 	int rc;
3968 
3969 	mutex_lock(&bp->hwrm_cmd_lock);
3970 	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3971 	mutex_unlock(&bp->hwrm_cmd_lock);
3972 	return rc;
3973 }
3974 
3975 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3976 			     int timeout)
3977 {
3978 	int rc;
3979 
3980 	mutex_lock(&bp->hwrm_cmd_lock);
3981 	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3982 	mutex_unlock(&bp->hwrm_cmd_lock);
3983 	return rc;
3984 }
3985 
3986 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3987 				     int bmap_size)
3988 {
3989 	struct hwrm_func_drv_rgtr_input req = {0};
3990 	DECLARE_BITMAP(async_events_bmap, 256);
3991 	u32 *events = (u32 *)async_events_bmap;
3992 	int i;
3993 
3994 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3995 
3996 	req.enables =
3997 		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3998 
3999 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
4000 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4001 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
4002 
4003 	if (bmap && bmap_size) {
4004 		for (i = 0; i < bmap_size; i++) {
4005 			if (test_bit(i, bmap))
4006 				__set_bit(i, async_events_bmap);
4007 		}
4008 	}
4009 
4010 	for (i = 0; i < 8; i++)
4011 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4012 
4013 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4014 }
4015 
4016 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4017 {
4018 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4019 	struct hwrm_func_drv_rgtr_input req = {0};
4020 	int rc;
4021 
4022 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4023 
4024 	req.enables =
4025 		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4026 			    FUNC_DRV_RGTR_REQ_ENABLES_VER);
4027 
4028 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4029 	req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4030 	req.ver_maj_8b = DRV_VER_MAJ;
4031 	req.ver_min_8b = DRV_VER_MIN;
4032 	req.ver_upd_8b = DRV_VER_UPD;
4033 	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4034 	req.ver_min = cpu_to_le16(DRV_VER_MIN);
4035 	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4036 
4037 	if (BNXT_PF(bp)) {
4038 		u32 data[8];
4039 		int i;
4040 
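		/* Build a bitmap of VF HWRM commands that the firmware
		 * should forward to the PF driver for handling.
		 */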
4041 		memset(data, 0, sizeof(data));
4042 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4043 			u16 cmd = bnxt_vf_req_snif[i];
4044 			unsigned int bit, idx;
4045 
4046 			idx = cmd / 32;
4047 			bit = cmd % 32;
4048 			data[idx] |= 1 << bit;
4049 		}
4050 
4051 		for (i = 0; i < 8; i++)
4052 			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4053 
4054 		req.enables |=
4055 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4056 	}
4057 
4058 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4059 		req.flags |= cpu_to_le32(
4060 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4061 
4062 	mutex_lock(&bp->hwrm_cmd_lock);
4063 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4064 	if (rc)
4065 		rc = -EIO;
4066 	else if (resp->flags &
4067 		 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4068 		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4069 	mutex_unlock(&bp->hwrm_cmd_lock);
4070 	return rc;
4071 }
4072 
4073 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4074 {
4075 	struct hwrm_func_drv_unrgtr_input req = {0};
4076 
4077 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4078 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4079 }
4080 
4081 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4082 {
4083 	u32 rc = 0;
4084 	struct hwrm_tunnel_dst_port_free_input req = {0};
4085 
4086 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4087 	req.tunnel_type = tunnel_type;
4088 
4089 	switch (tunnel_type) {
4090 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4091 		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4092 		break;
4093 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4094 		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4095 		break;
4096 	default:
4097 		break;
4098 	}
4099 
4100 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4101 	if (rc)
4102 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4103 			   rc);
4104 	return rc;
4105 }
4106 
4107 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4108 					   u8 tunnel_type)
4109 {
4110 	u32 rc = 0;
4111 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
4112 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4113 
4114 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4115 
4116 	req.tunnel_type = tunnel_type;
4117 	req.tunnel_dst_port_val = port;
4118 
4119 	mutex_lock(&bp->hwrm_cmd_lock);
4120 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4121 	if (rc) {
4122 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4123 			   rc);
4124 		goto err_out;
4125 	}
4126 
4127 	switch (tunnel_type) {
4128 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4129 		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4130 		break;
4131 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4132 		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4133 		break;
4134 	default:
4135 		break;
4136 	}
4137 
4138 err_out:
4139 	mutex_unlock(&bp->hwrm_cmd_lock);
4140 	return rc;
4141 }
4142 
4143 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4144 {
4145 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4146 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4147 
4148 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4149 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4150 
4151 	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4152 	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4153 	req.mask = cpu_to_le32(vnic->rx_mask);
4154 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4155 }
4156 
4157 #ifdef CONFIG_RFS_ACCEL
4158 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4159 					    struct bnxt_ntuple_filter *fltr)
4160 {
4161 	struct hwrm_cfa_ntuple_filter_free_input req = {0};
4162 
4163 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4164 	req.ntuple_filter_id = fltr->filter_id;
4165 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4166 }
4167 
4168 #define BNXT_NTP_FLTR_FLAGS					\
4169 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
4170 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
4171 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
4172 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
4173 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
4174 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
4175 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
4176 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
4177 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
4178 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
4179 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
4180 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
4181 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
4182 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4183 
4184 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
4185 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4186 
4187 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4188 					     struct bnxt_ntuple_filter *fltr)
4189 {
4190 	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4191 	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4192 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4193 	struct flow_keys *keys = &fltr->fkeys;
4194 	int rc = 0;
4195 
4196 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4197 	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4198 
4199 	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4200 
4201 	req.ethertype = htons(ETH_P_IP);
4202 	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4203 	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4204 	req.ip_protocol = keys->basic.ip_proto;
4205 
4206 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4207 		int i;
4208 
4209 		req.ethertype = htons(ETH_P_IPV6);
4210 		req.ip_addr_type =
4211 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4212 		*(struct in6_addr *)&req.src_ipaddr[0] =
4213 			keys->addrs.v6addrs.src;
4214 		*(struct in6_addr *)&req.dst_ipaddr[0] =
4215 			keys->addrs.v6addrs.dst;
4216 		for (i = 0; i < 4; i++) {
4217 			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4218 			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4219 		}
4220 	} else {
4221 		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4222 		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4223 		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4224 		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4225 	}
4226 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4227 		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4228 		req.tunnel_type =
4229 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4230 	}
4231 
4232 	req.src_port = keys->ports.src;
4233 	req.src_port_mask = cpu_to_be16(0xffff);
4234 	req.dst_port = keys->ports.dst;
4235 	req.dst_port_mask = cpu_to_be16(0xffff);
4236 
4237 	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4238 	mutex_lock(&bp->hwrm_cmd_lock);
4239 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4240 	if (!rc) {
4241 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
4242 		fltr->filter_id = resp->ntuple_filter_id;
4243 	}
4244 	mutex_unlock(&bp->hwrm_cmd_lock);
4245 	return rc;
4246 }
4247 #endif
4248 
4249 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4250 				     u8 *mac_addr)
4251 {
4252 	u32 rc = 0;
4253 	struct hwrm_cfa_l2_filter_alloc_input req = {0};
4254 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4255 
4256 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4257 	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4258 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4259 		req.flags |=
4260 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4261 	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4262 	req.enables =
4263 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4264 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4265 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4266 	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4267 	req.l2_addr_mask[0] = 0xff;
4268 	req.l2_addr_mask[1] = 0xff;
4269 	req.l2_addr_mask[2] = 0xff;
4270 	req.l2_addr_mask[3] = 0xff;
4271 	req.l2_addr_mask[4] = 0xff;
4272 	req.l2_addr_mask[5] = 0xff;
4273 
4274 	mutex_lock(&bp->hwrm_cmd_lock);
4275 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4276 	if (!rc)
4277 		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4278 							resp->l2_filter_id;
4279 	mutex_unlock(&bp->hwrm_cmd_lock);
4280 	return rc;
4281 }
4282 
4283 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4284 {
4285 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4286 	int rc = 0;
4287 
4288 	/* Any associated ntuple filters will also be cleared by firmware. */
4289 	mutex_lock(&bp->hwrm_cmd_lock);
4290 	for (i = 0; i < num_of_vnics; i++) {
4291 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4292 
4293 		for (j = 0; j < vnic->uc_filter_count; j++) {
4294 			struct hwrm_cfa_l2_filter_free_input req = {0};
4295 
4296 			bnxt_hwrm_cmd_hdr_init(bp, &req,
4297 					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
4298 
4299 			req.l2_filter_id = vnic->fw_l2_filter_id[j];
4300 
4301 			rc = _hwrm_send_message(bp, &req, sizeof(req),
4302 						HWRM_CMD_TIMEOUT);
4303 		}
4304 		vnic->uc_filter_count = 0;
4305 	}
4306 	mutex_unlock(&bp->hwrm_cmd_lock);
4307 
4308 	return rc;
4309 }
4310 
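/* Configure TPA (hardware packet aggregation) on a VNIC.  The maximum number
 * of aggregation segments is passed in log2 units, computed from the MSS and
 * the RX page size.
 */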
4311 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4312 {
4313 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4314 	struct hwrm_vnic_tpa_cfg_input req = {0};
4315 
4316 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4317 		return 0;
4318 
4319 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4320 
4321 	if (tpa_flags) {
4322 		u16 mss = bp->dev->mtu - 40;
4323 		u32 nsegs, n, segs = 0, flags;
4324 
4325 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4326 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4327 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4328 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4329 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4330 		if (tpa_flags & BNXT_FLAG_GRO)
4331 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4332 
4333 		req.flags = cpu_to_le32(flags);
4334 
4335 		req.enables =
4336 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4337 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4338 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4339 
		/* The number of segs is in log2 units, and the first packet
		 * is not included in these units.
		 */
4343 		if (mss <= BNXT_RX_PAGE_SIZE) {
4344 			n = BNXT_RX_PAGE_SIZE / mss;
4345 			nsegs = (MAX_SKB_FRAGS - 1) * n;
4346 		} else {
4347 			n = mss / BNXT_RX_PAGE_SIZE;
4348 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
4349 				n++;
4350 			nsegs = (MAX_SKB_FRAGS - n) / n;
4351 		}
4352 
4353 		segs = ilog2(nsegs);
4354 		req.max_agg_segs = cpu_to_le16(segs);
4355 		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
4356 
4357 		req.min_agg_len = cpu_to_le32(512);
4358 	}
4359 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4360 
4361 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4362 }
4363 
4364 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4365 {
4366 	struct bnxt_ring_grp_info *grp_info;
4367 
4368 	grp_info = &bp->grp_info[ring->grp_idx];
4369 	return grp_info->cp_fw_ring_id;
4370 }
4371 
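/* Return the firmware ID of the completion ring servicing an RX ring: the
 * dedicated RX completion ring on P5 chips, or the ring group's completion
 * ring on older chips.
 */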
4372 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4373 {
4374 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
4375 		struct bnxt_napi *bnapi = rxr->bnapi;
4376 		struct bnxt_cp_ring_info *cpr;
4377 
4378 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4379 		return cpr->cp_ring_struct.fw_ring_id;
4380 	} else {
4381 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4382 	}
4383 }
4384 
4385 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4386 {
4387 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
4388 		struct bnxt_napi *bnapi = txr->bnapi;
4389 		struct bnxt_cp_ring_info *cpr;
4390 
4391 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4392 		return cpr->cp_ring_struct.fw_ring_id;
4393 	} else {
4394 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4395 	}
4396 }
4397 
4398 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4399 {
4400 	u32 i, j, max_rings;
4401 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4402 	struct hwrm_vnic_rss_cfg_input req = {0};
4403 
4404 	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4405 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4406 		return 0;
4407 
4408 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4409 	if (set_rss) {
4410 		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4411 		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4412 		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4413 			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4414 				max_rings = bp->rx_nr_rings - 1;
4415 			else
4416 				max_rings = bp->rx_nr_rings;
4417 		} else {
4418 			max_rings = 1;
4419 		}
4420 
4421 		/* Fill the RSS indirection table with ring group ids */
4422 		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4423 			if (j == max_rings)
4424 				j = 0;
4425 			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4426 		}
4427 
4428 		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4429 		req.hash_key_tbl_addr =
4430 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
4431 	}
4432 	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4433 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4434 }
4435 
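/* RSS configuration for P5 chips.  The indirection table holds (RX ring ID,
 * completion ring ID) pairs, and each RSS context covers up to 64 RX rings,
 * so multiple contexts are programmed for larger ring counts.
 */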
4436 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4437 {
4438 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4439 	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4440 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4441 	struct hwrm_vnic_rss_cfg_input req = {0};
4442 
4443 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4444 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4445 	if (!set_rss) {
4446 		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4447 		return 0;
4448 	}
4449 	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4450 	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4451 	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4452 	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4453 	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4454 	for (i = 0, k = 0; i < nr_ctxs; i++) {
4455 		__le16 *ring_tbl = vnic->rss_table;
4456 		int rc;
4457 
4458 		req.ring_table_pair_index = i;
4459 		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4460 		for (j = 0; j < 64; j++) {
4461 			u16 ring_id;
4462 
4463 			ring_id = rxr->rx_ring_struct.fw_ring_id;
4464 			*ring_tbl++ = cpu_to_le16(ring_id);
4465 			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4466 			*ring_tbl++ = cpu_to_le16(ring_id);
4467 			rxr++;
4468 			k++;
4469 			if (k == max_rings) {
4470 				k = 0;
4471 				rxr = &bp->rx_ring[0];
4472 			}
4473 		}
4474 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4475 		if (rc)
4476 			return -EIO;
4477 	}
4478 	return 0;
4479 }
4480 
4481 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4482 {
4483 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4484 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
4485 
4486 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4487 	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4488 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4489 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4490 	req.enables =
4491 		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4492 			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4493 	/* thresholds not implemented in firmware yet */
4494 	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4495 	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4496 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4497 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4498 }
4499 
4500 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4501 					u16 ctx_idx)
4502 {
4503 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4504 
4505 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4506 	req.rss_cos_lb_ctx_id =
4507 		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4508 
4509 	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4510 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4511 }
4512 
4513 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4514 {
4515 	int i, j;
4516 
4517 	for (i = 0; i < bp->nr_vnics; i++) {
4518 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4519 
4520 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4521 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4522 				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4523 		}
4524 	}
4525 	bp->rsscos_nr_ctxs = 0;
4526 }
4527 
4528 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4529 {
4530 	int rc;
4531 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4532 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4533 						bp->hwrm_cmd_resp_addr;
4534 
4535 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4536 			       -1);
4537 
4538 	mutex_lock(&bp->hwrm_cmd_lock);
4539 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4540 	if (!rc)
4541 		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4542 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
4543 	mutex_unlock(&bp->hwrm_cmd_lock);
4544 
4545 	return rc;
4546 }
4547 
4548 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4549 {
4550 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4551 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4552 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4553 }
4554 
4555 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4556 {
4557 	unsigned int ring = 0, grp_idx;
4558 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4559 	struct hwrm_vnic_cfg_input req = {0};
4560 	u16 def_vlan = 0;
4561 
4562 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4563 
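	/* P5 chips have no ring groups; point the VNIC at its default RX
	 * and completion rings instead.
	 */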
4564 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
4565 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4566 
4567 		req.default_rx_ring_id =
4568 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4569 		req.default_cmpl_ring_id =
4570 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4571 		req.enables =
4572 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4573 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4574 		goto vnic_mru;
4575 	}
4576 	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now; TBD: COS & LB */
4578 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4579 		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4580 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4581 					   VNIC_CFG_REQ_ENABLES_MRU);
4582 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4583 		req.rss_rule =
4584 			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4585 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4586 					   VNIC_CFG_REQ_ENABLES_MRU);
4587 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4588 	} else {
4589 		req.rss_rule = cpu_to_le16(0xffff);
4590 	}
4591 
4592 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4593 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4594 		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4595 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4596 	} else {
4597 		req.cos_rule = cpu_to_le16(0xffff);
4598 	}
4599 
4600 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4601 		ring = 0;
4602 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4603 		ring = vnic_id - 1;
4604 	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4605 		ring = bp->rx_nr_rings - 1;
4606 
4607 	grp_idx = bp->rx_ring[ring].bnapi->index;
4608 	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4609 	req.lb_rule = cpu_to_le16(0xffff);
4610 vnic_mru:
4611 	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4612 			      VLAN_HLEN);
4613 
4614 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4615 #ifdef CONFIG_BNXT_SRIOV
4616 	if (BNXT_VF(bp))
4617 		def_vlan = bp->vf.vlan;
4618 #endif
4619 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4620 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4621 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4622 		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4623 
4624 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4625 }
4626 
4627 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4628 {
4629 	u32 rc = 0;
4630 
4631 	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4632 		struct hwrm_vnic_free_input req = {0};
4633 
4634 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4635 		req.vnic_id =
4636 			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4637 
4638 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4639 		if (rc)
4640 			return rc;
4641 		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4642 	}
4643 	return rc;
4644 }
4645 
4646 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4647 {
4648 	u16 i;
4649 
4650 	for (i = 0; i < bp->nr_vnics; i++)
4651 		bnxt_hwrm_vnic_free_one(bp, i);
4652 }
4653 
4654 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4655 				unsigned int start_rx_ring_idx,
4656 				unsigned int nr_rings)
4657 {
4658 	int rc = 0;
4659 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4660 	struct hwrm_vnic_alloc_input req = {0};
4661 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4662 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4663 
4664 	if (bp->flags & BNXT_FLAG_CHIP_P5)
4665 		goto vnic_no_ring_grps;
4666 
4667 	/* map ring groups to this vnic */
4668 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4669 		grp_idx = bp->rx_ring[i].bnapi->index;
4670 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4671 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4672 				   j, nr_rings);
4673 			break;
4674 		}
4675 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
4676 	}
4677 
4678 vnic_no_ring_grps:
4679 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4680 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
4681 	if (vnic_id == 0)
4682 		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4683 
4684 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4685 
4686 	mutex_lock(&bp->hwrm_cmd_lock);
4687 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4688 	if (!rc)
4689 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
4690 	mutex_unlock(&bp->hwrm_cmd_lock);
4691 	return rc;
4692 }
4693 
4694 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4695 {
4696 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4697 	struct hwrm_vnic_qcaps_input req = {0};
4698 	int rc;
4699 
4700 	if (bp->hwrm_spec_code < 0x10600)
4701 		return 0;
4702 
4703 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4704 	mutex_lock(&bp->hwrm_cmd_lock);
4705 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4706 	if (!rc) {
4707 		u32 flags = le32_to_cpu(resp->flags);
4708 
4709 		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4710 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4711 			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4712 		if (flags &
4713 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4714 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
4715 	}
4716 	mutex_unlock(&bp->hwrm_cmd_lock);
4717 	return rc;
4718 }
4719 
4720 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4721 {
4722 	u16 i;
4723 	u32 rc = 0;
4724 
4725 	if (bp->flags & BNXT_FLAG_CHIP_P5)
4726 		return 0;
4727 
4728 	mutex_lock(&bp->hwrm_cmd_lock);
4729 	for (i = 0; i < bp->rx_nr_rings; i++) {
4730 		struct hwrm_ring_grp_alloc_input req = {0};
4731 		struct hwrm_ring_grp_alloc_output *resp =
4732 					bp->hwrm_cmd_resp_addr;
4733 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4734 
4735 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4736 
4737 		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4738 		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4739 		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4740 		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4741 
4742 		rc = _hwrm_send_message(bp, &req, sizeof(req),
4743 					HWRM_CMD_TIMEOUT);
4744 		if (rc)
4745 			break;
4746 
4747 		bp->grp_info[grp_idx].fw_grp_id =
4748 			le32_to_cpu(resp->ring_group_id);
4749 	}
4750 	mutex_unlock(&bp->hwrm_cmd_lock);
4751 	return rc;
4752 }
4753 
4754 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4755 {
4756 	u16 i;
4757 	u32 rc = 0;
4758 	struct hwrm_ring_grp_free_input req = {0};
4759 
4760 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
4761 		return 0;
4762 
4763 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4764 
4765 	mutex_lock(&bp->hwrm_cmd_lock);
4766 	for (i = 0; i < bp->cp_nr_rings; i++) {
4767 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4768 			continue;
4769 		req.ring_group_id =
4770 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
4771 
4772 		rc = _hwrm_send_message(bp, &req, sizeof(req),
4773 					HWRM_CMD_TIMEOUT);
4774 		if (rc)
4775 			break;
4776 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4777 	}
4778 	mutex_unlock(&bp->hwrm_cmd_lock);
4779 	return rc;
4780 }
4781 
4782 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4783 				    struct bnxt_ring_struct *ring,
4784 				    u32 ring_type, u32 map_index)
4785 {
4786 	int rc = 0, err = 0;
4787 	struct hwrm_ring_alloc_input req = {0};
4788 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4789 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
4790 	struct bnxt_ring_grp_info *grp_info;
4791 	u16 ring_id;
4792 
4793 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4794 
4795 	req.enables = 0;
4796 	if (rmem->nr_pages > 1) {
4797 		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
4798 		/* Page size is in log2 units */
4799 		req.page_size = BNXT_PAGE_SHIFT;
4800 		req.page_tbl_depth = 1;
4801 	} else {
4802 		req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
4803 	}
4804 	req.fbo = 0;
4805 	/* Association of ring index with doorbell index and MSIX number */
4806 	req.logical_id = cpu_to_le16(map_index);
4807 
4808 	switch (ring_type) {
4809 	case HWRM_RING_ALLOC_TX: {
4810 		struct bnxt_tx_ring_info *txr;
4811 
4812 		txr = container_of(ring, struct bnxt_tx_ring_info,
4813 				   tx_ring_struct);
4814 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4815 		/* Association of transmit ring with completion ring */
4816 		grp_info = &bp->grp_info[ring->grp_idx];
4817 		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
4818 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4819 		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4820 		req.queue_id = cpu_to_le16(ring->queue_id);
4821 		break;
4822 	}
4823 	case HWRM_RING_ALLOC_RX:
4824 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4825 		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4826 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
4827 			u16 flags = 0;
4828 
4829 			/* Association of rx ring with stats context */
4830 			grp_info = &bp->grp_info[ring->grp_idx];
4831 			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4832 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4833 			req.enables |= cpu_to_le32(
4834 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4835 			if (NET_IP_ALIGN == 2)
4836 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4837 			req.flags = cpu_to_le16(flags);
4838 		}
4839 		break;
4840 	case HWRM_RING_ALLOC_AGG:
4841 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
4842 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4843 			/* Association of agg ring with rx ring */
4844 			grp_info = &bp->grp_info[ring->grp_idx];
4845 			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4846 			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4847 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4848 			req.enables |= cpu_to_le32(
4849 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4850 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4851 		} else {
4852 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4853 		}
4854 		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4855 		break;
4856 	case HWRM_RING_ALLOC_CMPL:
4857 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4858 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4859 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
4860 			/* Association of cp ring with nq */
4861 			grp_info = &bp->grp_info[map_index];
4862 			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4863 			req.cq_handle = cpu_to_le64(ring->handle);
4864 			req.enables |= cpu_to_le32(
4865 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4866 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4867 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4868 		}
4869 		break;
4870 	case HWRM_RING_ALLOC_NQ:
4871 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4872 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4873 		if (bp->flags & BNXT_FLAG_USING_MSIX)
4874 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4875 		break;
4876 	default:
4877 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4878 			   ring_type);
4879 		return -1;
4880 	}
4881 
4882 	mutex_lock(&bp->hwrm_cmd_lock);
4883 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4884 	err = le16_to_cpu(resp->error_code);
4885 	ring_id = le16_to_cpu(resp->ring_id);
4886 	mutex_unlock(&bp->hwrm_cmd_lock);
4887 
4888 	if (rc || err) {
4889 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4890 			   ring_type, rc, err);
4891 		return -EIO;
4892 	}
4893 	ring->fw_ring_id = ring_id;
4894 	return rc;
4895 }
4896 
4897 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4898 {
4899 	int rc;
4900 
4901 	if (BNXT_PF(bp)) {
4902 		struct hwrm_func_cfg_input req = {0};
4903 
4904 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4905 		req.fid = cpu_to_le16(0xffff);
4906 		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4907 		req.async_event_cr = cpu_to_le16(idx);
4908 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4909 	} else {
4910 		struct hwrm_func_vf_cfg_input req = {0};
4911 
4912 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4913 		req.enables =
4914 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4915 		req.async_event_cr = cpu_to_le16(idx);
4916 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4917 	}
4918 	return rc;
4919 }
4920 
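/* Record the doorbell address and key for a ring.  P5 chips use 64-bit
 * doorbells at a fixed BAR1 offset (different for PF and VF); older chips
 * use 32-bit doorbells spaced 0x80 apart, indexed by map_idx.
 */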
4921 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4922 			u32 map_idx, u32 xid)
4923 {
4924 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
4925 		if (BNXT_PF(bp))
4926 			db->doorbell = bp->bar1 + 0x10000;
4927 		else
4928 			db->doorbell = bp->bar1 + 0x4000;
4929 		switch (ring_type) {
4930 		case HWRM_RING_ALLOC_TX:
4931 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4932 			break;
4933 		case HWRM_RING_ALLOC_RX:
4934 		case HWRM_RING_ALLOC_AGG:
4935 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4936 			break;
4937 		case HWRM_RING_ALLOC_CMPL:
4938 			db->db_key64 = DBR_PATH_L2;
4939 			break;
4940 		case HWRM_RING_ALLOC_NQ:
4941 			db->db_key64 = DBR_PATH_L2;
4942 			break;
4943 		}
4944 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
4945 	} else {
4946 		db->doorbell = bp->bar1 + map_idx * 0x80;
4947 		switch (ring_type) {
4948 		case HWRM_RING_ALLOC_TX:
4949 			db->db_key32 = DB_KEY_TX;
4950 			break;
4951 		case HWRM_RING_ALLOC_RX:
4952 		case HWRM_RING_ALLOC_AGG:
4953 			db->db_key32 = DB_KEY_RX;
4954 			break;
4955 		case HWRM_RING_ALLOC_CMPL:
4956 			db->db_key32 = DB_KEY_CP;
4957 			break;
4958 		}
4959 	}
4960 }
4961 
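/* Allocate all firmware rings: NQ/completion rings first (ring 0 also hosts
 * the async event completions), then TX, RX and aggregation rings.  On P5
 * chips, each TX and RX ring additionally gets its own hardware completion
 * ring bound to the NAPI's NQ.
 */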
4962 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4963 {
4964 	int i, rc = 0;
4965 	u32 type;
4966 
4967 	if (bp->flags & BNXT_FLAG_CHIP_P5)
4968 		type = HWRM_RING_ALLOC_NQ;
4969 	else
4970 		type = HWRM_RING_ALLOC_CMPL;
4971 	for (i = 0; i < bp->cp_nr_rings; i++) {
4972 		struct bnxt_napi *bnapi = bp->bnapi[i];
4973 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4974 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4975 		u32 map_idx = ring->map_idx;
4976 
4977 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4978 		if (rc)
4979 			goto err_out;
4980 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4981 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4982 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4983 
4984 		if (!i) {
4985 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4986 			if (rc)
4987 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4988 		}
4989 	}
4990 
4991 	type = HWRM_RING_ALLOC_TX;
4992 	for (i = 0; i < bp->tx_nr_rings; i++) {
4993 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4994 		struct bnxt_ring_struct *ring;
4995 		u32 map_idx;
4996 
4997 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
4998 			struct bnxt_napi *bnapi = txr->bnapi;
4999 			struct bnxt_cp_ring_info *cpr, *cpr2;
5000 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5001 
5002 			cpr = &bnapi->cp_ring;
5003 			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5004 			ring = &cpr2->cp_ring_struct;
5005 			ring->handle = BNXT_TX_HDL;
5006 			map_idx = bnapi->index;
5007 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5008 			if (rc)
5009 				goto err_out;
5010 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5011 				    ring->fw_ring_id);
5012 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5013 		}
5014 		ring = &txr->tx_ring_struct;
5015 		map_idx = i;
5016 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5017 		if (rc)
5018 			goto err_out;
5019 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5020 	}
5021 
5022 	type = HWRM_RING_ALLOC_RX;
5023 	for (i = 0; i < bp->rx_nr_rings; i++) {
5024 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5025 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5026 		struct bnxt_napi *bnapi = rxr->bnapi;
5027 		u32 map_idx = bnapi->index;
5028 
5029 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5030 		if (rc)
5031 			goto err_out;
5032 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5033 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5034 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5035 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5036 			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5037 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5038 			struct bnxt_cp_ring_info *cpr2;
5039 
5040 			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5041 			ring = &cpr2->cp_ring_struct;
5042 			ring->handle = BNXT_RX_HDL;
5043 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5044 			if (rc)
5045 				goto err_out;
5046 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5047 				    ring->fw_ring_id);
5048 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5049 		}
5050 	}
5051 
5052 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5053 		type = HWRM_RING_ALLOC_AGG;
5054 		for (i = 0; i < bp->rx_nr_rings; i++) {
5055 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5056 			struct bnxt_ring_struct *ring =
5057 						&rxr->rx_agg_ring_struct;
5058 			u32 grp_idx = ring->grp_idx;
5059 			u32 map_idx = grp_idx + bp->rx_nr_rings;
5060 
5061 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5062 			if (rc)
5063 				goto err_out;
5064 
5065 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5066 				    ring->fw_ring_id);
5067 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5068 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5069 		}
5070 	}
5071 err_out:
5072 	return rc;
5073 }
5074 
5075 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5076 				   struct bnxt_ring_struct *ring,
5077 				   u32 ring_type, int cmpl_ring_id)
5078 {
5079 	int rc;
5080 	struct hwrm_ring_free_input req = {0};
5081 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5082 	u16 error_code;
5083 
5084 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5085 	req.ring_type = ring_type;
5086 	req.ring_id = cpu_to_le16(ring->fw_ring_id);
5087 
5088 	mutex_lock(&bp->hwrm_cmd_lock);
5089 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5090 	error_code = le16_to_cpu(resp->error_code);
5091 	mutex_unlock(&bp->hwrm_cmd_lock);
5092 
5093 	if (rc || error_code) {
5094 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5095 			   ring_type, rc, error_code);
5096 		return -EIO;
5097 	}
5098 	return 0;
5099 }
5100 
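/* Free all firmware rings.  TX, RX and aggregation rings are freed first;
 * the completion/NQ rings are freed last, after interrupts are disabled.
 */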
5101 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5102 {
5103 	u32 type;
5104 	int i;
5105 
5106 	if (!bp->bnapi)
5107 		return;
5108 
5109 	for (i = 0; i < bp->tx_nr_rings; i++) {
5110 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5111 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5112 		u32 cmpl_ring_id;
5113 
5114 		cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5115 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5116 			hwrm_ring_free_send_msg(bp, ring,
5117 						RING_FREE_REQ_RING_TYPE_TX,
5118 						close_path ? cmpl_ring_id :
5119 						INVALID_HW_RING_ID);
5120 			ring->fw_ring_id = INVALID_HW_RING_ID;
5121 		}
5122 	}
5123 
5124 	for (i = 0; i < bp->rx_nr_rings; i++) {
5125 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5126 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5127 		u32 grp_idx = rxr->bnapi->index;
5128 		u32 cmpl_ring_id;
5129 
5130 		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5131 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5132 			hwrm_ring_free_send_msg(bp, ring,
5133 						RING_FREE_REQ_RING_TYPE_RX,
5134 						close_path ? cmpl_ring_id :
5135 						INVALID_HW_RING_ID);
5136 			ring->fw_ring_id = INVALID_HW_RING_ID;
5137 			bp->grp_info[grp_idx].rx_fw_ring_id =
5138 				INVALID_HW_RING_ID;
5139 		}
5140 	}
5141 
5142 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5143 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5144 	else
5145 		type = RING_FREE_REQ_RING_TYPE_RX;
5146 	for (i = 0; i < bp->rx_nr_rings; i++) {
5147 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5148 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5149 		u32 grp_idx = rxr->bnapi->index;
5150 		u32 cmpl_ring_id;
5151 
5152 		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5153 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5154 			hwrm_ring_free_send_msg(bp, ring, type,
5155 						close_path ? cmpl_ring_id :
5156 						INVALID_HW_RING_ID);
5157 			ring->fw_ring_id = INVALID_HW_RING_ID;
5158 			bp->grp_info[grp_idx].agg_fw_ring_id =
5159 				INVALID_HW_RING_ID;
5160 		}
5161 	}
5162 
5163 	/* The completion rings are about to be freed.  After that the
5164 	 * IRQ doorbell will not work anymore.  So we need to disable
5165 	 * IRQ here.
5166 	 */
5167 	bnxt_disable_int_sync(bp);
5168 
5169 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5170 		type = RING_FREE_REQ_RING_TYPE_NQ;
5171 	else
5172 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5173 	for (i = 0; i < bp->cp_nr_rings; i++) {
5174 		struct bnxt_napi *bnapi = bp->bnapi[i];
5175 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5176 		struct bnxt_ring_struct *ring;
5177 		int j;
5178 
5179 		for (j = 0; j < 2; j++) {
5180 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5181 
5182 			if (cpr2) {
5183 				ring = &cpr2->cp_ring_struct;
5184 				if (ring->fw_ring_id == INVALID_HW_RING_ID)
5185 					continue;
5186 				hwrm_ring_free_send_msg(bp, ring,
5187 					RING_FREE_REQ_RING_TYPE_L2_CMPL,
5188 					INVALID_HW_RING_ID);
5189 				ring->fw_ring_id = INVALID_HW_RING_ID;
5190 			}
5191 		}
5192 		ring = &cpr->cp_ring_struct;
5193 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5194 			hwrm_ring_free_send_msg(bp, ring, type,
5195 						INVALID_HW_RING_ID);
5196 			ring->fw_ring_id = INVALID_HW_RING_ID;
5197 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5198 		}
5199 	}
5200 }
5201 
5202 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5203 			   bool shared);
5204 
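/* Query firmware for the ring and related resources currently reserved for
 * this function and cache them in bp->hw_resc.
 */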
5205 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5206 {
5207 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5208 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5209 	struct hwrm_func_qcfg_input req = {0};
5210 	int rc;
5211 
5212 	if (bp->hwrm_spec_code < 0x10601)
5213 		return 0;
5214 
5215 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5216 	req.fid = cpu_to_le16(0xffff);
5217 	mutex_lock(&bp->hwrm_cmd_lock);
5218 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5219 	if (rc) {
5220 		mutex_unlock(&bp->hwrm_cmd_lock);
5221 		return -EIO;
5222 	}
5223 
5224 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5225 	if (BNXT_NEW_RM(bp)) {
5226 		u16 cp, stats;
5227 
5228 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5229 		hw_resc->resv_hw_ring_grps =
5230 			le32_to_cpu(resp->alloc_hw_ring_grps);
5231 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5232 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
5233 		stats = le16_to_cpu(resp->alloc_stat_ctx);
5234 		hw_resc->resv_irqs = cp;
5235 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5236 			int rx = hw_resc->resv_rx_rings;
5237 			int tx = hw_resc->resv_tx_rings;
5238 
5239 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
5240 				rx >>= 1;
5241 			if (cp < (rx + tx)) {
5242 				bnxt_trim_rings(bp, &rx, &tx, cp, false);
5243 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
5244 					rx <<= 1;
5245 				hw_resc->resv_rx_rings = rx;
5246 				hw_resc->resv_tx_rings = tx;
5247 			}
5248 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5249 			hw_resc->resv_hw_ring_grps = rx;
5250 		}
5251 		hw_resc->resv_cp_rings = cp;
5252 		hw_resc->resv_stat_ctxs = stats;
5253 	}
5254 	mutex_unlock(&bp->hwrm_cmd_lock);
5255 	return 0;
5256 }
5257 
5258 /* Caller must hold bp->hwrm_cmd_lock */
5259 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5260 {
5261 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5262 	struct hwrm_func_qcfg_input req = {0};
5263 	int rc;
5264 
5265 	if (bp->hwrm_spec_code < 0x10601)
5266 		return 0;
5267 
5268 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5269 	req.fid = cpu_to_le16(fid);
5270 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5271 	if (!rc)
5272 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5273 
5274 	return rc;
5275 }
5276 
5277 static bool bnxt_rfs_supported(struct bnxt *bp);
5278 
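/* Fill a FUNC_CFG request that reserves the requested numbers of rings,
 * ring groups, stat contexts and VNICs for the PF.  Only fields with a
 * nonzero count are enabled.  On P5 chips, MSIX vectors and per-ring
 * completion rings are reserved instead of ring groups.
 */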
5279 static void
5280 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5281 			     int tx_rings, int rx_rings, int ring_grps,
5282 			     int cp_rings, int stats, int vnics)
5283 {
5284 	u32 enables = 0;
5285 
5286 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5287 	req->fid = cpu_to_le16(0xffff);
5288 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5289 	req->num_tx_rings = cpu_to_le16(tx_rings);
5290 	if (BNXT_NEW_RM(bp)) {
5291 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5292 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5293 			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5294 			enables |= tx_rings + ring_grps ?
5295 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5296 				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5297 			enables |= rx_rings ?
5298 				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5299 		} else {
5300 			enables |= cp_rings ?
5301 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5302 				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5303 			enables |= ring_grps ?
5304 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5305 				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5306 		}
5307 		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5308 
5309 		req->num_rx_rings = cpu_to_le16(rx_rings);
5310 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5311 			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5312 			req->num_msix = cpu_to_le16(cp_rings);
5313 			req->num_rsscos_ctxs =
5314 				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5315 		} else {
5316 			req->num_cmpl_rings = cpu_to_le16(cp_rings);
5317 			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5318 			req->num_rsscos_ctxs = cpu_to_le16(1);
5319 			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5320 			    bnxt_rfs_supported(bp))
5321 				req->num_rsscos_ctxs =
5322 					cpu_to_le16(ring_grps + 1);
5323 		}
5324 		req->num_stat_ctxs = cpu_to_le16(stats);
5325 		req->num_vnics = cpu_to_le16(vnics);
5326 	}
5327 	req->enables = cpu_to_le32(enables);
5328 }
5329 
5330 static void
5331 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5332 			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
5333 			     int rx_rings, int ring_grps, int cp_rings,
5334 			     int stats, int vnics)
5335 {
5336 	u32 enables = 0;
5337 
5338 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5339 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5340 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5341 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5342 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5343 		enables |= tx_rings + ring_grps ?
5344 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5345 			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5346 	} else {
5347 		enables |= cp_rings ?
5348 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5349 			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5350 		enables |= ring_grps ?
5351 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5352 	}
5353 	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5354 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5355 
5356 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5357 	req->num_tx_rings = cpu_to_le16(tx_rings);
5358 	req->num_rx_rings = cpu_to_le16(rx_rings);
5359 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5360 		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5361 		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5362 	} else {
5363 		req->num_cmpl_rings = cpu_to_le16(cp_rings);
5364 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5365 		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5366 	}
5367 	req->num_stat_ctxs = cpu_to_le16(stats);
5368 	req->num_vnics = cpu_to_le16(vnics);
5369 
5370 	req->enables = cpu_to_le32(enables);
5371 }
5372 
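/* Reserve rings and related resources for the PF.  A request with no
 * enables bits set is a no-op.  On success, re-query the firmware for
 * the resources that were actually granted.
 */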
5373 static int
5374 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5375 			   int ring_grps, int cp_rings, int stats, int vnics)
5376 {
5377 	struct hwrm_func_cfg_input req = {0};
5378 	int rc;
5379 
5380 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5381 				     cp_rings, stats, vnics);
5382 	if (!req.enables)
5383 		return 0;
5384 
5385 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5386 	if (rc)
5387 		return -ENOMEM;
5388 
5389 	if (bp->hwrm_spec_code < 0x10601)
5390 		bp->hw_resc.resv_tx_rings = tx_rings;
5391 
5392 	rc = bnxt_hwrm_get_rings(bp);
5393 	return rc;
5394 }
5395 
5396 static int
5397 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5398 			   int ring_grps, int cp_rings, int stats, int vnics)
5399 {
5400 	struct hwrm_func_vf_cfg_input req = {0};
5401 	int rc;
5402 
5403 	if (!BNXT_NEW_RM(bp)) {
5404 		bp->hw_resc.resv_tx_rings = tx_rings;
5405 		return 0;
5406 	}
5407 
5408 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5409 				     cp_rings, stats, vnics);
5410 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5411 	if (rc)
5412 		return -ENOMEM;
5413 
5414 	rc = bnxt_hwrm_get_rings(bp);
5415 	return rc;
5416 }
5417 
5418 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5419 				   int cp, int stat, int vnic)
5420 {
5421 	if (BNXT_PF(bp))
5422 		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5423 						  vnic);
5424 	else
5425 		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5426 						  vnic);
5427 }
5428 
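/* Number of NQs/MSI-X vectors in use, including any vectors set aside
 * for the ULP (RDMA) driver.
 */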
5429 int bnxt_nq_rings_in_use(struct bnxt *bp)
5430 {
5431 	int cp = bp->cp_nr_rings;
5432 	int ulp_msix, ulp_base;
5433 
5434 	ulp_msix = bnxt_get_ulp_msix_num(bp);
5435 	if (ulp_msix) {
5436 		ulp_base = bnxt_get_ulp_msix_base(bp);
5437 		cp += ulp_msix;
5438 		if ((ulp_base + ulp_msix) > cp)
5439 			cp = ulp_base + ulp_msix;
5440 	}
5441 	return cp;
5442 }
5443 
5444 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5445 {
5446 	int cp;
5447 
5448 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5449 		return bnxt_nq_rings_in_use(bp);
5450 
5451 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
5452 	return cp;
5453 }
5454 
5455 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5456 {
5457 	return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5458 }
5459 
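/* Return true if the current firmware reservations no longer match what
 * the driver needs (rings, IRQs, vnics, stat contexts), meaning a new
 * reservation request must be sent.
 */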
5460 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5461 {
5462 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5463 	int cp = bnxt_cp_rings_in_use(bp);
5464 	int nq = bnxt_nq_rings_in_use(bp);
5465 	int rx = bp->rx_nr_rings, stat;
5466 	int vnic = 1, grp = rx;
5467 
5468 	if (bp->hwrm_spec_code < 0x10601)
5469 		return false;
5470 
5471 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5472 		return true;
5473 
5474 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5475 		vnic = rx + 1;
5476 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
5477 		rx <<= 1;
5478 	stat = bnxt_get_func_stat_ctxs(bp);
5479 	if (BNXT_NEW_RM(bp) &&
5480 	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5481 	     hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
5482 	     hw_resc->resv_stat_ctxs != stat ||
5483 	     (hw_resc->resv_hw_ring_grps != grp &&
5484 	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
5485 		return true;
5486 	return false;
5487 }
5488 
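/* Reserve rings with the firmware, then trim the driver's ring counts
 * down to what was actually granted.  Aggregation rings are dropped and
 * LRO is disabled if fewer than 2 RX rings are available.
 */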
5489 static int __bnxt_reserve_rings(struct bnxt *bp)
5490 {
5491 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5492 	int cp = bnxt_nq_rings_in_use(bp);
5493 	int tx = bp->tx_nr_rings;
5494 	int rx = bp->rx_nr_rings;
5495 	int grp, rx_rings, rc;
5496 	int vnic = 1, stat;
5497 	bool sh = false;
5498 
5499 	if (!bnxt_need_reserve_rings(bp))
5500 		return 0;
5501 
5502 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5503 		sh = true;
5504 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5505 		vnic = rx + 1;
5506 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
5507 		rx <<= 1;
5508 	grp = bp->rx_nr_rings;
5509 	stat = bnxt_get_func_stat_ctxs(bp);
5510 
5511 	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5512 	if (rc)
5513 		return rc;
5514 
5515 	tx = hw_resc->resv_tx_rings;
5516 	if (BNXT_NEW_RM(bp)) {
5517 		rx = hw_resc->resv_rx_rings;
5518 		cp = hw_resc->resv_irqs;
5519 		grp = hw_resc->resv_hw_ring_grps;
5520 		vnic = hw_resc->resv_vnics;
5521 		stat = hw_resc->resv_stat_ctxs;
5522 	}
5523 
5524 	rx_rings = rx;
5525 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5526 		if (rx >= 2) {
5527 			rx_rings = rx >> 1;
5528 		} else {
5529 			if (netif_running(bp->dev))
5530 				return -ENOMEM;
5531 
5532 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5533 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5534 			bp->dev->hw_features &= ~NETIF_F_LRO;
5535 			bp->dev->features &= ~NETIF_F_LRO;
5536 			bnxt_set_ring_params(bp);
5537 		}
5538 	}
5539 	rx_rings = min_t(int, rx_rings, grp);
5540 	cp = min_t(int, cp, bp->cp_nr_rings);
5541 	if (stat > bnxt_get_ulp_stat_ctxs(bp))
5542 		stat -= bnxt_get_ulp_stat_ctxs(bp);
5543 	cp = min_t(int, cp, stat);
5544 	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5545 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
5546 		rx = rx_rings << 1;
5547 	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5548 	bp->tx_nr_rings = tx;
5549 	bp->rx_nr_rings = rx_rings;
5550 	bp->cp_nr_rings = cp;
5551 
5552 	if (!tx || !rx || !cp || !grp || !vnic || !stat)
5553 		return -ENOMEM;
5554 
5555 	return rc;
5556 }
5557 
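/* Test-only check (using the *_ASSETS_TEST flags) of whether the
 * requested VF ring and resource counts can be satisfied, without
 * actually reserving them.
 */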
5558 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5559 				    int ring_grps, int cp_rings, int stats,
5560 				    int vnics)
5561 {
5562 	struct hwrm_func_vf_cfg_input req = {0};
5563 	u32 flags;
5564 	int rc;
5565 
5566 	if (!BNXT_NEW_RM(bp))
5567 		return 0;
5568 
5569 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5570 				     cp_rings, stats, vnics);
5571 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5572 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5573 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5574 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5575 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5576 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5577 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5578 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5579 
5580 	req.flags = cpu_to_le32(flags);
5581 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5582 	if (rc)
5583 		return -ENOMEM;
5584 	return 0;
5585 }
5586 
5587 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5588 				    int ring_grps, int cp_rings, int stats,
5589 				    int vnics)
5590 {
5591 	struct hwrm_func_cfg_input req = {0};
5592 	u32 flags;
5593 	int rc;
5594 
5595 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5596 				     cp_rings, stats, vnics);
5597 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
5598 	if (BNXT_NEW_RM(bp)) {
5599 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5600 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5601 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5602 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5603 		if (bp->flags & BNXT_FLAG_CHIP_P5)
5604 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5605 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5606 		else
5607 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5608 	}
5609 
5610 	req.flags = cpu_to_le32(flags);
5611 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5612 	if (rc)
5613 		return -ENOMEM;
5614 	return 0;
5615 }
5616 
5617 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5618 				 int ring_grps, int cp_rings, int stats,
5619 				 int vnics)
5620 {
5621 	if (bp->hwrm_spec_code < 0x10801)
5622 		return 0;
5623 
5624 	if (BNXT_PF(bp))
5625 		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
5626 						ring_grps, cp_rings, stats,
5627 						vnics);
5628 
5629 	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
5630 					cp_rings, stats, vnics);
5631 }
5632 
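/* Query interrupt coalescing capabilities.  Firmware older than HWRM
 * 1.9.2 does not support the query, so start from conservative legacy
 * defaults and only override them on success.
 */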
5633 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5634 {
5635 	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5636 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5637 	struct hwrm_ring_aggint_qcaps_input req = {0};
5638 	int rc;
5639 
5640 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5641 	coal_cap->num_cmpl_dma_aggr_max = 63;
5642 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5643 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5644 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5645 	coal_cap->int_lat_tmr_min_max = 65535;
5646 	coal_cap->int_lat_tmr_max_max = 65535;
5647 	coal_cap->num_cmpl_aggr_int_max = 65535;
5648 	coal_cap->timer_units = 80;
5649 
5650 	if (bp->hwrm_spec_code < 0x10902)
5651 		return;
5652 
5653 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5654 	mutex_lock(&bp->hwrm_cmd_lock);
5655 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5656 	if (!rc) {
5657 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
5658 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
5659 		coal_cap->num_cmpl_dma_aggr_max =
5660 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5661 		coal_cap->num_cmpl_dma_aggr_during_int_max =
5662 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5663 		coal_cap->cmpl_aggr_dma_tmr_max =
5664 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5665 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5666 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5667 		coal_cap->int_lat_tmr_min_max =
5668 			le16_to_cpu(resp->int_lat_tmr_min_max);
5669 		coal_cap->int_lat_tmr_max_max =
5670 			le16_to_cpu(resp->int_lat_tmr_max_max);
5671 		coal_cap->num_cmpl_aggr_int_max =
5672 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
5673 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5674 	}
5675 	mutex_unlock(&bp->hwrm_cmd_lock);
5676 }
5677 
5678 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5679 {
5680 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5681 
5682 	return usec * 1000 / coal_cap->timer_units;
5683 }
5684 
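/* Fill in a cmpl_ring_cfg_aggint_params request from the driver's
 * coalescing settings, clamping each value to the limits reported by
 * HWRM_RING_AGGINT_QCAPS.
 */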
5685 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5686 	struct bnxt_coal *hw_coal,
5687 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5688 {
5689 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5690 	u32 cmpl_params = coal_cap->cmpl_params;
5691 	u16 val, tmr, max, flags = 0;
5692 
5693 	max = hw_coal->bufs_per_record * 128;
5694 	if (hw_coal->budget)
5695 		max = hw_coal->bufs_per_record * hw_coal->budget;
5696 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
5697 
5698 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5699 	req->num_cmpl_aggr_int = cpu_to_le16(val);
5700 
5701 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
5702 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
5703 
5704 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5705 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
5706 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5707 
5708 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5709 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
5710 	req->int_lat_tmr_max = cpu_to_le16(tmr);
5711 
5712 	/* min timer set to 1/2 of interrupt timer */
5713 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5714 		val = tmr / 2;
5715 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5716 		req->int_lat_tmr_min = cpu_to_le16(val);
5717 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5718 	}
5719 
5720 	/* buf timer set to 1/4 of interrupt timer */
5721 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
5722 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5723 
5724 	if (cmpl_params &
5725 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5726 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5727 		val = clamp_t(u16, tmr, 1,
5728 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
5729 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5730 		req->enables |=
5731 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5732 	}
5733 
5734 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5735 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5736 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5737 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
5738 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
5739 	req->flags = cpu_to_le16(flags);
5740 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
5741 }
5742 
5743 /* Caller holds bp->hwrm_cmd_lock */
5744 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5745 				   struct bnxt_coal *hw_coal)
5746 {
5747 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5748 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5749 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5750 	u32 nq_params = coal_cap->nq_params;
5751 	u16 tmr;
5752 
5753 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5754 		return 0;
5755 
5756 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5757 			       -1, -1);
5758 	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5759 	req.flags =
5760 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5761 
5762 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5763 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5764 	req.int_lat_tmr_min = cpu_to_le16(tmr);
5765 	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5766 	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5767 }
5768 
5769 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5770 {
5771 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5772 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5773 	struct bnxt_coal coal;
5774 
5775 	/* Tick values in microseconds.
5776 	 * 1 coal_buf x bufs_per_record = 1 completion record.
5777 	 */
5778 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5779 
5780 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5781 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5782 
5783 	if (!bnapi->rx_ring)
5784 		return -ENODEV;
5785 
5786 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5787 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5788 
5789 	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
5790 
5791 	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
5792 
5793 	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5794 				 HWRM_CMD_TIMEOUT);
5795 }
5796 
5797 int bnxt_hwrm_set_coal(struct bnxt *bp)
5798 {
5799 	int i, rc = 0;
5800 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5801 							   req_tx = {0}, *req;
5802 
5803 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5804 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5805 	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5806 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5807 
5808 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5809 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
5810 
5811 	mutex_lock(&bp->hwrm_cmd_lock);
5812 	for (i = 0; i < bp->cp_nr_rings; i++) {
5813 		struct bnxt_napi *bnapi = bp->bnapi[i];
5814 		struct bnxt_coal *hw_coal;
5815 		u16 ring_id;
5816 
5817 		req = &req_rx;
5818 		if (!bnapi->rx_ring) {
5819 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5820 			req = &req_tx;
5821 		} else {
5822 			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5823 		}
5824 		req->ring_id = cpu_to_le16(ring_id);
5825 
5826 		rc = _hwrm_send_message(bp, req, sizeof(*req),
5827 					HWRM_CMD_TIMEOUT);
5828 		if (rc)
5829 			break;
5830 
5831 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5832 			continue;
5833 
5834 		if (bnapi->rx_ring && bnapi->tx_ring) {
5835 			req = &req_tx;
5836 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5837 			req->ring_id = cpu_to_le16(ring_id);
5838 			rc = _hwrm_send_message(bp, req, sizeof(*req),
5839 						HWRM_CMD_TIMEOUT);
5840 			if (rc)
5841 				break;
5842 		}
5843 		if (bnapi->rx_ring)
5844 			hw_coal = &bp->rx_coal;
5845 		else
5846 			hw_coal = &bp->tx_coal;
5847 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
5848 	}
5849 	mutex_unlock(&bp->hwrm_cmd_lock);
5850 	return rc;
5851 }
5852 
5853 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5854 {
5855 	int rc = 0, i;
5856 	struct hwrm_stat_ctx_free_input req = {0};
5857 
5858 	if (!bp->bnapi)
5859 		return 0;
5860 
5861 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5862 		return 0;
5863 
5864 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5865 
5866 	mutex_lock(&bp->hwrm_cmd_lock);
5867 	for (i = 0; i < bp->cp_nr_rings; i++) {
5868 		struct bnxt_napi *bnapi = bp->bnapi[i];
5869 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5870 
5871 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5872 			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5873 
5874 			rc = _hwrm_send_message(bp, &req, sizeof(req),
5875 						HWRM_CMD_TIMEOUT);
5876 			if (rc)
5877 				break;
5878 
5879 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5880 		}
5881 	}
5882 	mutex_unlock(&bp->hwrm_cmd_lock);
5883 	return rc;
5884 }
5885 
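/* Allocate one firmware statistics context per completion ring and
 * record the returned IDs for later ring group setup.
 */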
5886 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5887 {
5888 	int rc = 0, i;
5889 	struct hwrm_stat_ctx_alloc_input req = {0};
5890 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5891 
5892 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5893 		return 0;
5894 
5895 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5896 
5897 	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
5898 
5899 	mutex_lock(&bp->hwrm_cmd_lock);
5900 	for (i = 0; i < bp->cp_nr_rings; i++) {
5901 		struct bnxt_napi *bnapi = bp->bnapi[i];
5902 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5903 
5904 		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5905 
5906 		rc = _hwrm_send_message(bp, &req, sizeof(req),
5907 					HWRM_CMD_TIMEOUT);
5908 		if (rc)
5909 			break;
5910 
5911 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5912 
5913 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5914 	}
5915 	mutex_unlock(&bp->hwrm_cmd_lock);
5916 	return rc;
5917 }
5918 
5919 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5920 {
5921 	struct hwrm_func_qcfg_input req = {0};
5922 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5923 	u16 flags;
5924 	int rc;
5925 
5926 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5927 	req.fid = cpu_to_le16(0xffff);
5928 	mutex_lock(&bp->hwrm_cmd_lock);
5929 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5930 	if (rc)
5931 		goto func_qcfg_exit;
5932 
5933 #ifdef CONFIG_BNXT_SRIOV
5934 	if (BNXT_VF(bp)) {
5935 		struct bnxt_vf_info *vf = &bp->vf;
5936 
5937 		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5938 	}
5939 #endif
5940 	flags = le16_to_cpu(resp->flags);
5941 	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5942 		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
5943 		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
5944 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
5945 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
5946 	}
5947 	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5948 		bp->flags |= BNXT_FLAG_MULTI_HOST;
5949 
5950 	switch (resp->port_partition_type) {
5951 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5952 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5953 	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5954 		bp->port_partition_type = resp->port_partition_type;
5955 		break;
5956 	}
5957 	if (bp->hwrm_spec_code < 0x10707 ||
5958 	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5959 		bp->br_mode = BRIDGE_MODE_VEB;
5960 	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5961 		bp->br_mode = BRIDGE_MODE_VEPA;
5962 	else
5963 		bp->br_mode = BRIDGE_MODE_UNDEF;
5964 
5965 	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5966 	if (!bp->max_mtu)
5967 		bp->max_mtu = BNXT_MAX_MTU;
5968 
5969 func_qcfg_exit:
5970 	mutex_unlock(&bp->hwrm_cmd_lock);
5971 	return rc;
5972 }
5973 
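/* Query context (backing store) memory requirements.  Only done on the
 * PF with HWRM 1.9.2+; the results are cached in bp->ctx and later used
 * to size the host memory backing the QP, SRQ, CQ, VNIC, stats and TQM
 * contexts.
 */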
5974 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5975 {
5976 	struct hwrm_func_backing_store_qcaps_input req = {0};
5977 	struct hwrm_func_backing_store_qcaps_output *resp =
5978 		bp->hwrm_cmd_resp_addr;
5979 	int rc;
5980 
5981 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5982 		return 0;
5983 
5984 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
5985 	mutex_lock(&bp->hwrm_cmd_lock);
5986 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5987 	if (!rc) {
5988 		struct bnxt_ctx_pg_info *ctx_pg;
5989 		struct bnxt_ctx_mem_info *ctx;
5990 		int i;
5991 
5992 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
5993 		if (!ctx) {
5994 			rc = -ENOMEM;
5995 			goto ctx_err;
5996 		}
5997 		ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
5998 		if (!ctx_pg) {
5999 			kfree(ctx);
6000 			rc = -ENOMEM;
6001 			goto ctx_err;
6002 		}
6003 		for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6004 			ctx->tqm_mem[i] = ctx_pg;
6005 
6006 		bp->ctx = ctx;
6007 		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6008 		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6009 		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6010 		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6011 		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6012 		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6013 		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6014 		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6015 		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6016 		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6017 		ctx->vnic_max_vnic_entries =
6018 			le16_to_cpu(resp->vnic_max_vnic_entries);
6019 		ctx->vnic_max_ring_table_entries =
6020 			le16_to_cpu(resp->vnic_max_ring_table_entries);
6021 		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6022 		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6023 		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6024 		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6025 		ctx->tqm_min_entries_per_ring =
6026 			le32_to_cpu(resp->tqm_min_entries_per_ring);
6027 		ctx->tqm_max_entries_per_ring =
6028 			le32_to_cpu(resp->tqm_max_entries_per_ring);
6029 		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6030 		if (!ctx->tqm_entries_multiple)
6031 			ctx->tqm_entries_multiple = 1;
6032 		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6033 		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6034 		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6035 		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6036 	} else {
6037 		rc = 0;
6038 	}
6039 ctx_err:
6040 	mutex_unlock(&bp->hwrm_cmd_lock);
6041 	return rc;
6042 }
6043 
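/* Encode the page size and indirection level of a context memory area
 * into the page attribute byte and page directory address used by the
 * backing store config request.
 */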
6044 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6045 				  __le64 *pg_dir)
6046 {
6047 	u8 pg_size = 0;
6048 
6049 	if (BNXT_PAGE_SHIFT == 13)
6050 		pg_size = 1 << 4;
6051 	else if (BNXT_PAGE_SHIFT == 16)
6052 		pg_size = 2 << 4;
6053 
6054 	*pg_attr = pg_size;
6055 	if (rmem->depth >= 1) {
6056 		if (rmem->depth == 2)
6057 			*pg_attr |= 2;
6058 		else
6059 			*pg_attr |= 1;
6060 		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6061 	} else {
6062 		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6063 	}
6064 }
6065 
6066 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
6067 	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
6068 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
6069 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
6070 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
6071 	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6072 
6073 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6074 {
6075 	struct hwrm_func_backing_store_cfg_input req = {0};
6076 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6077 	struct bnxt_ctx_pg_info *ctx_pg;
6078 	__le32 *num_entries;
6079 	__le64 *pg_dir;
6080 	u8 *pg_attr;
6081 	int i, rc;
6082 	u32 ena;
6083 
6084 	if (!ctx)
6085 		return 0;
6086 
6087 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6088 	req.enables = cpu_to_le32(enables);
6089 
6090 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6091 		ctx_pg = &ctx->qp_mem;
6092 		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6093 		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6094 		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6095 		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6096 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6097 				      &req.qpc_pg_size_qpc_lvl,
6098 				      &req.qpc_page_dir);
6099 	}
6100 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6101 		ctx_pg = &ctx->srq_mem;
6102 		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6103 		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6104 		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6105 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6106 				      &req.srq_pg_size_srq_lvl,
6107 				      &req.srq_page_dir);
6108 	}
6109 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6110 		ctx_pg = &ctx->cq_mem;
6111 		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6112 		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6113 		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6114 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6115 				      &req.cq_page_dir);
6116 	}
6117 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6118 		ctx_pg = &ctx->vnic_mem;
6119 		req.vnic_num_vnic_entries =
6120 			cpu_to_le16(ctx->vnic_max_vnic_entries);
6121 		req.vnic_num_ring_table_entries =
6122 			cpu_to_le16(ctx->vnic_max_ring_table_entries);
6123 		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6124 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6125 				      &req.vnic_pg_size_vnic_lvl,
6126 				      &req.vnic_page_dir);
6127 	}
6128 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6129 		ctx_pg = &ctx->stat_mem;
6130 		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6131 		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6132 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6133 				      &req.stat_pg_size_stat_lvl,
6134 				      &req.stat_page_dir);
6135 	}
6136 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6137 		ctx_pg = &ctx->mrav_mem;
6138 		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6139 		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6140 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6141 				      &req.mrav_pg_size_mrav_lvl,
6142 				      &req.mrav_page_dir);
6143 	}
6144 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6145 		ctx_pg = &ctx->tim_mem;
6146 		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6147 		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6148 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6149 				      &req.tim_pg_size_tim_lvl,
6150 				      &req.tim_page_dir);
6151 	}
6152 	for (i = 0, num_entries = &req.tqm_sp_num_entries,
6153 	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6154 	     pg_dir = &req.tqm_sp_page_dir,
6155 	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6156 	     i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6157 		if (!(enables & ena))
6158 			continue;
6159 
6160 		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6161 		ctx_pg = ctx->tqm_mem[i];
6162 		*num_entries = cpu_to_le32(ctx_pg->entries);
6163 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6164 	}
6165 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6166 	if (rc)
6167 		rc = -EIO;
6168 	return rc;
6169 }
6170 
6171 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6172 				  struct bnxt_ctx_pg_info *ctx_pg)
6173 {
6174 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6175 
6176 	rmem->page_size = BNXT_PAGE_SIZE;
6177 	rmem->pg_arr = ctx_pg->ctx_pg_arr;
6178 	rmem->dma_arr = ctx_pg->ctx_dma_arr;
6179 	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6180 	if (rmem->depth >= 1)
6181 		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6182 	return bnxt_alloc_ring(bp, rmem);
6183 }
6184 
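/* Allocate the host pages for one context memory area.  Large areas
 * (more than MAX_CTX_PAGES pages) use a two-level page table, with one
 * page table block per MAX_CTX_PAGES data pages.
 */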
6185 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6186 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6187 				  u8 depth)
6188 {
6189 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6190 	int rc;
6191 
6192 	if (!mem_size)
6193 		return 0;
6194 
6195 	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6196 	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6197 		ctx_pg->nr_pages = 0;
6198 		return -EINVAL;
6199 	}
6200 	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6201 		int nr_tbls, i;
6202 
6203 		rmem->depth = 2;
6204 		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6205 					     GFP_KERNEL);
6206 		if (!ctx_pg->ctx_pg_tbl)
6207 			return -ENOMEM;
6208 		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6209 		rmem->nr_pages = nr_tbls;
6210 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6211 		if (rc)
6212 			return rc;
6213 		for (i = 0; i < nr_tbls; i++) {
6214 			struct bnxt_ctx_pg_info *pg_tbl;
6215 
6216 			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6217 			if (!pg_tbl)
6218 				return -ENOMEM;
6219 			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6220 			rmem = &pg_tbl->ring_mem;
6221 			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6222 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6223 			rmem->depth = 1;
6224 			rmem->nr_pages = MAX_CTX_PAGES;
6225 			if (i == (nr_tbls - 1)) {
6226 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6227 
6228 				if (rem)
6229 					rmem->nr_pages = rem;
6230 			}
6231 			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6232 			if (rc)
6233 				break;
6234 		}
6235 	} else {
6236 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6237 		if (rmem->nr_pages > 1 || depth)
6238 			rmem->depth = 1;
6239 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6240 	}
6241 	return rc;
6242 }
6243 
6244 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6245 				  struct bnxt_ctx_pg_info *ctx_pg)
6246 {
6247 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6248 
6249 	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6250 	    ctx_pg->ctx_pg_tbl) {
6251 		int i, nr_tbls = rmem->nr_pages;
6252 
6253 		for (i = 0; i < nr_tbls; i++) {
6254 			struct bnxt_ctx_pg_info *pg_tbl;
6255 			struct bnxt_ring_mem_info *rmem2;
6256 
6257 			pg_tbl = ctx_pg->ctx_pg_tbl[i];
6258 			if (!pg_tbl)
6259 				continue;
6260 			rmem2 = &pg_tbl->ring_mem;
6261 			bnxt_free_ring(bp, rmem2);
6262 			ctx_pg->ctx_pg_arr[i] = NULL;
6263 			kfree(pg_tbl);
6264 			ctx_pg->ctx_pg_tbl[i] = NULL;
6265 		}
6266 		kfree(ctx_pg->ctx_pg_tbl);
6267 		ctx_pg->ctx_pg_tbl = NULL;
6268 	}
6269 	bnxt_free_ring(bp, rmem);
6270 	ctx_pg->nr_pages = 0;
6271 }
6272 
6273 static void bnxt_free_ctx_mem(struct bnxt *bp)
6274 {
6275 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
6276 	int i;
6277 
6278 	if (!ctx)
6279 		return;
6280 
6281 	if (ctx->tqm_mem[0]) {
6282 		for (i = 0; i < bp->max_q + 1; i++)
6283 			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6284 		kfree(ctx->tqm_mem[0]);
6285 		ctx->tqm_mem[0] = NULL;
6286 	}
6287 
6288 	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6289 	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6290 	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6291 	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6292 	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6293 	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6294 	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6295 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6296 }
6297 
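/* Size and allocate all context memory areas based on the queried
 * capabilities.  Extra QP/SRQ/MRAV/TIM entries are added when RoCE is
 * supported, and the final layout is passed to the firmware via
 * backing_store_cfg.
 */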
6298 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6299 {
6300 	struct bnxt_ctx_pg_info *ctx_pg;
6301 	struct bnxt_ctx_mem_info *ctx;
6302 	u32 mem_size, ena, entries;
6303 	u32 extra_srqs = 0;
6304 	u32 extra_qps = 0;
6305 	u8 pg_lvl = 1;
6306 	int i, rc;
6307 
6308 	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6309 	if (rc) {
6310 		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6311 			   rc);
6312 		return rc;
6313 	}
6314 	ctx = bp->ctx;
6315 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6316 		return 0;
6317 
6318 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6319 		pg_lvl = 2;
6320 		extra_qps = 65536;
6321 		extra_srqs = 8192;
6322 	}
6323 
6324 	ctx_pg = &ctx->qp_mem;
6325 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6326 			  extra_qps;
6327 	mem_size = ctx->qp_entry_size * ctx_pg->entries;
6328 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6329 	if (rc)
6330 		return rc;
6331 
6332 	ctx_pg = &ctx->srq_mem;
6333 	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6334 	mem_size = ctx->srq_entry_size * ctx_pg->entries;
6335 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6336 	if (rc)
6337 		return rc;
6338 
6339 	ctx_pg = &ctx->cq_mem;
6340 	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6341 	mem_size = ctx->cq_entry_size * ctx_pg->entries;
6342 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6343 	if (rc)
6344 		return rc;
6345 
6346 	ctx_pg = &ctx->vnic_mem;
6347 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
6348 			  ctx->vnic_max_ring_table_entries;
6349 	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6350 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6351 	if (rc)
6352 		return rc;
6353 
6354 	ctx_pg = &ctx->stat_mem;
6355 	ctx_pg->entries = ctx->stat_max_entries;
6356 	mem_size = ctx->stat_entry_size * ctx_pg->entries;
6357 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6358 	if (rc)
6359 		return rc;
6360 
6361 	ena = 0;
6362 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6363 		goto skip_rdma;
6364 
6365 	ctx_pg = &ctx->mrav_mem;
6366 	ctx_pg->entries = extra_qps * 4;
6367 	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6368 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6369 	if (rc)
6370 		return rc;
6371 	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6372 
6373 	ctx_pg = &ctx->tim_mem;
6374 	ctx_pg->entries = ctx->qp_mem.entries;
6375 	mem_size = ctx->tim_entry_size * ctx_pg->entries;
6376 	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6377 	if (rc)
6378 		return rc;
6379 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6380 
6381 skip_rdma:
6382 	entries = ctx->qp_max_l2_entries + extra_qps;
6383 	entries = roundup(entries, ctx->tqm_entries_multiple);
6384 	entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6385 			  ctx->tqm_max_entries_per_ring);
6386 	for (i = 0; i < bp->max_q + 1; i++) {
6387 		ctx_pg = ctx->tqm_mem[i];
6388 		ctx_pg->entries = entries;
6389 		mem_size = ctx->tqm_entry_size * entries;
6390 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6391 		if (rc)
6392 			return rc;
6393 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6394 	}
6395 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6396 	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6397 	if (rc)
6398 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6399 			   rc);
6400 	else
6401 		ctx->flags |= BNXT_CTX_FLAG_INITED;
6402 
6403 	return 0;
6404 }
6405 
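/* Query min/max resource limits (rings, vnics, stat contexts, etc.)
 * from the firmware.  When @all is false, only the TX scheduler input
 * limit is refreshed.
 */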
6406 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6407 {
6408 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6409 	struct hwrm_func_resource_qcaps_input req = {0};
6410 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6411 	int rc;
6412 
6413 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6414 	req.fid = cpu_to_le16(0xffff);
6415 
6416 	mutex_lock(&bp->hwrm_cmd_lock);
6417 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6418 				       HWRM_CMD_TIMEOUT);
6419 	if (rc) {
6420 		rc = -EIO;
6421 		goto hwrm_func_resc_qcaps_exit;
6422 	}
6423 
6424 	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6425 	if (!all)
6426 		goto hwrm_func_resc_qcaps_exit;
6427 
6428 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6429 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6430 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6431 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6432 	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6433 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6434 	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6435 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6436 	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6437 	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6438 	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6439 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6440 	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6441 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6442 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6443 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6444 
6445 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6446 		u16 max_msix = le16_to_cpu(resp->max_msix);
6447 
6448 		hw_resc->max_nqs = max_msix;
6449 		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6450 	}
6451 
6452 	if (BNXT_PF(bp)) {
6453 		struct bnxt_pf_info *pf = &bp->pf;
6454 
6455 		pf->vf_resv_strategy =
6456 			le16_to_cpu(resp->vf_reservation_strategy);
6457 		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6458 			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6459 	}
6460 hwrm_func_resc_qcaps_exit:
6461 	mutex_unlock(&bp->hwrm_cmd_lock);
6462 	return rc;
6463 }
6464 
6465 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6466 {
6467 	int rc = 0;
6468 	struct hwrm_func_qcaps_input req = {0};
6469 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6470 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6471 	u32 flags;
6472 
6473 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6474 	req.fid = cpu_to_le16(0xffff);
6475 
6476 	mutex_lock(&bp->hwrm_cmd_lock);
6477 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6478 	if (rc)
6479 		goto hwrm_func_qcaps_exit;
6480 
6481 	flags = le32_to_cpu(resp->flags);
6482 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6483 		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6484 	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6485 		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6486 
6487 	bp->tx_push_thresh = 0;
6488 	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6489 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6490 
6491 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6492 	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6493 	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6494 	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6495 	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6496 	if (!hw_resc->max_hw_ring_grps)
6497 		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6498 	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6499 	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6500 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6501 
6502 	if (BNXT_PF(bp)) {
6503 		struct bnxt_pf_info *pf = &bp->pf;
6504 
6505 		pf->fw_fid = le16_to_cpu(resp->fid);
6506 		pf->port_id = le16_to_cpu(resp->port_id);
6507 		bp->dev->dev_port = pf->port_id;
6508 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6509 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6510 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
6511 		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6512 		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6513 		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6514 		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6515 		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6516 		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6517 		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6518 			bp->flags |= BNXT_FLAG_WOL_CAP;
6519 	} else {
6520 #ifdef CONFIG_BNXT_SRIOV
6521 		struct bnxt_vf_info *vf = &bp->vf;
6522 
6523 		vf->fw_fid = le16_to_cpu(resp->fid);
6524 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6525 #endif
6526 	}
6527 
6528 hwrm_func_qcaps_exit:
6529 	mutex_unlock(&bp->hwrm_cmd_lock);
6530 	return rc;
6531 }
6532 
6533 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6534 
6535 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6536 {
6537 	int rc;
6538 
6539 	rc = __bnxt_hwrm_func_qcaps(bp);
6540 	if (rc)
6541 		return rc;
6542 	rc = bnxt_hwrm_queue_qportcfg(bp);
6543 	if (rc) {
6544 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6545 		return rc;
6546 	}
6547 	if (bp->hwrm_spec_code >= 0x10803) {
6548 		rc = bnxt_alloc_ctx_mem(bp);
6549 		if (rc)
6550 			return rc;
6551 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6552 		if (!rc)
6553 			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
6554 	}
6555 	return 0;
6556 }
6557 
6558 static int bnxt_hwrm_func_reset(struct bnxt *bp)
6559 {
6560 	struct hwrm_func_reset_input req = {0};
6561 
6562 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6563 	req.enables = 0;
6564 
6565 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6566 }
6567 
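/* Query the port's CoS queue configuration and build the queue id and
 * profile tables used for TC to hardware queue mapping.
 */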
6568 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6569 {
6570 	int rc = 0;
6571 	struct hwrm_queue_qportcfg_input req = {0};
6572 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
6573 	u8 i, j, *qptr;
6574 	bool no_rdma;
6575 
6576 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6577 
6578 	mutex_lock(&bp->hwrm_cmd_lock);
6579 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6580 	if (rc)
6581 		goto qportcfg_exit;
6582 
6583 	if (!resp->max_configurable_queues) {
6584 		rc = -EINVAL;
6585 		goto qportcfg_exit;
6586 	}
6587 	bp->max_tc = resp->max_configurable_queues;
6588 	bp->max_lltc = resp->max_configurable_lossless_queues;
6589 	if (bp->max_tc > BNXT_MAX_QUEUE)
6590 		bp->max_tc = BNXT_MAX_QUEUE;
6591 
6592 	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6593 	qptr = &resp->queue_id0;
6594 	for (i = 0, j = 0; i < bp->max_tc; i++) {
6595 		bp->q_info[j].queue_id = *qptr;
6596 		bp->q_ids[i] = *qptr++;
6597 		bp->q_info[j].queue_profile = *qptr++;
6598 		bp->tc_to_qidx[j] = j;
6599 		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6600 		    (no_rdma && BNXT_PF(bp)))
6601 			j++;
6602 	}
6603 	bp->max_q = bp->max_tc;
6604 	bp->max_tc = max_t(u8, j, 1);
6605 
6606 	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6607 		bp->max_tc = 1;
6608 
6609 	if (bp->max_lltc > bp->max_tc)
6610 		bp->max_lltc = bp->max_tc;
6611 
6612 qportcfg_exit:
6613 	mutex_unlock(&bp->hwrm_cmd_lock);
6614 	return rc;
6615 }
6616 
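/* Exchange version information with the firmware and record the HWRM
 * spec level, command timeout, maximum request lengths and optional
 * capabilities (short command, Kong channel, 64-bit flow handles).
 */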
6617 static int bnxt_hwrm_ver_get(struct bnxt *bp)
6618 {
6619 	int rc;
6620 	struct hwrm_ver_get_input req = {0};
6621 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6622 	u32 dev_caps_cfg;
6623 
6624 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
6625 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6626 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6627 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
6628 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6629 	mutex_lock(&bp->hwrm_cmd_lock);
6630 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6631 	if (rc)
6632 		goto hwrm_ver_get_exit;
6633 
6634 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6635 
6636 	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6637 			     resp->hwrm_intf_min_8b << 8 |
6638 			     resp->hwrm_intf_upd_8b;
6639 	if (resp->hwrm_intf_maj_8b < 1) {
6640 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
6641 			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6642 			    resp->hwrm_intf_upd_8b);
6643 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
6644 	}
6645 	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
6646 		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6647 		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
6648 
6649 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6650 	if (!bp->hwrm_cmd_timeout)
6651 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6652 
6653 	if (resp->hwrm_intf_maj_8b >= 1) {
6654 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
6655 		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6656 	}
6657 	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6658 		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
6659 
6660 	bp->chip_num = le16_to_cpu(resp->chip_num);
6661 	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6662 	    !resp->chip_metal)
6663 		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
6664 
6665 	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6666 	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6667 	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
6668 		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
6669 
6670 	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
6671 		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
6672 
6673 	if (dev_caps_cfg &
6674 	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
6675 		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
6676 
6677 hwrm_ver_get_exit:
6678 	mutex_unlock(&bp->hwrm_cmd_lock);
6679 	return rc;
6680 }
6681 
6682 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6683 {
6684 	struct hwrm_fw_set_time_input req = {0};
6685 	struct tm tm;
6686 	time64_t now = ktime_get_real_seconds();
6687 
6688 	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6689 	    bp->hwrm_spec_code < 0x10400)
6690 		return -EOPNOTSUPP;
6691 
6692 	time64_to_tm(now, 0, &tm);
6693 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6694 	req.year = cpu_to_le16(1900 + tm.tm_year);
6695 	req.month = 1 + tm.tm_mon;
6696 	req.day = tm.tm_mday;
6697 	req.hour = tm.tm_hour;
6698 	req.minute = tm.tm_min;
6699 	req.second = tm.tm_sec;
6700 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6701 }
6702 
6703 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6704 {
6705 	int rc;
6706 	struct bnxt_pf_info *pf = &bp->pf;
6707 	struct hwrm_port_qstats_input req = {0};
6708 
6709 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6710 		return 0;
6711 
6712 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6713 	req.port_id = cpu_to_le16(pf->port_id);
6714 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6715 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6716 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6717 	return rc;
6718 }
6719 
6720 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6721 {
6722 	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
6723 	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
6724 	struct hwrm_port_qstats_ext_input req = {0};
6725 	struct bnxt_pf_info *pf = &bp->pf;
6726 	int rc;
6727 
6728 	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6729 		return 0;
6730 
6731 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6732 	req.port_id = cpu_to_le16(pf->port_id);
6733 	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6734 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
6735 	req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6736 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6737 	mutex_lock(&bp->hwrm_cmd_lock);
6738 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6739 	if (!rc) {
6740 		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6741 		bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6742 	} else {
6743 		bp->fw_rx_stats_ext_size = 0;
6744 		bp->fw_tx_stats_ext_size = 0;
6745 	}
6746 	if (bp->fw_tx_stats_ext_size <=
6747 	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6748 		mutex_unlock(&bp->hwrm_cmd_lock);
6749 		bp->pri2cos_valid = 0;
6750 		return rc;
6751 	}
6752 
6753 	bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6754 	req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6755 
6756 	rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6757 	if (!rc) {
6758 		struct hwrm_queue_pri2cos_qcfg_output *resp2;
6759 		u8 *pri2cos;
6760 		int i, j;
6761 
6762 		resp2 = bp->hwrm_cmd_resp_addr;
6763 		pri2cos = &resp2->pri0_cos_queue_id;
6764 		for (i = 0; i < 8; i++) {
6765 			u8 queue_id = pri2cos[i];
6766 
6767 			for (j = 0; j < bp->max_q; j++) {
6768 				if (bp->q_ids[j] == queue_id)
6769 					bp->pri2cos[i] = j;
6770 			}
6771 		}
6772 		bp->pri2cos_valid = 1;
6773 	}
6774 	mutex_unlock(&bp->hwrm_cmd_lock);
6775 	return rc;
6776 }
6777 
6778 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6779 {
6780 	if (bp->vxlan_port_cnt) {
6781 		bnxt_hwrm_tunnel_dst_port_free(
6782 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6783 	}
6784 	bp->vxlan_port_cnt = 0;
6785 	if (bp->nge_port_cnt) {
6786 		bnxt_hwrm_tunnel_dst_port_free(
6787 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6788 	}
6789 	bp->nge_port_cnt = 0;
6790 }
6791 
6792 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6793 {
6794 	int rc, i;
6795 	u32 tpa_flags = 0;
6796 
6797 	if (set_tpa)
6798 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
6799 	for (i = 0; i < bp->nr_vnics; i++) {
6800 		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6801 		if (rc) {
6802 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
6803 				   i, rc);
6804 			return rc;
6805 		}
6806 	}
6807 	return 0;
6808 }
6809 
6810 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6811 {
6812 	int i;
6813 
6814 	for (i = 0; i < bp->nr_vnics; i++)
6815 		bnxt_hwrm_vnic_set_rss(bp, i, false);
6816 }
6817 
6818 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6819 				    bool irq_re_init)
6820 {
6821 	if (bp->vnic_info) {
6822 		bnxt_hwrm_clear_vnic_filter(bp);
6823 		/* clear all RSS settings before freeing the vnic ctx */
6824 		bnxt_hwrm_clear_vnic_rss(bp);
6825 		bnxt_hwrm_vnic_ctx_free(bp);
6826 		/* before freeing the vnic, undo the vnic TPA settings */
6827 		if (bp->flags & BNXT_FLAG_TPA)
6828 			bnxt_set_tpa(bp, false);
6829 		bnxt_hwrm_vnic_free(bp);
6830 	}
6831 	bnxt_hwrm_ring_free(bp, close_path);
6832 	bnxt_hwrm_ring_grp_free(bp);
6833 	if (irq_re_init) {
6834 		bnxt_hwrm_stat_ctx_free(bp);
6835 		bnxt_hwrm_free_tunnel_ports(bp);
6836 	}
6837 }
6838 
6839 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6840 {
6841 	struct hwrm_func_cfg_input req = {0};
6842 	int rc;
6843 
6844 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6845 	req.fid = cpu_to_le16(0xffff);
6846 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6847 	if (br_mode == BRIDGE_MODE_VEB)
6848 		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6849 	else if (br_mode == BRIDGE_MODE_VEPA)
6850 		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6851 	else
6852 		return -EINVAL;
6853 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6854 	if (rc)
6855 		rc = -EIO;
6856 	return rc;
6857 }
6858 
6859 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6860 {
6861 	struct hwrm_func_cfg_input req = {0};
6862 	int rc;
6863 
6864 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6865 		return 0;
6866 
6867 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6868 	req.fid = cpu_to_le16(0xffff);
6869 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
6870 	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
6871 	if (size == 128)
6872 		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
6873 
6874 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6875 	if (rc)
6876 		rc = -EIO;
6877 	return rc;
6878 }
6879 
6880 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6881 {
6882 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6883 	int rc;
6884 
6885 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6886 		goto skip_rss_ctx;
6887 
6888 	/* allocate context for vnic */
6889 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
6890 	if (rc) {
6891 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6892 			   vnic_id, rc);
6893 		goto vnic_setup_err;
6894 	}
6895 	bp->rsscos_nr_ctxs++;
6896 
6897 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6898 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6899 		if (rc) {
6900 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6901 				   vnic_id, rc);
6902 			goto vnic_setup_err;
6903 		}
6904 		bp->rsscos_nr_ctxs++;
6905 	}
6906 
6907 skip_rss_ctx:
6908 	/* configure default vnic, ring grp */
6909 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6910 	if (rc) {
6911 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6912 			   vnic_id, rc);
6913 		goto vnic_setup_err;
6914 	}
6915 
6916 	/* Enable RSS hashing on vnic */
6917 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6918 	if (rc) {
6919 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6920 			   vnic_id, rc);
6921 		goto vnic_setup_err;
6922 	}
6923 
6924 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6925 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6926 		if (rc) {
6927 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6928 				   vnic_id, rc);
6929 		}
6930 	}
6931 
6932 vnic_setup_err:
6933 	return rc;
6934 }
6935 
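/* VNIC setup for P5 chips.  One RSS context is needed per 64 RX rings,
 * so allocate the required contexts before programming RSS and the
 * VNIC itself.
 */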
6936 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6937 {
6938 	int rc, i, nr_ctxs;
6939 
6940 	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6941 	for (i = 0; i < nr_ctxs; i++) {
6942 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6943 		if (rc) {
6944 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6945 				   vnic_id, i, rc);
6946 			break;
6947 		}
6948 		bp->rsscos_nr_ctxs++;
6949 	}
6950 	if (i < nr_ctxs)
6951 		return -ENOMEM;
6952 
6953 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6954 	if (rc) {
6955 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6956 			   vnic_id, rc);
6957 		return rc;
6958 	}
6959 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6960 	if (rc) {
6961 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6962 			   vnic_id, rc);
6963 		return rc;
6964 	}
6965 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6966 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6967 		if (rc) {
6968 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6969 				   vnic_id, rc);
6970 		}
6971 	}
6972 	return rc;
6973 }
6974 
6975 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6976 {
6977 	if (bp->flags & BNXT_FLAG_CHIP_P5)
6978 		return __bnxt_setup_vnic_p5(bp, vnic_id);
6979 	else
6980 		return __bnxt_setup_vnic(bp, vnic_id);
6981 }
6982 
6983 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
6984 {
6985 #ifdef CONFIG_RFS_ACCEL
6986 	int i, rc = 0;
6987 
6988 	for (i = 0; i < bp->rx_nr_rings; i++) {
6989 		struct bnxt_vnic_info *vnic;
6990 		u16 vnic_id = i + 1;
6991 		u16 ring_id = i;
6992 
6993 		if (vnic_id >= bp->nr_vnics)
6994 			break;
6995 
6996 		vnic = &bp->vnic_info[vnic_id];
6997 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
6998 		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6999 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7000 		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7001 		if (rc) {
7002 			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7003 				   vnic_id, rc);
7004 			break;
7005 		}
7006 		rc = bnxt_setup_vnic(bp, vnic_id);
7007 		if (rc)
7008 			break;
7009 	}
7010 	return rc;
7011 #else
7012 	return 0;
7013 #endif
7014 }
7015 
7016 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7017 static bool bnxt_promisc_ok(struct bnxt *bp)
7018 {
7019 #ifdef CONFIG_BNXT_SRIOV
7020 	if (BNXT_VF(bp) && !bp->vf.vlan)
7021 		return false;
7022 #endif
7023 	return true;
7024 }
7025 
7026 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7027 {
7028 	int rc;
7029 
7030 	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7031 	if (rc) {
7032 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7033 			   rc);
7034 		return rc;
7035 	}
7036 
7037 	rc = bnxt_hwrm_vnic_cfg(bp, 1);
7038 	if (rc) {
7039 		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7040 			   rc);
7041 		return rc;
7042 	}
7043 	return rc;
7044 }
7045 
7046 static int bnxt_cfg_rx_mode(struct bnxt *);
7047 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7048 
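/* Bring up the chip: allocate stat contexts, rings, ring groups and the
 * default VNIC, program RSS, TPA, filters and the RX mode, and set up
 * interrupt coalescing.
 */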
7049 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7050 {
7051 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7052 	int rc = 0;
7053 	unsigned int rx_nr_rings = bp->rx_nr_rings;
7054 
7055 	if (irq_re_init) {
7056 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
7057 		if (rc) {
7058 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7059 				   rc);
7060 			goto err_out;
7061 		}
7062 	}
7063 
7064 	rc = bnxt_hwrm_ring_alloc(bp);
7065 	if (rc) {
7066 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7067 		goto err_out;
7068 	}
7069 
7070 	rc = bnxt_hwrm_ring_grp_alloc(bp);
7071 	if (rc) {
7072 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7073 		goto err_out;
7074 	}
7075 
7076 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7077 		rx_nr_rings--;
7078 
7079 	/* default vnic 0 */
7080 	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7081 	if (rc) {
7082 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7083 		goto err_out;
7084 	}
7085 
7086 	rc = bnxt_setup_vnic(bp, 0);
7087 	if (rc)
7088 		goto err_out;
7089 
7090 	if (bp->flags & BNXT_FLAG_RFS) {
7091 		rc = bnxt_alloc_rfs_vnics(bp);
7092 		if (rc)
7093 			goto err_out;
7094 	}
7095 
7096 	if (bp->flags & BNXT_FLAG_TPA) {
7097 		rc = bnxt_set_tpa(bp, true);
7098 		if (rc)
7099 			goto err_out;
7100 	}
7101 
7102 	if (BNXT_VF(bp))
7103 		bnxt_update_vf_mac(bp);
7104 
7105 	/* Filter for default vnic 0 */
7106 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7107 	if (rc) {
7108 		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7109 		goto err_out;
7110 	}
7111 	vnic->uc_filter_count = 1;
7112 
7113 	vnic->rx_mask = 0;
7114 	if (bp->dev->flags & IFF_BROADCAST)
7115 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7116 
7117 	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7118 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7119 
7120 	if (bp->dev->flags & IFF_ALLMULTI) {
7121 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7122 		vnic->mc_list_count = 0;
7123 	} else {
7124 		u32 mask = 0;
7125 
7126 		bnxt_mc_list_updated(bp, &mask);
7127 		vnic->rx_mask |= mask;
7128 	}
7129 
7130 	rc = bnxt_cfg_rx_mode(bp);
7131 	if (rc)
7132 		goto err_out;
7133 
7134 	rc = bnxt_hwrm_set_coal(bp);
7135 	if (rc)
7136 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7137 				rc);
7138 
7139 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7140 		rc = bnxt_setup_nitroa0_vnic(bp);
7141 		if (rc)
7142 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7143 				   rc);
7144 	}
7145 
7146 	if (BNXT_VF(bp)) {
7147 		bnxt_hwrm_func_qcfg(bp);
7148 		netdev_update_features(bp->dev);
7149 	}
7150 
7151 	return 0;
7152 
7153 err_out:
7154 	bnxt_hwrm_resource_free(bp, 0, true);
7155 
7156 	return rc;
7157 }
7158 
7159 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7160 {
7161 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7162 	return 0;
7163 }
7164 
7165 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7166 {
7167 	bnxt_init_cp_rings(bp);
7168 	bnxt_init_rx_rings(bp);
7169 	bnxt_init_tx_rings(bp);
7170 	bnxt_init_ring_grps(bp, irq_re_init);
7171 	bnxt_init_vnics(bp);
7172 
7173 	return bnxt_init_chip(bp, irq_re_init);
7174 }
7175 
7176 static int bnxt_set_real_num_queues(struct bnxt *bp)
7177 {
7178 	int rc;
7179 	struct net_device *dev = bp->dev;
7180 
7181 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7182 					  bp->tx_nr_rings_xdp);
7183 	if (rc)
7184 		return rc;
7185 
7186 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7187 	if (rc)
7188 		return rc;
7189 
7190 #ifdef CONFIG_RFS_ACCEL
7191 	if (bp->flags & BNXT_FLAG_RFS)
7192 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7193 #endif
7194 
7195 	return rc;
7196 }
7197 
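/* Trim the requested RX and TX ring counts to fit within 'max':
 * with shared completion rings each count is simply capped at 'max',
 * otherwise the larger count is reduced until rx + tx <= max.
 */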
7198 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7199 			   bool shared)
7200 {
7201 	int _rx = *rx, _tx = *tx;
7202 
7203 	if (shared) {
7204 		*rx = min_t(int, _rx, max);
7205 		*tx = min_t(int, _tx, max);
7206 	} else {
7207 		if (max < 2)
7208 			return -ENOMEM;
7209 
7210 		while (_rx + _tx > max) {
7211 			if (_rx > _tx && _rx > 1)
7212 				_rx--;
7213 			else if (_tx > 1)
7214 				_tx--;
7215 		}
7216 		*rx = _rx;
7217 		*tx = _tx;
7218 	}
7219 	return 0;
7220 }
7221 
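/* Map TX queue ranges to traffic classes and give every MSI-X vector
 * a descriptive name and its interrupt handler.
 */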
7222 static void bnxt_setup_msix(struct bnxt *bp)
7223 {
7224 	const int len = sizeof(bp->irq_tbl[0].name);
7225 	struct net_device *dev = bp->dev;
7226 	int tcs, i;
7227 
7228 	tcs = netdev_get_num_tc(dev);
7229 	if (tcs > 1) {
7230 		int i, off, count;
7231 
7232 		for (i = 0; i < tcs; i++) {
7233 			count = bp->tx_nr_rings_per_tc;
7234 			off = i * count;
7235 			netdev_set_tc_queue(dev, i, count, off);
7236 		}
7237 	}
7238 
7239 	for (i = 0; i < bp->cp_nr_rings; i++) {
7240 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7241 		char *attr;
7242 
7243 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7244 			attr = "TxRx";
7245 		else if (i < bp->rx_nr_rings)
7246 			attr = "rx";
7247 		else
7248 			attr = "tx";
7249 
7250 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7251 			 attr, i);
7252 		bp->irq_tbl[map_idx].handler = bnxt_msix;
7253 	}
7254 }
7255 
7256 static void bnxt_setup_inta(struct bnxt *bp)
7257 {
7258 	const int len = sizeof(bp->irq_tbl[0].name);
7259 
7260 	if (netdev_get_num_tc(bp->dev))
7261 		netdev_reset_tc(bp->dev);
7262 
7263 	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7264 		 0);
7265 	bp->irq_tbl[0].handler = bnxt_inta;
7266 }
7267 
7268 static int bnxt_setup_int_mode(struct bnxt *bp)
7269 {
7270 	int rc;
7271 
7272 	if (bp->flags & BNXT_FLAG_USING_MSIX)
7273 		bnxt_setup_msix(bp);
7274 	else
7275 		bnxt_setup_inta(bp);
7276 
7277 	rc = bnxt_set_real_num_queues(bp);
7278 	return rc;
7279 }
7280 
7281 #ifdef CONFIG_RFS_ACCEL
7282 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7283 {
7284 	return bp->hw_resc.max_rsscos_ctxs;
7285 }
7286 
7287 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7288 {
7289 	return bp->hw_resc.max_vnics;
7290 }
7291 #endif
7292 
7293 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7294 {
7295 	return bp->hw_resc.max_stat_ctxs;
7296 }
7297 
7298 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7299 {
7300 	return bp->hw_resc.max_cp_rings;
7301 }
7302 
7303 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7304 {
7305 	unsigned int cp = bp->hw_resc.max_cp_rings;
7306 
7307 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7308 		cp -= bnxt_get_ulp_msix_num(bp);
7309 
7310 	return cp;
7311 }
7312 
7313 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7314 {
7315 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7316 
7317 	if (bp->flags & BNXT_FLAG_CHIP_P5)
7318 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7319 
7320 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7321 }
7322 
7323 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7324 {
7325 	bp->hw_resc.max_irqs = max_irqs;
7326 }
7327 
7328 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7329 {
7330 	unsigned int cp;
7331 
7332 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
7333 	if (bp->flags & BNXT_FLAG_CHIP_P5)
7334 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7335 	else
7336 		return cp - bp->cp_nr_rings;
7337 }
7338 
7339 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7340 {
7341 	unsigned int stat;
7342 
7343 	stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7344 	stat -= bp->cp_nr_rings;
7345 	return stat;
7346 }
7347 
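/* Return how many MSI-X vectors are available for additional completion
 * rings beyond those already in use, taking resource-manager limits
 * into account.
 */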
7348 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7349 {
7350 	int max_cp = bnxt_get_max_func_cp_rings(bp);
7351 	int max_irq = bnxt_get_max_func_irqs(bp);
7352 	int total_req = bp->cp_nr_rings + num;
7353 	int max_idx, avail_msix;
7354 
7355 	max_idx = bp->total_irqs;
7356 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7357 		max_idx = min_t(int, bp->total_irqs, max_cp);
7358 	avail_msix = max_idx - bp->cp_nr_rings;
7359 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
7360 		return avail_msix;
7361 
7362 	if (max_irq < total_req) {
7363 		num = max_irq - bp->cp_nr_rings;
7364 		if (num <= 0)
7365 			return 0;
7366 	}
7367 	return num;
7368 }
7369 
7370 static int bnxt_get_num_msix(struct bnxt *bp)
7371 {
7372 	if (!BNXT_NEW_RM(bp))
7373 		return bnxt_get_max_func_irqs(bp);
7374 
7375 	return bnxt_nq_rings_in_use(bp);
7376 }
7377 
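/* Enable MSI-X, build the IRQ table from the vectors actually granted
 * and trim the RX/TX ring counts to fit.
 */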
7378 static int bnxt_init_msix(struct bnxt *bp)
7379 {
7380 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7381 	struct msix_entry *msix_ent;
7382 
7383 	total_vecs = bnxt_get_num_msix(bp);
7384 	max = bnxt_get_max_func_irqs(bp);
7385 	if (total_vecs > max)
7386 		total_vecs = max;
7387 
7388 	if (!total_vecs)
7389 		return 0;
7390 
7391 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7392 	if (!msix_ent)
7393 		return -ENOMEM;
7394 
7395 	for (i = 0; i < total_vecs; i++) {
7396 		msix_ent[i].entry = i;
7397 		msix_ent[i].vector = 0;
7398 	}
7399 
7400 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7401 		min = 2;
7402 
7403 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
7404 	ulp_msix = bnxt_get_ulp_msix_num(bp);
7405 	if (total_vecs < 0 || total_vecs < ulp_msix) {
7406 		rc = -ENODEV;
7407 		goto msix_setup_exit;
7408 	}
7409 
7410 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7411 	if (bp->irq_tbl) {
7412 		for (i = 0; i < total_vecs; i++)
7413 			bp->irq_tbl[i].vector = msix_ent[i].vector;
7414 
7415 		bp->total_irqs = total_vecs;
7416 		/* Trim rings based upon num of vectors allocated */
7417 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
7418 				     total_vecs - ulp_msix, min == 1);
7419 		if (rc)
7420 			goto msix_setup_exit;
7421 
7422 		bp->cp_nr_rings = (min == 1) ?
7423 				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7424 				  bp->tx_nr_rings + bp->rx_nr_rings;
7425 
7426 	} else {
7427 		rc = -ENOMEM;
7428 		goto msix_setup_exit;
7429 	}
7430 	bp->flags |= BNXT_FLAG_USING_MSIX;
7431 	kfree(msix_ent);
7432 	return 0;
7433 
7434 msix_setup_exit:
7435 	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7436 	kfree(bp->irq_tbl);
7437 	bp->irq_tbl = NULL;
7438 	pci_disable_msix(bp->pdev);
7439 	kfree(msix_ent);
7440 	return rc;
7441 }
7442 
7443 static int bnxt_init_inta(struct bnxt *bp)
7444 {
7445 	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7446 	if (!bp->irq_tbl)
7447 		return -ENOMEM;
7448 
7449 	bp->total_irqs = 1;
7450 	bp->rx_nr_rings = 1;
7451 	bp->tx_nr_rings = 1;
7452 	bp->cp_nr_rings = 1;
7453 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
7454 	bp->irq_tbl[0].vector = bp->pdev->irq;
7455 	return 0;
7456 }
7457 
7458 static int bnxt_init_int_mode(struct bnxt *bp)
7459 {
7460 	int rc = 0;
7461 
7462 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
7463 		rc = bnxt_init_msix(bp);
7464 
7465 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
7466 		/* fallback to INTA */
7467 		rc = bnxt_init_inta(bp);
7468 	}
7469 	return rc;
7470 }
7471 
7472 static void bnxt_clear_int_mode(struct bnxt *bp)
7473 {
7474 	if (bp->flags & BNXT_FLAG_USING_MSIX)
7475 		pci_disable_msix(bp->pdev);
7476 
7477 	kfree(bp->irq_tbl);
7478 	bp->irq_tbl = NULL;
7479 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
7480 }
7481 
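/* Reserve rings with the firmware.  When the required MSI-X count has
 * changed under the new resource manager, interrupts are torn down and
 * re-initialized around the reservation.
 */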
7482 int bnxt_reserve_rings(struct bnxt *bp)
7483 {
7484 	int tcs = netdev_get_num_tc(bp->dev);
7485 	bool reinit_irq = false;
7486 	int rc;
7487 
7488 	if (!bnxt_need_reserve_rings(bp))
7489 		return 0;
7490 
7491 	if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
7492 		bnxt_ulp_irq_stop(bp);
7493 		bnxt_clear_int_mode(bp);
7494 		reinit_irq = true;
7495 	}
7496 	rc = __bnxt_reserve_rings(bp);
7497 	if (reinit_irq) {
7498 		if (!rc)
7499 			rc = bnxt_init_int_mode(bp);
7500 		bnxt_ulp_irq_restart(bp, rc);
7501 	}
7502 	if (rc) {
7503 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7504 		return rc;
7505 	}
7506 	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7507 		netdev_err(bp->dev, "tx ring reservation failure\n");
7508 		netdev_reset_tc(bp->dev);
7509 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7510 		return -ENOMEM;
7511 	}
7512 	return 0;
7513 }
7514 
7515 static void bnxt_free_irq(struct bnxt *bp)
7516 {
7517 	struct bnxt_irq *irq;
7518 	int i;
7519 
7520 #ifdef CONFIG_RFS_ACCEL
7521 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7522 	bp->dev->rx_cpu_rmap = NULL;
7523 #endif
7524 	if (!bp->irq_tbl || !bp->bnapi)
7525 		return;
7526 
7527 	for (i = 0; i < bp->cp_nr_rings; i++) {
7528 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7529 
7530 		irq = &bp->irq_tbl[map_idx];
7531 		if (irq->requested) {
7532 			if (irq->have_cpumask) {
7533 				irq_set_affinity_hint(irq->vector, NULL);
7534 				free_cpumask_var(irq->cpu_mask);
7535 				irq->have_cpumask = 0;
7536 			}
7537 			free_irq(irq->vector, bp->bnapi[i]);
7538 		}
7539 
7540 		irq->requested = 0;
7541 	}
7542 }
7543 
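/* Request one IRQ per completion ring, add RX vectors to the aRFS CPU
 * rmap and set affinity hints spread across the device's NUMA node.
 */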
7544 static int bnxt_request_irq(struct bnxt *bp)
7545 {
7546 	int i, j, rc = 0;
7547 	unsigned long flags = 0;
7548 #ifdef CONFIG_RFS_ACCEL
7549 	struct cpu_rmap *rmap;
7550 #endif
7551 
7552 	rc = bnxt_setup_int_mode(bp);
7553 	if (rc) {
7554 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7555 			   rc);
7556 		return rc;
7557 	}
7558 #ifdef CONFIG_RFS_ACCEL
7559 	rmap = bp->dev->rx_cpu_rmap;
7560 #endif
7561 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7562 		flags = IRQF_SHARED;
7563 
7564 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
7565 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7566 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7567 
7568 #ifdef CONFIG_RFS_ACCEL
7569 		if (rmap && bp->bnapi[i]->rx_ring) {
7570 			rc = irq_cpu_rmap_add(rmap, irq->vector);
7571 			if (rc)
7572 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
7573 					    j);
7574 			j++;
7575 		}
7576 #endif
7577 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7578 				 bp->bnapi[i]);
7579 		if (rc)
7580 			break;
7581 
7582 		irq->requested = 1;
7583 
7584 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7585 			int numa_node = dev_to_node(&bp->pdev->dev);
7586 
7587 			irq->have_cpumask = 1;
7588 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7589 					irq->cpu_mask);
7590 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7591 			if (rc) {
7592 				netdev_warn(bp->dev,
7593 					    "Set affinity failed, IRQ = %d\n",
7594 					    irq->vector);
7595 				break;
7596 			}
7597 		}
7598 	}
7599 	return rc;
7600 }
7601 
7602 static void bnxt_del_napi(struct bnxt *bp)
7603 {
7604 	int i;
7605 
7606 	if (!bp->bnapi)
7607 		return;
7608 
7609 	for (i = 0; i < bp->cp_nr_rings; i++) {
7610 		struct bnxt_napi *bnapi = bp->bnapi[i];
7611 
7612 		napi_hash_del(&bnapi->napi);
7613 		netif_napi_del(&bnapi->napi);
7614 	}
7615 	/* We called napi_hash_del() before netif_napi_del(), so we need
7616 	 * to respect an RCU grace period before freeing napi structures.
7617 	 */
7618 	synchronize_net();
7619 }
7620 
7621 static void bnxt_init_napi(struct bnxt *bp)
7622 {
7623 	int i;
7624 	unsigned int cp_nr_rings = bp->cp_nr_rings;
7625 	struct bnxt_napi *bnapi;
7626 
7627 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
7628 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7629 
7630 		if (bp->flags & BNXT_FLAG_CHIP_P5)
7631 			poll_fn = bnxt_poll_p5;
7632 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7633 			cp_nr_rings--;
7634 		for (i = 0; i < cp_nr_rings; i++) {
7635 			bnapi = bp->bnapi[i];
7636 			netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
7637 		}
7638 		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7639 			bnapi = bp->bnapi[cp_nr_rings];
7640 			netif_napi_add(bp->dev, &bnapi->napi,
7641 				       bnxt_poll_nitroa0, 64);
7642 		}
7643 	} else {
7644 		bnapi = bp->bnapi[0];
7645 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
7646 	}
7647 }
7648 
7649 static void bnxt_disable_napi(struct bnxt *bp)
7650 {
7651 	int i;
7652 
7653 	if (!bp->bnapi)
7654 		return;
7655 
7656 	for (i = 0; i < bp->cp_nr_rings; i++) {
7657 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7658 
7659 		if (bp->bnapi[i]->rx_ring)
7660 			cancel_work_sync(&cpr->dim.work);
7661 
7662 		napi_disable(&bp->bnapi[i]->napi);
7663 	}
7664 }
7665 
7666 static void bnxt_enable_napi(struct bnxt *bp)
7667 {
7668 	int i;
7669 
7670 	for (i = 0; i < bp->cp_nr_rings; i++) {
7671 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7672 		bp->bnapi[i]->in_reset = false;
7673 
7674 		if (bp->bnapi[i]->rx_ring) {
7675 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7676 			cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7677 		}
7678 		napi_enable(&bp->bnapi[i]->napi);
7679 	}
7680 }
7681 
7682 void bnxt_tx_disable(struct bnxt *bp)
7683 {
7684 	int i;
7685 	struct bnxt_tx_ring_info *txr;
7686 
7687 	if (bp->tx_ring) {
7688 		for (i = 0; i < bp->tx_nr_rings; i++) {
7689 			txr = &bp->tx_ring[i];
7690 			txr->dev_state = BNXT_DEV_STATE_CLOSING;
7691 		}
7692 	}
7693 	/* Stop all TX queues */
7694 	netif_tx_disable(bp->dev);
7695 	netif_carrier_off(bp->dev);
7696 }
7697 
7698 void bnxt_tx_enable(struct bnxt *bp)
7699 {
7700 	int i;
7701 	struct bnxt_tx_ring_info *txr;
7702 
7703 	for (i = 0; i < bp->tx_nr_rings; i++) {
7704 		txr = &bp->tx_ring[i];
7705 		txr->dev_state = 0;
7706 	}
7707 	netif_tx_wake_all_queues(bp->dev);
7708 	if (bp->link_info.link_up)
7709 		netif_carrier_on(bp->dev);
7710 }
7711 
7712 static void bnxt_report_link(struct bnxt *bp)
7713 {
7714 	if (bp->link_info.link_up) {
7715 		const char *duplex;
7716 		const char *flow_ctrl;
7717 		u32 speed;
7718 		u16 fec;
7719 
7720 		netif_carrier_on(bp->dev);
7721 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7722 			duplex = "full";
7723 		else
7724 			duplex = "half";
7725 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7726 			flow_ctrl = "ON - receive & transmit";
7727 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7728 			flow_ctrl = "ON - transmit";
7729 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7730 			flow_ctrl = "ON - receive";
7731 		else
7732 			flow_ctrl = "none";
7733 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
7734 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
7735 			    speed, duplex, flow_ctrl);
7736 		if (bp->flags & BNXT_FLAG_EEE_CAP)
7737 			netdev_info(bp->dev, "EEE is %s\n",
7738 				    bp->eee.eee_active ? "active" :
7739 							 "not active");
7740 		fec = bp->link_info.fec_cfg;
7741 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7742 			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7743 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7744 				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7745 				     (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
7746 	} else {
7747 		netif_carrier_off(bp->dev);
7748 		netdev_err(bp->dev, "NIC Link is Down\n");
7749 	}
7750 }
7751 
7752 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7753 {
7754 	int rc = 0;
7755 	struct hwrm_port_phy_qcaps_input req = {0};
7756 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7757 	struct bnxt_link_info *link_info = &bp->link_info;
7758 
7759 	if (bp->hwrm_spec_code < 0x10201)
7760 		return 0;
7761 
7762 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7763 
7764 	mutex_lock(&bp->hwrm_cmd_lock);
7765 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7766 	if (rc)
7767 		goto hwrm_phy_qcaps_exit;
7768 
7769 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
7770 		struct ethtool_eee *eee = &bp->eee;
7771 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7772 
7773 		bp->flags |= BNXT_FLAG_EEE_CAP;
7774 		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7775 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7776 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7777 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7778 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7779 	}
7780 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7781 		if (bp->test_info)
7782 			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7783 	}
7784 	if (resp->supported_speeds_auto_mode)
7785 		link_info->support_auto_speeds =
7786 			le16_to_cpu(resp->supported_speeds_auto_mode);
7787 
7788 	bp->port_count = resp->port_cnt;
7789 
7790 hwrm_phy_qcaps_exit:
7791 	mutex_unlock(&bp->hwrm_cmd_lock);
7792 	return rc;
7793 }
7794 
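/* Query PORT_PHY_QCFG and refresh the cached link state.  When
 * chng_link_state is set, update link_up and report carrier changes;
 * advertised speeds that are no longer supported are also pruned.
 */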
7795 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7796 {
7797 	int rc = 0;
7798 	struct bnxt_link_info *link_info = &bp->link_info;
7799 	struct hwrm_port_phy_qcfg_input req = {0};
7800 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7801 	u8 link_up = link_info->link_up;
7802 	u16 diff;
7803 
7804 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7805 
7806 	mutex_lock(&bp->hwrm_cmd_lock);
7807 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7808 	if (rc) {
7809 		mutex_unlock(&bp->hwrm_cmd_lock);
7810 		return rc;
7811 	}
7812 
7813 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7814 	link_info->phy_link_status = resp->link;
7815 	link_info->duplex = resp->duplex_cfg;
7816 	if (bp->hwrm_spec_code >= 0x10800)
7817 		link_info->duplex = resp->duplex_state;
7818 	link_info->pause = resp->pause;
7819 	link_info->auto_mode = resp->auto_mode;
7820 	link_info->auto_pause_setting = resp->auto_pause;
7821 	link_info->lp_pause = resp->link_partner_adv_pause;
7822 	link_info->force_pause_setting = resp->force_pause;
7823 	link_info->duplex_setting = resp->duplex_cfg;
7824 	if (link_info->phy_link_status == BNXT_LINK_LINK)
7825 		link_info->link_speed = le16_to_cpu(resp->link_speed);
7826 	else
7827 		link_info->link_speed = 0;
7828 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
7829 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7830 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
7831 	link_info->lp_auto_link_speeds =
7832 		le16_to_cpu(resp->link_partner_adv_speeds);
7833 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7834 	link_info->phy_ver[0] = resp->phy_maj;
7835 	link_info->phy_ver[1] = resp->phy_min;
7836 	link_info->phy_ver[2] = resp->phy_bld;
7837 	link_info->media_type = resp->media_type;
7838 	link_info->phy_type = resp->phy_type;
7839 	link_info->transceiver = resp->xcvr_pkg_type;
7840 	link_info->phy_addr = resp->eee_config_phy_addr &
7841 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
7842 	link_info->module_status = resp->module_status;
7843 
7844 	if (bp->flags & BNXT_FLAG_EEE_CAP) {
7845 		struct ethtool_eee *eee = &bp->eee;
7846 		u16 fw_speeds;
7847 
7848 		eee->eee_active = 0;
7849 		if (resp->eee_config_phy_addr &
7850 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7851 			eee->eee_active = 1;
7852 			fw_speeds = le16_to_cpu(
7853 				resp->link_partner_adv_eee_link_speed_mask);
7854 			eee->lp_advertised =
7855 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7856 		}
7857 
7858 		/* Pull initial EEE config */
7859 		if (!chng_link_state) {
7860 			if (resp->eee_config_phy_addr &
7861 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7862 				eee->eee_enabled = 1;
7863 
7864 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7865 			eee->advertised =
7866 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7867 
7868 			if (resp->eee_config_phy_addr &
7869 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7870 				__le32 tmr;
7871 
7872 				eee->tx_lpi_enabled = 1;
7873 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7874 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
7875 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7876 			}
7877 		}
7878 	}
7879 
7880 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7881 	if (bp->hwrm_spec_code >= 0x10504)
7882 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7883 
7884 	/* TODO: need to add more logic to report VF link */
7885 	if (chng_link_state) {
7886 		if (link_info->phy_link_status == BNXT_LINK_LINK)
7887 			link_info->link_up = 1;
7888 		else
7889 			link_info->link_up = 0;
7890 		if (link_up != link_info->link_up)
7891 			bnxt_report_link(bp);
7892 	} else {
7893 		/* always link down if not required to update link state */
7894 		link_info->link_up = 0;
7895 	}
7896 	mutex_unlock(&bp->hwrm_cmd_lock);
7897 
7898 	if (!BNXT_SINGLE_PF(bp))
7899 		return 0;
7900 
7901 	diff = link_info->support_auto_speeds ^ link_info->advertising;
7902 	if ((link_info->support_auto_speeds | diff) !=
7903 	    link_info->support_auto_speeds) {
7904 		/* An advertised speed is no longer supported, so we need to
7905 		 * update the advertisement settings.  Caller holds RTNL
7906 		 * so we can modify link settings.
7907 		 */
7908 		link_info->advertising = link_info->support_auto_speeds;
7909 		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
7910 			bnxt_hwrm_set_link_setting(bp, true, false);
7911 	}
7912 	return 0;
7913 }
7914 
7915 static void bnxt_get_port_module_status(struct bnxt *bp)
7916 {
7917 	struct bnxt_link_info *link_info = &bp->link_info;
7918 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7919 	u8 module_status;
7920 
7921 	if (bnxt_update_link(bp, true))
7922 		return;
7923 
7924 	module_status = link_info->module_status;
7925 	switch (module_status) {
7926 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7927 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7928 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7929 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7930 			    bp->pf.port_id);
7931 		if (bp->hwrm_spec_code >= 0x10201) {
7932 			netdev_warn(bp->dev, "Module part number %s\n",
7933 				    resp->phy_vendor_partnumber);
7934 		}
7935 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7936 			netdev_warn(bp->dev, "TX is disabled\n");
7937 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7938 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7939 	}
7940 }
7941 
7942 static void
7943 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7944 {
7945 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
7946 		if (bp->hwrm_spec_code >= 0x10201)
7947 			req->auto_pause =
7948 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
7949 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7950 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7951 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7952 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
7953 		req->enables |=
7954 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7955 	} else {
7956 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7957 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7958 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7959 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7960 		req->enables |=
7961 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
7962 		if (bp->hwrm_spec_code >= 0x10201) {
7963 			req->auto_pause = req->force_pause;
7964 			req->enables |= cpu_to_le32(
7965 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7966 		}
7967 	}
7968 }
7969 
7970 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7971 				      struct hwrm_port_phy_cfg_input *req)
7972 {
7973 	u8 autoneg = bp->link_info.autoneg;
7974 	u16 fw_link_speed = bp->link_info.req_link_speed;
7975 	u16 advertising = bp->link_info.advertising;
7976 
7977 	if (autoneg & BNXT_AUTONEG_SPEED) {
7978 		req->auto_mode |=
7979 			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
7980 
7981 		req->enables |= cpu_to_le32(
7982 			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7983 		req->auto_link_speed_mask = cpu_to_le16(advertising);
7984 
7985 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7986 		req->flags |=
7987 			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
7988 	} else {
7989 		req->force_link_speed = cpu_to_le16(fw_link_speed);
7990 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
7991 	}
7992 
7993 	/* tell chimp that the setting takes effect immediately */
7994 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
7995 }
7996 
7997 int bnxt_hwrm_set_pause(struct bnxt *bp)
7998 {
7999 	struct hwrm_port_phy_cfg_input req = {0};
8000 	int rc;
8001 
8002 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8003 	bnxt_hwrm_set_pause_common(bp, &req);
8004 
8005 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8006 	    bp->link_info.force_link_chng)
8007 		bnxt_hwrm_set_link_common(bp, &req);
8008 
8009 	mutex_lock(&bp->hwrm_cmd_lock);
8010 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8011 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8012 		/* since changing the pause setting doesn't trigger any link
8013 		 * change event, the driver needs to update the current pause
8014 		 * result upon successful return of the phy_cfg command
8015 		 */
8016 		bp->link_info.pause =
8017 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8018 		bp->link_info.auto_pause_setting = 0;
8019 		if (!bp->link_info.force_link_chng)
8020 			bnxt_report_link(bp);
8021 	}
8022 	bp->link_info.force_link_chng = false;
8023 	mutex_unlock(&bp->hwrm_cmd_lock);
8024 	return rc;
8025 }
8026 
8027 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8028 			      struct hwrm_port_phy_cfg_input *req)
8029 {
8030 	struct ethtool_eee *eee = &bp->eee;
8031 
8032 	if (eee->eee_enabled) {
8033 		u16 eee_speeds;
8034 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8035 
8036 		if (eee->tx_lpi_enabled)
8037 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8038 		else
8039 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8040 
8041 		req->flags |= cpu_to_le32(flags);
8042 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8043 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8044 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8045 	} else {
8046 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8047 	}
8048 }
8049 
8050 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8051 {
8052 	struct hwrm_port_phy_cfg_input req = {0};
8053 
8054 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8055 	if (set_pause)
8056 		bnxt_hwrm_set_pause_common(bp, &req);
8057 
8058 	bnxt_hwrm_set_link_common(bp, &req);
8059 
8060 	if (set_eee)
8061 		bnxt_hwrm_set_eee(bp, &req);
8062 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8063 }
8064 
8065 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8066 {
8067 	struct hwrm_port_phy_cfg_input req = {0};
8068 
8069 	if (!BNXT_SINGLE_PF(bp))
8070 		return 0;
8071 
8072 	if (pci_num_vf(bp->pdev))
8073 		return 0;
8074 
8075 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8076 	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8077 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8078 }
8079 
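/* Notify the firmware that the interface is going up or down.  If the
 * firmware reports that resources changed while the interface was down,
 * re-query the resource caps and clear the cached reservations so that
 * rings get re-reserved on the next open.
 */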
8080 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8081 {
8082 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8083 	struct hwrm_func_drv_if_change_input req = {0};
8084 	bool resc_reinit = false;
8085 	int rc;
8086 
8087 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8088 		return 0;
8089 
8090 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8091 	if (up)
8092 		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8093 	mutex_lock(&bp->hwrm_cmd_lock);
8094 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8095 	if (!rc && (resp->flags &
8096 		    cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8097 		resc_reinit = true;
8098 	mutex_unlock(&bp->hwrm_cmd_lock);
8099 
8100 	if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8101 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8102 
8103 		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8104 		hw_resc->resv_cp_rings = 0;
8105 		hw_resc->resv_stat_ctxs = 0;
8106 		hw_resc->resv_irqs = 0;
8107 		hw_resc->resv_tx_rings = 0;
8108 		hw_resc->resv_rx_rings = 0;
8109 		hw_resc->resv_hw_ring_grps = 0;
8110 		hw_resc->resv_vnics = 0;
8111 		bp->tx_nr_rings = 0;
8112 		bp->rx_nr_rings = 0;
8113 	}
8114 	return rc;
8115 }
8116 
8117 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8118 {
8119 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8120 	struct hwrm_port_led_qcaps_input req = {0};
8121 	struct bnxt_pf_info *pf = &bp->pf;
8122 	int rc;
8123 
8124 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8125 		return 0;
8126 
8127 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8128 	req.port_id = cpu_to_le16(pf->port_id);
8129 	mutex_lock(&bp->hwrm_cmd_lock);
8130 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8131 	if (rc) {
8132 		mutex_unlock(&bp->hwrm_cmd_lock);
8133 		return rc;
8134 	}
8135 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8136 		int i;
8137 
8138 		bp->num_leds = resp->num_leds;
8139 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8140 						 bp->num_leds);
8141 		for (i = 0; i < bp->num_leds; i++) {
8142 			struct bnxt_led_info *led = &bp->leds[i];
8143 			__le16 caps = led->led_state_caps;
8144 
8145 			if (!led->led_group_id ||
8146 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
8147 				bp->num_leds = 0;
8148 				break;
8149 			}
8150 		}
8151 	}
8152 	mutex_unlock(&bp->hwrm_cmd_lock);
8153 	return 0;
8154 }
8155 
8156 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8157 {
8158 	struct hwrm_wol_filter_alloc_input req = {0};
8159 	struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8160 	int rc;
8161 
8162 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8163 	req.port_id = cpu_to_le16(bp->pf.port_id);
8164 	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8165 	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8166 	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8167 	mutex_lock(&bp->hwrm_cmd_lock);
8168 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8169 	if (!rc)
8170 		bp->wol_filter_id = resp->wol_filter_id;
8171 	mutex_unlock(&bp->hwrm_cmd_lock);
8172 	return rc;
8173 }
8174 
8175 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8176 {
8177 	struct hwrm_wol_filter_free_input req = {0};
8178 	int rc;
8179 
8180 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8181 	req.port_id = cpu_to_le16(bp->pf.port_id);
8182 	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8183 	req.wol_filter_id = bp->wol_filter_id;
8184 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8185 	return rc;
8186 }
8187 
8188 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8189 {
8190 	struct hwrm_wol_filter_qcfg_input req = {0};
8191 	struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8192 	u16 next_handle = 0;
8193 	int rc;
8194 
8195 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8196 	req.port_id = cpu_to_le16(bp->pf.port_id);
8197 	req.handle = cpu_to_le16(handle);
8198 	mutex_lock(&bp->hwrm_cmd_lock);
8199 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8200 	if (!rc) {
8201 		next_handle = le16_to_cpu(resp->next_handle);
8202 		if (next_handle != 0) {
8203 			if (resp->wol_type ==
8204 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8205 				bp->wol = 1;
8206 				bp->wol_filter_id = resp->wol_filter_id;
8207 			}
8208 		}
8209 	}
8210 	mutex_unlock(&bp->hwrm_cmd_lock);
8211 	return next_handle;
8212 }
8213 
8214 static void bnxt_get_wol_settings(struct bnxt *bp)
8215 {
8216 	u16 handle = 0;
8217 
8218 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8219 		return;
8220 
8221 	do {
8222 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8223 	} while (handle && handle != 0xffff);
8224 }
8225 
8226 #ifdef CONFIG_BNXT_HWMON
8227 static ssize_t bnxt_show_temp(struct device *dev,
8228 			      struct device_attribute *devattr, char *buf)
8229 {
8230 	struct hwrm_temp_monitor_query_input req = {0};
8231 	struct hwrm_temp_monitor_query_output *resp;
8232 	struct bnxt *bp = dev_get_drvdata(dev);
8233 	u32 temp = 0;
8234 
8235 	resp = bp->hwrm_cmd_resp_addr;
8236 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8237 	mutex_lock(&bp->hwrm_cmd_lock);
8238 	if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8239 		temp = resp->temp * 1000; /* display millidegree */
8240 	mutex_unlock(&bp->hwrm_cmd_lock);
8241 
8242 	return sprintf(buf, "%u\n", temp);
8243 }
8244 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8245 
8246 static struct attribute *bnxt_attrs[] = {
8247 	&sensor_dev_attr_temp1_input.dev_attr.attr,
8248 	NULL
8249 };
8250 ATTRIBUTE_GROUPS(bnxt);
8251 
8252 static void bnxt_hwmon_close(struct bnxt *bp)
8253 {
8254 	if (bp->hwmon_dev) {
8255 		hwmon_device_unregister(bp->hwmon_dev);
8256 		bp->hwmon_dev = NULL;
8257 	}
8258 }
8259 
8260 static void bnxt_hwmon_open(struct bnxt *bp)
8261 {
8262 	struct pci_dev *pdev = bp->pdev;
8263 
8264 	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8265 							  DRV_MODULE_NAME, bp,
8266 							  bnxt_groups);
8267 	if (IS_ERR(bp->hwmon_dev)) {
8268 		bp->hwmon_dev = NULL;
8269 		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8270 	}
8271 }
8272 #else
8273 static void bnxt_hwmon_close(struct bnxt *bp)
8274 {
8275 }
8276 
8277 static void bnxt_hwmon_open(struct bnxt *bp)
8278 {
8279 }
8280 #endif
8281 
8282 static bool bnxt_eee_config_ok(struct bnxt *bp)
8283 {
8284 	struct ethtool_eee *eee = &bp->eee;
8285 	struct bnxt_link_info *link_info = &bp->link_info;
8286 
8287 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8288 		return true;
8289 
8290 	if (eee->eee_enabled) {
8291 		u32 advertising =
8292 			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8293 
8294 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8295 			eee->eee_enabled = 0;
8296 			return false;
8297 		}
8298 		if (eee->advertised & ~advertising) {
8299 			eee->advertised = advertising & eee->supported;
8300 			return false;
8301 		}
8302 	}
8303 	return true;
8304 }
8305 
8306 static int bnxt_update_phy_setting(struct bnxt *bp)
8307 {
8308 	int rc;
8309 	bool update_link = false;
8310 	bool update_pause = false;
8311 	bool update_eee = false;
8312 	struct bnxt_link_info *link_info = &bp->link_info;
8313 
8314 	rc = bnxt_update_link(bp, true);
8315 	if (rc) {
8316 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8317 			   rc);
8318 		return rc;
8319 	}
8320 	if (!BNXT_SINGLE_PF(bp))
8321 		return 0;
8322 
8323 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8324 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8325 	    link_info->req_flow_ctrl)
8326 		update_pause = true;
8327 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8328 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
8329 		update_pause = true;
8330 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8331 		if (BNXT_AUTO_MODE(link_info->auto_mode))
8332 			update_link = true;
8333 		if (link_info->req_link_speed != link_info->force_link_speed)
8334 			update_link = true;
8335 		if (link_info->req_duplex != link_info->duplex_setting)
8336 			update_link = true;
8337 	} else {
8338 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8339 			update_link = true;
8340 		if (link_info->advertising != link_info->auto_link_speeds)
8341 			update_link = true;
8342 	}
8343 
8344 	/* The last close may have shut down the link, so we need to call
8345 	 * PHY_CFG to bring it back up.
8346 	 */
8347 	if (!netif_carrier_ok(bp->dev))
8348 		update_link = true;
8349 
8350 	if (!bnxt_eee_config_ok(bp))
8351 		update_eee = true;
8352 
8353 	if (update_link)
8354 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
8355 	else if (update_pause)
8356 		rc = bnxt_hwrm_set_pause(bp);
8357 	if (rc) {
8358 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8359 			   rc);
8360 		return rc;
8361 	}
8362 
8363 	return rc;
8364 }
8365 
8366 /* Common routine to pre-map certain register block to different GRC window.
8367  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8368  * in the PF and 3 windows in the VF can be customized to map different
8369  * register blocks.
8370  */
8371 static void bnxt_preset_reg_win(struct bnxt *bp)
8372 {
8373 	if (BNXT_PF(bp)) {
8374 		/* CAG registers map to GRC window #4 */
8375 		writel(BNXT_CAG_REG_BASE,
8376 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8377 	}
8378 }
8379 
8380 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8381 
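/* Core open path: reserve rings, allocate memory, set up NAPI and IRQs,
 * initialize the chip, bring up the PHY, then enable interrupts and the
 * TX queues.
 */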
8382 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8383 {
8384 	int rc = 0;
8385 
8386 	bnxt_preset_reg_win(bp);
8387 	netif_carrier_off(bp->dev);
8388 	if (irq_re_init) {
8389 		/* Reserve rings now if none were reserved at driver probe. */
8390 		rc = bnxt_init_dflt_ring_mode(bp);
8391 		if (rc) {
8392 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8393 			return rc;
8394 		}
8395 	}
8396 	rc = bnxt_reserve_rings(bp);
8397 	if (rc)
8398 		return rc;
8399 	if ((bp->flags & BNXT_FLAG_RFS) &&
8400 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8401 		/* disable RFS if falling back to INTA */
8402 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8403 		bp->flags &= ~BNXT_FLAG_RFS;
8404 	}
8405 
8406 	rc = bnxt_alloc_mem(bp, irq_re_init);
8407 	if (rc) {
8408 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8409 		goto open_err_free_mem;
8410 	}
8411 
8412 	if (irq_re_init) {
8413 		bnxt_init_napi(bp);
8414 		rc = bnxt_request_irq(bp);
8415 		if (rc) {
8416 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
8417 			goto open_err_irq;
8418 		}
8419 	}
8420 
8421 	bnxt_enable_napi(bp);
8422 	bnxt_debug_dev_init(bp);
8423 
8424 	rc = bnxt_init_nic(bp, irq_re_init);
8425 	if (rc) {
8426 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8427 		goto open_err;
8428 	}
8429 
8430 	if (link_re_init) {
8431 		mutex_lock(&bp->link_lock);
8432 		rc = bnxt_update_phy_setting(bp);
8433 		mutex_unlock(&bp->link_lock);
8434 		if (rc) {
8435 			netdev_warn(bp->dev, "failed to update phy settings\n");
8436 			if (BNXT_SINGLE_PF(bp)) {
8437 				bp->link_info.phy_retry = true;
8438 				bp->link_info.phy_retry_expires =
8439 					jiffies + 5 * HZ;
8440 			}
8441 		}
8442 	}
8443 
8444 	if (irq_re_init)
8445 		udp_tunnel_get_rx_info(bp->dev);
8446 
8447 	set_bit(BNXT_STATE_OPEN, &bp->state);
8448 	bnxt_enable_int(bp);
8449 	/* Enable TX queues */
8450 	bnxt_tx_enable(bp);
8451 	mod_timer(&bp->timer, jiffies + bp->current_interval);
8452 	/* Poll link status and check for SFP+ module status */
8453 	bnxt_get_port_module_status(bp);
8454 
8455 	/* VF-reps may need to be re-opened after the PF is re-opened */
8456 	if (BNXT_PF(bp))
8457 		bnxt_vf_reps_open(bp);
8458 	return 0;
8459 
8460 open_err:
8461 	bnxt_debug_dev_exit(bp);
8462 	bnxt_disable_napi(bp);
8463 
8464 open_err_irq:
8465 	bnxt_del_napi(bp);
8466 
8467 open_err_free_mem:
8468 	bnxt_free_skbs(bp);
8469 	bnxt_free_irq(bp);
8470 	bnxt_free_mem(bp, true);
8471 	return rc;
8472 }
8473 
8474 /* rtnl_lock held */
8475 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8476 {
8477 	int rc = 0;
8478 
8479 	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8480 	if (rc) {
8481 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8482 		dev_close(bp->dev);
8483 	}
8484 	return rc;
8485 }
8486 
8487 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
8488  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
8489  * self tests.
8490  */
8491 int bnxt_half_open_nic(struct bnxt *bp)
8492 {
8493 	int rc = 0;
8494 
8495 	rc = bnxt_alloc_mem(bp, false);
8496 	if (rc) {
8497 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8498 		goto half_open_err;
8499 	}
8500 	rc = bnxt_init_nic(bp, false);
8501 	if (rc) {
8502 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8503 		goto half_open_err;
8504 	}
8505 	return 0;
8506 
8507 half_open_err:
8508 	bnxt_free_skbs(bp);
8509 	bnxt_free_mem(bp, false);
8510 	dev_close(bp->dev);
8511 	return rc;
8512 }
8513 
8514 /* rtnl_lock held, this call can only be made after a previous successful
8515  * call to bnxt_half_open_nic().
8516  */
8517 void bnxt_half_close_nic(struct bnxt *bp)
8518 {
8519 	bnxt_hwrm_resource_free(bp, false, false);
8520 	bnxt_free_skbs(bp);
8521 	bnxt_free_mem(bp, false);
8522 }
8523 
8524 static int bnxt_open(struct net_device *dev)
8525 {
8526 	struct bnxt *bp = netdev_priv(dev);
8527 	int rc;
8528 
8529 	bnxt_hwrm_if_change(bp, true);
8530 	rc = __bnxt_open_nic(bp, true, true);
8531 	if (rc)
8532 		bnxt_hwrm_if_change(bp, false);
8533 
8534 	bnxt_hwmon_open(bp);
8535 
8536 	return rc;
8537 }
8538 
8539 static bool bnxt_drv_busy(struct bnxt *bp)
8540 {
8541 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8542 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
8543 }
8544 
8545 static void bnxt_get_ring_stats(struct bnxt *bp,
8546 				struct rtnl_link_stats64 *stats);
8547 
8548 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8549 			     bool link_re_init)
8550 {
8551 	/* Close the VF-reps before closing PF */
8552 	if (BNXT_PF(bp))
8553 		bnxt_vf_reps_close(bp);
8554 
8555 	/* Change device state to avoid TX queue wake-ups */
8556 	bnxt_tx_disable(bp);
8557 
8558 	clear_bit(BNXT_STATE_OPEN, &bp->state);
8559 	smp_mb__after_atomic();
8560 	while (bnxt_drv_busy(bp))
8561 		msleep(20);
8562 
8563 	/* Flush rings and disable interrupts */
8564 	bnxt_shutdown_nic(bp, irq_re_init);
8565 
8566 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8567 
8568 	bnxt_debug_dev_exit(bp);
8569 	bnxt_disable_napi(bp);
8570 	del_timer_sync(&bp->timer);
8571 	bnxt_free_skbs(bp);
8572 
8573 	/* Save ring stats before shutdown */
8574 	if (bp->bnapi)
8575 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
8576 	if (irq_re_init) {
8577 		bnxt_free_irq(bp);
8578 		bnxt_del_napi(bp);
8579 	}
8580 	bnxt_free_mem(bp, irq_re_init);
8581 }
8582 
8583 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8584 {
8585 	int rc = 0;
8586 
8587 #ifdef CONFIG_BNXT_SRIOV
8588 	if (bp->sriov_cfg) {
8589 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8590 						      !bp->sriov_cfg,
8591 						      BNXT_SRIOV_CFG_WAIT_TMO);
8592 		if (!rc)
8593 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8594 	}
8595 #endif
8596 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
8597 	return rc;
8598 }
8599 
8600 static int bnxt_close(struct net_device *dev)
8601 {
8602 	struct bnxt *bp = netdev_priv(dev);
8603 
8604 	bnxt_hwmon_close(bp);
8605 	bnxt_close_nic(bp, true, true);
8606 	bnxt_hwrm_shutdown_link(bp);
8607 	bnxt_hwrm_if_change(bp, false);
8608 	return 0;
8609 }
8610 
8611 /* rtnl_lock held */
8612 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8613 {
8614 	switch (cmd) {
8615 	case SIOCGMIIPHY:
8616 		/* fallthru */
8617 	case SIOCGMIIREG: {
8618 		if (!netif_running(dev))
8619 			return -EAGAIN;
8620 
8621 		return 0;
8622 	}
8623 
8624 	case SIOCSMIIREG:
8625 		if (!netif_running(dev))
8626 			return -EAGAIN;
8627 
8628 		return 0;
8629 
8630 	default:
8631 		/* do nothing */
8632 		break;
8633 	}
8634 	return -EOPNOTSUPP;
8635 }
8636 
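/* Accumulate the per-completion-ring hardware counters into the
 * rtnl_link_stats64 structure.
 */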
8637 static void bnxt_get_ring_stats(struct bnxt *bp,
8638 				struct rtnl_link_stats64 *stats)
8639 {
8640 	int i;
8641 
8642 
8643 	for (i = 0; i < bp->cp_nr_rings; i++) {
8644 		struct bnxt_napi *bnapi = bp->bnapi[i];
8645 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8646 		struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8647 
8648 		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8649 		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8650 		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8651 
8652 		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8653 		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8654 		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8655 
8656 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8657 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8658 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8659 
8660 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8661 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8662 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8663 
8664 		stats->rx_missed_errors +=
8665 			le64_to_cpu(hw_stats->rx_discard_pkts);
8666 
8667 		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8668 
8669 		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8670 	}
8671 }
8672 
8673 static void bnxt_add_prev_stats(struct bnxt *bp,
8674 				struct rtnl_link_stats64 *stats)
8675 {
8676 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8677 
8678 	stats->rx_packets += prev_stats->rx_packets;
8679 	stats->tx_packets += prev_stats->tx_packets;
8680 	stats->rx_bytes += prev_stats->rx_bytes;
8681 	stats->tx_bytes += prev_stats->tx_bytes;
8682 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
8683 	stats->multicast += prev_stats->multicast;
8684 	stats->tx_dropped += prev_stats->tx_dropped;
8685 }
8686 
8687 static void
8688 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8689 {
8690 	struct bnxt *bp = netdev_priv(dev);
8691 
8692 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
8693 	/* Make sure bnxt_close_nic() sees that we are reading stats before
8694 	 * we check the BNXT_STATE_OPEN flag.
8695 	 */
8696 	smp_mb__after_atomic();
8697 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8698 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8699 		*stats = bp->net_stats_prev;
8700 		return;
8701 	}
8702 
8703 	bnxt_get_ring_stats(bp, stats);
8704 	bnxt_add_prev_stats(bp, stats);
8705 
8706 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
8707 		struct rx_port_stats *rx = bp->hw_rx_port_stats;
8708 		struct tx_port_stats *tx = bp->hw_tx_port_stats;
8709 
8710 		stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8711 		stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8712 		stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8713 					  le64_to_cpu(rx->rx_ovrsz_frames) +
8714 					  le64_to_cpu(rx->rx_runt_frames);
8715 		stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8716 				   le64_to_cpu(rx->rx_jbr_frames);
8717 		stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8718 		stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8719 		stats->tx_errors = le64_to_cpu(tx->tx_err);
8720 	}
8721 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8722 }
8723 
8724 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8725 {
8726 	struct net_device *dev = bp->dev;
8727 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8728 	struct netdev_hw_addr *ha;
8729 	u8 *haddr;
8730 	int mc_count = 0;
8731 	bool update = false;
8732 	int off = 0;
8733 
8734 	netdev_for_each_mc_addr(ha, dev) {
8735 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
8736 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8737 			vnic->mc_list_count = 0;
8738 			return false;
8739 		}
8740 		haddr = ha->addr;
8741 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8742 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8743 			update = true;
8744 		}
8745 		off += ETH_ALEN;
8746 		mc_count++;
8747 	}
8748 	if (mc_count)
8749 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8750 
8751 	if (mc_count != vnic->mc_list_count) {
8752 		vnic->mc_list_count = mc_count;
8753 		update = true;
8754 	}
8755 	return update;
8756 }
8757 
8758 static bool bnxt_uc_list_updated(struct bnxt *bp)
8759 {
8760 	struct net_device *dev = bp->dev;
8761 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8762 	struct netdev_hw_addr *ha;
8763 	int off = 0;
8764 
8765 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8766 		return true;
8767 
8768 	netdev_for_each_uc_addr(ha, dev) {
8769 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8770 			return true;
8771 
8772 		off += ETH_ALEN;
8773 	}
8774 	return false;
8775 }
8776 
8777 static void bnxt_set_rx_mode(struct net_device *dev)
8778 {
8779 	struct bnxt *bp = netdev_priv(dev);
8780 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8781 	u32 mask = vnic->rx_mask;
8782 	bool mc_update = false;
8783 	bool uc_update;
8784 
8785 	if (!netif_running(dev))
8786 		return;
8787 
8788 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8789 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
8790 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8791 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
8792 
8793 	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8794 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8795 
8796 	uc_update = bnxt_uc_list_updated(bp);
8797 
8798 	if (dev->flags & IFF_BROADCAST)
8799 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8800 	if (dev->flags & IFF_ALLMULTI) {
8801 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8802 		vnic->mc_list_count = 0;
8803 	} else {
8804 		mc_update = bnxt_mc_list_updated(bp, &mask);
8805 	}
8806 
8807 	if (mask != vnic->rx_mask || uc_update || mc_update) {
8808 		vnic->rx_mask = mask;
8809 
8810 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
8811 		bnxt_queue_sp_work(bp);
8812 	}
8813 }
8814 
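/* Sync the unicast filter list and RX mask with the firmware: free the
 * old L2 filters, re-add the current unicast addresses and fall back to
 * promiscuous mode if there are more addresses than filters.
 */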
8815 static int bnxt_cfg_rx_mode(struct bnxt *bp)
8816 {
8817 	struct net_device *dev = bp->dev;
8818 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8819 	struct netdev_hw_addr *ha;
8820 	int i, off = 0, rc;
8821 	bool uc_update;
8822 
8823 	netif_addr_lock_bh(dev);
8824 	uc_update = bnxt_uc_list_updated(bp);
8825 	netif_addr_unlock_bh(dev);
8826 
8827 	if (!uc_update)
8828 		goto skip_uc;
8829 
8830 	mutex_lock(&bp->hwrm_cmd_lock);
8831 	for (i = 1; i < vnic->uc_filter_count; i++) {
8832 		struct hwrm_cfa_l2_filter_free_input req = {0};
8833 
8834 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8835 				       -1);
8836 
8837 		req.l2_filter_id = vnic->fw_l2_filter_id[i];
8838 
8839 		rc = _hwrm_send_message(bp, &req, sizeof(req),
8840 					HWRM_CMD_TIMEOUT);
8841 	}
8842 	mutex_unlock(&bp->hwrm_cmd_lock);
8843 
8844 	vnic->uc_filter_count = 1;
8845 
8846 	netif_addr_lock_bh(dev);
8847 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8848 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8849 	} else {
8850 		netdev_for_each_uc_addr(ha, dev) {
8851 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8852 			off += ETH_ALEN;
8853 			vnic->uc_filter_count++;
8854 		}
8855 	}
8856 	netif_addr_unlock_bh(dev);
8857 
8858 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8859 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8860 		if (rc) {
8861 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8862 				   rc);
8863 			vnic->uc_filter_count = i;
8864 			return rc;
8865 		}
8866 	}
8867 
8868 skip_uc:
8869 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8870 	if (rc)
8871 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8872 			   rc);
8873 
8874 	return rc;
8875 }
8876 
8877 static bool bnxt_can_reserve_rings(struct bnxt *bp)
8878 {
8879 #ifdef CONFIG_BNXT_SRIOV
8880 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
8881 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8882 
8883 		/* No minimum rings were provisioned by the PF.  Don't
8884 		 * reserve rings by default when device is down.
8885 		 */
8886 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8887 			return true;
8888 
8889 		if (!netif_running(bp->dev))
8890 			return false;
8891 	}
8892 #endif
8893 	return true;
8894 }
8895 
8896 /* If the chip and firmware support RFS */
8897 static bool bnxt_rfs_supported(struct bnxt *bp)
8898 {
8899 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8900 		return false;
8901 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8902 		return true;
8903 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8904 		return true;
8905 	return false;
8906 }
8907 
8908 /* If runtime conditions support RFS */
8909 static bool bnxt_rfs_capable(struct bnxt *bp)
8910 {
8911 #ifdef CONFIG_RFS_ACCEL
8912 	int vnics, max_vnics, max_rss_ctxs;
8913 
8914 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8915 		return false;
8916 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
8917 		return false;
8918 
8919 	vnics = 1 + bp->rx_nr_rings;
8920 	max_vnics = bnxt_get_max_func_vnics(bp);
8921 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
8922 
8923 	/* RSS contexts not a limiting factor */
8924 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8925 		max_rss_ctxs = max_vnics;
8926 	if (vnics > max_vnics || vnics > max_rss_ctxs) {
8927 		if (bp->rx_nr_rings > 1)
8928 			netdev_warn(bp->dev,
8929 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8930 				    min(max_rss_ctxs - 1, max_vnics - 1));
8931 		return false;
8932 	}
8933 
8934 	if (!BNXT_NEW_RM(bp))
8935 		return true;
8936 
8937 	if (vnics == bp->hw_resc.resv_vnics)
8938 		return true;
8939 
8940 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
8941 	if (vnics <= bp->hw_resc.resv_vnics)
8942 		return true;
8943 
8944 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
8945 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
8946 	return false;
8947 #else
8948 	return false;
8949 #endif
8950 }
8951 
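/* .ndo_fix_features handler: drop feature bits that cannot be supported in
 * the requested combination (NTUPLE without RFS capability, LRO/GRO_HW
 * without aggregation rings, GRO_HW without GRO) and keep CTAG and STAG RX
 * VLAN acceleration in sync with each other.
 */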
8952 static netdev_features_t bnxt_fix_features(struct net_device *dev,
8953 					   netdev_features_t features)
8954 {
8955 	struct bnxt *bp = netdev_priv(dev);
8956 
8957 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
8958 		features &= ~NETIF_F_NTUPLE;
8959 
8960 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8961 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8962 
8963 	if (!(features & NETIF_F_GRO))
8964 		features &= ~NETIF_F_GRO_HW;
8965 
8966 	if (features & NETIF_F_GRO_HW)
8967 		features &= ~NETIF_F_LRO;
8968 
8969 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
8970 	 * turned on or off together.
8971 	 */
8972 	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8973 	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8974 		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8975 			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8976 				      NETIF_F_HW_VLAN_STAG_RX);
8977 		else
8978 			features |= NETIF_F_HW_VLAN_CTAG_RX |
8979 				    NETIF_F_HW_VLAN_STAG_RX;
8980 	}
8981 #ifdef CONFIG_BNXT_SRIOV
8982 	if (BNXT_VF(bp)) {
8983 		if (bp->vf.vlan) {
8984 			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8985 				      NETIF_F_HW_VLAN_STAG_RX);
8986 		}
8987 	}
8988 #endif
8989 	return features;
8990 }
8991 
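/* .ndo_set_features handler: translate the requested features into driver
 * flags and either update TPA on the fly or close and reopen the NIC when
 * the change requires a full reconfiguration.
 */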
8992 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8993 {
8994 	struct bnxt *bp = netdev_priv(dev);
8995 	u32 flags = bp->flags;
8996 	u32 changes;
8997 	int rc = 0;
8998 	bool re_init = false;
8999 	bool update_tpa = false;
9000 
9001 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9002 	if (features & NETIF_F_GRO_HW)
9003 		flags |= BNXT_FLAG_GRO;
9004 	else if (features & NETIF_F_LRO)
9005 		flags |= BNXT_FLAG_LRO;
9006 
9007 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9008 		flags &= ~BNXT_FLAG_TPA;
9009 
9010 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
9011 		flags |= BNXT_FLAG_STRIP_VLAN;
9012 
9013 	if (features & NETIF_F_NTUPLE)
9014 		flags |= BNXT_FLAG_RFS;
9015 
9016 	changes = flags ^ bp->flags;
9017 	if (changes & BNXT_FLAG_TPA) {
9018 		update_tpa = true;
9019 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9020 		    (flags & BNXT_FLAG_TPA) == 0)
9021 			re_init = true;
9022 	}
9023 
9024 	if (changes & ~BNXT_FLAG_TPA)
9025 		re_init = true;
9026 
9027 	if (flags != bp->flags) {
9028 		u32 old_flags = bp->flags;
9029 
9030 		bp->flags = flags;
9031 
9032 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9033 			if (update_tpa)
9034 				bnxt_set_ring_params(bp);
9035 			return rc;
9036 		}
9037 
9038 		if (re_init) {
9039 			bnxt_close_nic(bp, false, false);
9040 			if (update_tpa)
9041 				bnxt_set_ring_params(bp);
9042 
9043 			return bnxt_open_nic(bp, false, false);
9044 		}
9045 		if (update_tpa) {
9046 			rc = bnxt_set_tpa(bp,
9047 					  (flags & BNXT_FLAG_TPA) ?
9048 					  true : false);
9049 			if (rc)
9050 				bp->flags = old_flags;
9051 		}
9052 	}
9053 	return rc;
9054 }
9055 
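/* Query the firmware for the current producer and consumer index of the
 * given firmware ring.  Used for debugging suspected missed interrupts.
 */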
9056 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9057 				       u32 ring_id, u32 *prod, u32 *cons)
9058 {
9059 	struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9060 	struct hwrm_dbg_ring_info_get_input req = {0};
9061 	int rc;
9062 
9063 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9064 	req.ring_type = ring_type;
9065 	req.fw_ring_id = cpu_to_le32(ring_id);
9066 	mutex_lock(&bp->hwrm_cmd_lock);
9067 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9068 	if (!rc) {
9069 		*prod = le32_to_cpu(resp->producer_index);
9070 		*cons = le32_to_cpu(resp->consumer_index);
9071 	}
9072 	mutex_unlock(&bp->hwrm_cmd_lock);
9073 	return rc;
9074 }
9075 
9076 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9077 {
9078 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9079 	int i = bnapi->index;
9080 
9081 	if (!txr)
9082 		return;
9083 
9084 	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9085 		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9086 		    txr->tx_cons);
9087 }
9088 
9089 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9090 {
9091 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9092 	int i = bnapi->index;
9093 
9094 	if (!rxr)
9095 		return;
9096 
9097 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9098 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9099 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9100 		    rxr->rx_sw_agg_prod);
9101 }
9102 
9103 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9104 {
9105 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9106 	int i = bnapi->index;
9107 
9108 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9109 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9110 }
9111 
9112 static void bnxt_dbg_dump_states(struct bnxt *bp)
9113 {
9114 	int i;
9115 	struct bnxt_napi *bnapi;
9116 
9117 	for (i = 0; i < bp->cp_nr_rings; i++) {
9118 		bnapi = bp->bnapi[i];
9119 		if (netif_msg_drv(bp)) {
9120 			bnxt_dump_tx_sw_state(bnapi);
9121 			bnxt_dump_rx_sw_state(bnapi);
9122 			bnxt_dump_cp_sw_state(bnapi);
9123 		}
9124 	}
9125 }
9126 
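/* Reset the NIC by closing and reopening it.  Called with rtnl_lock held;
 * unless a silent reset was requested, also dump the ring state and stop
 * and restart the ULP (RDMA) driver around the reset.
 */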
9127 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9128 {
9129 	if (!silent)
9130 		bnxt_dbg_dump_states(bp);
9131 	if (netif_running(bp->dev)) {
9132 		int rc;
9133 
9134 		if (!silent)
9135 			bnxt_ulp_stop(bp);
9136 		bnxt_close_nic(bp, false, false);
9137 		rc = bnxt_open_nic(bp, false, false);
9138 		if (!silent && !rc)
9139 			bnxt_ulp_start(bp);
9140 	}
9141 }
9142 
9143 static void bnxt_tx_timeout(struct net_device *dev)
9144 {
9145 	struct bnxt *bp = netdev_priv(dev);
9146 
9147 	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9148 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9149 	bnxt_queue_sp_work(bp);
9150 }
9151 
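/* Periodic timer callback: schedule deferred work (port statistics, flower
 * stats, PHY setting retries, missed IRQ check) on the slow path task and
 * rearm the timer while the interface is running.
 */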
9152 static void bnxt_timer(struct timer_list *t)
9153 {
9154 	struct bnxt *bp = from_timer(bp, t, timer);
9155 	struct net_device *dev = bp->dev;
9156 
9157 	if (!netif_running(dev))
9158 		return;
9159 
9160 	if (atomic_read(&bp->intr_sem) != 0)
9161 		goto bnxt_restart_timer;
9162 
9163 	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9164 	    bp->stats_coal_ticks) {
9165 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9166 		bnxt_queue_sp_work(bp);
9167 	}
9168 
9169 	if (bnxt_tc_flower_enabled(bp)) {
9170 		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9171 		bnxt_queue_sp_work(bp);
9172 	}
9173 
9174 	if (bp->link_info.phy_retry) {
9175 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9176 			bp->link_info.phy_retry = 0;
9177 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9178 		} else {
9179 			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9180 			bnxt_queue_sp_work(bp);
9181 		}
9182 	}
9183 
9184 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9185 		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9186 		bnxt_queue_sp_work(bp);
9187 	}
9188 bnxt_restart_timer:
9189 	mod_timer(&bp->timer, jiffies + bp->current_interval);
9190 }
9191 
9192 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
9193 {
9194 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9195 	 * set.  If the device is being closed, bnxt_close() may be holding
9196 	 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
9197 	 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
9198 	 */
9199 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9200 	rtnl_lock();
9201 }
9202 
9203 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9204 {
9205 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9206 	rtnl_unlock();
9207 }
9208 
9209 /* Only called from bnxt_sp_task() */
9210 static void bnxt_reset(struct bnxt *bp, bool silent)
9211 {
9212 	bnxt_rtnl_lock_sp(bp);
9213 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
9214 		bnxt_reset_task(bp, silent);
9215 	bnxt_rtnl_unlock_sp(bp);
9216 }
9217 
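/* On P5 chips, look for completion rings that have pending work but whose
 * consumer index has not advanced since the last check.  Such rings may
 * have missed an interrupt; query the firmware ring state and count the
 * occurrence in the ring's missed_irqs counter.
 */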
9218 static void bnxt_chk_missed_irq(struct bnxt *bp)
9219 {
9220 	int i;
9221 
9222 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9223 		return;
9224 
9225 	for (i = 0; i < bp->cp_nr_rings; i++) {
9226 		struct bnxt_napi *bnapi = bp->bnapi[i];
9227 		struct bnxt_cp_ring_info *cpr;
9228 		u32 fw_ring_id;
9229 		int j;
9230 
9231 		if (!bnapi)
9232 			continue;
9233 
9234 		cpr = &bnapi->cp_ring;
9235 		for (j = 0; j < 2; j++) {
9236 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9237 			u32 val[2];
9238 
9239 			if (!cpr2 || cpr2->has_more_work ||
9240 			    !bnxt_has_work(bp, cpr2))
9241 				continue;
9242 
9243 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9244 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9245 				continue;
9246 			}
9247 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9248 			bnxt_dbg_hwrm_ring_info_get(bp,
9249 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9250 				fw_ring_id, &val[0], &val[1]);
9251 			cpr->missed_irqs++;
9252 		}
9253 	}
9254 }
9255 
9256 static void bnxt_cfg_ntp_filters(struct bnxt *);
9257 
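/* Slow path work function: handle all deferred events flagged in
 * bp->sp_event (RX mode updates, ntuple filters, tunnel ports, statistics,
 * link changes, resets) with BNXT_STATE_IN_SP_TASK set for the duration.
 */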
9258 static void bnxt_sp_task(struct work_struct *work)
9259 {
9260 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
9261 
9262 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9263 	smp_mb__after_atomic();
9264 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9265 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9266 		return;
9267 	}
9268 
9269 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9270 		bnxt_cfg_rx_mode(bp);
9271 
9272 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9273 		bnxt_cfg_ntp_filters(bp);
9274 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9275 		bnxt_hwrm_exec_fwd_req(bp);
9276 	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9277 		bnxt_hwrm_tunnel_dst_port_alloc(
9278 			bp, bp->vxlan_port,
9279 			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9280 	}
9281 	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9282 		bnxt_hwrm_tunnel_dst_port_free(
9283 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9284 	}
9285 	if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9286 		bnxt_hwrm_tunnel_dst_port_alloc(
9287 			bp, bp->nge_port,
9288 			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9289 	}
9290 	if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9291 		bnxt_hwrm_tunnel_dst_port_free(
9292 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9293 	}
9294 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
9295 		bnxt_hwrm_port_qstats(bp);
9296 		bnxt_hwrm_port_qstats_ext(bp);
9297 	}
9298 
9299 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
9300 		int rc;
9301 
9302 		mutex_lock(&bp->link_lock);
9303 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9304 				       &bp->sp_event))
9305 			bnxt_hwrm_phy_qcaps(bp);
9306 
9307 		rc = bnxt_update_link(bp, true);
9308 		mutex_unlock(&bp->link_lock);
9309 		if (rc)
9310 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9311 				   rc);
9312 	}
9313 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9314 		int rc;
9315 
9316 		mutex_lock(&bp->link_lock);
9317 		rc = bnxt_update_phy_setting(bp);
9318 		mutex_unlock(&bp->link_lock);
9319 		if (rc) {
9320 			netdev_warn(bp->dev, "update phy settings retry failed\n");
9321 		} else {
9322 			bp->link_info.phy_retry = false;
9323 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
9324 		}
9325 	}
9326 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
9327 		mutex_lock(&bp->link_lock);
9328 		bnxt_get_port_module_status(bp);
9329 		mutex_unlock(&bp->link_lock);
9330 	}
9331 
9332 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9333 		bnxt_tc_flow_stats_work(bp);
9334 
9335 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9336 		bnxt_chk_missed_irq(bp);
9337 
9338 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
9339 	 * must be the last functions to be called before exiting.
9340 	 */
9341 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9342 		bnxt_reset(bp, false);
9343 
9344 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9345 		bnxt_reset(bp, true);
9346 
9347 	smp_mb__before_atomic();
9348 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9349 }
9350 
9351 /* Under rtnl_lock */
9352 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9353 		     int tx_xdp)
9354 {
9355 	int max_rx, max_tx, tx_sets = 1;
9356 	int tx_rings_needed, stats;
9357 	int rx_rings = rx;
9358 	int cp, vnics, rc;
9359 
9360 	if (tcs)
9361 		tx_sets = tcs;
9362 
9363 	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9364 	if (rc)
9365 		return rc;
9366 
9367 	if (max_rx < rx)
9368 		return -ENOMEM;
9369 
9370 	tx_rings_needed = tx * tx_sets + tx_xdp;
9371 	if (max_tx < tx_rings_needed)
9372 		return -ENOMEM;
9373 
9374 	vnics = 1;
9375 	if (bp->flags & BNXT_FLAG_RFS)
9376 		vnics += rx_rings;
9377 
9378 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
9379 		rx_rings <<= 1;
9380 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
9381 	stats = cp;
9382 	if (BNXT_NEW_RM(bp)) {
9383 		cp += bnxt_get_ulp_msix_num(bp);
9384 		stats += bnxt_get_ulp_stat_ctxs(bp);
9385 	}
9386 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9387 				     stats, vnics);
9388 }
9389 
9390 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9391 {
9392 	if (bp->bar2) {
9393 		pci_iounmap(pdev, bp->bar2);
9394 		bp->bar2 = NULL;
9395 	}
9396 
9397 	if (bp->bar1) {
9398 		pci_iounmap(pdev, bp->bar1);
9399 		bp->bar1 = NULL;
9400 	}
9401 
9402 	if (bp->bar0) {
9403 		pci_iounmap(pdev, bp->bar0);
9404 		bp->bar0 = NULL;
9405 	}
9406 }
9407 
9408 static void bnxt_cleanup_pci(struct bnxt *bp)
9409 {
9410 	bnxt_unmap_bars(bp, bp->pdev);
9411 	pci_release_regions(bp->pdev);
9412 	pci_disable_device(bp->pdev);
9413 }
9414 
9415 static void bnxt_init_dflt_coal(struct bnxt *bp)
9416 {
9417 	struct bnxt_coal *coal;
9418 
9419 	/* Tick values in microseconds.
9420 	 * 1 coal_buf x bufs_per_record = 1 completion record.
9421 	 */
9422 	coal = &bp->rx_coal;
9423 	coal->coal_ticks = 10;
9424 	coal->coal_bufs = 30;
9425 	coal->coal_ticks_irq = 1;
9426 	coal->coal_bufs_irq = 2;
9427 	coal->idle_thresh = 50;
9428 	coal->bufs_per_record = 2;
9429 	coal->budget = 64;		/* NAPI budget */
9430 
9431 	coal = &bp->tx_coal;
9432 	coal->coal_ticks = 28;
9433 	coal->coal_bufs = 30;
9434 	coal->coal_ticks_irq = 2;
9435 	coal->coal_bufs_irq = 2;
9436 	coal->bufs_per_record = 1;
9437 
9438 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9439 }
9440 
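/* One-time PCI/board setup: enable the device, map the register and
 * doorbell BARs, set the DMA mask, and initialize the slow path work,
 * default ring sizes, coalescing defaults and the periodic timer.
 */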
9441 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9442 {
9443 	int rc;
9444 	struct bnxt *bp = netdev_priv(dev);
9445 
9446 	SET_NETDEV_DEV(dev, &pdev->dev);
9447 
9448 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
9449 	rc = pci_enable_device(pdev);
9450 	if (rc) {
9451 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9452 		goto init_err;
9453 	}
9454 
9455 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9456 		dev_err(&pdev->dev,
9457 			"Cannot find PCI device base address, aborting\n");
9458 		rc = -ENODEV;
9459 		goto init_err_disable;
9460 	}
9461 
9462 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9463 	if (rc) {
9464 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9465 		goto init_err_disable;
9466 	}
9467 
9468 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9469 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9470 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
9471 		goto init_err_disable;
9472 	}
9473 
9474 	pci_set_master(pdev);
9475 
9476 	bp->dev = dev;
9477 	bp->pdev = pdev;
9478 
9479 	bp->bar0 = pci_ioremap_bar(pdev, 0);
9480 	if (!bp->bar0) {
9481 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9482 		rc = -ENOMEM;
9483 		goto init_err_release;
9484 	}
9485 
9486 	bp->bar1 = pci_ioremap_bar(pdev, 2);
9487 	if (!bp->bar1) {
9488 		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9489 		rc = -ENOMEM;
9490 		goto init_err_release;
9491 	}
9492 
9493 	bp->bar2 = pci_ioremap_bar(pdev, 4);
9494 	if (!bp->bar2) {
9495 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9496 		rc = -ENOMEM;
9497 		goto init_err_release;
9498 	}
9499 
9500 	pci_enable_pcie_error_reporting(pdev);
9501 
9502 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
9503 
9504 	spin_lock_init(&bp->ntp_fltr_lock);
9505 #if BITS_PER_LONG == 32
9506 	spin_lock_init(&bp->db_lock);
9507 #endif
9508 
9509 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9510 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9511 
9512 	bnxt_init_dflt_coal(bp);
9513 
9514 	timer_setup(&bp->timer, bnxt_timer, 0);
9515 	bp->current_interval = BNXT_TIMER_INTERVAL;
9516 
9517 	clear_bit(BNXT_STATE_OPEN, &bp->state);
9518 	return 0;
9519 
9520 init_err_release:
9521 	bnxt_unmap_bars(bp, pdev);
9522 	pci_release_regions(pdev);
9523 
9524 init_err_disable:
9525 	pci_disable_device(pdev);
9526 
9527 init_err:
9528 	return rc;
9529 }
9530 
9531 /* rtnl_lock held */
9532 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9533 {
9534 	struct sockaddr *addr = p;
9535 	struct bnxt *bp = netdev_priv(dev);
9536 	int rc = 0;
9537 
9538 	if (!is_valid_ether_addr(addr->sa_data))
9539 		return -EADDRNOTAVAIL;
9540 
9541 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9542 		return 0;
9543 
9544 	rc = bnxt_approve_mac(bp, addr->sa_data, true);
9545 	if (rc)
9546 		return rc;
9547 
9548 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9549 	if (netif_running(dev)) {
9550 		bnxt_close_nic(bp, false, false);
9551 		rc = bnxt_open_nic(bp, false, false);
9552 	}
9553 
9554 	return rc;
9555 }
9556 
9557 /* rtnl_lock held */
9558 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9559 {
9560 	struct bnxt *bp = netdev_priv(dev);
9561 
9562 	if (netif_running(dev))
9563 		bnxt_close_nic(bp, false, false);
9564 
9565 	dev->mtu = new_mtu;
9566 	bnxt_set_ring_params(bp);
9567 
9568 	if (netif_running(dev))
9569 		return bnxt_open_nic(bp, false, false);
9570 
9571 	return 0;
9572 }
9573 
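/* Configure the number of hardware traffic classes.  Verify that enough
 * rings are available, then close the NIC, resize the TX rings per TC and
 * reopen it if it was running.
 */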
9574 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
9575 {
9576 	struct bnxt *bp = netdev_priv(dev);
9577 	bool sh = false;
9578 	int rc;
9579 
9580 	if (tc > bp->max_tc) {
9581 		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
9582 			   tc, bp->max_tc);
9583 		return -EINVAL;
9584 	}
9585 
9586 	if (netdev_get_num_tc(dev) == tc)
9587 		return 0;
9588 
9589 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9590 		sh = true;
9591 
9592 	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9593 			      sh, tc, bp->tx_nr_rings_xdp);
9594 	if (rc)
9595 		return rc;
9596 
9597 	/* Need to close the device and do hw resource re-allocations */
9598 	if (netif_running(bp->dev))
9599 		bnxt_close_nic(bp, true, false);
9600 
9601 	if (tc) {
9602 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9603 		netdev_set_num_tc(dev, tc);
9604 	} else {
9605 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9606 		netdev_reset_tc(dev);
9607 	}
9608 	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
9609 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9610 			       bp->tx_nr_rings + bp->rx_nr_rings;
9611 
9612 	if (netif_running(bp->dev))
9613 		return bnxt_open_nic(bp, true, false);
9614 
9615 	return 0;
9616 }
9617 
9618 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9619 				  void *cb_priv)
9620 {
9621 	struct bnxt *bp = cb_priv;
9622 
9623 	if (!bnxt_tc_flower_enabled(bp) ||
9624 	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
9625 		return -EOPNOTSUPP;
9626 
9627 	switch (type) {
9628 	case TC_SETUP_CLSFLOWER:
9629 		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9630 	default:
9631 		return -EOPNOTSUPP;
9632 	}
9633 }
9634 
9635 static int bnxt_setup_tc_block(struct net_device *dev,
9636 			       struct tc_block_offload *f)
9637 {
9638 	struct bnxt *bp = netdev_priv(dev);
9639 
9640 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9641 		return -EOPNOTSUPP;
9642 
9643 	switch (f->command) {
9644 	case TC_BLOCK_BIND:
9645 		return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
9646 					     bp, bp, f->extack);
9647 	case TC_BLOCK_UNBIND:
9648 		tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9649 		return 0;
9650 	default:
9651 		return -EOPNOTSUPP;
9652 	}
9653 }
9654 
9655 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
9656 			 void *type_data)
9657 {
9658 	switch (type) {
9659 	case TC_SETUP_BLOCK:
9660 		return bnxt_setup_tc_block(dev, type_data);
9661 	case TC_SETUP_QDISC_MQPRIO: {
9662 		struct tc_mqprio_qopt *mqprio = type_data;
9663 
9664 		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9665 
9666 		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9667 	}
9668 	default:
9669 		return -EOPNOTSUPP;
9670 	}
9671 }
9672 
9673 #ifdef CONFIG_RFS_ACCEL
9674 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9675 			    struct bnxt_ntuple_filter *f2)
9676 {
9677 	struct flow_keys *keys1 = &f1->fkeys;
9678 	struct flow_keys *keys2 = &f2->fkeys;
9679 
9680 	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9681 	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9682 	    keys1->ports.ports == keys2->ports.ports &&
9683 	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
9684 	    keys1->basic.n_proto == keys2->basic.n_proto &&
9685 	    keys1->control.flags == keys2->control.flags &&
9686 	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9687 	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
9688 		return true;
9689 
9690 	return false;
9691 }
9692 
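/* .ndo_rx_flow_steer handler (aRFS): dissect the flow, reject unsupported
 * protocols, and if no matching ntuple filter exists yet, allocate one and
 * schedule the slow path task to program it into the hardware.
 */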
9693 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9694 			      u16 rxq_index, u32 flow_id)
9695 {
9696 	struct bnxt *bp = netdev_priv(dev);
9697 	struct bnxt_ntuple_filter *fltr, *new_fltr;
9698 	struct flow_keys *fkeys;
9699 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
9700 	int rc = 0, idx, bit_id, l2_idx = 0;
9701 	struct hlist_head *head;
9702 
9703 	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9704 		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9705 		int off = 0, j;
9706 
9707 		netif_addr_lock_bh(dev);
9708 		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9709 			if (ether_addr_equal(eth->h_dest,
9710 					     vnic->uc_list + off)) {
9711 				l2_idx = j + 1;
9712 				break;
9713 			}
9714 		}
9715 		netif_addr_unlock_bh(dev);
9716 		if (!l2_idx)
9717 			return -EINVAL;
9718 	}
9719 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9720 	if (!new_fltr)
9721 		return -ENOMEM;
9722 
9723 	fkeys = &new_fltr->fkeys;
9724 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9725 		rc = -EPROTONOSUPPORT;
9726 		goto err_free;
9727 	}
9728 
9729 	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9730 	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
9731 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9732 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9733 		rc = -EPROTONOSUPPORT;
9734 		goto err_free;
9735 	}
9736 	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9737 	    bp->hwrm_spec_code < 0x10601) {
9738 		rc = -EPROTONOSUPPORT;
9739 		goto err_free;
9740 	}
9741 	if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9742 	    bp->hwrm_spec_code < 0x10601) {
9743 		rc = -EPROTONOSUPPORT;
9744 		goto err_free;
9745 	}
9746 
9747 	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
9748 	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9749 
9750 	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9751 	head = &bp->ntp_fltr_hash_tbl[idx];
9752 	rcu_read_lock();
9753 	hlist_for_each_entry_rcu(fltr, head, hash) {
9754 		if (bnxt_fltr_match(fltr, new_fltr)) {
9755 			rcu_read_unlock();
9756 			rc = 0;
9757 			goto err_free;
9758 		}
9759 	}
9760 	rcu_read_unlock();
9761 
9762 	spin_lock_bh(&bp->ntp_fltr_lock);
9763 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9764 					 BNXT_NTP_FLTR_MAX_FLTR, 0);
9765 	if (bit_id < 0) {
9766 		spin_unlock_bh(&bp->ntp_fltr_lock);
9767 		rc = -ENOMEM;
9768 		goto err_free;
9769 	}
9770 
9771 	new_fltr->sw_id = (u16)bit_id;
9772 	new_fltr->flow_id = flow_id;
9773 	new_fltr->l2_fltr_idx = l2_idx;
9774 	new_fltr->rxq = rxq_index;
9775 	hlist_add_head_rcu(&new_fltr->hash, head);
9776 	bp->ntp_fltr_count++;
9777 	spin_unlock_bh(&bp->ntp_fltr_lock);
9778 
9779 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
9780 	bnxt_queue_sp_work(bp);
9781 
9782 	return new_fltr->sw_id;
9783 
9784 err_free:
9785 	kfree(new_fltr);
9786 	return rc;
9787 }
9788 
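/* Walk the ntuple filter hash table: program newly added filters into the
 * hardware and free filters whose flows have expired according to
 * rps_may_expire_flow().
 */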
9789 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9790 {
9791 	int i;
9792 
9793 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9794 		struct hlist_head *head;
9795 		struct hlist_node *tmp;
9796 		struct bnxt_ntuple_filter *fltr;
9797 		int rc;
9798 
9799 		head = &bp->ntp_fltr_hash_tbl[i];
9800 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9801 			bool del = false;
9802 
9803 			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9804 				if (rps_may_expire_flow(bp->dev, fltr->rxq,
9805 							fltr->flow_id,
9806 							fltr->sw_id)) {
9807 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
9808 									 fltr);
9809 					del = true;
9810 				}
9811 			} else {
9812 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9813 								       fltr);
9814 				if (rc)
9815 					del = true;
9816 				else
9817 					set_bit(BNXT_FLTR_VALID, &fltr->state);
9818 			}
9819 
9820 			if (del) {
9821 				spin_lock_bh(&bp->ntp_fltr_lock);
9822 				hlist_del_rcu(&fltr->hash);
9823 				bp->ntp_fltr_count--;
9824 				spin_unlock_bh(&bp->ntp_fltr_lock);
9825 				synchronize_rcu();
9826 				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9827 				kfree(fltr);
9828 			}
9829 		}
9830 	}
9831 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9832 		netdev_info(bp->dev, "Received PF driver unload event!\n");
9833 }
9834 
9835 #else
9836 
9837 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9838 {
9839 }
9840 
9841 #endif /* CONFIG_RFS_ACCEL */
9842 
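/* .ndo_udp_tunnel_add handler: track the VXLAN/GENEVE destination UDP port
 * reference counts and schedule the slow path task to allocate the tunnel
 * port in the firmware when the first user of a port appears.
 */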
9843 static void bnxt_udp_tunnel_add(struct net_device *dev,
9844 				struct udp_tunnel_info *ti)
9845 {
9846 	struct bnxt *bp = netdev_priv(dev);
9847 
9848 	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9849 		return;
9850 
9851 	if (!netif_running(dev))
9852 		return;
9853 
9854 	switch (ti->type) {
9855 	case UDP_TUNNEL_TYPE_VXLAN:
9856 		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9857 			return;
9858 
9859 		bp->vxlan_port_cnt++;
9860 		if (bp->vxlan_port_cnt == 1) {
9861 			bp->vxlan_port = ti->port;
9862 			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
9863 			bnxt_queue_sp_work(bp);
9864 		}
9865 		break;
9866 	case UDP_TUNNEL_TYPE_GENEVE:
9867 		if (bp->nge_port_cnt && bp->nge_port != ti->port)
9868 			return;
9869 
9870 		bp->nge_port_cnt++;
9871 		if (bp->nge_port_cnt == 1) {
9872 			bp->nge_port = ti->port;
9873 			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9874 		}
9875 		break;
9876 	default:
9877 		return;
9878 	}
9879 
9880 	bnxt_queue_sp_work(bp);
9881 }
9882 
9883 static void bnxt_udp_tunnel_del(struct net_device *dev,
9884 				struct udp_tunnel_info *ti)
9885 {
9886 	struct bnxt *bp = netdev_priv(dev);
9887 
9888 	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9889 		return;
9890 
9891 	if (!netif_running(dev))
9892 		return;
9893 
9894 	switch (ti->type) {
9895 	case UDP_TUNNEL_TYPE_VXLAN:
9896 		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9897 			return;
9898 		bp->vxlan_port_cnt--;
9899 
9900 		if (bp->vxlan_port_cnt != 0)
9901 			return;
9902 
9903 		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9904 		break;
9905 	case UDP_TUNNEL_TYPE_GENEVE:
9906 		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9907 			return;
9908 		bp->nge_port_cnt--;
9909 
9910 		if (bp->nge_port_cnt != 0)
9911 			return;
9912 
9913 		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9914 		break;
9915 	default:
9916 		return;
9917 	}
9918 
9919 	bnxt_queue_sp_work(bp);
9920 }
9921 
9922 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9923 			       struct net_device *dev, u32 filter_mask,
9924 			       int nlflags)
9925 {
9926 	struct bnxt *bp = netdev_priv(dev);
9927 
9928 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
9929 				       nlflags, filter_mask, NULL);
9930 }
9931 
9932 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
9933 			       u16 flags, struct netlink_ext_ack *extack)
9934 {
9935 	struct bnxt *bp = netdev_priv(dev);
9936 	struct nlattr *attr, *br_spec;
9937 	int rem, rc = 0;
9938 
9939 	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
9940 		return -EOPNOTSUPP;
9941 
9942 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9943 	if (!br_spec)
9944 		return -EINVAL;
9945 
9946 	nla_for_each_nested(attr, br_spec, rem) {
9947 		u16 mode;
9948 
9949 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
9950 			continue;
9951 
9952 		if (nla_len(attr) < sizeof(mode))
9953 			return -EINVAL;
9954 
9955 		mode = nla_get_u16(attr);
9956 		if (mode == bp->br_mode)
9957 			break;
9958 
9959 		rc = bnxt_hwrm_set_br_mode(bp, mode);
9960 		if (!rc)
9961 			bp->br_mode = mode;
9962 		break;
9963 	}
9964 	return rc;
9965 }
9966 
9967 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
9968 				   size_t len)
9969 {
9970 	struct bnxt *bp = netdev_priv(dev);
9971 	int rc;
9972 
9973 	/* The PF and its VF-reps only support the switchdev framework */
9974 	if (!BNXT_PF(bp))
9975 		return -EOPNOTSUPP;
9976 
9977 	rc = snprintf(buf, len, "p%d", bp->pf.port_id);
9978 
9979 	if (rc >= len)
9980 		return -EOPNOTSUPP;
9981 	return 0;
9982 }
9983 
9984 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
9985 {
9986 	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
9987 		return -EOPNOTSUPP;
9988 
9989 	/* The PF and its VF-reps only support the switchdev framework */
9990 	if (!BNXT_PF(bp))
9991 		return -EOPNOTSUPP;
9992 
9993 	switch (attr->id) {
9994 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
9995 		attr->u.ppid.id_len = sizeof(bp->switch_id);
9996 		memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
9997 		break;
9998 	default:
9999 		return -EOPNOTSUPP;
10000 	}
10001 	return 0;
10002 }
10003 
10004 static int bnxt_swdev_port_attr_get(struct net_device *dev,
10005 				    struct switchdev_attr *attr)
10006 {
10007 	return bnxt_port_attr_get(netdev_priv(dev), attr);
10008 }
10009 
10010 static const struct switchdev_ops bnxt_switchdev_ops = {
10011 	.switchdev_port_attr_get	= bnxt_swdev_port_attr_get
10012 };
10013 
10014 static const struct net_device_ops bnxt_netdev_ops = {
10015 	.ndo_open		= bnxt_open,
10016 	.ndo_start_xmit		= bnxt_start_xmit,
10017 	.ndo_stop		= bnxt_close,
10018 	.ndo_get_stats64	= bnxt_get_stats64,
10019 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
10020 	.ndo_do_ioctl		= bnxt_ioctl,
10021 	.ndo_validate_addr	= eth_validate_addr,
10022 	.ndo_set_mac_address	= bnxt_change_mac_addr,
10023 	.ndo_change_mtu		= bnxt_change_mtu,
10024 	.ndo_fix_features	= bnxt_fix_features,
10025 	.ndo_set_features	= bnxt_set_features,
10026 	.ndo_tx_timeout		= bnxt_tx_timeout,
10027 #ifdef CONFIG_BNXT_SRIOV
10028 	.ndo_get_vf_config	= bnxt_get_vf_config,
10029 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
10030 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
10031 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
10032 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
10033 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
10034 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
10035 #endif
10036 	.ndo_setup_tc           = bnxt_setup_tc,
10037 #ifdef CONFIG_RFS_ACCEL
10038 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
10039 #endif
10040 	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
10041 	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
10042 	.ndo_bpf		= bnxt_xdp,
10043 	.ndo_bridge_getlink	= bnxt_bridge_getlink,
10044 	.ndo_bridge_setlink	= bnxt_bridge_setlink,
10045 	.ndo_get_phys_port_name = bnxt_get_phys_port_name
10046 };
10047 
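/* PCI remove callback: tear down SR-IOV, devlink, TC offload and the slow
 * path work, free all HWRM and context resources, unmap the BARs and free
 * the netdev.
 */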
10048 static void bnxt_remove_one(struct pci_dev *pdev)
10049 {
10050 	struct net_device *dev = pci_get_drvdata(pdev);
10051 	struct bnxt *bp = netdev_priv(dev);
10052 
10053 	if (BNXT_PF(bp)) {
10054 		bnxt_sriov_disable(bp);
10055 		bnxt_dl_unregister(bp);
10056 	}
10057 
10058 	pci_disable_pcie_error_reporting(pdev);
10059 	unregister_netdev(dev);
10060 	bnxt_shutdown_tc(bp);
10061 	bnxt_cancel_sp_work(bp);
10062 	bp->sp_event = 0;
10063 
10064 	bnxt_clear_int_mode(bp);
10065 	bnxt_hwrm_func_drv_unrgtr(bp);
10066 	bnxt_free_hwrm_resources(bp);
10067 	bnxt_free_hwrm_short_cmd_req(bp);
10068 	bnxt_ethtool_free(bp);
10069 	bnxt_dcb_free(bp);
10070 	kfree(bp->edev);
10071 	bp->edev = NULL;
10072 	bnxt_free_ctx_mem(bp);
10073 	kfree(bp->ctx);
10074 	bp->ctx = NULL;
10075 	bnxt_cleanup_pci(bp);
10076 	bnxt_free_port_stats(bp);
10077 	free_netdev(dev);
10078 }
10079 
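/* Query PHY capabilities and the current link state at probe time, then
 * seed the link settings (autoneg, advertised speeds, flow control) from
 * the NVM configuration.
 */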
10080 static int bnxt_probe_phy(struct bnxt *bp)
10081 {
10082 	int rc = 0;
10083 	struct bnxt_link_info *link_info = &bp->link_info;
10084 
10085 	rc = bnxt_hwrm_phy_qcaps(bp);
10086 	if (rc) {
10087 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10088 			   rc);
10089 		return rc;
10090 	}
10091 	mutex_init(&bp->link_lock);
10092 
10093 	rc = bnxt_update_link(bp, false);
10094 	if (rc) {
10095 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10096 			   rc);
10097 		return rc;
10098 	}
10099 
10100 	/* Older firmware does not have supported_auto_speeds, so assume
10101 	 * that all supported speeds can be autonegotiated.
10102 	 */
10103 	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10104 		link_info->support_auto_speeds = link_info->support_speeds;
10105 
10106 	/* Initialize the ethtool settings copy with NVM settings */
10107 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10108 		link_info->autoneg = BNXT_AUTONEG_SPEED;
10109 		if (bp->hwrm_spec_code >= 0x10201) {
10110 			if (link_info->auto_pause_setting &
10111 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10112 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10113 		} else {
10114 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10115 		}
10116 		link_info->advertising = link_info->auto_link_speeds;
10117 	} else {
10118 		link_info->req_link_speed = link_info->force_link_speed;
10119 		link_info->req_duplex = link_info->duplex_setting;
10120 	}
10121 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10122 		link_info->req_flow_ctrl =
10123 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10124 	else
10125 		link_info->req_flow_ctrl = link_info->force_pause_setting;
10126 	return rc;
10127 }
10128 
10129 static int bnxt_get_max_irq(struct pci_dev *pdev)
10130 {
10131 	u16 ctrl;
10132 
10133 	if (!pdev->msix_cap)
10134 		return 1;
10135 
10136 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10137 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10138 }
10139 
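/* Compute the maximum RX, TX and completion rings this function can
 * support from the hardware resources, excluding MSI-X vectors and stat
 * contexts set aside for the ULP (RDMA) driver.
 */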
10140 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10141 				int *max_cp)
10142 {
10143 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10144 	int max_ring_grps = 0, max_irq;
10145 
10146 	*max_tx = hw_resc->max_tx_rings;
10147 	*max_rx = hw_resc->max_rx_rings;
10148 	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10149 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10150 			bnxt_get_ulp_msix_num(bp),
10151 			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
10152 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10153 		*max_cp = min_t(int, *max_cp, max_irq);
10154 	max_ring_grps = hw_resc->max_hw_ring_grps;
10155 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10156 		*max_cp -= 1;
10157 		*max_rx -= 2;
10158 	}
10159 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
10160 		*max_rx >>= 1;
10161 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
10162 		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10163 		/* On P5 chips, the max_cp output param should be the available NQs */
10164 		*max_cp = max_irq;
10165 	}
10166 	*max_rx = min_t(int, *max_rx, max_ring_grps);
10167 }
10168 
10169 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10170 {
10171 	int rx, tx, cp;
10172 
10173 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
10174 	*max_rx = rx;
10175 	*max_tx = tx;
10176 	if (!rx || !tx || !cp)
10177 		return -ENOMEM;
10178 
10179 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10180 }
10181 
10182 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10183 			       bool shared)
10184 {
10185 	int rc;
10186 
10187 	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10188 	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10189 		/* Not enough rings, try disabling agg rings. */
10190 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10191 		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10192 		if (rc) {
10193 			/* set BNXT_FLAG_AGG_RINGS back for consistency */
10194 			bp->flags |= BNXT_FLAG_AGG_RINGS;
10195 			return rc;
10196 		}
10197 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
10198 		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10199 		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10200 		bnxt_set_ring_params(bp);
10201 	}
10202 
10203 	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10204 		int max_cp, max_stat, max_irq;
10205 
10206 		/* Reserve minimum resources for RoCE */
10207 		max_cp = bnxt_get_max_func_cp_rings(bp);
10208 		max_stat = bnxt_get_max_func_stat_ctxs(bp);
10209 		max_irq = bnxt_get_max_func_irqs(bp);
10210 		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10211 		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10212 		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10213 			return 0;
10214 
10215 		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10216 		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10217 		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10218 		max_cp = min_t(int, max_cp, max_irq);
10219 		max_cp = min_t(int, max_cp, max_stat);
10220 		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10221 		if (rc)
10222 			rc = 0;
10223 	}
10224 	return rc;
10225 }
10226 
10227 /* In the initial default shared ring setting, each shared ring must have an
10228  * RX/TX ring pair.
10229  */
10230 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10231 {
10232 	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10233 	bp->rx_nr_rings = bp->cp_nr_rings;
10234 	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10235 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10236 }
10237 
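/* Choose and reserve the default number of RX/TX rings, bounded by the
 * available hardware resources and the default RSS queue count, reduced on
 * multi-port cards so the total does not exceed the CPU count.
 */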
10238 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
10239 {
10240 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
10241 
10242 	if (!bnxt_can_reserve_rings(bp))
10243 		return 0;
10244 
10245 	if (sh)
10246 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
10247 	dflt_rings = netif_get_num_default_rss_queues();
10248 	/* Reduce default rings on multi-port cards so that total default
10249 	 * rings do not exceed CPU count.
10250 	 */
10251 	if (bp->port_count > 1) {
10252 		int max_rings =
10253 			max_t(int, num_online_cpus() / bp->port_count, 1);
10254 
10255 		dflt_rings = min_t(int, dflt_rings, max_rings);
10256 	}
10257 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
10258 	if (rc)
10259 		return rc;
10260 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10261 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
10262 	if (sh)
10263 		bnxt_trim_dflt_sh_rings(bp);
10264 	else
10265 		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10266 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10267 
10268 	rc = __bnxt_reserve_rings(bp);
10269 	if (rc)
10270 		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
10271 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10272 	if (sh)
10273 		bnxt_trim_dflt_sh_rings(bp);
10274 
10275 	/* Rings may have been trimmed, re-reserve the trimmed rings. */
10276 	if (bnxt_need_reserve_rings(bp)) {
10277 		rc = __bnxt_reserve_rings(bp);
10278 		if (rc)
10279 			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10280 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10281 	}
10282 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10283 		bp->rx_nr_rings++;
10284 		bp->cp_nr_rings++;
10285 	}
10286 	return rc;
10287 }
10288 
10289 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10290 {
10291 	int rc;
10292 
10293 	if (bp->tx_nr_rings)
10294 		return 0;
10295 
10296 	bnxt_ulp_irq_stop(bp);
10297 	bnxt_clear_int_mode(bp);
10298 	rc = bnxt_set_dflt_rings(bp, true);
10299 	if (rc) {
10300 		netdev_err(bp->dev, "Not enough rings available.\n");
10301 		goto init_dflt_ring_err;
10302 	}
10303 	rc = bnxt_init_int_mode(bp);
10304 	if (rc)
10305 		goto init_dflt_ring_err;
10306 
10307 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10308 	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10309 		bp->flags |= BNXT_FLAG_RFS;
10310 		bp->dev->features |= NETIF_F_NTUPLE;
10311 	}
10312 init_dflt_ring_err:
10313 	bnxt_ulp_irq_restart(bp, rc);
10314 	return rc;
10315 }
10316 
10317 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
10318 {
10319 	int rc;
10320 
10321 	ASSERT_RTNL();
10322 	bnxt_hwrm_func_qcaps(bp);
10323 
10324 	if (netif_running(bp->dev))
10325 		__bnxt_close_nic(bp, true, false);
10326 
10327 	bnxt_ulp_irq_stop(bp);
10328 	bnxt_clear_int_mode(bp);
10329 	rc = bnxt_init_int_mode(bp);
10330 	bnxt_ulp_irq_restart(bp, rc);
10331 
10332 	if (netif_running(bp->dev)) {
10333 		if (rc)
10334 			dev_close(bp->dev);
10335 		else
10336 			rc = bnxt_open_nic(bp, true, false);
10337 	}
10338 
10339 	return rc;
10340 }
10341 
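/* Set the initial netdev MAC address: the PF uses its own MAC, while a VF
 * uses the administratively assigned MAC if valid (or a random one
 * otherwise) and asks the PF to approve it.
 */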
10342 static int bnxt_init_mac_addr(struct bnxt *bp)
10343 {
10344 	int rc = 0;
10345 
10346 	if (BNXT_PF(bp)) {
10347 		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10348 	} else {
10349 #ifdef CONFIG_BNXT_SRIOV
10350 		struct bnxt_vf_info *vf = &bp->vf;
10351 		bool strict_approval = true;
10352 
10353 		if (is_valid_ether_addr(vf->mac_addr)) {
10354 			/* overwrite netdev dev_addr with admin VF MAC */
10355 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
10356 			/* Older PF driver or firmware may not approve this
10357 			 * correctly.
10358 			 */
10359 			strict_approval = false;
10360 		} else {
10361 			eth_hw_addr_random(bp->dev);
10362 		}
10363 		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
10364 #endif
10365 	}
10366 	return rc;
10367 }
10368 
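/* PCI probe entry point: allocate the netdev, initialize the board, query
 * firmware and function capabilities, set up netdev features, default
 * rings and interrupt mode, then register the netdev and devlink instance.
 */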
10369 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10370 {
10371 	static int version_printed;
10372 	struct net_device *dev;
10373 	struct bnxt *bp;
10374 	int rc, max_irqs;
10375 
10376 	if (pci_is_bridge(pdev))
10377 		return -ENODEV;
10378 
10379 	if (version_printed++ == 0)
10380 		pr_info("%s", version);
10381 
10382 	max_irqs = bnxt_get_max_irq(pdev);
10383 	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10384 	if (!dev)
10385 		return -ENOMEM;
10386 
10387 	bp = netdev_priv(dev);
10388 	bnxt_set_max_func_irqs(bp, max_irqs);
10389 
10390 	if (bnxt_vf_pciid(ent->driver_data))
10391 		bp->flags |= BNXT_FLAG_VF;
10392 
10393 	if (pdev->msix_cap)
10394 		bp->flags |= BNXT_FLAG_MSIX_CAP;
10395 
10396 	rc = bnxt_init_board(pdev, dev);
10397 	if (rc < 0)
10398 		goto init_err_free;
10399 
10400 	dev->netdev_ops = &bnxt_netdev_ops;
10401 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10402 	dev->ethtool_ops = &bnxt_ethtool_ops;
10403 	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
10404 	pci_set_drvdata(pdev, dev);
10405 
10406 	rc = bnxt_alloc_hwrm_resources(bp);
10407 	if (rc)
10408 		goto init_err_pci_clean;
10409 
10410 	mutex_init(&bp->hwrm_cmd_lock);
10411 	rc = bnxt_hwrm_ver_get(bp);
10412 	if (rc)
10413 		goto init_err_pci_clean;
10414 
10415 	if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10416 		rc = bnxt_alloc_kong_hwrm_resources(bp);
10417 		if (rc)
10418 			bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10419 	}
10420 
10421 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10422 	    bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10423 		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10424 		if (rc)
10425 			goto init_err_pci_clean;
10426 	}
10427 
10428 	if (BNXT_CHIP_P5(bp))
10429 		bp->flags |= BNXT_FLAG_CHIP_P5;
10430 
10431 	rc = bnxt_hwrm_func_reset(bp);
10432 	if (rc)
10433 		goto init_err_pci_clean;
10434 
10435 	bnxt_hwrm_fw_set_time(bp);
10436 
10437 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10438 			   NETIF_F_TSO | NETIF_F_TSO6 |
10439 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10440 			   NETIF_F_GSO_IPXIP4 |
10441 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10442 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
10443 			   NETIF_F_RXCSUM | NETIF_F_GRO;
10444 
10445 	if (BNXT_SUPPORTS_TPA(bp))
10446 		dev->hw_features |= NETIF_F_LRO;
10447 
10448 	dev->hw_enc_features =
10449 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10450 			NETIF_F_TSO | NETIF_F_TSO6 |
10451 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10452 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10453 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
10454 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10455 				    NETIF_F_GSO_GRE_CSUM;
10456 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10457 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10458 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
10459 	if (BNXT_SUPPORTS_TPA(bp))
10460 		dev->hw_features |= NETIF_F_GRO_HW;
10461 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
10462 	if (dev->features & NETIF_F_GRO_HW)
10463 		dev->features &= ~NETIF_F_LRO;
10464 	dev->priv_flags |= IFF_UNICAST_FLT;
10465 
10466 #ifdef CONFIG_BNXT_SRIOV
10467 	init_waitqueue_head(&bp->sriov_cfg_wait);
10468 	mutex_init(&bp->sriov_lock);
10469 #endif
10470 	if (BNXT_SUPPORTS_TPA(bp)) {
10471 		bp->gro_func = bnxt_gro_func_5730x;
10472 		if (BNXT_CHIP_P4(bp))
10473 			bp->gro_func = bnxt_gro_func_5731x;
10474 	}
10475 	if (!BNXT_CHIP_P4_PLUS(bp))
10476 		bp->flags |= BNXT_FLAG_DOUBLE_DB;
10477 
10478 	rc = bnxt_hwrm_func_drv_rgtr(bp);
10479 	if (rc)
10480 		goto init_err_pci_clean;
10481 
10482 	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10483 	if (rc)
10484 		goto init_err_pci_clean;
10485 
10486 	bp->ulp_probe = bnxt_ulp_probe;
10487 
10488 	rc = bnxt_hwrm_queue_qportcfg(bp);
10489 	if (rc) {
10490 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10491 			   rc);
10492 		rc = -1;
10493 		goto init_err_pci_clean;
10494 	}
10495 	/* Get the MAX capabilities for this function */
10496 	rc = bnxt_hwrm_func_qcaps(bp);
10497 	if (rc) {
10498 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10499 			   rc);
10500 		rc = -1;
10501 		goto init_err_pci_clean;
10502 	}
10503 	rc = bnxt_init_mac_addr(bp);
10504 	if (rc) {
10505 		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10506 		rc = -EADDRNOTAVAIL;
10507 		goto init_err_pci_clean;
10508 	}
10509 
10510 	bnxt_hwrm_func_qcfg(bp);
10511 	bnxt_hwrm_vnic_qcaps(bp);
10512 	bnxt_hwrm_port_led_qcaps(bp);
10513 	bnxt_ethtool_init(bp);
10514 	bnxt_dcb_init(bp);
10515 
10516 	/* MTU range: 60 - FW defined max */
10517 	dev->min_mtu = ETH_ZLEN;
10518 	dev->max_mtu = bp->max_mtu;
10519 
10520 	rc = bnxt_probe_phy(bp);
10521 	if (rc)
10522 		goto init_err_pci_clean;
10523 
10524 	bnxt_set_rx_skb_mode(bp, false);
10525 	bnxt_set_tpa_flags(bp);
10526 	bnxt_set_ring_params(bp);
10527 	rc = bnxt_set_dflt_rings(bp, true);
10528 	if (rc) {
10529 		netdev_err(bp->dev, "Not enough rings available.\n");
10530 		rc = -ENOMEM;
10531 		goto init_err_pci_clean;
10532 	}
10533 
10534 	/* Default RSS hash cfg. */
10535 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10536 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10537 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10538 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10539 	if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10540 		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10541 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10542 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10543 	}
10544 
10545 	if (bnxt_rfs_supported(bp)) {
10546 		dev->hw_features |= NETIF_F_NTUPLE;
10547 		if (bnxt_rfs_capable(bp)) {
10548 			bp->flags |= BNXT_FLAG_RFS;
10549 			dev->features |= NETIF_F_NTUPLE;
10550 		}
10551 	}
10552 
10553 	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10554 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
10555 
10556 	rc = bnxt_init_int_mode(bp);
10557 	if (rc)
10558 		goto init_err_pci_clean;
10559 
10560 	/* No TC has been set yet and rings may have been trimmed due to
10561 	 * limited MSIX, so we re-initialize the TX rings per TC.
10562 	 */
10563 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10564 
10565 	bnxt_get_wol_settings(bp);
10566 	if (bp->flags & BNXT_FLAG_WOL_CAP)
10567 		device_set_wakeup_enable(&pdev->dev, bp->wol);
10568 	else
10569 		device_set_wakeup_capable(&pdev->dev, false);
10570 
10571 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10572 
10573 	bnxt_hwrm_coal_params_qcaps(bp);
10574 
10575 	if (BNXT_PF(bp)) {
10576 		if (!bnxt_pf_wq) {
10577 			bnxt_pf_wq =
10578 				create_singlethread_workqueue("bnxt_pf_wq");
10579 			if (!bnxt_pf_wq) {
10580 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
10581 				goto init_err_pci_clean;
10582 			}
10583 		}
10584 		bnxt_init_tc(bp);
10585 	}
10586 
10587 	rc = register_netdev(dev);
10588 	if (rc)
10589 		goto init_err_cleanup_tc;
10590 
10591 	if (BNXT_PF(bp))
10592 		bnxt_dl_register(bp);
10593 
10594 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10595 		    board_info[ent->driver_data].name,
10596 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
10597 	pcie_print_link_status(pdev);
10598 
10599 	return 0;
10600 
10601 init_err_cleanup_tc:
10602 	bnxt_shutdown_tc(bp);
10603 	bnxt_clear_int_mode(bp);
10604 
10605 init_err_pci_clean:
10606 	bnxt_free_hwrm_resources(bp);
10607 	bnxt_free_ctx_mem(bp);
10608 	kfree(bp->ctx);
10609 	bp->ctx = NULL;
10610 	bnxt_cleanup_pci(bp);
10611 
10612 init_err_free:
10613 	free_netdev(dev);
10614 	return rc;
10615 }
10616 
10617 static void bnxt_shutdown(struct pci_dev *pdev)
10618 {
10619 	struct net_device *dev = pci_get_drvdata(pdev);
10620 	struct bnxt *bp;
10621 
10622 	if (!dev)
10623 		return;
10624 
10625 	rtnl_lock();
10626 	bp = netdev_priv(dev);
10627 	if (!bp)
10628 		goto shutdown_exit;
10629 
10630 	if (netif_running(dev))
10631 		dev_close(dev);
10632 
10633 	bnxt_ulp_shutdown(bp);
10634 
10635 	if (system_state == SYSTEM_POWER_OFF) {
10636 		bnxt_clear_int_mode(bp);
10637 		pci_wake_from_d3(pdev, bp->wol);
10638 		pci_set_power_state(pdev, PCI_D3hot);
10639 	}
10640 
10641 shutdown_exit:
10642 	rtnl_unlock();
10643 }
10644 
10645 #ifdef CONFIG_PM_SLEEP
10646 static int bnxt_suspend(struct device *device)
10647 {
10648 	struct pci_dev *pdev = to_pci_dev(device);
10649 	struct net_device *dev = pci_get_drvdata(pdev);
10650 	struct bnxt *bp = netdev_priv(dev);
10651 	int rc = 0;
10652 
10653 	rtnl_lock();
10654 	if (netif_running(dev)) {
10655 		netif_device_detach(dev);
10656 		rc = bnxt_close(dev);
10657 	}
10658 	bnxt_hwrm_func_drv_unrgtr(bp);
10659 	rtnl_unlock();
10660 	return rc;
10661 }
10662 
10663 static int bnxt_resume(struct device *device)
10664 {
10665 	struct pci_dev *pdev = to_pci_dev(device);
10666 	struct net_device *dev = pci_get_drvdata(pdev);
10667 	struct bnxt *bp = netdev_priv(dev);
10668 	int rc = 0;
10669 
10670 	rtnl_lock();
10671 	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10672 		rc = -ENODEV;
10673 		goto resume_exit;
10674 	}
10675 	rc = bnxt_hwrm_func_reset(bp);
10676 	if (rc) {
10677 		rc = -EBUSY;
10678 		goto resume_exit;
10679 	}
10680 	bnxt_get_wol_settings(bp);
10681 	if (netif_running(dev)) {
10682 		rc = bnxt_open(dev);
10683 		if (!rc)
10684 			netif_device_attach(dev);
10685 	}
10686 
10687 resume_exit:
10688 	rtnl_unlock();
10689 	return rc;
10690 }
10691 
10692 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10693 #define BNXT_PM_OPS (&bnxt_pm_ops)
10694 
10695 #else
10696 
10697 #define BNXT_PM_OPS NULL
10698 
10699 #endif /* CONFIG_PM_SLEEP */
10700 
10701 /**
10702  * bnxt_io_error_detected - called when PCI error is detected
10703  * @pdev: Pointer to PCI device
10704  * @state: The current pci connection state
10705  *
10706  * This function is called after a PCI bus error affecting
10707  * this device has been detected.
10708  */
10709 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10710 					       pci_channel_state_t state)
10711 {
10712 	struct net_device *netdev = pci_get_drvdata(pdev);
10713 	struct bnxt *bp = netdev_priv(netdev);
10714 
10715 	netdev_info(netdev, "PCI I/O error detected\n");
10716 
10717 	rtnl_lock();
10718 	netif_device_detach(netdev);
10719 
10720 	bnxt_ulp_stop(bp);
10721 
10722 	if (state == pci_channel_io_perm_failure) {
10723 		rtnl_unlock();
10724 		return PCI_ERS_RESULT_DISCONNECT;
10725 	}
10726 
10727 	if (netif_running(netdev))
10728 		bnxt_close(netdev);
10729 
10730 	pci_disable_device(pdev);
10731 	rtnl_unlock();
10732 
10733 	/* Request a slot reset. */
10734 	return PCI_ERS_RESULT_NEED_RESET;
10735 }
10736 
10737 /**
10738  * bnxt_io_slot_reset - called after the pci bus has been reset.
10739  * @pdev: Pointer to PCI device
10740  *
10741  * Restart the card from scratch, as if from a cold-boot.
10742  * At this point, the card has experienced a hard reset,
10743  * followed by fixups by BIOS, and has its config space
10744  * set up identically to what it was at cold boot.
10745  */
10746 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10747 {
10748 	struct net_device *netdev = pci_get_drvdata(pdev);
10749 	struct bnxt *bp = netdev_priv(netdev);
10750 	int err = 0;
10751 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10752 
10753 	netdev_info(bp->dev, "PCI Slot Reset\n");
10754 
10755 	rtnl_lock();
10756 
10757 	if (pci_enable_device(pdev)) {
10758 		dev_err(&pdev->dev,
10759 			"Cannot re-enable PCI device after reset.\n");
10760 	} else {
10761 		pci_set_master(pdev);
10762 
10763 		err = bnxt_hwrm_func_reset(bp);
10764 		if (!err && netif_running(netdev))
10765 			err = bnxt_open(netdev);
10766 
10767 		if (!err) {
10768 			result = PCI_ERS_RESULT_RECOVERED;
10769 			bnxt_ulp_start(bp);
10770 		}
10771 	}
10772 
10773 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10774 		dev_close(netdev);
10775 
10776 	rtnl_unlock();
10777 
10778 	return result;
10779 }
10780 
10781 /**
10782  * bnxt_io_resume - called when traffic can start flowing again.
10783  * @pdev: Pointer to PCI device
10784  *
10785  * This callback is called when the error recovery driver tells
10786  * us that it's OK to resume normal operation.
10787  */
10788 static void bnxt_io_resume(struct pci_dev *pdev)
10789 {
10790 	struct net_device *netdev = pci_get_drvdata(pdev);
10791 
10792 	rtnl_lock();
10793 
10794 	netif_device_attach(netdev);
10795 
10796 	rtnl_unlock();
10797 }
10798 
10799 static const struct pci_error_handlers bnxt_err_handler = {
10800 	.error_detected	= bnxt_io_error_detected,
10801 	.slot_reset	= bnxt_io_slot_reset,
10802 	.resume		= bnxt_io_resume
10803 };
10804 
10805 static struct pci_driver bnxt_pci_driver = {
10806 	.name		= DRV_MODULE_NAME,
10807 	.id_table	= bnxt_pci_tbl,
10808 	.probe		= bnxt_init_one,
10809 	.remove		= bnxt_remove_one,
10810 	.shutdown	= bnxt_shutdown,
10811 	.driver.pm	= BNXT_PM_OPS,
10812 	.err_handler	= &bnxt_err_handler,
10813 #if defined(CONFIG_BNXT_SRIOV)
10814 	.sriov_configure = bnxt_sriov_configure,
10815 #endif
10816 };
10817 
10818 static int __init bnxt_init(void)
10819 {
10820 	bnxt_debug_init();
10821 	return pci_register_driver(&bnxt_pci_driver);
10822 }
10823 
10824 static void __exit bnxt_exit(void)
10825 {
10826 	pci_unregister_driver(&bnxt_pci_driver);
10827 	if (bnxt_pf_wq)
10828 		destroy_workqueue(bnxt_pf_wq);
10829 	bnxt_debug_exit();
10830 }
10831 
10832 module_init(bnxt_init);
10833 module_exit(bnxt_exit);
10834