1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  */
9 
10 #include <linux/module.h>
11 
12 #include <linux/stringify.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/errno.h>
16 #include <linux/ioport.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/bitops.h>
26 #include <linux/io.h>
27 #include <linux/irq.h>
28 #include <linux/delay.h>
29 #include <asm/byteorder.h>
30 #include <asm/page.h>
31 #include <linux/time.h>
32 #include <linux/mii.h>
33 #include <linux/if.h>
34 #include <linux/if_vlan.h>
35 #include <net/ip.h>
36 #include <net/tcp.h>
37 #include <net/udp.h>
38 #include <net/checksum.h>
39 #include <net/ip6_checksum.h>
40 #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
41 #include <net/vxlan.h>
42 #endif
43 #ifdef CONFIG_NET_RX_BUSY_POLL
44 #include <net/busy_poll.h>
45 #endif
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 
54 #include "bnxt_hsi.h"
55 #include "bnxt.h"
56 #include "bnxt_sriov.h"
57 #include "bnxt_ethtool.h"
58 
59 #define BNXT_TX_TIMEOUT		(5 * HZ)
60 
61 static const char version[] =
62 	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
63 
64 MODULE_LICENSE("GPL");
65 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
66 MODULE_VERSION(DRV_MODULE_VERSION);
67 
68 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
69 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70 #define BNXT_RX_COPY_THRESH 256
71 
72 #define BNXT_TX_PUSH_THRESH 164
73 
74 enum board_idx {
75 	BCM57301,
76 	BCM57302,
77 	BCM57304,
78 	BCM57402,
79 	BCM57404,
80 	BCM57406,
81 	BCM57314,
82 	BCM57304_VF,
83 	BCM57404_VF,
84 };
85 
86 /* indexed by enum above */
87 static const struct {
88 	char *name;
89 } board_info[] = {
90 	{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
91 	{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
92 	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
93 	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
94 	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
95 	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
96 	{ "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
97 	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
98 	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
99 };
100 
101 static const struct pci_device_id bnxt_pci_tbl[] = {
102 	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
103 	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
104 	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
105 	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
106 	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
107 	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
108 	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
109 #ifdef CONFIG_BNXT_SRIOV
110 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
111 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
112 #endif
113 	{ 0 }
114 };
115 
116 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
117 
118 static const u16 bnxt_vf_req_snif[] = {
119 	HWRM_FUNC_CFG,
120 	HWRM_PORT_PHY_QCFG,
121 	HWRM_CFA_L2_FILTER_ALLOC,
122 };
123 
124 static const u16 bnxt_async_events_arr[] = {
125 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
126 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
127 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
128 	HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
129 };
130 
131 static bool bnxt_vf_pciid(enum board_idx idx)
132 {
133 	return (idx == BCM57304_VF || idx == BCM57404_VF);
134 }
135 
136 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
137 #define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
138 #define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
139 
140 #define BNXT_CP_DB_REARM(db, raw_cons)					\
141 		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
142 
143 #define BNXT_CP_DB(db, raw_cons)					\
144 		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
145 
146 #define BNXT_CP_DB_IRQ_DIS(db)						\
147 		writel(DB_CP_IRQ_DIS_FLAGS, db)
148 
149 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
150 {
151 	/* Tell compiler to fetch tx indices from memory. */
152 	barrier();
153 
154 	return bp->tx_ring_size -
155 		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
156 }
157 
158 static const u16 bnxt_lhint_arr[] = {
159 	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
160 	TX_BD_FLAGS_LHINT_512_TO_1023,
161 	TX_BD_FLAGS_LHINT_1024_TO_2047,
162 	TX_BD_FLAGS_LHINT_1024_TO_2047,
163 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
164 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
165 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
166 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
167 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
168 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
169 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
170 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
171 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
172 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
173 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
174 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
175 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
176 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
177 	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
178 };
179 
180 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
181 {
182 	struct bnxt *bp = netdev_priv(dev);
183 	struct tx_bd *txbd;
184 	struct tx_bd_ext *txbd1;
185 	struct netdev_queue *txq;
186 	int i;
187 	dma_addr_t mapping;
188 	unsigned int length, pad = 0;
189 	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
190 	u16 prod, last_frag;
191 	struct pci_dev *pdev = bp->pdev;
192 	struct bnxt_tx_ring_info *txr;
193 	struct bnxt_sw_tx_bd *tx_buf;
194 
195 	i = skb_get_queue_mapping(skb);
196 	if (unlikely(i >= bp->tx_nr_rings)) {
197 		dev_kfree_skb_any(skb);
198 		return NETDEV_TX_OK;
199 	}
200 
201 	txr = &bp->tx_ring[i];
202 	txq = netdev_get_tx_queue(dev, i);
203 	prod = txr->tx_prod;
204 
205 	free_size = bnxt_tx_avail(bp, txr);
206 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
207 		netif_tx_stop_queue(txq);
208 		return NETDEV_TX_BUSY;
209 	}
210 
211 	length = skb->len;
212 	len = skb_headlen(skb);
213 	last_frag = skb_shinfo(skb)->nr_frags;
214 
215 	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
216 
217 	txbd->tx_bd_opaque = prod;
218 
219 	tx_buf = &txr->tx_buf_ring[prod];
220 	tx_buf->skb = skb;
221 	tx_buf->nr_frags = last_frag;
222 
223 	vlan_tag_flags = 0;
224 	cfa_action = 0;
225 	if (skb_vlan_tag_present(skb)) {
226 		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
227 				 skb_vlan_tag_get(skb);
228 		/* Currently supports 8021Q, 8021AD vlan offloads
229 		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
230 		 */
231 		if (skb->vlan_proto == htons(ETH_P_8021Q))
232 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
233 	}
234 
235 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
236 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
237 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
238 		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
239 		void *pdata = tx_push_buf->data;
240 		u64 *end;
241 		int j, push_len;
242 
243 		/* Set COAL_NOW to be ready quickly for the next push */
244 		tx_push->tx_bd_len_flags_type =
245 			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
246 					TX_BD_TYPE_LONG_TX_BD |
247 					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
248 					TX_BD_FLAGS_COAL_NOW |
249 					TX_BD_FLAGS_PACKET_END |
250 					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
251 
252 		if (skb->ip_summed == CHECKSUM_PARTIAL)
253 			tx_push1->tx_bd_hsize_lflags =
254 					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
255 		else
256 			tx_push1->tx_bd_hsize_lflags = 0;
257 
258 		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
259 		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
260 
261 		end = pdata + length;
262 		end = PTR_ALIGN(end, 8) - 1;
263 		*end = 0;
264 
265 		skb_copy_from_linear_data(skb, pdata, len);
266 		pdata += len;
267 		for (j = 0; j < last_frag; j++) {
268 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
269 			void *fptr;
270 
271 			fptr = skb_frag_address_safe(frag);
272 			if (!fptr)
273 				goto normal_tx;
274 
275 			memcpy(pdata, fptr, skb_frag_size(frag));
276 			pdata += skb_frag_size(frag);
277 		}
278 
279 		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
280 		txbd->tx_bd_haddr = txr->data_mapping;
281 		prod = NEXT_TX(prod);
282 		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
283 		memcpy(txbd, tx_push1, sizeof(*txbd));
284 		prod = NEXT_TX(prod);
285 		tx_push->doorbell =
286 			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
287 		txr->tx_prod = prod;
288 
289 		netdev_tx_sent_queue(txq, skb->len);
290 
291 		push_len = (length + sizeof(*tx_push) + 7) / 8;
292 		if (push_len > 16) {
293 			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
294 			__iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
295 					 push_len - 16);
296 		} else {
297 			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
298 					 push_len);
299 		}
300 
301 		tx_buf->is_push = 1;
302 		goto tx_done;
303 	}
304 
305 normal_tx:
306 	if (length < BNXT_MIN_PKT_SIZE) {
307 		pad = BNXT_MIN_PKT_SIZE - length;
308 		if (skb_pad(skb, pad)) {
309 			/* SKB already freed. */
310 			tx_buf->skb = NULL;
311 			return NETDEV_TX_OK;
312 		}
313 		length = BNXT_MIN_PKT_SIZE;
314 	}
315 
316 	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
317 
318 	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
319 		dev_kfree_skb_any(skb);
320 		tx_buf->skb = NULL;
321 		return NETDEV_TX_OK;
322 	}
323 
324 	dma_unmap_addr_set(tx_buf, mapping, mapping);
325 	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
326 		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
327 
328 	txbd->tx_bd_haddr = cpu_to_le64(mapping);
329 
330 	prod = NEXT_TX(prod);
331 	txbd1 = (struct tx_bd_ext *)
332 		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
333 
334 	txbd1->tx_bd_hsize_lflags = 0;
335 	if (skb_is_gso(skb)) {
336 		u32 hdr_len;
337 
338 		if (skb->encapsulation)
339 			hdr_len = skb_inner_network_offset(skb) +
340 				skb_inner_network_header_len(skb) +
341 				inner_tcp_hdrlen(skb);
342 		else
343 			hdr_len = skb_transport_offset(skb) +
344 				tcp_hdrlen(skb);
345 
346 		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
347 					TX_BD_FLAGS_T_IPID |
348 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
349 		length = skb_shinfo(skb)->gso_size;
350 		txbd1->tx_bd_mss = cpu_to_le32(length);
351 		length += hdr_len;
352 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
353 		txbd1->tx_bd_hsize_lflags =
354 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
355 		txbd1->tx_bd_mss = 0;
356 	}
357 
358 	length >>= 9;
359 	flags |= bnxt_lhint_arr[length];
360 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
361 
362 	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
363 	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
364 	for (i = 0; i < last_frag; i++) {
365 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
366 
367 		prod = NEXT_TX(prod);
368 		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
369 
370 		len = skb_frag_size(frag);
371 		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
372 					   DMA_TO_DEVICE);
373 
374 		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
375 			goto tx_dma_error;
376 
377 		tx_buf = &txr->tx_buf_ring[prod];
378 		dma_unmap_addr_set(tx_buf, mapping, mapping);
379 
380 		txbd->tx_bd_haddr = cpu_to_le64(mapping);
381 
382 		flags = len << TX_BD_LEN_SHIFT;
383 		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
384 	}
385 
386 	flags &= ~TX_BD_LEN;
387 	txbd->tx_bd_len_flags_type =
388 		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
389 			    TX_BD_FLAGS_PACKET_END);
390 
391 	netdev_tx_sent_queue(txq, skb->len);
392 
393 	/* Sync BD data before updating doorbell */
394 	wmb();
395 
396 	prod = NEXT_TX(prod);
397 	txr->tx_prod = prod;
398 
399 	writel(DB_KEY_TX | prod, txr->tx_doorbell);
400 	writel(DB_KEY_TX | prod, txr->tx_doorbell);
401 
402 tx_done:
403 
404 	mmiowb();
405 
406 	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
407 		netif_tx_stop_queue(txq);
408 
409 		/* netif_tx_stop_queue() must be done before checking
410 		 * tx index in bnxt_tx_avail() below, because in
411 		 * bnxt_tx_int(), we update tx index before checking for
412 		 * netif_tx_queue_stopped().
413 		 */
414 		smp_mb();
415 		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
416 			netif_tx_wake_queue(txq);
417 	}
418 	return NETDEV_TX_OK;
419 
420 tx_dma_error:
421 	last_frag = i;
422 
423 	/* start back at beginning and unmap skb */
424 	prod = txr->tx_prod;
425 	tx_buf = &txr->tx_buf_ring[prod];
426 	tx_buf->skb = NULL;
427 	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
428 			 skb_headlen(skb), PCI_DMA_TODEVICE);
429 	prod = NEXT_TX(prod);
430 
431 	/* unmap remaining mapped pages */
432 	for (i = 0; i < last_frag; i++) {
433 		prod = NEXT_TX(prod);
434 		tx_buf = &txr->tx_buf_ring[prod];
435 		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
436 			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
437 			       PCI_DMA_TODEVICE);
438 	}
439 
440 	dev_kfree_skb_any(skb);
441 	return NETDEV_TX_OK;
442 }
443 
444 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
445 {
446 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
447 	int index = txr - &bp->tx_ring[0];
448 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
449 	u16 cons = txr->tx_cons;
450 	struct pci_dev *pdev = bp->pdev;
451 	int i;
452 	unsigned int tx_bytes = 0;
453 
454 	for (i = 0; i < nr_pkts; i++) {
455 		struct bnxt_sw_tx_bd *tx_buf;
456 		struct sk_buff *skb;
457 		int j, last;
458 
459 		tx_buf = &txr->tx_buf_ring[cons];
460 		cons = NEXT_TX(cons);
461 		skb = tx_buf->skb;
462 		tx_buf->skb = NULL;
463 
464 		if (tx_buf->is_push) {
465 			tx_buf->is_push = 0;
466 			goto next_tx_int;
467 		}
468 
469 		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
470 				 skb_headlen(skb), PCI_DMA_TODEVICE);
471 		last = tx_buf->nr_frags;
472 
473 		for (j = 0; j < last; j++) {
474 			cons = NEXT_TX(cons);
475 			tx_buf = &txr->tx_buf_ring[cons];
476 			dma_unmap_page(
477 				&pdev->dev,
478 				dma_unmap_addr(tx_buf, mapping),
479 				skb_frag_size(&skb_shinfo(skb)->frags[j]),
480 				PCI_DMA_TODEVICE);
481 		}
482 
483 next_tx_int:
484 		cons = NEXT_TX(cons);
485 
486 		tx_bytes += skb->len;
487 		dev_kfree_skb_any(skb);
488 	}
489 
490 	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
491 	txr->tx_cons = cons;
492 
493 	/* Need to make the tx_cons update visible to bnxt_start_xmit()
494 	 * before checking for netif_tx_queue_stopped().  Without the
495 	 * memory barrier, there is a small possibility that bnxt_start_xmit()
496 	 * will miss it and cause the queue to be stopped forever.
497 	 */
498 	smp_mb();
499 
500 	if (unlikely(netif_tx_queue_stopped(txq)) &&
501 	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
502 		__netif_tx_lock(txq, smp_processor_id());
503 		if (netif_tx_queue_stopped(txq) &&
504 		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
505 		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
506 			netif_tx_wake_queue(txq);
507 		__netif_tx_unlock(txq);
508 	}
509 }
510 
511 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
512 				       gfp_t gfp)
513 {
514 	u8 *data;
515 	struct pci_dev *pdev = bp->pdev;
516 
517 	data = kmalloc(bp->rx_buf_size, gfp);
518 	if (!data)
519 		return NULL;
520 
521 	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
522 				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
523 
524 	if (dma_mapping_error(&pdev->dev, *mapping)) {
525 		kfree(data);
526 		data = NULL;
527 	}
528 	return data;
529 }
530 
531 static inline int bnxt_alloc_rx_data(struct bnxt *bp,
532 				     struct bnxt_rx_ring_info *rxr,
533 				     u16 prod, gfp_t gfp)
534 {
535 	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
536 	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
537 	u8 *data;
538 	dma_addr_t mapping;
539 
540 	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
541 	if (!data)
542 		return -ENOMEM;
543 
544 	rx_buf->data = data;
545 	dma_unmap_addr_set(rx_buf, mapping, mapping);
546 
547 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
548 
549 	return 0;
550 }
551 
552 static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
553 			       u8 *data)
554 {
555 	u16 prod = rxr->rx_prod;
556 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
557 	struct rx_bd *cons_bd, *prod_bd;
558 
559 	prod_rx_buf = &rxr->rx_buf_ring[prod];
560 	cons_rx_buf = &rxr->rx_buf_ring[cons];
561 
562 	prod_rx_buf->data = data;
563 
564 	dma_unmap_addr_set(prod_rx_buf, mapping,
565 			   dma_unmap_addr(cons_rx_buf, mapping));
566 
567 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
568 	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
569 
570 	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
571 }
572 
573 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
574 {
575 	u16 next, max = rxr->rx_agg_bmap_size;
576 
577 	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
578 	if (next >= max)
579 		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
580 	return next;
581 }
582 
583 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
584 				     struct bnxt_rx_ring_info *rxr,
585 				     u16 prod, gfp_t gfp)
586 {
587 	struct rx_bd *rxbd =
588 		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
589 	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
590 	struct pci_dev *pdev = bp->pdev;
591 	struct page *page;
592 	dma_addr_t mapping;
593 	u16 sw_prod = rxr->rx_sw_agg_prod;
594 	unsigned int offset = 0;
595 
596 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
597 		page = rxr->rx_page;
598 		if (!page) {
599 			page = alloc_page(gfp);
600 			if (!page)
601 				return -ENOMEM;
602 			rxr->rx_page = page;
603 			rxr->rx_page_offset = 0;
604 		}
605 		offset = rxr->rx_page_offset;
606 		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
607 		if (rxr->rx_page_offset == PAGE_SIZE)
608 			rxr->rx_page = NULL;
609 		else
610 			get_page(page);
611 	} else {
612 		page = alloc_page(gfp);
613 		if (!page)
614 			return -ENOMEM;
615 	}
616 
617 	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
618 			       PCI_DMA_FROMDEVICE);
619 	if (dma_mapping_error(&pdev->dev, mapping)) {
620 		__free_page(page);
621 		return -EIO;
622 	}
623 
624 	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
625 		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
626 
627 	__set_bit(sw_prod, rxr->rx_agg_bmap);
628 	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
629 	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
630 
631 	rx_agg_buf->page = page;
632 	rx_agg_buf->offset = offset;
633 	rx_agg_buf->mapping = mapping;
634 	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
635 	rxbd->rx_bd_opaque = sw_prod;
636 	return 0;
637 }
638 
639 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
640 				   u32 agg_bufs)
641 {
642 	struct bnxt *bp = bnapi->bp;
643 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
644 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
645 	u16 prod = rxr->rx_agg_prod;
646 	u16 sw_prod = rxr->rx_sw_agg_prod;
647 	u32 i;
648 
649 	for (i = 0; i < agg_bufs; i++) {
650 		u16 cons;
651 		struct rx_agg_cmp *agg;
652 		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
653 		struct rx_bd *prod_bd;
654 		struct page *page;
655 
656 		agg = (struct rx_agg_cmp *)
657 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
658 		cons = agg->rx_agg_cmp_opaque;
659 		__clear_bit(cons, rxr->rx_agg_bmap);
660 
661 		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
662 			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
663 
664 		__set_bit(sw_prod, rxr->rx_agg_bmap);
665 		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
666 		cons_rx_buf = &rxr->rx_agg_ring[cons];
667 
668 		/* It is possible for sw_prod to be equal to cons, so
669 		 * set cons_rx_buf->page to NULL first.
670 		 */
671 		page = cons_rx_buf->page;
672 		cons_rx_buf->page = NULL;
673 		prod_rx_buf->page = page;
674 		prod_rx_buf->offset = cons_rx_buf->offset;
675 
676 		prod_rx_buf->mapping = cons_rx_buf->mapping;
677 
678 		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
679 
680 		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
681 		prod_bd->rx_bd_opaque = sw_prod;
682 
683 		prod = NEXT_RX_AGG(prod);
684 		sw_prod = NEXT_RX_AGG(sw_prod);
685 		cp_cons = NEXT_CMP(cp_cons);
686 	}
687 	rxr->rx_agg_prod = prod;
688 	rxr->rx_sw_agg_prod = sw_prod;
689 }
690 
691 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
692 				   struct bnxt_rx_ring_info *rxr, u16 cons,
693 				   u16 prod, u8 *data, dma_addr_t dma_addr,
694 				   unsigned int len)
695 {
696 	int err;
697 	struct sk_buff *skb;
698 
699 	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
700 	if (unlikely(err)) {
701 		bnxt_reuse_rx_data(rxr, cons, data);
702 		return NULL;
703 	}
704 
705 	skb = build_skb(data, 0);
706 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
707 			 PCI_DMA_FROMDEVICE);
708 	if (!skb) {
709 		kfree(data);
710 		return NULL;
711 	}
712 
713 	skb_reserve(skb, BNXT_RX_OFFSET);
714 	skb_put(skb, len);
715 	return skb;
716 }
717 
718 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
719 				     struct sk_buff *skb, u16 cp_cons,
720 				     u32 agg_bufs)
721 {
722 	struct pci_dev *pdev = bp->pdev;
723 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
724 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
725 	u16 prod = rxr->rx_agg_prod;
726 	u32 i;
727 
728 	for (i = 0; i < agg_bufs; i++) {
729 		u16 cons, frag_len;
730 		struct rx_agg_cmp *agg;
731 		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
732 		struct page *page;
733 		dma_addr_t mapping;
734 
735 		agg = (struct rx_agg_cmp *)
736 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
737 		cons = agg->rx_agg_cmp_opaque;
738 		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
739 			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
740 
741 		cons_rx_buf = &rxr->rx_agg_ring[cons];
742 		skb_fill_page_desc(skb, i, cons_rx_buf->page,
743 				   cons_rx_buf->offset, frag_len);
744 		__clear_bit(cons, rxr->rx_agg_bmap);
745 
746 		/* It is possible for bnxt_alloc_rx_page() to allocate
747 		 * a sw_prod index that equals the cons index, so we
748 		 * need to clear the cons entry now.
749 		 */
750 		mapping = dma_unmap_addr(cons_rx_buf, mapping);
751 		page = cons_rx_buf->page;
752 		cons_rx_buf->page = NULL;
753 
754 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
755 			struct skb_shared_info *shinfo;
756 			unsigned int nr_frags;
757 
758 			shinfo = skb_shinfo(skb);
759 			nr_frags = --shinfo->nr_frags;
760 			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
761 
762 			dev_kfree_skb(skb);
763 
764 			cons_rx_buf->page = page;
765 
766 			/* Update prod since possibly some pages have been
767 			 * allocated already.
768 			 */
769 			rxr->rx_agg_prod = prod;
770 			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
771 			return NULL;
772 		}
773 
774 		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
775 			       PCI_DMA_FROMDEVICE);
776 
777 		skb->data_len += frag_len;
778 		skb->len += frag_len;
779 		skb->truesize += PAGE_SIZE;
780 
781 		prod = NEXT_RX_AGG(prod);
782 		cp_cons = NEXT_CMP(cp_cons);
783 	}
784 	rxr->rx_agg_prod = prod;
785 	return skb;
786 }
787 
788 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
789 			       u8 agg_bufs, u32 *raw_cons)
790 {
791 	u16 last;
792 	struct rx_agg_cmp *agg;
793 
794 	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
795 	last = RING_CMP(*raw_cons);
796 	agg = (struct rx_agg_cmp *)
797 		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
798 	return RX_AGG_CMP_VALID(agg, *raw_cons);
799 }
800 
801 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
802 					    unsigned int len,
803 					    dma_addr_t mapping)
804 {
805 	struct bnxt *bp = bnapi->bp;
806 	struct pci_dev *pdev = bp->pdev;
807 	struct sk_buff *skb;
808 
809 	skb = napi_alloc_skb(&bnapi->napi, len);
810 	if (!skb)
811 		return NULL;
812 
813 	dma_sync_single_for_cpu(&pdev->dev, mapping,
814 				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
815 
816 	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
817 
818 	dma_sync_single_for_device(&pdev->dev, mapping,
819 				   bp->rx_copy_thresh,
820 				   PCI_DMA_FROMDEVICE);
821 
822 	skb_put(skb, len);
823 	return skb;
824 }
825 
826 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
827 			   u32 *raw_cons, void *cmp)
828 {
829 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
830 	struct rx_cmp *rxcmp = cmp;
831 	u32 tmp_raw_cons = *raw_cons;
832 	u8 cmp_type, agg_bufs = 0;
833 
834 	cmp_type = RX_CMP_TYPE(rxcmp);
835 
836 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
837 		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
838 			    RX_CMP_AGG_BUFS) >>
839 			   RX_CMP_AGG_BUFS_SHIFT;
840 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
841 		struct rx_tpa_end_cmp *tpa_end = cmp;
842 
843 		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
844 			    RX_TPA_END_CMP_AGG_BUFS) >>
845 			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
846 	}
847 
848 	if (agg_bufs) {
849 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
850 			return -EBUSY;
851 	}
852 	*raw_cons = tmp_raw_cons;
853 	return 0;
854 }
855 
856 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
857 {
858 	if (!rxr->bnapi->in_reset) {
859 		rxr->bnapi->in_reset = true;
860 		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
861 		schedule_work(&bp->sp_task);
862 	}
863 	rxr->rx_next_cons = 0xffff;
864 }
865 
866 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
867 			   struct rx_tpa_start_cmp *tpa_start,
868 			   struct rx_tpa_start_cmp_ext *tpa_start1)
869 {
870 	u8 agg_id = TPA_START_AGG_ID(tpa_start);
871 	u16 cons, prod;
872 	struct bnxt_tpa_info *tpa_info;
873 	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
874 	struct rx_bd *prod_bd;
875 	dma_addr_t mapping;
876 
877 	cons = tpa_start->rx_tpa_start_cmp_opaque;
878 	prod = rxr->rx_prod;
879 	cons_rx_buf = &rxr->rx_buf_ring[cons];
880 	prod_rx_buf = &rxr->rx_buf_ring[prod];
881 	tpa_info = &rxr->rx_tpa[agg_id];
882 
883 	if (unlikely(cons != rxr->rx_next_cons)) {
884 		bnxt_sched_reset(bp, rxr);
885 		return;
886 	}
887 
888 	prod_rx_buf->data = tpa_info->data;
889 
890 	mapping = tpa_info->mapping;
891 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
892 
893 	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
894 
895 	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
896 
897 	tpa_info->data = cons_rx_buf->data;
898 	cons_rx_buf->data = NULL;
899 	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
900 
901 	tpa_info->len =
902 		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
903 				RX_TPA_START_CMP_LEN_SHIFT;
904 	if (likely(TPA_START_HASH_VALID(tpa_start))) {
905 		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
906 
907 		tpa_info->hash_type = PKT_HASH_TYPE_L4;
908 		tpa_info->gso_type = SKB_GSO_TCPV4;
909 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
910 		if (hash_type == 3)
911 			tpa_info->gso_type = SKB_GSO_TCPV6;
912 		tpa_info->rss_hash =
913 			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
914 	} else {
915 		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
916 		tpa_info->gso_type = 0;
917 		if (netif_msg_rx_err(bp))
918 			netdev_warn(bp->dev, "TPA packet without valid hash\n");
919 	}
920 	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
921 	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
922 
923 	rxr->rx_prod = NEXT_RX(prod);
924 	cons = NEXT_RX(cons);
925 	rxr->rx_next_cons = NEXT_RX(cons);
926 	cons_rx_buf = &rxr->rx_buf_ring[cons];
927 
928 	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
929 	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
930 	cons_rx_buf->data = NULL;
931 }
932 
933 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
934 			   u16 cp_cons, u32 agg_bufs)
935 {
936 	if (agg_bufs)
937 		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
938 }
939 
940 #define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
941 #define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
942 
943 static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
944 					   struct rx_tpa_end_cmp *tpa_end,
945 					   struct rx_tpa_end_cmp_ext *tpa_end1,
946 					   struct sk_buff *skb)
947 {
948 #ifdef CONFIG_INET
949 	struct tcphdr *th;
950 	int payload_off, tcp_opt_len = 0;
951 	int len, nw_off;
952 	u16 segs;
953 
954 	segs = TPA_END_TPA_SEGS(tpa_end);
955 	if (segs == 1)
956 		return skb;
957 
958 	NAPI_GRO_CB(skb)->count = segs;
959 	skb_shinfo(skb)->gso_size =
960 		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
961 	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
962 	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
963 		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
964 		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
965 	if (TPA_END_GRO_TS(tpa_end))
966 		tcp_opt_len = 12;
967 
968 	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
969 		struct iphdr *iph;
970 
971 		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
972 			 ETH_HLEN;
973 		skb_set_network_header(skb, nw_off);
974 		iph = ip_hdr(skb);
975 		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
976 		len = skb->len - skb_transport_offset(skb);
977 		th = tcp_hdr(skb);
978 		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
979 	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
980 		struct ipv6hdr *iph;
981 
982 		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
983 			 ETH_HLEN;
984 		skb_set_network_header(skb, nw_off);
985 		iph = ipv6_hdr(skb);
986 		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
987 		len = skb->len - skb_transport_offset(skb);
988 		th = tcp_hdr(skb);
989 		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
990 	} else {
991 		dev_kfree_skb_any(skb);
992 		return NULL;
993 	}
994 	tcp_gro_complete(skb);
995 
996 	if (nw_off) { /* tunnel */
997 		struct udphdr *uh = NULL;
998 
999 		if (skb->protocol == htons(ETH_P_IP)) {
1000 			struct iphdr *iph = (struct iphdr *)skb->data;
1001 
1002 			if (iph->protocol == IPPROTO_UDP)
1003 				uh = (struct udphdr *)(iph + 1);
1004 		} else {
1005 			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1006 
1007 			if (iph->nexthdr == IPPROTO_UDP)
1008 				uh = (struct udphdr *)(iph + 1);
1009 		}
1010 		if (uh) {
1011 			if (uh->check)
1012 				skb_shinfo(skb)->gso_type |=
1013 					SKB_GSO_UDP_TUNNEL_CSUM;
1014 			else
1015 				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1016 		}
1017 	}
1018 #endif
1019 	return skb;
1020 }
1021 
1022 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1023 					   struct bnxt_napi *bnapi,
1024 					   u32 *raw_cons,
1025 					   struct rx_tpa_end_cmp *tpa_end,
1026 					   struct rx_tpa_end_cmp_ext *tpa_end1,
1027 					   bool *agg_event)
1028 {
1029 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1030 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1031 	u8 agg_id = TPA_END_AGG_ID(tpa_end);
1032 	u8 *data, agg_bufs;
1033 	u16 cp_cons = RING_CMP(*raw_cons);
1034 	unsigned int len;
1035 	struct bnxt_tpa_info *tpa_info;
1036 	dma_addr_t mapping;
1037 	struct sk_buff *skb;
1038 
1039 	if (unlikely(bnapi->in_reset)) {
1040 		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1041 
1042 		if (rc < 0)
1043 			return ERR_PTR(-EBUSY);
1044 		return NULL;
1045 	}
1046 
1047 	tpa_info = &rxr->rx_tpa[agg_id];
1048 	data = tpa_info->data;
1049 	prefetch(data);
1050 	len = tpa_info->len;
1051 	mapping = tpa_info->mapping;
1052 
1053 	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1054 		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1055 
1056 	if (agg_bufs) {
1057 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1058 			return ERR_PTR(-EBUSY);
1059 
1060 		*agg_event = true;
1061 		cp_cons = NEXT_CMP(cp_cons);
1062 	}
1063 
1064 	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
1065 		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1066 		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1067 			    agg_bufs, (int)MAX_SKB_FRAGS);
1068 		return NULL;
1069 	}
1070 
1071 	if (len <= bp->rx_copy_thresh) {
1072 		skb = bnxt_copy_skb(bnapi, data, len, mapping);
1073 		if (!skb) {
1074 			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1075 			return NULL;
1076 		}
1077 	} else {
1078 		u8 *new_data;
1079 		dma_addr_t new_mapping;
1080 
1081 		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1082 		if (!new_data) {
1083 			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1084 			return NULL;
1085 		}
1086 
1087 		tpa_info->data = new_data;
1088 		tpa_info->mapping = new_mapping;
1089 
1090 		skb = build_skb(data, 0);
1091 		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
1092 				 PCI_DMA_FROMDEVICE);
1093 
1094 		if (!skb) {
1095 			kfree(data);
1096 			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1097 			return NULL;
1098 		}
1099 		skb_reserve(skb, BNXT_RX_OFFSET);
1100 		skb_put(skb, len);
1101 	}
1102 
1103 	if (agg_bufs) {
1104 		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1105 		if (!skb) {
1106 			/* Page reuse already handled by bnxt_rx_pages(). */
1107 			return NULL;
1108 		}
1109 	}
1110 	skb->protocol = eth_type_trans(skb, bp->dev);
1111 
1112 	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1113 		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1114 
1115 	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1116 		netdev_features_t features = skb->dev->features;
1117 		u16 vlan_proto = tpa_info->metadata >>
1118 			RX_CMP_FLAGS2_METADATA_TPID_SFT;
1119 
1120 		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1121 		     vlan_proto == ETH_P_8021Q) ||
1122 		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1123 		     vlan_proto == ETH_P_8021AD)) {
1124 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1125 					       tpa_info->metadata &
1126 					       RX_CMP_FLAGS2_METADATA_VID_MASK);
1127 		}
1128 	}
1129 
1130 	skb_checksum_none_assert(skb);
1131 	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1132 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1133 		skb->csum_level =
1134 			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1135 	}
1136 
1137 	if (TPA_END_GRO(tpa_end))
1138 		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
1139 
1140 	return skb;
1141 }
1142 
1143 /* returns the following:
1144  * 1       - 1 packet successfully received
1145  * 0       - successful TPA_START, packet not completed yet
1146  * -EBUSY  - completion ring does not have all the agg buffers yet
1147  * -ENOMEM - packet aborted due to out of memory
1148  * -EIO    - packet aborted due to hw error indicated in BD
1149  */
1150 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1151 		       bool *agg_event)
1152 {
1153 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1154 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1155 	struct net_device *dev = bp->dev;
1156 	struct rx_cmp *rxcmp;
1157 	struct rx_cmp_ext *rxcmp1;
1158 	u32 tmp_raw_cons = *raw_cons;
1159 	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1160 	struct bnxt_sw_rx_bd *rx_buf;
1161 	unsigned int len;
1162 	u8 *data, agg_bufs, cmp_type;
1163 	dma_addr_t dma_addr;
1164 	struct sk_buff *skb;
1165 	int rc = 0;
1166 
1167 	rxcmp = (struct rx_cmp *)
1168 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1169 
1170 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1171 	cp_cons = RING_CMP(tmp_raw_cons);
1172 	rxcmp1 = (struct rx_cmp_ext *)
1173 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1174 
1175 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1176 		return -EBUSY;
1177 
1178 	cmp_type = RX_CMP_TYPE(rxcmp);
1179 
1180 	prod = rxr->rx_prod;
1181 
1182 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1183 		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1184 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
1185 
1186 		goto next_rx_no_prod;
1187 
1188 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1189 		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1190 				   (struct rx_tpa_end_cmp *)rxcmp,
1191 				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
1192 				   agg_event);
1193 
1194 		if (unlikely(IS_ERR(skb)))
1195 			return -EBUSY;
1196 
1197 		rc = -ENOMEM;
1198 		if (likely(skb)) {
1199 			skb_record_rx_queue(skb, bnapi->index);
1200 			skb_mark_napi_id(skb, &bnapi->napi);
1201 			if (bnxt_busy_polling(bnapi))
1202 				netif_receive_skb(skb);
1203 			else
1204 				napi_gro_receive(&bnapi->napi, skb);
1205 			rc = 1;
1206 		}
1207 		goto next_rx_no_prod;
1208 	}
1209 
1210 	cons = rxcmp->rx_cmp_opaque;
1211 	rx_buf = &rxr->rx_buf_ring[cons];
1212 	data = rx_buf->data;
1213 	if (unlikely(cons != rxr->rx_next_cons)) {
1214 		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1215 
1216 		bnxt_sched_reset(bp, rxr);
1217 		return rc1;
1218 	}
1219 	prefetch(data);
1220 
1221 	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1222 				RX_CMP_AGG_BUFS_SHIFT;
1223 
1224 	if (agg_bufs) {
1225 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1226 			return -EBUSY;
1227 
1228 		cp_cons = NEXT_CMP(cp_cons);
1229 		*agg_event = true;
1230 	}
1231 
1232 	rx_buf->data = NULL;
1233 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1234 		bnxt_reuse_rx_data(rxr, cons, data);
1235 		if (agg_bufs)
1236 			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1237 
1238 		rc = -EIO;
1239 		goto next_rx;
1240 	}
1241 
1242 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1243 	dma_addr = dma_unmap_addr(rx_buf, mapping);
1244 
1245 	if (len <= bp->rx_copy_thresh) {
1246 		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1247 		bnxt_reuse_rx_data(rxr, cons, data);
1248 		if (!skb) {
1249 			rc = -ENOMEM;
1250 			goto next_rx;
1251 		}
1252 	} else {
1253 		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1254 		if (!skb) {
1255 			rc = -ENOMEM;
1256 			goto next_rx;
1257 		}
1258 	}
1259 
1260 	if (agg_bufs) {
1261 		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1262 		if (!skb) {
1263 			rc = -ENOMEM;
1264 			goto next_rx;
1265 		}
1266 	}
1267 
1268 	if (RX_CMP_HASH_VALID(rxcmp)) {
1269 		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1270 		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1271 
1272 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1273 		if (hash_type != 1 && hash_type != 3)
1274 			type = PKT_HASH_TYPE_L3;
1275 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1276 	}
1277 
1278 	skb->protocol = eth_type_trans(skb, dev);
1279 
1280 	if (rxcmp1->rx_cmp_flags2 &
1281 	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
1282 		netdev_features_t features = skb->dev->features;
1283 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1284 		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1285 
1286 		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1287 		     vlan_proto == ETH_P_8021Q) ||
1288 		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1289 		     vlan_proto == ETH_P_8021AD))
1290 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1291 					       meta_data &
1292 					       RX_CMP_FLAGS2_METADATA_VID_MASK);
1293 	}
1294 
1295 	skb_checksum_none_assert(skb);
1296 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
1297 		if (dev->features & NETIF_F_RXCSUM) {
1298 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1299 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1300 		}
1301 	} else {
1302 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1303 			if (dev->features & NETIF_F_RXCSUM)
1304 				cpr->rx_l4_csum_errors++;
1305 		}
1306 	}
1307 
1308 	skb_record_rx_queue(skb, bnapi->index);
1309 	skb_mark_napi_id(skb, &bnapi->napi);
1310 	if (bnxt_busy_polling(bnapi))
1311 		netif_receive_skb(skb);
1312 	else
1313 		napi_gro_receive(&bnapi->napi, skb);
1314 	rc = 1;
1315 
1316 next_rx:
1317 	rxr->rx_prod = NEXT_RX(prod);
1318 	rxr->rx_next_cons = NEXT_RX(cons);
1319 
1320 next_rx_no_prod:
1321 	*raw_cons = tmp_raw_cons;
1322 
1323 	return rc;
1324 }
1325 
1326 #define BNXT_GET_EVENT_PORT(data)	\
1327 	((data) &				\
1328 	 HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1329 
1330 static int bnxt_async_event_process(struct bnxt *bp,
1331 				    struct hwrm_async_event_cmpl *cmpl)
1332 {
1333 	u16 event_id = le16_to_cpu(cmpl->event_id);
1334 
1335 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
1336 	switch (event_id) {
1337 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1338 		u32 data1 = le32_to_cpu(cmpl->event_data1);
1339 		struct bnxt_link_info *link_info = &bp->link_info;
1340 
1341 		if (BNXT_VF(bp))
1342 			goto async_event_process_exit;
1343 		if (data1 & 0x20000) {
1344 			u16 fw_speed = link_info->force_link_speed;
1345 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1346 
1347 			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1348 				    speed);
1349 		}
1350 		/* fall thru */
1351 	}
1352 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1353 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1354 		break;
1355 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1356 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1357 		break;
1358 	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1359 		u32 data1 = le32_to_cpu(cmpl->event_data1);
1360 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
1361 
1362 		if (BNXT_VF(bp))
1363 			break;
1364 
1365 		if (bp->pf.port_id != port_id)
1366 			break;
1367 
1368 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1369 		break;
1370 	}
1371 	default:
1372 		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1373 			   event_id);
1374 		goto async_event_process_exit;
1375 	}
1376 	schedule_work(&bp->sp_task);
1377 async_event_process_exit:
1378 	return 0;
1379 }
1380 
1381 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1382 {
1383 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1384 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1385 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1386 				(struct hwrm_fwd_req_cmpl *)txcmp;
1387 
1388 	switch (cmpl_type) {
1389 	case CMPL_BASE_TYPE_HWRM_DONE:
1390 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
1391 		if (seq_id == bp->hwrm_intr_seq_id)
1392 			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1393 		else
1394 			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1395 		break;
1396 
1397 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1398 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1399 
1400 		if ((vf_id < bp->pf.first_vf_id) ||
1401 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1402 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1403 				   vf_id);
1404 			return -EINVAL;
1405 		}
1406 
1407 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1408 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1409 		schedule_work(&bp->sp_task);
1410 		break;
1411 
1412 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1413 		bnxt_async_event_process(bp,
1414 					 (struct hwrm_async_event_cmpl *)txcmp);
1415 
1416 	default:
1417 		break;
1418 	}
1419 
1420 	return 0;
1421 }
1422 
1423 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1424 {
1425 	struct bnxt_napi *bnapi = dev_instance;
1426 	struct bnxt *bp = bnapi->bp;
1427 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1428 	u32 cons = RING_CMP(cpr->cp_raw_cons);
1429 
1430 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1431 	napi_schedule(&bnapi->napi);
1432 	return IRQ_HANDLED;
1433 }
1434 
1435 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1436 {
1437 	u32 raw_cons = cpr->cp_raw_cons;
1438 	u16 cons = RING_CMP(raw_cons);
1439 	struct tx_cmp *txcmp;
1440 
1441 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1442 
1443 	return TX_CMP_VALID(txcmp, raw_cons);
1444 }
1445 
1446 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1447 {
1448 	struct bnxt_napi *bnapi = dev_instance;
1449 	struct bnxt *bp = bnapi->bp;
1450 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1451 	u32 cons = RING_CMP(cpr->cp_raw_cons);
1452 	u32 int_status;
1453 
1454 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1455 
1456 	if (!bnxt_has_work(bp, cpr)) {
1457 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1458 		/* return if erroneous interrupt */
1459 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1460 			return IRQ_NONE;
1461 	}
1462 
1463 	/* disable ring IRQ */
1464 	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1465 
1466 	/* Return here if interrupt is shared and is disabled. */
1467 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
1468 		return IRQ_HANDLED;
1469 
1470 	napi_schedule(&bnapi->napi);
1471 	return IRQ_HANDLED;
1472 }
1473 
1474 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1475 {
1476 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1477 	u32 raw_cons = cpr->cp_raw_cons;
1478 	u32 cons;
1479 	int tx_pkts = 0;
1480 	int rx_pkts = 0;
1481 	bool rx_event = false;
1482 	bool agg_event = false;
1483 	struct tx_cmp *txcmp;
1484 
1485 	while (1) {
1486 		int rc;
1487 
1488 		cons = RING_CMP(raw_cons);
1489 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1490 
1491 		if (!TX_CMP_VALID(txcmp, raw_cons))
1492 			break;
1493 
1494 		/* The valid test of the entry must be done first before
1495 		 * reading any further.
1496 		 */
1497 		dma_rmb();
1498 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1499 			tx_pkts++;
1500 			/* return full budget so NAPI will complete. */
1501 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
1502 				rx_pkts = budget;
1503 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1504 			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1505 			if (likely(rc >= 0))
1506 				rx_pkts += rc;
1507 			else if (rc == -EBUSY)	/* partial completion */
1508 				break;
1509 			rx_event = true;
1510 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
1511 				     CMPL_BASE_TYPE_HWRM_DONE) ||
1512 				    (TX_CMP_TYPE(txcmp) ==
1513 				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1514 				    (TX_CMP_TYPE(txcmp) ==
1515 				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1516 			bnxt_hwrm_handler(bp, txcmp);
1517 		}
1518 		raw_cons = NEXT_RAW_CMP(raw_cons);
1519 
1520 		if (rx_pkts == budget)
1521 			break;
1522 	}
1523 
1524 	cpr->cp_raw_cons = raw_cons;
1525 	/* ACK completion ring before freeing tx ring and producing new
1526 	 * buffers in rx/agg rings to prevent overflowing the completion
1527 	 * ring.
1528 	 */
1529 	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1530 
1531 	if (tx_pkts)
1532 		bnxt_tx_int(bp, bnapi, tx_pkts);
1533 
1534 	if (rx_event) {
1535 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1536 
1537 		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1538 		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1539 		if (agg_event) {
1540 			writel(DB_KEY_RX | rxr->rx_agg_prod,
1541 			       rxr->rx_agg_doorbell);
1542 			writel(DB_KEY_RX | rxr->rx_agg_prod,
1543 			       rxr->rx_agg_doorbell);
1544 		}
1545 	}
1546 	return rx_pkts;
1547 }
1548 
1549 static int bnxt_poll(struct napi_struct *napi, int budget)
1550 {
1551 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1552 	struct bnxt *bp = bnapi->bp;
1553 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1554 	int work_done = 0;
1555 
1556 	if (!bnxt_lock_napi(bnapi))
1557 		return budget;
1558 
1559 	while (1) {
1560 		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1561 
1562 		if (work_done >= budget)
1563 			break;
1564 
1565 		if (!bnxt_has_work(bp, cpr)) {
1566 			napi_complete(napi);
1567 			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1568 			break;
1569 		}
1570 	}
1571 	mmiowb();
1572 	bnxt_unlock_napi(bnapi);
1573 	return work_done;
1574 }
1575 
1576 #ifdef CONFIG_NET_RX_BUSY_POLL
1577 static int bnxt_busy_poll(struct napi_struct *napi)
1578 {
1579 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1580 	struct bnxt *bp = bnapi->bp;
1581 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1582 	int rx_work, budget = 4;
1583 
1584 	if (atomic_read(&bp->intr_sem) != 0)
1585 		return LL_FLUSH_FAILED;
1586 
1587 	if (!bnxt_lock_poll(bnapi))
1588 		return LL_FLUSH_BUSY;
1589 
1590 	rx_work = bnxt_poll_work(bp, bnapi, budget);
1591 
1592 	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1593 
1594 	bnxt_unlock_poll(bnapi);
1595 	return rx_work;
1596 }
1597 #endif
1598 
1599 static void bnxt_free_tx_skbs(struct bnxt *bp)
1600 {
1601 	int i, max_idx;
1602 	struct pci_dev *pdev = bp->pdev;
1603 
1604 	if (!bp->tx_ring)
1605 		return;
1606 
1607 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1608 	for (i = 0; i < bp->tx_nr_rings; i++) {
1609 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1610 		int j;
1611 
1612 		for (j = 0; j < max_idx;) {
1613 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1614 			struct sk_buff *skb = tx_buf->skb;
1615 			int k, last;
1616 
1617 			if (!skb) {
1618 				j++;
1619 				continue;
1620 			}
1621 
1622 			tx_buf->skb = NULL;
1623 
1624 			if (tx_buf->is_push) {
1625 				dev_kfree_skb(skb);
1626 				j += 2;
1627 				continue;
1628 			}
1629 
1630 			dma_unmap_single(&pdev->dev,
1631 					 dma_unmap_addr(tx_buf, mapping),
1632 					 skb_headlen(skb),
1633 					 PCI_DMA_TODEVICE);
1634 
1635 			last = tx_buf->nr_frags;
1636 			j += 2;
1637 			for (k = 0; k < last; k++, j++) {
1638 				int ring_idx = j & bp->tx_ring_mask;
1639 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1640 
1641 				tx_buf = &txr->tx_buf_ring[ring_idx];
1642 				dma_unmap_page(
1643 					&pdev->dev,
1644 					dma_unmap_addr(tx_buf, mapping),
1645 					skb_frag_size(frag), PCI_DMA_TODEVICE);
1646 			}
1647 			dev_kfree_skb(skb);
1648 		}
1649 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1650 	}
1651 }
1652 
1653 static void bnxt_free_rx_skbs(struct bnxt *bp)
1654 {
1655 	int i, max_idx, max_agg_idx;
1656 	struct pci_dev *pdev = bp->pdev;
1657 
1658 	if (!bp->rx_ring)
1659 		return;
1660 
1661 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1662 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1663 	for (i = 0; i < bp->rx_nr_rings; i++) {
1664 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1665 		int j;
1666 
1667 		if (rxr->rx_tpa) {
1668 			for (j = 0; j < MAX_TPA; j++) {
1669 				struct bnxt_tpa_info *tpa_info =
1670 							&rxr->rx_tpa[j];
1671 				u8 *data = tpa_info->data;
1672 
1673 				if (!data)
1674 					continue;
1675 
1676 				dma_unmap_single(
1677 					&pdev->dev,
1678 					dma_unmap_addr(tpa_info, mapping),
1679 					bp->rx_buf_use_size,
1680 					PCI_DMA_FROMDEVICE);
1681 
1682 				tpa_info->data = NULL;
1683 
1684 				kfree(data);
1685 			}
1686 		}
1687 
1688 		for (j = 0; j < max_idx; j++) {
1689 			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1690 			u8 *data = rx_buf->data;
1691 
1692 			if (!data)
1693 				continue;
1694 
1695 			dma_unmap_single(&pdev->dev,
1696 					 dma_unmap_addr(rx_buf, mapping),
1697 					 bp->rx_buf_use_size,
1698 					 PCI_DMA_FROMDEVICE);
1699 
1700 			rx_buf->data = NULL;
1701 
1702 			kfree(data);
1703 		}
1704 
1705 		for (j = 0; j < max_agg_idx; j++) {
1706 			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1707 				&rxr->rx_agg_ring[j];
1708 			struct page *page = rx_agg_buf->page;
1709 
1710 			if (!page)
1711 				continue;
1712 
1713 			dma_unmap_page(&pdev->dev,
1714 				       dma_unmap_addr(rx_agg_buf, mapping),
1715 				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1716 
1717 			rx_agg_buf->page = NULL;
1718 			__clear_bit(j, rxr->rx_agg_bmap);
1719 
1720 			__free_page(page);
1721 		}
1722 		if (rxr->rx_page) {
1723 			__free_page(rxr->rx_page);
1724 			rxr->rx_page = NULL;
1725 		}
1726 	}
1727 }
1728 
1729 static void bnxt_free_skbs(struct bnxt *bp)
1730 {
1731 	bnxt_free_tx_skbs(bp);
1732 	bnxt_free_rx_skbs(bp);
1733 }
1734 
1735 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1736 {
1737 	struct pci_dev *pdev = bp->pdev;
1738 	int i;
1739 
1740 	for (i = 0; i < ring->nr_pages; i++) {
1741 		if (!ring->pg_arr[i])
1742 			continue;
1743 
1744 		dma_free_coherent(&pdev->dev, ring->page_size,
1745 				  ring->pg_arr[i], ring->dma_arr[i]);
1746 
1747 		ring->pg_arr[i] = NULL;
1748 	}
1749 	if (ring->pg_tbl) {
1750 		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1751 				  ring->pg_tbl, ring->pg_tbl_map);
1752 		ring->pg_tbl = NULL;
1753 	}
1754 	if (ring->vmem_size && *ring->vmem) {
1755 		vfree(*ring->vmem);
1756 		*ring->vmem = NULL;
1757 	}
1758 }
1759 
1760 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1761 {
1762 	int i;
1763 	struct pci_dev *pdev = bp->pdev;
1764 
1765 	if (ring->nr_pages > 1) {
1766 		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1767 						  ring->nr_pages * 8,
1768 						  &ring->pg_tbl_map,
1769 						  GFP_KERNEL);
1770 		if (!ring->pg_tbl)
1771 			return -ENOMEM;
1772 	}
1773 
1774 	for (i = 0; i < ring->nr_pages; i++) {
1775 		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1776 						     ring->page_size,
1777 						     &ring->dma_arr[i],
1778 						     GFP_KERNEL);
1779 		if (!ring->pg_arr[i])
1780 			return -ENOMEM;
1781 
1782 		if (ring->nr_pages > 1)
1783 			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1784 	}
1785 
1786 	if (ring->vmem_size) {
1787 		*ring->vmem = vzalloc(ring->vmem_size);
1788 		if (!(*ring->vmem))
1789 			return -ENOMEM;
1790 	}
1791 	return 0;
1792 }
1793 
1794 static void bnxt_free_rx_rings(struct bnxt *bp)
1795 {
1796 	int i;
1797 
1798 	if (!bp->rx_ring)
1799 		return;
1800 
1801 	for (i = 0; i < bp->rx_nr_rings; i++) {
1802 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1803 		struct bnxt_ring_struct *ring;
1804 
1805 		kfree(rxr->rx_tpa);
1806 		rxr->rx_tpa = NULL;
1807 
1808 		kfree(rxr->rx_agg_bmap);
1809 		rxr->rx_agg_bmap = NULL;
1810 
1811 		ring = &rxr->rx_ring_struct;
1812 		bnxt_free_ring(bp, ring);
1813 
1814 		ring = &rxr->rx_agg_ring_struct;
1815 		bnxt_free_ring(bp, ring);
1816 	}
1817 }
1818 
1819 static int bnxt_alloc_rx_rings(struct bnxt *bp)
1820 {
1821 	int i, rc, agg_rings = 0, tpa_rings = 0;
1822 
1823 	if (!bp->rx_ring)
1824 		return -ENOMEM;
1825 
1826 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
1827 		agg_rings = 1;
1828 
1829 	if (bp->flags & BNXT_FLAG_TPA)
1830 		tpa_rings = 1;
1831 
1832 	for (i = 0; i < bp->rx_nr_rings; i++) {
1833 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1834 		struct bnxt_ring_struct *ring;
1835 
1836 		ring = &rxr->rx_ring_struct;
1837 
1838 		rc = bnxt_alloc_ring(bp, ring);
1839 		if (rc)
1840 			return rc;
1841 
1842 		if (agg_rings) {
1843 			u16 mem_size;
1844 
1845 			ring = &rxr->rx_agg_ring_struct;
1846 			rc = bnxt_alloc_ring(bp, ring);
1847 			if (rc)
1848 				return rc;
1849 
1850 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1851 			mem_size = rxr->rx_agg_bmap_size / 8;
1852 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1853 			if (!rxr->rx_agg_bmap)
1854 				return -ENOMEM;
1855 
1856 			if (tpa_rings) {
1857 				rxr->rx_tpa = kcalloc(MAX_TPA,
1858 						sizeof(struct bnxt_tpa_info),
1859 						GFP_KERNEL);
1860 				if (!rxr->rx_tpa)
1861 					return -ENOMEM;
1862 			}
1863 		}
1864 	}
1865 	return 0;
1866 }
1867 
1868 static void bnxt_free_tx_rings(struct bnxt *bp)
1869 {
1870 	int i;
1871 	struct pci_dev *pdev = bp->pdev;
1872 
1873 	if (!bp->tx_ring)
1874 		return;
1875 
1876 	for (i = 0; i < bp->tx_nr_rings; i++) {
1877 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1878 		struct bnxt_ring_struct *ring;
1879 
1880 		if (txr->tx_push) {
1881 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
1882 					  txr->tx_push, txr->tx_push_mapping);
1883 			txr->tx_push = NULL;
1884 		}
1885 
1886 		ring = &txr->tx_ring_struct;
1887 
1888 		bnxt_free_ring(bp, ring);
1889 	}
1890 }
1891 
1892 static int bnxt_alloc_tx_rings(struct bnxt *bp)
1893 {
1894 	int i, j, rc;
1895 	struct pci_dev *pdev = bp->pdev;
1896 
1897 	bp->tx_push_size = 0;
1898 	if (bp->tx_push_thresh) {
1899 		int push_size;
1900 
1901 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1902 					bp->tx_push_thresh);
1903 
1904 		if (push_size > 256) {
1905 			push_size = 0;
1906 			bp->tx_push_thresh = 0;
1907 		}
1908 
1909 		bp->tx_push_size = push_size;
1910 	}
1911 
1912 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1913 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1914 		struct bnxt_ring_struct *ring;
1915 
1916 		ring = &txr->tx_ring_struct;
1917 
1918 		rc = bnxt_alloc_ring(bp, ring);
1919 		if (rc)
1920 			return rc;
1921 
1922 		if (bp->tx_push_size) {
1923 			dma_addr_t mapping;
1924 
1925 			/* One pre-allocated DMA buffer to backup
1926 			 * TX push operation
1927 			 */
1928 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
1929 						bp->tx_push_size,
1930 						&txr->tx_push_mapping,
1931 						GFP_KERNEL);
1932 
1933 			if (!txr->tx_push)
1934 				return -ENOMEM;
1935 
1936 			mapping = txr->tx_push_mapping +
1937 				sizeof(struct tx_push_bd);
1938 			txr->data_mapping = cpu_to_le64(mapping);
1939 
1940 			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1941 		}
1942 		ring->queue_id = bp->q_info[j].queue_id;
1943 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1944 			j++;
1945 	}
1946 	return 0;
1947 }
1948 
1949 static void bnxt_free_cp_rings(struct bnxt *bp)
1950 {
1951 	int i;
1952 
1953 	if (!bp->bnapi)
1954 		return;
1955 
1956 	for (i = 0; i < bp->cp_nr_rings; i++) {
1957 		struct bnxt_napi *bnapi = bp->bnapi[i];
1958 		struct bnxt_cp_ring_info *cpr;
1959 		struct bnxt_ring_struct *ring;
1960 
1961 		if (!bnapi)
1962 			continue;
1963 
1964 		cpr = &bnapi->cp_ring;
1965 		ring = &cpr->cp_ring_struct;
1966 
1967 		bnxt_free_ring(bp, ring);
1968 	}
1969 }
1970 
1971 static int bnxt_alloc_cp_rings(struct bnxt *bp)
1972 {
1973 	int i, rc;
1974 
1975 	for (i = 0; i < bp->cp_nr_rings; i++) {
1976 		struct bnxt_napi *bnapi = bp->bnapi[i];
1977 		struct bnxt_cp_ring_info *cpr;
1978 		struct bnxt_ring_struct *ring;
1979 
1980 		if (!bnapi)
1981 			continue;
1982 
1983 		cpr = &bnapi->cp_ring;
1984 		ring = &cpr->cp_ring_struct;
1985 
1986 		rc = bnxt_alloc_ring(bp, ring);
1987 		if (rc)
1988 			return rc;
1989 	}
1990 	return 0;
1991 }
1992 
1993 static void bnxt_init_ring_struct(struct bnxt *bp)
1994 {
1995 	int i;
1996 
1997 	for (i = 0; i < bp->cp_nr_rings; i++) {
1998 		struct bnxt_napi *bnapi = bp->bnapi[i];
1999 		struct bnxt_cp_ring_info *cpr;
2000 		struct bnxt_rx_ring_info *rxr;
2001 		struct bnxt_tx_ring_info *txr;
2002 		struct bnxt_ring_struct *ring;
2003 
2004 		if (!bnapi)
2005 			continue;
2006 
2007 		cpr = &bnapi->cp_ring;
2008 		ring = &cpr->cp_ring_struct;
2009 		ring->nr_pages = bp->cp_nr_pages;
2010 		ring->page_size = HW_CMPD_RING_SIZE;
2011 		ring->pg_arr = (void **)cpr->cp_desc_ring;
2012 		ring->dma_arr = cpr->cp_desc_mapping;
2013 		ring->vmem_size = 0;
2014 
2015 		rxr = bnapi->rx_ring;
2016 		if (!rxr)
2017 			goto skip_rx;
2018 
2019 		ring = &rxr->rx_ring_struct;
2020 		ring->nr_pages = bp->rx_nr_pages;
2021 		ring->page_size = HW_RXBD_RING_SIZE;
2022 		ring->pg_arr = (void **)rxr->rx_desc_ring;
2023 		ring->dma_arr = rxr->rx_desc_mapping;
2024 		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2025 		ring->vmem = (void **)&rxr->rx_buf_ring;
2026 
2027 		ring = &rxr->rx_agg_ring_struct;
2028 		ring->nr_pages = bp->rx_agg_nr_pages;
2029 		ring->page_size = HW_RXBD_RING_SIZE;
2030 		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2031 		ring->dma_arr = rxr->rx_agg_desc_mapping;
2032 		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2033 		ring->vmem = (void **)&rxr->rx_agg_ring;
2034 
2035 skip_rx:
2036 		txr = bnapi->tx_ring;
2037 		if (!txr)
2038 			continue;
2039 
2040 		ring = &txr->tx_ring_struct;
2041 		ring->nr_pages = bp->tx_nr_pages;
2042 		ring->page_size = HW_RXBD_RING_SIZE;
2043 		ring->pg_arr = (void **)txr->tx_desc_ring;
2044 		ring->dma_arr = txr->tx_desc_mapping;
2045 		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2046 		ring->vmem = (void **)&txr->tx_buf_ring;
2047 	}
2048 }
2049 
2050 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2051 {
2052 	int i;
2053 	u32 prod;
2054 	struct rx_bd **rx_buf_ring;
2055 
2056 	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2057 	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2058 		int j;
2059 		struct rx_bd *rxbd;
2060 
2061 		rxbd = rx_buf_ring[i];
2062 		if (!rxbd)
2063 			continue;
2064 
2065 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2066 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2067 			rxbd->rx_bd_opaque = prod;
2068 		}
2069 	}
2070 }
2071 
2072 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2073 {
2074 	struct net_device *dev = bp->dev;
2075 	struct bnxt_rx_ring_info *rxr;
2076 	struct bnxt_ring_struct *ring;
2077 	u32 prod, type;
2078 	int i;
2079 
2080 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2081 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2082 
2083 	if (NET_IP_ALIGN == 2)
2084 		type |= RX_BD_FLAGS_SOP;
2085 
2086 	rxr = &bp->rx_ring[ring_nr];
2087 	ring = &rxr->rx_ring_struct;
2088 	bnxt_init_rxbd_pages(ring, type);
2089 
2090 	prod = rxr->rx_prod;
2091 	for (i = 0; i < bp->rx_ring_size; i++) {
2092 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2093 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2094 				    ring_nr, i, bp->rx_ring_size);
2095 			break;
2096 		}
2097 		prod = NEXT_RX(prod);
2098 	}
2099 	rxr->rx_prod = prod;
2100 	ring->fw_ring_id = INVALID_HW_RING_ID;
2101 
2102 	ring = &rxr->rx_agg_ring_struct;
2103 	ring->fw_ring_id = INVALID_HW_RING_ID;
2104 
2105 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2106 		return 0;
2107 
2108 	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2109 		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2110 
2111 	bnxt_init_rxbd_pages(ring, type);
2112 
2113 	prod = rxr->rx_agg_prod;
2114 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
2115 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2116 			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2117 				    ring_nr, i, bp->rx_ring_size);
2118 			break;
2119 		}
2120 		prod = NEXT_RX_AGG(prod);
2121 	}
2122 	rxr->rx_agg_prod = prod;
2123 
2124 	if (bp->flags & BNXT_FLAG_TPA) {
2125 		if (rxr->rx_tpa) {
2126 			u8 *data;
2127 			dma_addr_t mapping;
2128 
2129 			for (i = 0; i < MAX_TPA; i++) {
2130 				data = __bnxt_alloc_rx_data(bp, &mapping,
2131 							    GFP_KERNEL);
2132 				if (!data)
2133 					return -ENOMEM;
2134 
2135 				rxr->rx_tpa[i].data = data;
2136 				rxr->rx_tpa[i].mapping = mapping;
2137 			}
2138 		} else {
2139 			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2140 			return -ENOMEM;
2141 		}
2142 	}
2143 
2144 	return 0;
2145 }
2146 
2147 static int bnxt_init_rx_rings(struct bnxt *bp)
2148 {
2149 	int i, rc = 0;
2150 
2151 	for (i = 0; i < bp->rx_nr_rings; i++) {
2152 		rc = bnxt_init_one_rx_ring(bp, i);
2153 		if (rc)
2154 			break;
2155 	}
2156 
2157 	return rc;
2158 }
2159 
2160 static int bnxt_init_tx_rings(struct bnxt *bp)
2161 {
2162 	u16 i;
2163 
2164 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2165 				   MAX_SKB_FRAGS + 1);
2166 
2167 	for (i = 0; i < bp->tx_nr_rings; i++) {
2168 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2169 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2170 
2171 		ring->fw_ring_id = INVALID_HW_RING_ID;
2172 	}
2173 
2174 	return 0;
2175 }
2176 
2177 static void bnxt_free_ring_grps(struct bnxt *bp)
2178 {
2179 	kfree(bp->grp_info);
2180 	bp->grp_info = NULL;
2181 }
2182 
2183 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2184 {
2185 	int i;
2186 
2187 	if (irq_re_init) {
2188 		bp->grp_info = kcalloc(bp->cp_nr_rings,
2189 				       sizeof(struct bnxt_ring_grp_info),
2190 				       GFP_KERNEL);
2191 		if (!bp->grp_info)
2192 			return -ENOMEM;
2193 	}
2194 	for (i = 0; i < bp->cp_nr_rings; i++) {
2195 		if (irq_re_init)
2196 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2197 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2198 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2199 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2200 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2201 	}
2202 	return 0;
2203 }
2204 
2205 static void bnxt_free_vnics(struct bnxt *bp)
2206 {
2207 	kfree(bp->vnic_info);
2208 	bp->vnic_info = NULL;
2209 	bp->nr_vnics = 0;
2210 }
2211 
2212 static int bnxt_alloc_vnics(struct bnxt *bp)
2213 {
2214 	int num_vnics = 1;
2215 
2216 #ifdef CONFIG_RFS_ACCEL
2217 	if (bp->flags & BNXT_FLAG_RFS)
2218 		num_vnics += bp->rx_nr_rings;
2219 #endif
2220 
2221 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2222 				GFP_KERNEL);
2223 	if (!bp->vnic_info)
2224 		return -ENOMEM;
2225 
2226 	bp->nr_vnics = num_vnics;
2227 	return 0;
2228 }
2229 
2230 static void bnxt_init_vnics(struct bnxt *bp)
2231 {
2232 	int i;
2233 
2234 	for (i = 0; i < bp->nr_vnics; i++) {
2235 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2236 
2237 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
2238 		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2239 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2240 
2241 		if (bp->vnic_info[i].rss_hash_key) {
2242 			if (i == 0)
2243 				prandom_bytes(vnic->rss_hash_key,
2244 					      HW_HASH_KEY_SIZE);
2245 			else
2246 				memcpy(vnic->rss_hash_key,
2247 				       bp->vnic_info[0].rss_hash_key,
2248 				       HW_HASH_KEY_SIZE);
2249 		}
2250 	}
2251 }
2252 
2253 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2254 {
2255 	int pages;
2256 
2257 	pages = ring_size / desc_per_pg;
2258 
2259 	if (!pages)
2260 		return 1;
2261 
2262 	pages++;
2263 
2264 	while (pages & (pages - 1))
2265 		pages++;
2266 
2267 	return pages;
2268 }
2269 
2270 static void bnxt_set_tpa_flags(struct bnxt *bp)
2271 {
2272 	bp->flags &= ~BNXT_FLAG_TPA;
2273 	if (bp->dev->features & NETIF_F_LRO)
2274 		bp->flags |= BNXT_FLAG_LRO;
2275 	if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2276 		bp->flags |= BNXT_FLAG_GRO;
2277 }
2278 
2279 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2280  * be set on entry.
2281  */
2282 void bnxt_set_ring_params(struct bnxt *bp)
2283 {
2284 	u32 ring_size, rx_size, rx_space;
2285 	u32 agg_factor = 0, agg_ring_size = 0;
2286 
2287 	/* 8 for CRC and VLAN */
2288 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2289 
2290 	rx_space = rx_size + NET_SKB_PAD +
2291 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2292 
2293 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2294 	ring_size = bp->rx_ring_size;
2295 	bp->rx_agg_ring_size = 0;
2296 	bp->rx_agg_nr_pages = 0;
2297 
2298 	if (bp->flags & BNXT_FLAG_TPA)
2299 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2300 
2301 	bp->flags &= ~BNXT_FLAG_JUMBO;
2302 	if (rx_space > PAGE_SIZE) {
2303 		u32 jumbo_factor;
2304 
2305 		bp->flags |= BNXT_FLAG_JUMBO;
2306 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2307 		if (jumbo_factor > agg_factor)
2308 			agg_factor = jumbo_factor;
2309 	}
2310 	agg_ring_size = ring_size * agg_factor;
2311 
2312 	if (agg_ring_size) {
2313 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2314 							RX_DESC_CNT);
2315 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2316 			u32 tmp = agg_ring_size;
2317 
2318 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2319 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2320 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2321 				    tmp, agg_ring_size);
2322 		}
2323 		bp->rx_agg_ring_size = agg_ring_size;
2324 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2325 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2326 		rx_space = rx_size + NET_SKB_PAD +
2327 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2328 	}
2329 
2330 	bp->rx_buf_use_size = rx_size;
2331 	bp->rx_buf_size = rx_space;
2332 
2333 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2334 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2335 
2336 	ring_size = bp->tx_ring_size;
2337 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2338 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2339 
2340 	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2341 	bp->cp_ring_size = ring_size;
2342 
2343 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2344 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
2345 		bp->cp_nr_pages = MAX_CP_PAGES;
2346 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2347 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2348 			    ring_size, bp->cp_ring_size);
2349 	}
2350 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2351 	bp->cp_ring_mask = bp->cp_bit - 1;
2352 }
2353 
2354 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2355 {
2356 	int i;
2357 	struct bnxt_vnic_info *vnic;
2358 	struct pci_dev *pdev = bp->pdev;
2359 
2360 	if (!bp->vnic_info)
2361 		return;
2362 
2363 	for (i = 0; i < bp->nr_vnics; i++) {
2364 		vnic = &bp->vnic_info[i];
2365 
2366 		kfree(vnic->fw_grp_ids);
2367 		vnic->fw_grp_ids = NULL;
2368 
2369 		kfree(vnic->uc_list);
2370 		vnic->uc_list = NULL;
2371 
2372 		if (vnic->mc_list) {
2373 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2374 					  vnic->mc_list, vnic->mc_list_mapping);
2375 			vnic->mc_list = NULL;
2376 		}
2377 
2378 		if (vnic->rss_table) {
2379 			dma_free_coherent(&pdev->dev, PAGE_SIZE,
2380 					  vnic->rss_table,
2381 					  vnic->rss_table_dma_addr);
2382 			vnic->rss_table = NULL;
2383 		}
2384 
2385 		vnic->rss_hash_key = NULL;
2386 		vnic->flags = 0;
2387 	}
2388 }
2389 
2390 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2391 {
2392 	int i, rc = 0, size;
2393 	struct bnxt_vnic_info *vnic;
2394 	struct pci_dev *pdev = bp->pdev;
2395 	int max_rings;
2396 
2397 	for (i = 0; i < bp->nr_vnics; i++) {
2398 		vnic = &bp->vnic_info[i];
2399 
2400 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2401 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2402 
2403 			if (mem_size > 0) {
2404 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2405 				if (!vnic->uc_list) {
2406 					rc = -ENOMEM;
2407 					goto out;
2408 				}
2409 			}
2410 		}
2411 
2412 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2413 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2414 			vnic->mc_list =
2415 				dma_alloc_coherent(&pdev->dev,
2416 						   vnic->mc_list_size,
2417 						   &vnic->mc_list_mapping,
2418 						   GFP_KERNEL);
2419 			if (!vnic->mc_list) {
2420 				rc = -ENOMEM;
2421 				goto out;
2422 			}
2423 		}
2424 
2425 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2426 			max_rings = bp->rx_nr_rings;
2427 		else
2428 			max_rings = 1;
2429 
2430 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2431 		if (!vnic->fw_grp_ids) {
2432 			rc = -ENOMEM;
2433 			goto out;
2434 		}
2435 
2436 		/* Allocate rss table and hash key */
2437 		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2438 						     &vnic->rss_table_dma_addr,
2439 						     GFP_KERNEL);
2440 		if (!vnic->rss_table) {
2441 			rc = -ENOMEM;
2442 			goto out;
2443 		}
2444 
2445 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2446 
2447 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2448 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2449 	}
2450 	return 0;
2451 
2452 out:
2453 	return rc;
2454 }
2455 
2456 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2457 {
2458 	struct pci_dev *pdev = bp->pdev;
2459 
2460 	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2461 			  bp->hwrm_cmd_resp_dma_addr);
2462 
2463 	bp->hwrm_cmd_resp_addr = NULL;
2464 	if (bp->hwrm_dbg_resp_addr) {
2465 		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2466 				  bp->hwrm_dbg_resp_addr,
2467 				  bp->hwrm_dbg_resp_dma_addr);
2468 
2469 		bp->hwrm_dbg_resp_addr = NULL;
2470 	}
2471 }
2472 
2473 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2474 {
2475 	struct pci_dev *pdev = bp->pdev;
2476 
2477 	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2478 						   &bp->hwrm_cmd_resp_dma_addr,
2479 						   GFP_KERNEL);
2480 	if (!bp->hwrm_cmd_resp_addr)
2481 		return -ENOMEM;
2482 	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2483 						    HWRM_DBG_REG_BUF_SIZE,
2484 						    &bp->hwrm_dbg_resp_dma_addr,
2485 						    GFP_KERNEL);
2486 	if (!bp->hwrm_dbg_resp_addr)
2487 		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2488 
2489 	return 0;
2490 }
2491 
2492 static void bnxt_free_stats(struct bnxt *bp)
2493 {
2494 	u32 size, i;
2495 	struct pci_dev *pdev = bp->pdev;
2496 
2497 	if (bp->hw_rx_port_stats) {
2498 		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2499 				  bp->hw_rx_port_stats,
2500 				  bp->hw_rx_port_stats_map);
2501 		bp->hw_rx_port_stats = NULL;
2502 		bp->flags &= ~BNXT_FLAG_PORT_STATS;
2503 	}
2504 
2505 	if (!bp->bnapi)
2506 		return;
2507 
2508 	size = sizeof(struct ctx_hw_stats);
2509 
2510 	for (i = 0; i < bp->cp_nr_rings; i++) {
2511 		struct bnxt_napi *bnapi = bp->bnapi[i];
2512 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2513 
2514 		if (cpr->hw_stats) {
2515 			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2516 					  cpr->hw_stats_map);
2517 			cpr->hw_stats = NULL;
2518 		}
2519 	}
2520 }
2521 
2522 static int bnxt_alloc_stats(struct bnxt *bp)
2523 {
2524 	u32 size, i;
2525 	struct pci_dev *pdev = bp->pdev;
2526 
2527 	size = sizeof(struct ctx_hw_stats);
2528 
2529 	for (i = 0; i < bp->cp_nr_rings; i++) {
2530 		struct bnxt_napi *bnapi = bp->bnapi[i];
2531 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2532 
2533 		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2534 						   &cpr->hw_stats_map,
2535 						   GFP_KERNEL);
2536 		if (!cpr->hw_stats)
2537 			return -ENOMEM;
2538 
2539 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2540 	}
2541 
2542 	if (BNXT_PF(bp)) {
2543 		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2544 					 sizeof(struct tx_port_stats) + 1024;
2545 
2546 		bp->hw_rx_port_stats =
2547 			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2548 					   &bp->hw_rx_port_stats_map,
2549 					   GFP_KERNEL);
2550 		if (!bp->hw_rx_port_stats)
2551 			return -ENOMEM;
2552 
2553 		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2554 				       512;
2555 		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2556 					   sizeof(struct rx_port_stats) + 512;
2557 		bp->flags |= BNXT_FLAG_PORT_STATS;
2558 	}
2559 	return 0;
2560 }
2561 
2562 static void bnxt_clear_ring_indices(struct bnxt *bp)
2563 {
2564 	int i;
2565 
2566 	if (!bp->bnapi)
2567 		return;
2568 
2569 	for (i = 0; i < bp->cp_nr_rings; i++) {
2570 		struct bnxt_napi *bnapi = bp->bnapi[i];
2571 		struct bnxt_cp_ring_info *cpr;
2572 		struct bnxt_rx_ring_info *rxr;
2573 		struct bnxt_tx_ring_info *txr;
2574 
2575 		if (!bnapi)
2576 			continue;
2577 
2578 		cpr = &bnapi->cp_ring;
2579 		cpr->cp_raw_cons = 0;
2580 
2581 		txr = bnapi->tx_ring;
2582 		if (txr) {
2583 			txr->tx_prod = 0;
2584 			txr->tx_cons = 0;
2585 		}
2586 
2587 		rxr = bnapi->rx_ring;
2588 		if (rxr) {
2589 			rxr->rx_prod = 0;
2590 			rxr->rx_agg_prod = 0;
2591 			rxr->rx_sw_agg_prod = 0;
2592 			rxr->rx_next_cons = 0;
2593 		}
2594 	}
2595 }
2596 
2597 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2598 {
2599 #ifdef CONFIG_RFS_ACCEL
2600 	int i;
2601 
2602 	/* Under rtnl_lock and all our NAPIs have been disabled.  It's
2603 	 * safe to delete the hash table.
2604 	 */
2605 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2606 		struct hlist_head *head;
2607 		struct hlist_node *tmp;
2608 		struct bnxt_ntuple_filter *fltr;
2609 
2610 		head = &bp->ntp_fltr_hash_tbl[i];
2611 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2612 			hlist_del(&fltr->hash);
2613 			kfree(fltr);
2614 		}
2615 	}
2616 	if (irq_reinit) {
2617 		kfree(bp->ntp_fltr_bmap);
2618 		bp->ntp_fltr_bmap = NULL;
2619 	}
2620 	bp->ntp_fltr_count = 0;
2621 #endif
2622 }
2623 
2624 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2625 {
2626 #ifdef CONFIG_RFS_ACCEL
2627 	int i, rc = 0;
2628 
2629 	if (!(bp->flags & BNXT_FLAG_RFS))
2630 		return 0;
2631 
2632 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2633 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2634 
2635 	bp->ntp_fltr_count = 0;
2636 	bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2637 				    GFP_KERNEL);
2638 
2639 	if (!bp->ntp_fltr_bmap)
2640 		rc = -ENOMEM;
2641 
2642 	return rc;
2643 #else
2644 	return 0;
2645 #endif
2646 }
2647 
2648 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2649 {
2650 	bnxt_free_vnic_attributes(bp);
2651 	bnxt_free_tx_rings(bp);
2652 	bnxt_free_rx_rings(bp);
2653 	bnxt_free_cp_rings(bp);
2654 	bnxt_free_ntp_fltrs(bp, irq_re_init);
2655 	if (irq_re_init) {
2656 		bnxt_free_stats(bp);
2657 		bnxt_free_ring_grps(bp);
2658 		bnxt_free_vnics(bp);
2659 		kfree(bp->tx_ring);
2660 		bp->tx_ring = NULL;
2661 		kfree(bp->rx_ring);
2662 		bp->rx_ring = NULL;
2663 		kfree(bp->bnapi);
2664 		bp->bnapi = NULL;
2665 	} else {
2666 		bnxt_clear_ring_indices(bp);
2667 	}
2668 }
2669 
2670 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2671 {
2672 	int i, j, rc, size, arr_size;
2673 	void *bnapi;
2674 
2675 	if (irq_re_init) {
2676 		/* Allocate bnapi mem pointer array and mem block for
2677 		 * all queues
2678 		 */
2679 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2680 				bp->cp_nr_rings);
2681 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2682 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2683 		if (!bnapi)
2684 			return -ENOMEM;
2685 
2686 		bp->bnapi = bnapi;
2687 		bnapi += arr_size;
2688 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2689 			bp->bnapi[i] = bnapi;
2690 			bp->bnapi[i]->index = i;
2691 			bp->bnapi[i]->bp = bp;
2692 		}
2693 
2694 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
2695 				      sizeof(struct bnxt_rx_ring_info),
2696 				      GFP_KERNEL);
2697 		if (!bp->rx_ring)
2698 			return -ENOMEM;
2699 
2700 		for (i = 0; i < bp->rx_nr_rings; i++) {
2701 			bp->rx_ring[i].bnapi = bp->bnapi[i];
2702 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2703 		}
2704 
2705 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
2706 				      sizeof(struct bnxt_tx_ring_info),
2707 				      GFP_KERNEL);
2708 		if (!bp->tx_ring)
2709 			return -ENOMEM;
2710 
2711 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2712 			j = 0;
2713 		else
2714 			j = bp->rx_nr_rings;
2715 
2716 		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2717 			bp->tx_ring[i].bnapi = bp->bnapi[j];
2718 			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
2719 		}
2720 
2721 		rc = bnxt_alloc_stats(bp);
2722 		if (rc)
2723 			goto alloc_mem_err;
2724 
2725 		rc = bnxt_alloc_ntp_fltrs(bp);
2726 		if (rc)
2727 			goto alloc_mem_err;
2728 
2729 		rc = bnxt_alloc_vnics(bp);
2730 		if (rc)
2731 			goto alloc_mem_err;
2732 	}
2733 
2734 	bnxt_init_ring_struct(bp);
2735 
2736 	rc = bnxt_alloc_rx_rings(bp);
2737 	if (rc)
2738 		goto alloc_mem_err;
2739 
2740 	rc = bnxt_alloc_tx_rings(bp);
2741 	if (rc)
2742 		goto alloc_mem_err;
2743 
2744 	rc = bnxt_alloc_cp_rings(bp);
2745 	if (rc)
2746 		goto alloc_mem_err;
2747 
2748 	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2749 				  BNXT_VNIC_UCAST_FLAG;
2750 	rc = bnxt_alloc_vnic_attributes(bp);
2751 	if (rc)
2752 		goto alloc_mem_err;
2753 	return 0;
2754 
2755 alloc_mem_err:
2756 	bnxt_free_mem(bp, true);
2757 	return rc;
2758 }
2759 
2760 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2761 			    u16 cmpl_ring, u16 target_id)
2762 {
2763 	struct input *req = request;
2764 
2765 	req->req_type = cpu_to_le16(req_type);
2766 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
2767 	req->target_id = cpu_to_le16(target_id);
2768 	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2769 }
2770 
2771 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
2772 				 int timeout, bool silent)
2773 {
2774 	int i, intr_process, rc, tmo_count;
2775 	struct input *req = msg;
2776 	u32 *data = msg;
2777 	__le32 *resp_len, *valid;
2778 	u16 cp_ring_id, len = 0;
2779 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2780 
2781 	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
2782 	memset(resp, 0, PAGE_SIZE);
2783 	cp_ring_id = le16_to_cpu(req->cmpl_ring);
2784 	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2785 
2786 	/* Write request msg to hwrm channel */
2787 	__iowrite32_copy(bp->bar0, data, msg_len / 4);
2788 
2789 	for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
2790 		writel(0, bp->bar0 + i);
2791 
2792 	/* currently supports only one outstanding message */
2793 	if (intr_process)
2794 		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
2795 
2796 	/* Ring channel doorbell */
2797 	writel(1, bp->bar0 + 0x100);
2798 
2799 	if (!timeout)
2800 		timeout = DFLT_HWRM_CMD_TIMEOUT;
2801 
2802 	i = 0;
2803 	tmo_count = timeout * 40;
2804 	if (intr_process) {
2805 		/* Wait until hwrm response cmpl interrupt is processed */
2806 		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2807 		       i++ < tmo_count) {
2808 			usleep_range(25, 40);
2809 		}
2810 
2811 		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2812 			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2813 				   le16_to_cpu(req->req_type));
2814 			return -1;
2815 		}
2816 	} else {
2817 		/* Check if response len is updated */
2818 		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2819 		for (i = 0; i < tmo_count; i++) {
2820 			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2821 			      HWRM_RESP_LEN_SFT;
2822 			if (len)
2823 				break;
2824 			usleep_range(25, 40);
2825 		}
2826 
2827 		if (i >= tmo_count) {
2828 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2829 				   timeout, le16_to_cpu(req->req_type),
2830 				   le16_to_cpu(req->seq_id), len);
2831 			return -1;
2832 		}
2833 
2834 		/* Last word of resp contains valid bit */
2835 		valid = bp->hwrm_cmd_resp_addr + len - 4;
2836 		for (i = 0; i < 5; i++) {
2837 			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2838 				break;
2839 			udelay(1);
2840 		}
2841 
2842 		if (i >= 5) {
2843 			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2844 				   timeout, le16_to_cpu(req->req_type),
2845 				   le16_to_cpu(req->seq_id), len, *valid);
2846 			return -1;
2847 		}
2848 	}
2849 
2850 	rc = le16_to_cpu(resp->error_code);
2851 	if (rc && !silent)
2852 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2853 			   le16_to_cpu(resp->req_type),
2854 			   le16_to_cpu(resp->seq_id), rc);
2855 	return rc;
2856 }
2857 
2858 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2859 {
2860 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
2861 }
2862 
2863 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2864 {
2865 	int rc;
2866 
2867 	mutex_lock(&bp->hwrm_cmd_lock);
2868 	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2869 	mutex_unlock(&bp->hwrm_cmd_lock);
2870 	return rc;
2871 }
2872 
2873 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
2874 			     int timeout)
2875 {
2876 	int rc;
2877 
2878 	mutex_lock(&bp->hwrm_cmd_lock);
2879 	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
2880 	mutex_unlock(&bp->hwrm_cmd_lock);
2881 	return rc;
2882 }
2883 
2884 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2885 {
2886 	struct hwrm_func_drv_rgtr_input req = {0};
2887 	int i;
2888 	DECLARE_BITMAP(async_events_bmap, 256);
2889 	u32 *events = (u32 *)async_events_bmap;
2890 
2891 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2892 
2893 	req.enables =
2894 		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2895 			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
2896 			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2897 
2898 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
2899 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
2900 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
2901 
2902 	for (i = 0; i < 8; i++)
2903 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
2904 
2905 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
2906 	req.ver_maj = DRV_VER_MAJ;
2907 	req.ver_min = DRV_VER_MIN;
2908 	req.ver_upd = DRV_VER_UPD;
2909 
2910 	if (BNXT_PF(bp)) {
2911 		DECLARE_BITMAP(vf_req_snif_bmap, 256);
2912 		u32 *data = (u32 *)vf_req_snif_bmap;
2913 
2914 		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2915 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2916 			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2917 
2918 		for (i = 0; i < 8; i++)
2919 			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2920 
2921 		req.enables |=
2922 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2923 	}
2924 
2925 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2926 }
2927 
2928 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2929 {
2930 	struct hwrm_func_drv_unrgtr_input req = {0};
2931 
2932 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2933 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2934 }
2935 
2936 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2937 {
2938 	u32 rc = 0;
2939 	struct hwrm_tunnel_dst_port_free_input req = {0};
2940 
2941 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2942 	req.tunnel_type = tunnel_type;
2943 
2944 	switch (tunnel_type) {
2945 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2946 		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2947 		break;
2948 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2949 		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2950 		break;
2951 	default:
2952 		break;
2953 	}
2954 
2955 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2956 	if (rc)
2957 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2958 			   rc);
2959 	return rc;
2960 }
2961 
2962 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2963 					   u8 tunnel_type)
2964 {
2965 	u32 rc = 0;
2966 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
2967 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2968 
2969 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2970 
2971 	req.tunnel_type = tunnel_type;
2972 	req.tunnel_dst_port_val = port;
2973 
2974 	mutex_lock(&bp->hwrm_cmd_lock);
2975 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2976 	if (rc) {
2977 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2978 			   rc);
2979 		goto err_out;
2980 	}
2981 
2982 	if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2983 		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2984 
2985 	else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2986 		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2987 err_out:
2988 	mutex_unlock(&bp->hwrm_cmd_lock);
2989 	return rc;
2990 }
2991 
2992 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2993 {
2994 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2995 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2996 
2997 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2998 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2999 
3000 	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3001 	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3002 	req.mask = cpu_to_le32(vnic->rx_mask);
3003 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3004 }
3005 
3006 #ifdef CONFIG_RFS_ACCEL
3007 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3008 					    struct bnxt_ntuple_filter *fltr)
3009 {
3010 	struct hwrm_cfa_ntuple_filter_free_input req = {0};
3011 
3012 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3013 	req.ntuple_filter_id = fltr->filter_id;
3014 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3015 }
3016 
3017 #define BNXT_NTP_FLTR_FLAGS					\
3018 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
3019 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
3020 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
3021 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
3022 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
3023 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
3024 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
3025 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
3026 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
3027 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
3028 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
3029 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
3030 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
3031 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3032 
3033 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3034 					     struct bnxt_ntuple_filter *fltr)
3035 {
3036 	int rc = 0;
3037 	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3038 	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3039 		bp->hwrm_cmd_resp_addr;
3040 	struct flow_keys *keys = &fltr->fkeys;
3041 	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3042 
3043 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3044 	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
3045 
3046 	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3047 
3048 	req.ethertype = htons(ETH_P_IP);
3049 	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3050 	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3051 	req.ip_protocol = keys->basic.ip_proto;
3052 
3053 	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3054 	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3055 	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3056 	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3057 
3058 	req.src_port = keys->ports.src;
3059 	req.src_port_mask = cpu_to_be16(0xffff);
3060 	req.dst_port = keys->ports.dst;
3061 	req.dst_port_mask = cpu_to_be16(0xffff);
3062 
3063 	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3064 	mutex_lock(&bp->hwrm_cmd_lock);
3065 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3066 	if (!rc)
3067 		fltr->filter_id = resp->ntuple_filter_id;
3068 	mutex_unlock(&bp->hwrm_cmd_lock);
3069 	return rc;
3070 }
3071 #endif
3072 
3073 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3074 				     u8 *mac_addr)
3075 {
3076 	u32 rc = 0;
3077 	struct hwrm_cfa_l2_filter_alloc_input req = {0};
3078 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3079 
3080 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3081 	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
3082 				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3083 	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3084 	req.enables =
3085 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3086 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3087 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3088 	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3089 	req.l2_addr_mask[0] = 0xff;
3090 	req.l2_addr_mask[1] = 0xff;
3091 	req.l2_addr_mask[2] = 0xff;
3092 	req.l2_addr_mask[3] = 0xff;
3093 	req.l2_addr_mask[4] = 0xff;
3094 	req.l2_addr_mask[5] = 0xff;
3095 
3096 	mutex_lock(&bp->hwrm_cmd_lock);
3097 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3098 	if (!rc)
3099 		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3100 							resp->l2_filter_id;
3101 	mutex_unlock(&bp->hwrm_cmd_lock);
3102 	return rc;
3103 }
3104 
3105 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3106 {
3107 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3108 	int rc = 0;
3109 
3110 	/* Any associated ntuple filters will also be cleared by firmware. */
3111 	mutex_lock(&bp->hwrm_cmd_lock);
3112 	for (i = 0; i < num_of_vnics; i++) {
3113 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3114 
3115 		for (j = 0; j < vnic->uc_filter_count; j++) {
3116 			struct hwrm_cfa_l2_filter_free_input req = {0};
3117 
3118 			bnxt_hwrm_cmd_hdr_init(bp, &req,
3119 					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
3120 
3121 			req.l2_filter_id = vnic->fw_l2_filter_id[j];
3122 
3123 			rc = _hwrm_send_message(bp, &req, sizeof(req),
3124 						HWRM_CMD_TIMEOUT);
3125 		}
3126 		vnic->uc_filter_count = 0;
3127 	}
3128 	mutex_unlock(&bp->hwrm_cmd_lock);
3129 
3130 	return rc;
3131 }
3132 
3133 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3134 {
3135 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3136 	struct hwrm_vnic_tpa_cfg_input req = {0};
3137 
3138 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3139 
3140 	if (tpa_flags) {
3141 		u16 mss = bp->dev->mtu - 40;
3142 		u32 nsegs, n, segs = 0, flags;
3143 
3144 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3145 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3146 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3147 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3148 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3149 		if (tpa_flags & BNXT_FLAG_GRO)
3150 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3151 
3152 		req.flags = cpu_to_le32(flags);
3153 
3154 		req.enables =
3155 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3156 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3157 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3158 
3159 		/* Number of segs are log2 units, and first packet is not
3160 		 * included as part of this units.
3161 		 */
3162 		if (mss <= BNXT_RX_PAGE_SIZE) {
3163 			n = BNXT_RX_PAGE_SIZE / mss;
3164 			nsegs = (MAX_SKB_FRAGS - 1) * n;
3165 		} else {
3166 			n = mss / BNXT_RX_PAGE_SIZE;
3167 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
3168 				n++;
3169 			nsegs = (MAX_SKB_FRAGS - n) / n;
3170 		}
3171 
3172 		segs = ilog2(nsegs);
3173 		req.max_agg_segs = cpu_to_le16(segs);
3174 		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3175 
3176 		req.min_agg_len = cpu_to_le32(512);
3177 	}
3178 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3179 
3180 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3181 }
3182 
3183 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3184 {
3185 	u32 i, j, max_rings;
3186 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3187 	struct hwrm_vnic_rss_cfg_input req = {0};
3188 
3189 	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
3190 		return 0;
3191 
3192 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3193 	if (set_rss) {
3194 		vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
3195 				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
3196 				 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
3197 				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
3198 
3199 		req.hash_type = cpu_to_le32(vnic->hash_type);
3200 
3201 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3202 			max_rings = bp->rx_nr_rings;
3203 		else
3204 			max_rings = 1;
3205 
3206 		/* Fill the RSS indirection table with ring group ids */
3207 		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3208 			if (j == max_rings)
3209 				j = 0;
3210 			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3211 		}
3212 
3213 		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3214 		req.hash_key_tbl_addr =
3215 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
3216 	}
3217 	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3218 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3219 }
3220 
3221 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3222 {
3223 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3224 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
3225 
3226 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3227 	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3228 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3229 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3230 	req.enables =
3231 		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3232 			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3233 	/* thresholds not implemented in firmware yet */
3234 	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3235 	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3236 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3237 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3238 }
3239 
3240 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3241 {
3242 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3243 
3244 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3245 	req.rss_cos_lb_ctx_id =
3246 		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3247 
3248 	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3249 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3250 }
3251 
3252 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3253 {
3254 	int i;
3255 
3256 	for (i = 0; i < bp->nr_vnics; i++) {
3257 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3258 
3259 		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3260 			bnxt_hwrm_vnic_ctx_free_one(bp, i);
3261 	}
3262 	bp->rsscos_nr_ctxs = 0;
3263 }
3264 
3265 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3266 {
3267 	int rc;
3268 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3269 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3270 						bp->hwrm_cmd_resp_addr;
3271 
3272 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3273 			       -1);
3274 
3275 	mutex_lock(&bp->hwrm_cmd_lock);
3276 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3277 	if (!rc)
3278 		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3279 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
3280 	mutex_unlock(&bp->hwrm_cmd_lock);
3281 
3282 	return rc;
3283 }
3284 
3285 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3286 {
3287 	unsigned int ring = 0, grp_idx;
3288 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3289 	struct hwrm_vnic_cfg_input req = {0};
3290 
3291 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3292 	/* Only RSS support for now TBD: COS & LB */
3293 	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3294 				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
3295 	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3296 	req.cos_rule = cpu_to_le16(0xffff);
3297 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3298 		ring = 0;
3299 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3300 		ring = vnic_id - 1;
3301 
3302 	grp_idx = bp->rx_ring[ring].bnapi->index;
3303 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3304 	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3305 
3306 	req.lb_rule = cpu_to_le16(0xffff);
3307 	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3308 			      VLAN_HLEN);
3309 
3310 	if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3311 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3312 
3313 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3314 }
3315 
3316 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3317 {
3318 	u32 rc = 0;
3319 
3320 	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3321 		struct hwrm_vnic_free_input req = {0};
3322 
3323 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3324 		req.vnic_id =
3325 			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3326 
3327 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3328 		if (rc)
3329 			return rc;
3330 		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3331 	}
3332 	return rc;
3333 }
3334 
3335 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3336 {
3337 	u16 i;
3338 
3339 	for (i = 0; i < bp->nr_vnics; i++)
3340 		bnxt_hwrm_vnic_free_one(bp, i);
3341 }
3342 
3343 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3344 				unsigned int start_rx_ring_idx,
3345 				unsigned int nr_rings)
3346 {
3347 	int rc = 0;
3348 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3349 	struct hwrm_vnic_alloc_input req = {0};
3350 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3351 
3352 	/* map ring groups to this vnic */
3353 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3354 		grp_idx = bp->rx_ring[i].bnapi->index;
3355 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3356 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3357 				   j, nr_rings);
3358 			break;
3359 		}
3360 		bp->vnic_info[vnic_id].fw_grp_ids[j] =
3361 					bp->grp_info[grp_idx].fw_grp_id;
3362 	}
3363 
3364 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3365 	if (vnic_id == 0)
3366 		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3367 
3368 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3369 
3370 	mutex_lock(&bp->hwrm_cmd_lock);
3371 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3372 	if (!rc)
3373 		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3374 	mutex_unlock(&bp->hwrm_cmd_lock);
3375 	return rc;
3376 }
3377 
3378 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3379 {
3380 	u16 i;
3381 	u32 rc = 0;
3382 
3383 	mutex_lock(&bp->hwrm_cmd_lock);
3384 	for (i = 0; i < bp->rx_nr_rings; i++) {
3385 		struct hwrm_ring_grp_alloc_input req = {0};
3386 		struct hwrm_ring_grp_alloc_output *resp =
3387 					bp->hwrm_cmd_resp_addr;
3388 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
3389 
3390 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3391 
3392 		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3393 		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3394 		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3395 		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
3396 
3397 		rc = _hwrm_send_message(bp, &req, sizeof(req),
3398 					HWRM_CMD_TIMEOUT);
3399 		if (rc)
3400 			break;
3401 
3402 		bp->grp_info[grp_idx].fw_grp_id =
3403 			le32_to_cpu(resp->ring_group_id);
3404 	}
3405 	mutex_unlock(&bp->hwrm_cmd_lock);
3406 	return rc;
3407 }
3408 
3409 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3410 {
3411 	u16 i;
3412 	u32 rc = 0;
3413 	struct hwrm_ring_grp_free_input req = {0};
3414 
3415 	if (!bp->grp_info)
3416 		return 0;
3417 
3418 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3419 
3420 	mutex_lock(&bp->hwrm_cmd_lock);
3421 	for (i = 0; i < bp->cp_nr_rings; i++) {
3422 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3423 			continue;
3424 		req.ring_group_id =
3425 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
3426 
3427 		rc = _hwrm_send_message(bp, &req, sizeof(req),
3428 					HWRM_CMD_TIMEOUT);
3429 		if (rc)
3430 			break;
3431 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3432 	}
3433 	mutex_unlock(&bp->hwrm_cmd_lock);
3434 	return rc;
3435 }
3436 
3437 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3438 				    struct bnxt_ring_struct *ring,
3439 				    u32 ring_type, u32 map_index,
3440 				    u32 stats_ctx_id)
3441 {
3442 	int rc = 0, err = 0;
3443 	struct hwrm_ring_alloc_input req = {0};
3444 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3445 	u16 ring_id;
3446 
3447 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3448 
3449 	req.enables = 0;
3450 	if (ring->nr_pages > 1) {
3451 		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3452 		/* Page size is in log2 units */
3453 		req.page_size = BNXT_PAGE_SHIFT;
3454 		req.page_tbl_depth = 1;
3455 	} else {
3456 		req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
3457 	}
3458 	req.fbo = 0;
3459 	/* Association of ring index with doorbell index and MSIX number */
3460 	req.logical_id = cpu_to_le16(map_index);
3461 
3462 	switch (ring_type) {
3463 	case HWRM_RING_ALLOC_TX:
3464 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3465 		/* Association of transmit ring with completion ring */
3466 		req.cmpl_ring_id =
3467 			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3468 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3469 		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3470 		req.queue_id = cpu_to_le16(ring->queue_id);
3471 		break;
3472 	case HWRM_RING_ALLOC_RX:
3473 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3474 		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3475 		break;
3476 	case HWRM_RING_ALLOC_AGG:
3477 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3478 		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3479 		break;
3480 	case HWRM_RING_ALLOC_CMPL:
3481 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3482 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3483 		if (bp->flags & BNXT_FLAG_USING_MSIX)
3484 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3485 		break;
3486 	default:
3487 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3488 			   ring_type);
3489 		return -1;
3490 	}
3491 
3492 	mutex_lock(&bp->hwrm_cmd_lock);
3493 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3494 	err = le16_to_cpu(resp->error_code);
3495 	ring_id = le16_to_cpu(resp->ring_id);
3496 	mutex_unlock(&bp->hwrm_cmd_lock);
3497 
3498 	if (rc || err) {
3499 		switch (ring_type) {
3500 		case RING_FREE_REQ_RING_TYPE_CMPL:
3501 			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3502 				   rc, err);
3503 			return -1;
3504 
3505 		case RING_FREE_REQ_RING_TYPE_RX:
3506 			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3507 				   rc, err);
3508 			return -1;
3509 
3510 		case RING_FREE_REQ_RING_TYPE_TX:
3511 			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3512 				   rc, err);
3513 			return -1;
3514 
3515 		default:
3516 			netdev_err(bp->dev, "Invalid ring\n");
3517 			return -1;
3518 		}
3519 	}
3520 	ring->fw_ring_id = ring_id;
3521 	return rc;
3522 }
3523 
3524 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3525 {
3526 	int i, rc = 0;
3527 
3528 	for (i = 0; i < bp->cp_nr_rings; i++) {
3529 		struct bnxt_napi *bnapi = bp->bnapi[i];
3530 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3531 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3532 
3533 		cpr->cp_doorbell = bp->bar1 + i * 0x80;
3534 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3535 					      INVALID_STATS_CTX_ID);
3536 		if (rc)
3537 			goto err_out;
3538 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3539 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3540 	}
3541 
3542 	for (i = 0; i < bp->tx_nr_rings; i++) {
3543 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3544 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3545 		u32 map_idx = txr->bnapi->index;
3546 		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
3547 
3548 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3549 					      map_idx, fw_stats_ctx);
3550 		if (rc)
3551 			goto err_out;
3552 		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
3553 	}
3554 
3555 	for (i = 0; i < bp->rx_nr_rings; i++) {
3556 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3557 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3558 		u32 map_idx = rxr->bnapi->index;
3559 
3560 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3561 					      map_idx, INVALID_STATS_CTX_ID);
3562 		if (rc)
3563 			goto err_out;
3564 		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
3565 		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3566 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3567 	}
3568 
3569 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3570 		for (i = 0; i < bp->rx_nr_rings; i++) {
3571 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3572 			struct bnxt_ring_struct *ring =
3573 						&rxr->rx_agg_ring_struct;
3574 			u32 grp_idx = rxr->bnapi->index;
3575 			u32 map_idx = grp_idx + bp->rx_nr_rings;
3576 
3577 			rc = hwrm_ring_alloc_send_msg(bp, ring,
3578 						      HWRM_RING_ALLOC_AGG,
3579 						      map_idx,
3580 						      INVALID_STATS_CTX_ID);
3581 			if (rc)
3582 				goto err_out;
3583 
3584 			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
3585 			writel(DB_KEY_RX | rxr->rx_agg_prod,
3586 			       rxr->rx_agg_doorbell);
3587 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
3588 		}
3589 	}
3590 err_out:
3591 	return rc;
3592 }
3593 
3594 static int hwrm_ring_free_send_msg(struct bnxt *bp,
3595 				   struct bnxt_ring_struct *ring,
3596 				   u32 ring_type, int cmpl_ring_id)
3597 {
3598 	int rc;
3599 	struct hwrm_ring_free_input req = {0};
3600 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3601 	u16 error_code;
3602 
3603 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
3604 	req.ring_type = ring_type;
3605 	req.ring_id = cpu_to_le16(ring->fw_ring_id);
3606 
3607 	mutex_lock(&bp->hwrm_cmd_lock);
3608 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3609 	error_code = le16_to_cpu(resp->error_code);
3610 	mutex_unlock(&bp->hwrm_cmd_lock);
3611 
3612 	if (rc || error_code) {
3613 		switch (ring_type) {
3614 		case RING_FREE_REQ_RING_TYPE_CMPL:
3615 			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3616 				   rc);
3617 			return rc;
3618 		case RING_FREE_REQ_RING_TYPE_RX:
3619 			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3620 				   rc);
3621 			return rc;
3622 		case RING_FREE_REQ_RING_TYPE_TX:
3623 			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3624 				   rc);
3625 			return rc;
3626 		default:
3627 			netdev_err(bp->dev, "Invalid ring\n");
3628 			return -1;
3629 		}
3630 	}
3631 	return 0;
3632 }
3633 
3634 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3635 {
3636 	int i;
3637 
3638 	if (!bp->bnapi)
3639 		return;
3640 
3641 	for (i = 0; i < bp->tx_nr_rings; i++) {
3642 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3643 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3644 		u32 grp_idx = txr->bnapi->index;
3645 		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3646 
3647 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3648 			hwrm_ring_free_send_msg(bp, ring,
3649 						RING_FREE_REQ_RING_TYPE_TX,
3650 						close_path ? cmpl_ring_id :
3651 						INVALID_HW_RING_ID);
3652 			ring->fw_ring_id = INVALID_HW_RING_ID;
3653 		}
3654 	}
3655 
3656 	for (i = 0; i < bp->rx_nr_rings; i++) {
3657 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3658 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3659 		u32 grp_idx = rxr->bnapi->index;
3660 		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3661 
3662 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3663 			hwrm_ring_free_send_msg(bp, ring,
3664 						RING_FREE_REQ_RING_TYPE_RX,
3665 						close_path ? cmpl_ring_id :
3666 						INVALID_HW_RING_ID);
3667 			ring->fw_ring_id = INVALID_HW_RING_ID;
3668 			bp->grp_info[grp_idx].rx_fw_ring_id =
3669 				INVALID_HW_RING_ID;
3670 		}
3671 	}
3672 
3673 	for (i = 0; i < bp->rx_nr_rings; i++) {
3674 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3675 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3676 		u32 grp_idx = rxr->bnapi->index;
3677 		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
3678 
3679 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3680 			hwrm_ring_free_send_msg(bp, ring,
3681 						RING_FREE_REQ_RING_TYPE_RX,
3682 						close_path ? cmpl_ring_id :
3683 						INVALID_HW_RING_ID);
3684 			ring->fw_ring_id = INVALID_HW_RING_ID;
3685 			bp->grp_info[grp_idx].agg_fw_ring_id =
3686 				INVALID_HW_RING_ID;
3687 		}
3688 	}
3689 
3690 	for (i = 0; i < bp->cp_nr_rings; i++) {
3691 		struct bnxt_napi *bnapi = bp->bnapi[i];
3692 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3693 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3694 
3695 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3696 			hwrm_ring_free_send_msg(bp, ring,
3697 						RING_FREE_REQ_RING_TYPE_CMPL,
3698 						INVALID_HW_RING_ID);
3699 			ring->fw_ring_id = INVALID_HW_RING_ID;
3700 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3701 		}
3702 	}
3703 }
3704 
3705 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
3706 	u32 buf_tmrs, u16 flags,
3707 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3708 {
3709 	req->flags = cpu_to_le16(flags);
3710 	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
3711 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
3712 	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
3713 	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
3714 	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
3715 	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
3716 	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
3717 	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
3718 }
3719 
3720 int bnxt_hwrm_set_coal(struct bnxt *bp)
3721 {
3722 	int i, rc = 0;
3723 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
3724 							   req_tx = {0}, *req;
3725 	u16 max_buf, max_buf_irq;
3726 	u16 buf_tmr, buf_tmr_irq;
3727 	u32 flags;
3728 
3729 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
3730 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3731 	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
3732 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
3733 
3734 	/* Each rx completion (2 records) should be DMAed immediately.
3735 	 * DMA 1/4 of the completion buffers at a time.
3736 	 */
3737 	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
3738 	/* max_buf must not be zero */
3739 	max_buf = clamp_t(u16, max_buf, 1, 63);
3740 	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
3741 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
3742 	/* buf timer set to 1/4 of interrupt timer */
3743 	buf_tmr = max_t(u16, buf_tmr / 4, 1);
3744 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
3745 	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3746 
3747 	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3748 
3749 	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
3750 	 * if coal_ticks is less than 25 us.
3751 	 */
3752 	if (bp->rx_coal_ticks < 25)
3753 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3754 
3755 	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3756 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
3757 
3758 	/* max_buf must not be zero */
3759 	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
3760 	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
3761 	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
3762 	/* buf timer set to 1/4 of interrupt timer */
3763 	buf_tmr = max_t(u16, buf_tmr / 4, 1);
3764 	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
3765 	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
3766 
3767 	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3768 	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
3769 				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
3770 
3771 	mutex_lock(&bp->hwrm_cmd_lock);
3772 	for (i = 0; i < bp->cp_nr_rings; i++) {
3773 		struct bnxt_napi *bnapi = bp->bnapi[i];
3774 
3775 		req = &req_rx;
3776 		if (!bnapi->rx_ring)
3777 			req = &req_tx;
3778 		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3779 
3780 		rc = _hwrm_send_message(bp, req, sizeof(*req),
3781 					HWRM_CMD_TIMEOUT);
3782 		if (rc)
3783 			break;
3784 	}
3785 	mutex_unlock(&bp->hwrm_cmd_lock);
3786 	return rc;
3787 }
3788 
3789 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3790 {
3791 	int rc = 0, i;
3792 	struct hwrm_stat_ctx_free_input req = {0};
3793 
3794 	if (!bp->bnapi)
3795 		return 0;
3796 
3797 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3798 
3799 	mutex_lock(&bp->hwrm_cmd_lock);
3800 	for (i = 0; i < bp->cp_nr_rings; i++) {
3801 		struct bnxt_napi *bnapi = bp->bnapi[i];
3802 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3803 
3804 		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3805 			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3806 
3807 			rc = _hwrm_send_message(bp, &req, sizeof(req),
3808 						HWRM_CMD_TIMEOUT);
3809 			if (rc)
3810 				break;
3811 
3812 			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3813 		}
3814 	}
3815 	mutex_unlock(&bp->hwrm_cmd_lock);
3816 	return rc;
3817 }
3818 
3819 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3820 {
3821 	int rc = 0, i;
3822 	struct hwrm_stat_ctx_alloc_input req = {0};
3823 	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3824 
3825 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3826 
3827 	req.update_period_ms = cpu_to_le32(1000);
3828 
3829 	mutex_lock(&bp->hwrm_cmd_lock);
3830 	for (i = 0; i < bp->cp_nr_rings; i++) {
3831 		struct bnxt_napi *bnapi = bp->bnapi[i];
3832 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3833 
3834 		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3835 
3836 		rc = _hwrm_send_message(bp, &req, sizeof(req),
3837 					HWRM_CMD_TIMEOUT);
3838 		if (rc)
3839 			break;
3840 
3841 		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3842 
3843 		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3844 	}
3845 	mutex_unlock(&bp->hwrm_cmd_lock);
3846 	return 0;
3847 }
3848 
3849 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3850 {
3851 	int rc = 0;
3852 	struct hwrm_func_qcaps_input req = {0};
3853 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3854 
3855 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3856 	req.fid = cpu_to_le16(0xffff);
3857 
3858 	mutex_lock(&bp->hwrm_cmd_lock);
3859 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3860 	if (rc)
3861 		goto hwrm_func_qcaps_exit;
3862 
3863 	if (BNXT_PF(bp)) {
3864 		struct bnxt_pf_info *pf = &bp->pf;
3865 
3866 		pf->fw_fid = le16_to_cpu(resp->fid);
3867 		pf->port_id = le16_to_cpu(resp->port_id);
3868 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
3869 		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3870 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3871 		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3872 		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3873 		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3874 		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3875 		if (!pf->max_hw_ring_grps)
3876 			pf->max_hw_ring_grps = pf->max_tx_rings;
3877 		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3878 		pf->max_vnics = le16_to_cpu(resp->max_vnics);
3879 		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3880 		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3881 		pf->max_vfs = le16_to_cpu(resp->max_vfs);
3882 		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3883 		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3884 		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3885 		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3886 		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3887 		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3888 	} else {
3889 #ifdef CONFIG_BNXT_SRIOV
3890 		struct bnxt_vf_info *vf = &bp->vf;
3891 
3892 		vf->fw_fid = le16_to_cpu(resp->fid);
3893 		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
3894 		if (is_valid_ether_addr(vf->mac_addr))
3895 			/* overwrite netdev dev_adr with admin VF MAC */
3896 			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3897 		else
3898 			random_ether_addr(bp->dev->dev_addr);
3899 
3900 		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3901 		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3902 		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3903 		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3904 		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3905 		if (!vf->max_hw_ring_grps)
3906 			vf->max_hw_ring_grps = vf->max_tx_rings;
3907 		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3908 		vf->max_vnics = le16_to_cpu(resp->max_vnics);
3909 		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3910 #endif
3911 	}
3912 
3913 	bp->tx_push_thresh = 0;
3914 	if (resp->flags &
3915 	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3916 		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3917 
3918 hwrm_func_qcaps_exit:
3919 	mutex_unlock(&bp->hwrm_cmd_lock);
3920 	return rc;
3921 }
3922 
3923 static int bnxt_hwrm_func_reset(struct bnxt *bp)
3924 {
3925 	struct hwrm_func_reset_input req = {0};
3926 
3927 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3928 	req.enables = 0;
3929 
3930 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3931 }
3932 
3933 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3934 {
3935 	int rc = 0;
3936 	struct hwrm_queue_qportcfg_input req = {0};
3937 	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3938 	u8 i, *qptr;
3939 
3940 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3941 
3942 	mutex_lock(&bp->hwrm_cmd_lock);
3943 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3944 	if (rc)
3945 		goto qportcfg_exit;
3946 
3947 	if (!resp->max_configurable_queues) {
3948 		rc = -EINVAL;
3949 		goto qportcfg_exit;
3950 	}
3951 	bp->max_tc = resp->max_configurable_queues;
3952 	if (bp->max_tc > BNXT_MAX_QUEUE)
3953 		bp->max_tc = BNXT_MAX_QUEUE;
3954 
3955 	qptr = &resp->queue_id0;
3956 	for (i = 0; i < bp->max_tc; i++) {
3957 		bp->q_info[i].queue_id = *qptr++;
3958 		bp->q_info[i].queue_profile = *qptr++;
3959 	}
3960 
3961 qportcfg_exit:
3962 	mutex_unlock(&bp->hwrm_cmd_lock);
3963 	return rc;
3964 }
3965 
3966 static int bnxt_hwrm_ver_get(struct bnxt *bp)
3967 {
3968 	int rc;
3969 	struct hwrm_ver_get_input req = {0};
3970 	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3971 
3972 	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
3973 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3974 	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3975 	req.hwrm_intf_min = HWRM_VERSION_MINOR;
3976 	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3977 	mutex_lock(&bp->hwrm_cmd_lock);
3978 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3979 	if (rc)
3980 		goto hwrm_ver_get_exit;
3981 
3982 	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3983 
3984 	bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
3985 			     resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
3986 	if (resp->hwrm_intf_maj < 1) {
3987 		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
3988 			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
3989 			    resp->hwrm_intf_upd);
3990 		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
3991 	}
3992 	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
3993 		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3994 		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3995 
3996 	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
3997 	if (!bp->hwrm_cmd_timeout)
3998 		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
3999 
4000 	if (resp->hwrm_intf_maj >= 1)
4001 		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4002 
4003 hwrm_ver_get_exit:
4004 	mutex_unlock(&bp->hwrm_cmd_lock);
4005 	return rc;
4006 }
4007 
4008 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4009 {
4010 	int rc;
4011 	struct bnxt_pf_info *pf = &bp->pf;
4012 	struct hwrm_port_qstats_input req = {0};
4013 
4014 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4015 		return 0;
4016 
4017 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4018 	req.port_id = cpu_to_le16(pf->port_id);
4019 	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4020 	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4021 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4022 	return rc;
4023 }
4024 
4025 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4026 {
4027 	if (bp->vxlan_port_cnt) {
4028 		bnxt_hwrm_tunnel_dst_port_free(
4029 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4030 	}
4031 	bp->vxlan_port_cnt = 0;
4032 	if (bp->nge_port_cnt) {
4033 		bnxt_hwrm_tunnel_dst_port_free(
4034 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4035 	}
4036 	bp->nge_port_cnt = 0;
4037 }
4038 
4039 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4040 {
4041 	int rc, i;
4042 	u32 tpa_flags = 0;
4043 
4044 	if (set_tpa)
4045 		tpa_flags = bp->flags & BNXT_FLAG_TPA;
4046 	for (i = 0; i < bp->nr_vnics; i++) {
4047 		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4048 		if (rc) {
4049 			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4050 				   rc, i);
4051 			return rc;
4052 		}
4053 	}
4054 	return 0;
4055 }
4056 
4057 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4058 {
4059 	int i;
4060 
4061 	for (i = 0; i < bp->nr_vnics; i++)
4062 		bnxt_hwrm_vnic_set_rss(bp, i, false);
4063 }
4064 
4065 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4066 				    bool irq_re_init)
4067 {
4068 	if (bp->vnic_info) {
4069 		bnxt_hwrm_clear_vnic_filter(bp);
4070 		/* clear all RSS setting before free vnic ctx */
4071 		bnxt_hwrm_clear_vnic_rss(bp);
4072 		bnxt_hwrm_vnic_ctx_free(bp);
4073 		/* before free the vnic, undo the vnic tpa settings */
4074 		if (bp->flags & BNXT_FLAG_TPA)
4075 			bnxt_set_tpa(bp, false);
4076 		bnxt_hwrm_vnic_free(bp);
4077 	}
4078 	bnxt_hwrm_ring_free(bp, close_path);
4079 	bnxt_hwrm_ring_grp_free(bp);
4080 	if (irq_re_init) {
4081 		bnxt_hwrm_stat_ctx_free(bp);
4082 		bnxt_hwrm_free_tunnel_ports(bp);
4083 	}
4084 }
4085 
4086 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4087 {
4088 	int rc;
4089 
4090 	/* allocate context for vnic */
4091 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
4092 	if (rc) {
4093 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4094 			   vnic_id, rc);
4095 		goto vnic_setup_err;
4096 	}
4097 	bp->rsscos_nr_ctxs++;
4098 
4099 	/* configure default vnic, ring grp */
4100 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4101 	if (rc) {
4102 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4103 			   vnic_id, rc);
4104 		goto vnic_setup_err;
4105 	}
4106 
4107 	/* Enable RSS hashing on vnic */
4108 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4109 	if (rc) {
4110 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4111 			   vnic_id, rc);
4112 		goto vnic_setup_err;
4113 	}
4114 
4115 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4116 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4117 		if (rc) {
4118 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4119 				   vnic_id, rc);
4120 		}
4121 	}
4122 
4123 vnic_setup_err:
4124 	return rc;
4125 }
4126 
4127 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4128 {
4129 #ifdef CONFIG_RFS_ACCEL
4130 	int i, rc = 0;
4131 
4132 	for (i = 0; i < bp->rx_nr_rings; i++) {
4133 		u16 vnic_id = i + 1;
4134 		u16 ring_id = i;
4135 
4136 		if (vnic_id >= bp->nr_vnics)
4137 			break;
4138 
4139 		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
4140 		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4141 		if (rc) {
4142 			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4143 				   vnic_id, rc);
4144 			break;
4145 		}
4146 		rc = bnxt_setup_vnic(bp, vnic_id);
4147 		if (rc)
4148 			break;
4149 	}
4150 	return rc;
4151 #else
4152 	return 0;
4153 #endif
4154 }
4155 
4156 static int bnxt_cfg_rx_mode(struct bnxt *);
4157 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
4158 
4159 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4160 {
4161 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4162 	int rc = 0;
4163 
4164 	if (irq_re_init) {
4165 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
4166 		if (rc) {
4167 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4168 				   rc);
4169 			goto err_out;
4170 		}
4171 	}
4172 
4173 	rc = bnxt_hwrm_ring_alloc(bp);
4174 	if (rc) {
4175 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4176 		goto err_out;
4177 	}
4178 
4179 	rc = bnxt_hwrm_ring_grp_alloc(bp);
4180 	if (rc) {
4181 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4182 		goto err_out;
4183 	}
4184 
4185 	/* default vnic 0 */
4186 	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
4187 	if (rc) {
4188 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4189 		goto err_out;
4190 	}
4191 
4192 	rc = bnxt_setup_vnic(bp, 0);
4193 	if (rc)
4194 		goto err_out;
4195 
4196 	if (bp->flags & BNXT_FLAG_RFS) {
4197 		rc = bnxt_alloc_rfs_vnics(bp);
4198 		if (rc)
4199 			goto err_out;
4200 	}
4201 
4202 	if (bp->flags & BNXT_FLAG_TPA) {
4203 		rc = bnxt_set_tpa(bp, true);
4204 		if (rc)
4205 			goto err_out;
4206 	}
4207 
4208 	if (BNXT_VF(bp))
4209 		bnxt_update_vf_mac(bp);
4210 
4211 	/* Filter for default vnic 0 */
4212 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4213 	if (rc) {
4214 		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4215 		goto err_out;
4216 	}
4217 	vnic->uc_filter_count = 1;
4218 
4219 	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
4220 
4221 	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4222 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4223 
4224 	if (bp->dev->flags & IFF_ALLMULTI) {
4225 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4226 		vnic->mc_list_count = 0;
4227 	} else {
4228 		u32 mask = 0;
4229 
4230 		bnxt_mc_list_updated(bp, &mask);
4231 		vnic->rx_mask |= mask;
4232 	}
4233 
4234 	rc = bnxt_cfg_rx_mode(bp);
4235 	if (rc)
4236 		goto err_out;
4237 
4238 	rc = bnxt_hwrm_set_coal(bp);
4239 	if (rc)
4240 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
4241 			    rc);
4242 
4243 	return 0;
4244 
4245 err_out:
4246 	bnxt_hwrm_resource_free(bp, 0, true);
4247 
4248 	return rc;
4249 }
4250 
4251 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4252 {
4253 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4254 	return 0;
4255 }
4256 
4257 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4258 {
4259 	bnxt_init_rx_rings(bp);
4260 	bnxt_init_tx_rings(bp);
4261 	bnxt_init_ring_grps(bp, irq_re_init);
4262 	bnxt_init_vnics(bp);
4263 
4264 	return bnxt_init_chip(bp, irq_re_init);
4265 }
4266 
4267 static void bnxt_disable_int(struct bnxt *bp)
4268 {
4269 	int i;
4270 
4271 	if (!bp->bnapi)
4272 		return;
4273 
4274 	for (i = 0; i < bp->cp_nr_rings; i++) {
4275 		struct bnxt_napi *bnapi = bp->bnapi[i];
4276 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4277 
4278 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4279 	}
4280 }
4281 
4282 static void bnxt_enable_int(struct bnxt *bp)
4283 {
4284 	int i;
4285 
4286 	atomic_set(&bp->intr_sem, 0);
4287 	for (i = 0; i < bp->cp_nr_rings; i++) {
4288 		struct bnxt_napi *bnapi = bp->bnapi[i];
4289 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4290 
4291 		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4292 	}
4293 }
4294 
4295 static int bnxt_set_real_num_queues(struct bnxt *bp)
4296 {
4297 	int rc;
4298 	struct net_device *dev = bp->dev;
4299 
4300 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4301 	if (rc)
4302 		return rc;
4303 
4304 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4305 	if (rc)
4306 		return rc;
4307 
4308 #ifdef CONFIG_RFS_ACCEL
4309 	if (bp->flags & BNXT_FLAG_RFS)
4310 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
4311 #endif
4312 
4313 	return rc;
4314 }
4315 
4316 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4317 			   bool shared)
4318 {
4319 	int _rx = *rx, _tx = *tx;
4320 
4321 	if (shared) {
4322 		*rx = min_t(int, _rx, max);
4323 		*tx = min_t(int, _tx, max);
4324 	} else {
4325 		if (max < 2)
4326 			return -ENOMEM;
4327 
4328 		while (_rx + _tx > max) {
4329 			if (_rx > _tx && _rx > 1)
4330 				_rx--;
4331 			else if (_tx > 1)
4332 				_tx--;
4333 		}
4334 		*rx = _rx;
4335 		*tx = _tx;
4336 	}
4337 	return 0;
4338 }
4339 
4340 static int bnxt_setup_msix(struct bnxt *bp)
4341 {
4342 	struct msix_entry *msix_ent;
4343 	struct net_device *dev = bp->dev;
4344 	int i, total_vecs, rc = 0, min = 1;
4345 	const int len = sizeof(bp->irq_tbl[0].name);
4346 
4347 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
4348 	total_vecs = bp->cp_nr_rings;
4349 
4350 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4351 	if (!msix_ent)
4352 		return -ENOMEM;
4353 
4354 	for (i = 0; i < total_vecs; i++) {
4355 		msix_ent[i].entry = i;
4356 		msix_ent[i].vector = 0;
4357 	}
4358 
4359 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4360 		min = 2;
4361 
4362 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
4363 	if (total_vecs < 0) {
4364 		rc = -ENODEV;
4365 		goto msix_setup_exit;
4366 	}
4367 
4368 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4369 	if (bp->irq_tbl) {
4370 		int tcs;
4371 
4372 		/* Trim rings based upon num of vectors allocated */
4373 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
4374 				     total_vecs, min == 1);
4375 		if (rc)
4376 			goto msix_setup_exit;
4377 
4378 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4379 		tcs = netdev_get_num_tc(dev);
4380 		if (tcs > 1) {
4381 			bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4382 			if (bp->tx_nr_rings_per_tc == 0) {
4383 				netdev_reset_tc(dev);
4384 				bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4385 			} else {
4386 				int i, off, count;
4387 
4388 				bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4389 				for (i = 0; i < tcs; i++) {
4390 					count = bp->tx_nr_rings_per_tc;
4391 					off = i * count;
4392 					netdev_set_tc_queue(dev, i, count, off);
4393 				}
4394 			}
4395 		}
4396 		bp->cp_nr_rings = total_vecs;
4397 
4398 		for (i = 0; i < bp->cp_nr_rings; i++) {
4399 			char *attr;
4400 
4401 			bp->irq_tbl[i].vector = msix_ent[i].vector;
4402 			if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4403 				attr = "TxRx";
4404 			else if (i < bp->rx_nr_rings)
4405 				attr = "rx";
4406 			else
4407 				attr = "tx";
4408 
4409 			snprintf(bp->irq_tbl[i].name, len,
4410 				 "%s-%s-%d", dev->name, attr, i);
4411 			bp->irq_tbl[i].handler = bnxt_msix;
4412 		}
4413 		rc = bnxt_set_real_num_queues(bp);
4414 		if (rc)
4415 			goto msix_setup_exit;
4416 	} else {
4417 		rc = -ENOMEM;
4418 		goto msix_setup_exit;
4419 	}
4420 	bp->flags |= BNXT_FLAG_USING_MSIX;
4421 	kfree(msix_ent);
4422 	return 0;
4423 
4424 msix_setup_exit:
4425 	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4426 	pci_disable_msix(bp->pdev);
4427 	kfree(msix_ent);
4428 	return rc;
4429 }
4430 
4431 static int bnxt_setup_inta(struct bnxt *bp)
4432 {
4433 	int rc;
4434 	const int len = sizeof(bp->irq_tbl[0].name);
4435 
4436 	if (netdev_get_num_tc(bp->dev))
4437 		netdev_reset_tc(bp->dev);
4438 
4439 	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4440 	if (!bp->irq_tbl) {
4441 		rc = -ENOMEM;
4442 		return rc;
4443 	}
4444 	bp->rx_nr_rings = 1;
4445 	bp->tx_nr_rings = 1;
4446 	bp->cp_nr_rings = 1;
4447 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4448 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
4449 	bp->irq_tbl[0].vector = bp->pdev->irq;
4450 	snprintf(bp->irq_tbl[0].name, len,
4451 		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4452 	bp->irq_tbl[0].handler = bnxt_inta;
4453 	rc = bnxt_set_real_num_queues(bp);
4454 	return rc;
4455 }
4456 
4457 static int bnxt_setup_int_mode(struct bnxt *bp)
4458 {
4459 	int rc = 0;
4460 
4461 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
4462 		rc = bnxt_setup_msix(bp);
4463 
4464 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
4465 		/* fallback to INTA */
4466 		rc = bnxt_setup_inta(bp);
4467 	}
4468 	return rc;
4469 }
4470 
4471 static void bnxt_free_irq(struct bnxt *bp)
4472 {
4473 	struct bnxt_irq *irq;
4474 	int i;
4475 
4476 #ifdef CONFIG_RFS_ACCEL
4477 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4478 	bp->dev->rx_cpu_rmap = NULL;
4479 #endif
4480 	if (!bp->irq_tbl)
4481 		return;
4482 
4483 	for (i = 0; i < bp->cp_nr_rings; i++) {
4484 		irq = &bp->irq_tbl[i];
4485 		if (irq->requested)
4486 			free_irq(irq->vector, bp->bnapi[i]);
4487 		irq->requested = 0;
4488 	}
4489 	if (bp->flags & BNXT_FLAG_USING_MSIX)
4490 		pci_disable_msix(bp->pdev);
4491 	kfree(bp->irq_tbl);
4492 	bp->irq_tbl = NULL;
4493 }
4494 
4495 static int bnxt_request_irq(struct bnxt *bp)
4496 {
4497 	int i, j, rc = 0;
4498 	unsigned long flags = 0;
4499 #ifdef CONFIG_RFS_ACCEL
4500 	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4501 #endif
4502 
4503 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4504 		flags = IRQF_SHARED;
4505 
4506 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
4507 		struct bnxt_irq *irq = &bp->irq_tbl[i];
4508 #ifdef CONFIG_RFS_ACCEL
4509 		if (rmap && bp->bnapi[i]->rx_ring) {
4510 			rc = irq_cpu_rmap_add(rmap, irq->vector);
4511 			if (rc)
4512 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4513 					    j);
4514 			j++;
4515 		}
4516 #endif
4517 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4518 				 bp->bnapi[i]);
4519 		if (rc)
4520 			break;
4521 
4522 		irq->requested = 1;
4523 	}
4524 	return rc;
4525 }
4526 
4527 static void bnxt_del_napi(struct bnxt *bp)
4528 {
4529 	int i;
4530 
4531 	if (!bp->bnapi)
4532 		return;
4533 
4534 	for (i = 0; i < bp->cp_nr_rings; i++) {
4535 		struct bnxt_napi *bnapi = bp->bnapi[i];
4536 
4537 		napi_hash_del(&bnapi->napi);
4538 		netif_napi_del(&bnapi->napi);
4539 	}
4540 }
4541 
4542 static void bnxt_init_napi(struct bnxt *bp)
4543 {
4544 	int i;
4545 	struct bnxt_napi *bnapi;
4546 
4547 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
4548 		for (i = 0; i < bp->cp_nr_rings; i++) {
4549 			bnapi = bp->bnapi[i];
4550 			netif_napi_add(bp->dev, &bnapi->napi,
4551 				       bnxt_poll, 64);
4552 		}
4553 	} else {
4554 		bnapi = bp->bnapi[0];
4555 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
4556 	}
4557 }
4558 
4559 static void bnxt_disable_napi(struct bnxt *bp)
4560 {
4561 	int i;
4562 
4563 	if (!bp->bnapi)
4564 		return;
4565 
4566 	for (i = 0; i < bp->cp_nr_rings; i++) {
4567 		napi_disable(&bp->bnapi[i]->napi);
4568 		bnxt_disable_poll(bp->bnapi[i]);
4569 	}
4570 }
4571 
4572 static void bnxt_enable_napi(struct bnxt *bp)
4573 {
4574 	int i;
4575 
4576 	for (i = 0; i < bp->cp_nr_rings; i++) {
4577 		bp->bnapi[i]->in_reset = false;
4578 		bnxt_enable_poll(bp->bnapi[i]);
4579 		napi_enable(&bp->bnapi[i]->napi);
4580 	}
4581 }
4582 
4583 static void bnxt_tx_disable(struct bnxt *bp)
4584 {
4585 	int i;
4586 	struct bnxt_tx_ring_info *txr;
4587 	struct netdev_queue *txq;
4588 
4589 	if (bp->tx_ring) {
4590 		for (i = 0; i < bp->tx_nr_rings; i++) {
4591 			txr = &bp->tx_ring[i];
4592 			txq = netdev_get_tx_queue(bp->dev, i);
4593 			__netif_tx_lock(txq, smp_processor_id());
4594 			txr->dev_state = BNXT_DEV_STATE_CLOSING;
4595 			__netif_tx_unlock(txq);
4596 		}
4597 	}
4598 	/* Stop all TX queues */
4599 	netif_tx_disable(bp->dev);
4600 	netif_carrier_off(bp->dev);
4601 }
4602 
4603 static void bnxt_tx_enable(struct bnxt *bp)
4604 {
4605 	int i;
4606 	struct bnxt_tx_ring_info *txr;
4607 	struct netdev_queue *txq;
4608 
4609 	for (i = 0; i < bp->tx_nr_rings; i++) {
4610 		txr = &bp->tx_ring[i];
4611 		txq = netdev_get_tx_queue(bp->dev, i);
4612 		txr->dev_state = 0;
4613 	}
4614 	netif_tx_wake_all_queues(bp->dev);
4615 	if (bp->link_info.link_up)
4616 		netif_carrier_on(bp->dev);
4617 }
4618 
4619 static void bnxt_report_link(struct bnxt *bp)
4620 {
4621 	if (bp->link_info.link_up) {
4622 		const char *duplex;
4623 		const char *flow_ctrl;
4624 		u16 speed;
4625 
4626 		netif_carrier_on(bp->dev);
4627 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4628 			duplex = "full";
4629 		else
4630 			duplex = "half";
4631 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4632 			flow_ctrl = "ON - receive & transmit";
4633 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4634 			flow_ctrl = "ON - transmit";
4635 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4636 			flow_ctrl = "ON - receive";
4637 		else
4638 			flow_ctrl = "none";
4639 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4640 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4641 			    speed, duplex, flow_ctrl);
4642 		if (bp->flags & BNXT_FLAG_EEE_CAP)
4643 			netdev_info(bp->dev, "EEE is %s\n",
4644 				    bp->eee.eee_active ? "active" :
4645 							 "not active");
4646 	} else {
4647 		netif_carrier_off(bp->dev);
4648 		netdev_err(bp->dev, "NIC Link is Down\n");
4649 	}
4650 }
4651 
4652 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
4653 {
4654 	int rc = 0;
4655 	struct hwrm_port_phy_qcaps_input req = {0};
4656 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4657 
4658 	if (bp->hwrm_spec_code < 0x10201)
4659 		return 0;
4660 
4661 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
4662 
4663 	mutex_lock(&bp->hwrm_cmd_lock);
4664 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4665 	if (rc)
4666 		goto hwrm_phy_qcaps_exit;
4667 
4668 	if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
4669 		struct ethtool_eee *eee = &bp->eee;
4670 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
4671 
4672 		bp->flags |= BNXT_FLAG_EEE_CAP;
4673 		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4674 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
4675 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
4676 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
4677 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
4678 	}
4679 
4680 hwrm_phy_qcaps_exit:
4681 	mutex_unlock(&bp->hwrm_cmd_lock);
4682 	return rc;
4683 }
4684 
4685 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4686 {
4687 	int rc = 0;
4688 	struct bnxt_link_info *link_info = &bp->link_info;
4689 	struct hwrm_port_phy_qcfg_input req = {0};
4690 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4691 	u8 link_up = link_info->link_up;
4692 
4693 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4694 
4695 	mutex_lock(&bp->hwrm_cmd_lock);
4696 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4697 	if (rc) {
4698 		mutex_unlock(&bp->hwrm_cmd_lock);
4699 		return rc;
4700 	}
4701 
4702 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4703 	link_info->phy_link_status = resp->link;
4704 	link_info->duplex =  resp->duplex;
4705 	link_info->pause = resp->pause;
4706 	link_info->auto_mode = resp->auto_mode;
4707 	link_info->auto_pause_setting = resp->auto_pause;
4708 	link_info->lp_pause = resp->link_partner_adv_pause;
4709 	link_info->force_pause_setting = resp->force_pause;
4710 	link_info->duplex_setting = resp->duplex;
4711 	if (link_info->phy_link_status == BNXT_LINK_LINK)
4712 		link_info->link_speed = le16_to_cpu(resp->link_speed);
4713 	else
4714 		link_info->link_speed = 0;
4715 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4716 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4717 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4718 	link_info->lp_auto_link_speeds =
4719 		le16_to_cpu(resp->link_partner_adv_speeds);
4720 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4721 	link_info->phy_ver[0] = resp->phy_maj;
4722 	link_info->phy_ver[1] = resp->phy_min;
4723 	link_info->phy_ver[2] = resp->phy_bld;
4724 	link_info->media_type = resp->media_type;
4725 	link_info->phy_type = resp->phy_type;
4726 	link_info->transceiver = resp->xcvr_pkg_type;
4727 	link_info->phy_addr = resp->eee_config_phy_addr &
4728 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
4729 	link_info->module_status = resp->module_status;
4730 
4731 	if (bp->flags & BNXT_FLAG_EEE_CAP) {
4732 		struct ethtool_eee *eee = &bp->eee;
4733 		u16 fw_speeds;
4734 
4735 		eee->eee_active = 0;
4736 		if (resp->eee_config_phy_addr &
4737 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
4738 			eee->eee_active = 1;
4739 			fw_speeds = le16_to_cpu(
4740 				resp->link_partner_adv_eee_link_speed_mask);
4741 			eee->lp_advertised =
4742 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4743 		}
4744 
4745 		/* Pull initial EEE config */
4746 		if (!chng_link_state) {
4747 			if (resp->eee_config_phy_addr &
4748 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
4749 				eee->eee_enabled = 1;
4750 
4751 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
4752 			eee->advertised =
4753 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
4754 
4755 			if (resp->eee_config_phy_addr &
4756 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
4757 				__le32 tmr;
4758 
4759 				eee->tx_lpi_enabled = 1;
4760 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
4761 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
4762 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
4763 			}
4764 		}
4765 	}
4766 	/* TODO: need to add more logic to report VF link */
4767 	if (chng_link_state) {
4768 		if (link_info->phy_link_status == BNXT_LINK_LINK)
4769 			link_info->link_up = 1;
4770 		else
4771 			link_info->link_up = 0;
4772 		if (link_up != link_info->link_up)
4773 			bnxt_report_link(bp);
4774 	} else {
4775 		/* alwasy link down if not require to update link state */
4776 		link_info->link_up = 0;
4777 	}
4778 	mutex_unlock(&bp->hwrm_cmd_lock);
4779 	return 0;
4780 }
4781 
4782 static void bnxt_get_port_module_status(struct bnxt *bp)
4783 {
4784 	struct bnxt_link_info *link_info = &bp->link_info;
4785 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
4786 	u8 module_status;
4787 
4788 	if (bnxt_update_link(bp, true))
4789 		return;
4790 
4791 	module_status = link_info->module_status;
4792 	switch (module_status) {
4793 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
4794 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
4795 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
4796 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
4797 			    bp->pf.port_id);
4798 		if (bp->hwrm_spec_code >= 0x10201) {
4799 			netdev_warn(bp->dev, "Module part number %s\n",
4800 				    resp->phy_vendor_partnumber);
4801 		}
4802 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
4803 			netdev_warn(bp->dev, "TX is disabled\n");
4804 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
4805 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
4806 	}
4807 }
4808 
4809 static void
4810 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4811 {
4812 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4813 		if (bp->hwrm_spec_code >= 0x10201)
4814 			req->auto_pause =
4815 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
4816 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4817 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4818 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4819 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4820 		req->enables |=
4821 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4822 	} else {
4823 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4824 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4825 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4826 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4827 		req->enables |=
4828 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4829 		if (bp->hwrm_spec_code >= 0x10201) {
4830 			req->auto_pause = req->force_pause;
4831 			req->enables |= cpu_to_le32(
4832 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4833 		}
4834 	}
4835 }
4836 
4837 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4838 				      struct hwrm_port_phy_cfg_input *req)
4839 {
4840 	u8 autoneg = bp->link_info.autoneg;
4841 	u16 fw_link_speed = bp->link_info.req_link_speed;
4842 	u32 advertising = bp->link_info.advertising;
4843 
4844 	if (autoneg & BNXT_AUTONEG_SPEED) {
4845 		req->auto_mode |=
4846 			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
4847 
4848 		req->enables |= cpu_to_le32(
4849 			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4850 		req->auto_link_speed_mask = cpu_to_le16(advertising);
4851 
4852 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4853 		req->flags |=
4854 			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4855 	} else {
4856 		req->force_link_speed = cpu_to_le16(fw_link_speed);
4857 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4858 	}
4859 
4860 	/* tell chimp that the setting takes effect immediately */
4861 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4862 }
4863 
4864 int bnxt_hwrm_set_pause(struct bnxt *bp)
4865 {
4866 	struct hwrm_port_phy_cfg_input req = {0};
4867 	int rc;
4868 
4869 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4870 	bnxt_hwrm_set_pause_common(bp, &req);
4871 
4872 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4873 	    bp->link_info.force_link_chng)
4874 		bnxt_hwrm_set_link_common(bp, &req);
4875 
4876 	mutex_lock(&bp->hwrm_cmd_lock);
4877 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4878 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4879 		/* since changing of pause setting doesn't trigger any link
4880 		 * change event, the driver needs to update the current pause
4881 		 * result upon successfully return of the phy_cfg command
4882 		 */
4883 		bp->link_info.pause =
4884 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4885 		bp->link_info.auto_pause_setting = 0;
4886 		if (!bp->link_info.force_link_chng)
4887 			bnxt_report_link(bp);
4888 	}
4889 	bp->link_info.force_link_chng = false;
4890 	mutex_unlock(&bp->hwrm_cmd_lock);
4891 	return rc;
4892 }
4893 
4894 static void bnxt_hwrm_set_eee(struct bnxt *bp,
4895 			      struct hwrm_port_phy_cfg_input *req)
4896 {
4897 	struct ethtool_eee *eee = &bp->eee;
4898 
4899 	if (eee->eee_enabled) {
4900 		u16 eee_speeds;
4901 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
4902 
4903 		if (eee->tx_lpi_enabled)
4904 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
4905 		else
4906 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
4907 
4908 		req->flags |= cpu_to_le32(flags);
4909 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
4910 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
4911 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
4912 	} else {
4913 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
4914 	}
4915 }
4916 
4917 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
4918 {
4919 	struct hwrm_port_phy_cfg_input req = {0};
4920 
4921 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4922 	if (set_pause)
4923 		bnxt_hwrm_set_pause_common(bp, &req);
4924 
4925 	bnxt_hwrm_set_link_common(bp, &req);
4926 
4927 	if (set_eee)
4928 		bnxt_hwrm_set_eee(bp, &req);
4929 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4930 }
4931 
4932 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
4933 {
4934 	struct hwrm_port_phy_cfg_input req = {0};
4935 
4936 	if (BNXT_VF(bp))
4937 		return 0;
4938 
4939 	if (pci_num_vf(bp->pdev))
4940 		return 0;
4941 
4942 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4943 	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
4944 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4945 }
4946 
4947 static bool bnxt_eee_config_ok(struct bnxt *bp)
4948 {
4949 	struct ethtool_eee *eee = &bp->eee;
4950 	struct bnxt_link_info *link_info = &bp->link_info;
4951 
4952 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
4953 		return true;
4954 
4955 	if (eee->eee_enabled) {
4956 		u32 advertising =
4957 			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
4958 
4959 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4960 			eee->eee_enabled = 0;
4961 			return false;
4962 		}
4963 		if (eee->advertised & ~advertising) {
4964 			eee->advertised = advertising & eee->supported;
4965 			return false;
4966 		}
4967 	}
4968 	return true;
4969 }
4970 
4971 static int bnxt_update_phy_setting(struct bnxt *bp)
4972 {
4973 	int rc;
4974 	bool update_link = false;
4975 	bool update_pause = false;
4976 	bool update_eee = false;
4977 	struct bnxt_link_info *link_info = &bp->link_info;
4978 
4979 	rc = bnxt_update_link(bp, true);
4980 	if (rc) {
4981 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4982 			   rc);
4983 		return rc;
4984 	}
4985 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4986 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
4987 	    link_info->req_flow_ctrl)
4988 		update_pause = true;
4989 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4990 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
4991 		update_pause = true;
4992 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4993 		if (BNXT_AUTO_MODE(link_info->auto_mode))
4994 			update_link = true;
4995 		if (link_info->req_link_speed != link_info->force_link_speed)
4996 			update_link = true;
4997 		if (link_info->req_duplex != link_info->duplex_setting)
4998 			update_link = true;
4999 	} else {
5000 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
5001 			update_link = true;
5002 		if (link_info->advertising != link_info->auto_link_speeds)
5003 			update_link = true;
5004 	}
5005 
5006 	if (!bnxt_eee_config_ok(bp))
5007 		update_eee = true;
5008 
5009 	if (update_link)
5010 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
5011 	else if (update_pause)
5012 		rc = bnxt_hwrm_set_pause(bp);
5013 	if (rc) {
5014 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
5015 			   rc);
5016 		return rc;
5017 	}
5018 
5019 	return rc;
5020 }
5021 
5022 /* Common routine to pre-map certain register block to different GRC window.
5023  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
5024  * in PF and 3 windows in VF that can be customized to map in different
5025  * register blocks.
5026  */
5027 static void bnxt_preset_reg_win(struct bnxt *bp)
5028 {
5029 	if (BNXT_PF(bp)) {
5030 		/* CAG registers map to GRC window #4 */
5031 		writel(BNXT_CAG_REG_BASE,
5032 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
5033 	}
5034 }
5035 
5036 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5037 {
5038 	int rc = 0;
5039 
5040 	bnxt_preset_reg_win(bp);
5041 	netif_carrier_off(bp->dev);
5042 	if (irq_re_init) {
5043 		rc = bnxt_setup_int_mode(bp);
5044 		if (rc) {
5045 			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
5046 				   rc);
5047 			return rc;
5048 		}
5049 	}
5050 	if ((bp->flags & BNXT_FLAG_RFS) &&
5051 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
5052 		/* disable RFS if falling back to INTA */
5053 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
5054 		bp->flags &= ~BNXT_FLAG_RFS;
5055 	}
5056 
5057 	rc = bnxt_alloc_mem(bp, irq_re_init);
5058 	if (rc) {
5059 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
5060 		goto open_err_free_mem;
5061 	}
5062 
5063 	if (irq_re_init) {
5064 		bnxt_init_napi(bp);
5065 		rc = bnxt_request_irq(bp);
5066 		if (rc) {
5067 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
5068 			goto open_err;
5069 		}
5070 	}
5071 
5072 	bnxt_enable_napi(bp);
5073 
5074 	rc = bnxt_init_nic(bp, irq_re_init);
5075 	if (rc) {
5076 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
5077 		goto open_err;
5078 	}
5079 
5080 	if (link_re_init) {
5081 		rc = bnxt_update_phy_setting(bp);
5082 		if (rc)
5083 			netdev_warn(bp->dev, "failed to update phy settings\n");
5084 	}
5085 
5086 	if (irq_re_init) {
5087 #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
5088 		vxlan_get_rx_port(bp->dev);
5089 #endif
5090 		if (!bnxt_hwrm_tunnel_dst_port_alloc(
5091 				bp, htons(0x17c1),
5092 				TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
5093 			bp->nge_port_cnt = 1;
5094 	}
5095 
5096 	set_bit(BNXT_STATE_OPEN, &bp->state);
5097 	bnxt_enable_int(bp);
5098 	/* Enable TX queues */
5099 	bnxt_tx_enable(bp);
5100 	mod_timer(&bp->timer, jiffies + bp->current_interval);
5101 	/* Poll link status and check for SFP+ module status */
5102 	bnxt_get_port_module_status(bp);
5103 
5104 	return 0;
5105 
5106 open_err:
5107 	bnxt_disable_napi(bp);
5108 	bnxt_del_napi(bp);
5109 
5110 open_err_free_mem:
5111 	bnxt_free_skbs(bp);
5112 	bnxt_free_irq(bp);
5113 	bnxt_free_mem(bp, true);
5114 	return rc;
5115 }
5116 
5117 /* rtnl_lock held */
5118 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5119 {
5120 	int rc = 0;
5121 
5122 	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
5123 	if (rc) {
5124 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
5125 		dev_close(bp->dev);
5126 	}
5127 	return rc;
5128 }
5129 
5130 static int bnxt_open(struct net_device *dev)
5131 {
5132 	struct bnxt *bp = netdev_priv(dev);
5133 	int rc = 0;
5134 
5135 	rc = bnxt_hwrm_func_reset(bp);
5136 	if (rc) {
5137 		netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
5138 			   rc);
5139 		rc = -1;
5140 		return rc;
5141 	}
5142 	return __bnxt_open_nic(bp, true, true);
5143 }
5144 
5145 static void bnxt_disable_int_sync(struct bnxt *bp)
5146 {
5147 	int i;
5148 
5149 	atomic_inc(&bp->intr_sem);
5150 	if (!netif_running(bp->dev))
5151 		return;
5152 
5153 	bnxt_disable_int(bp);
5154 	for (i = 0; i < bp->cp_nr_rings; i++)
5155 		synchronize_irq(bp->irq_tbl[i].vector);
5156 }
5157 
5158 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
5159 {
5160 	int rc = 0;
5161 
5162 #ifdef CONFIG_BNXT_SRIOV
5163 	if (bp->sriov_cfg) {
5164 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
5165 						      !bp->sriov_cfg,
5166 						      BNXT_SRIOV_CFG_WAIT_TMO);
5167 		if (rc)
5168 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
5169 	}
5170 #endif
5171 	/* Change device state to avoid TX queue wake up's */
5172 	bnxt_tx_disable(bp);
5173 
5174 	clear_bit(BNXT_STATE_OPEN, &bp->state);
5175 	smp_mb__after_atomic();
5176 	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
5177 		msleep(20);
5178 
5179 	/* Flush rings before disabling interrupts */
5180 	bnxt_shutdown_nic(bp, irq_re_init);
5181 
5182 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
5183 
5184 	bnxt_disable_napi(bp);
5185 	bnxt_disable_int_sync(bp);
5186 	del_timer_sync(&bp->timer);
5187 	bnxt_free_skbs(bp);
5188 
5189 	if (irq_re_init) {
5190 		bnxt_free_irq(bp);
5191 		bnxt_del_napi(bp);
5192 	}
5193 	bnxt_free_mem(bp, irq_re_init);
5194 	return rc;
5195 }
5196 
5197 static int bnxt_close(struct net_device *dev)
5198 {
5199 	struct bnxt *bp = netdev_priv(dev);
5200 
5201 	bnxt_close_nic(bp, true, true);
5202 	bnxt_hwrm_shutdown_link(bp);
5203 	return 0;
5204 }
5205 
5206 /* rtnl_lock held */
5207 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5208 {
5209 	switch (cmd) {
5210 	case SIOCGMIIPHY:
5211 		/* fallthru */
5212 	case SIOCGMIIREG: {
5213 		if (!netif_running(dev))
5214 			return -EAGAIN;
5215 
5216 		return 0;
5217 	}
5218 
5219 	case SIOCSMIIREG:
5220 		if (!netif_running(dev))
5221 			return -EAGAIN;
5222 
5223 		return 0;
5224 
5225 	default:
5226 		/* do nothing */
5227 		break;
5228 	}
5229 	return -EOPNOTSUPP;
5230 }
5231 
5232 static struct rtnl_link_stats64 *
5233 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5234 {
5235 	u32 i;
5236 	struct bnxt *bp = netdev_priv(dev);
5237 
5238 	memset(stats, 0, sizeof(struct rtnl_link_stats64));
5239 
5240 	if (!bp->bnapi)
5241 		return stats;
5242 
5243 	/* TODO check if we need to synchronize with bnxt_close path */
5244 	for (i = 0; i < bp->cp_nr_rings; i++) {
5245 		struct bnxt_napi *bnapi = bp->bnapi[i];
5246 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5247 		struct ctx_hw_stats *hw_stats = cpr->hw_stats;
5248 
5249 		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
5250 		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
5251 		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
5252 
5253 		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
5254 		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
5255 		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
5256 
5257 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
5258 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
5259 		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
5260 
5261 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
5262 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
5263 		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
5264 
5265 		stats->rx_missed_errors +=
5266 			le64_to_cpu(hw_stats->rx_discard_pkts);
5267 
5268 		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
5269 
5270 		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
5271 	}
5272 
5273 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
5274 		struct rx_port_stats *rx = bp->hw_rx_port_stats;
5275 		struct tx_port_stats *tx = bp->hw_tx_port_stats;
5276 
5277 		stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
5278 		stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
5279 		stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
5280 					  le64_to_cpu(rx->rx_ovrsz_frames) +
5281 					  le64_to_cpu(rx->rx_runt_frames);
5282 		stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
5283 				   le64_to_cpu(rx->rx_jbr_frames);
5284 		stats->collisions = le64_to_cpu(tx->tx_total_collisions);
5285 		stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
5286 		stats->tx_errors = le64_to_cpu(tx->tx_err);
5287 	}
5288 
5289 	return stats;
5290 }
5291 
5292 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
5293 {
5294 	struct net_device *dev = bp->dev;
5295 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5296 	struct netdev_hw_addr *ha;
5297 	u8 *haddr;
5298 	int mc_count = 0;
5299 	bool update = false;
5300 	int off = 0;
5301 
5302 	netdev_for_each_mc_addr(ha, dev) {
5303 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
5304 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5305 			vnic->mc_list_count = 0;
5306 			return false;
5307 		}
5308 		haddr = ha->addr;
5309 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
5310 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
5311 			update = true;
5312 		}
5313 		off += ETH_ALEN;
5314 		mc_count++;
5315 	}
5316 	if (mc_count)
5317 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
5318 
5319 	if (mc_count != vnic->mc_list_count) {
5320 		vnic->mc_list_count = mc_count;
5321 		update = true;
5322 	}
5323 	return update;
5324 }
5325 
5326 static bool bnxt_uc_list_updated(struct bnxt *bp)
5327 {
5328 	struct net_device *dev = bp->dev;
5329 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5330 	struct netdev_hw_addr *ha;
5331 	int off = 0;
5332 
5333 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
5334 		return true;
5335 
5336 	netdev_for_each_uc_addr(ha, dev) {
5337 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
5338 			return true;
5339 
5340 		off += ETH_ALEN;
5341 	}
5342 	return false;
5343 }
5344 
5345 static void bnxt_set_rx_mode(struct net_device *dev)
5346 {
5347 	struct bnxt *bp = netdev_priv(dev);
5348 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5349 	u32 mask = vnic->rx_mask;
5350 	bool mc_update = false;
5351 	bool uc_update;
5352 
5353 	if (!netif_running(dev))
5354 		return;
5355 
5356 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
5357 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
5358 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
5359 
5360 	/* Only allow PF to be in promiscuous mode */
5361 	if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
5362 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5363 
5364 	uc_update = bnxt_uc_list_updated(bp);
5365 
5366 	if (dev->flags & IFF_ALLMULTI) {
5367 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5368 		vnic->mc_list_count = 0;
5369 	} else {
5370 		mc_update = bnxt_mc_list_updated(bp, &mask);
5371 	}
5372 
5373 	if (mask != vnic->rx_mask || uc_update || mc_update) {
5374 		vnic->rx_mask = mask;
5375 
5376 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
5377 		schedule_work(&bp->sp_task);
5378 	}
5379 }
5380 
5381 static int bnxt_cfg_rx_mode(struct bnxt *bp)
5382 {
5383 	struct net_device *dev = bp->dev;
5384 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5385 	struct netdev_hw_addr *ha;
5386 	int i, off = 0, rc;
5387 	bool uc_update;
5388 
5389 	netif_addr_lock_bh(dev);
5390 	uc_update = bnxt_uc_list_updated(bp);
5391 	netif_addr_unlock_bh(dev);
5392 
5393 	if (!uc_update)
5394 		goto skip_uc;
5395 
5396 	mutex_lock(&bp->hwrm_cmd_lock);
5397 	for (i = 1; i < vnic->uc_filter_count; i++) {
5398 		struct hwrm_cfa_l2_filter_free_input req = {0};
5399 
5400 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
5401 				       -1);
5402 
5403 		req.l2_filter_id = vnic->fw_l2_filter_id[i];
5404 
5405 		rc = _hwrm_send_message(bp, &req, sizeof(req),
5406 					HWRM_CMD_TIMEOUT);
5407 	}
5408 	mutex_unlock(&bp->hwrm_cmd_lock);
5409 
5410 	vnic->uc_filter_count = 1;
5411 
5412 	netif_addr_lock_bh(dev);
5413 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
5414 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5415 	} else {
5416 		netdev_for_each_uc_addr(ha, dev) {
5417 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
5418 			off += ETH_ALEN;
5419 			vnic->uc_filter_count++;
5420 		}
5421 	}
5422 	netif_addr_unlock_bh(dev);
5423 
5424 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
5425 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
5426 		if (rc) {
5427 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
5428 				   rc);
5429 			vnic->uc_filter_count = i;
5430 			return rc;
5431 		}
5432 	}
5433 
5434 skip_uc:
5435 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
5436 	if (rc)
5437 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
5438 			   rc);
5439 
5440 	return rc;
5441 }
5442 
5443 static bool bnxt_rfs_capable(struct bnxt *bp)
5444 {
5445 #ifdef CONFIG_RFS_ACCEL
5446 	struct bnxt_pf_info *pf = &bp->pf;
5447 	int vnics;
5448 
5449 	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
5450 		return false;
5451 
5452 	vnics = 1 + bp->rx_nr_rings;
5453 	if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
5454 		return false;
5455 
5456 	return true;
5457 #else
5458 	return false;
5459 #endif
5460 }
5461 
5462 static netdev_features_t bnxt_fix_features(struct net_device *dev,
5463 					   netdev_features_t features)
5464 {
5465 	struct bnxt *bp = netdev_priv(dev);
5466 
5467 	if (!bnxt_rfs_capable(bp))
5468 		features &= ~NETIF_F_NTUPLE;
5469 	return features;
5470 }
5471 
5472 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
5473 {
5474 	struct bnxt *bp = netdev_priv(dev);
5475 	u32 flags = bp->flags;
5476 	u32 changes;
5477 	int rc = 0;
5478 	bool re_init = false;
5479 	bool update_tpa = false;
5480 
5481 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
5482 	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
5483 		flags |= BNXT_FLAG_GRO;
5484 	if (features & NETIF_F_LRO)
5485 		flags |= BNXT_FLAG_LRO;
5486 
5487 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5488 		flags |= BNXT_FLAG_STRIP_VLAN;
5489 
5490 	if (features & NETIF_F_NTUPLE)
5491 		flags |= BNXT_FLAG_RFS;
5492 
5493 	changes = flags ^ bp->flags;
5494 	if (changes & BNXT_FLAG_TPA) {
5495 		update_tpa = true;
5496 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
5497 		    (flags & BNXT_FLAG_TPA) == 0)
5498 			re_init = true;
5499 	}
5500 
5501 	if (changes & ~BNXT_FLAG_TPA)
5502 		re_init = true;
5503 
5504 	if (flags != bp->flags) {
5505 		u32 old_flags = bp->flags;
5506 
5507 		bp->flags = flags;
5508 
5509 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5510 			if (update_tpa)
5511 				bnxt_set_ring_params(bp);
5512 			return rc;
5513 		}
5514 
5515 		if (re_init) {
5516 			bnxt_close_nic(bp, false, false);
5517 			if (update_tpa)
5518 				bnxt_set_ring_params(bp);
5519 
5520 			return bnxt_open_nic(bp, false, false);
5521 		}
5522 		if (update_tpa) {
5523 			rc = bnxt_set_tpa(bp,
5524 					  (flags & BNXT_FLAG_TPA) ?
5525 					  true : false);
5526 			if (rc)
5527 				bp->flags = old_flags;
5528 		}
5529 	}
5530 	return rc;
5531 }
5532 
5533 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
5534 {
5535 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
5536 	int i = bnapi->index;
5537 
5538 	if (!txr)
5539 		return;
5540 
5541 	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5542 		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
5543 		    txr->tx_cons);
5544 }
5545 
5546 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
5547 {
5548 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
5549 	int i = bnapi->index;
5550 
5551 	if (!rxr)
5552 		return;
5553 
5554 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5555 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
5556 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
5557 		    rxr->rx_sw_agg_prod);
5558 }
5559 
5560 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
5561 {
5562 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5563 	int i = bnapi->index;
5564 
5565 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5566 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
5567 }
5568 
5569 static void bnxt_dbg_dump_states(struct bnxt *bp)
5570 {
5571 	int i;
5572 	struct bnxt_napi *bnapi;
5573 
5574 	for (i = 0; i < bp->cp_nr_rings; i++) {
5575 		bnapi = bp->bnapi[i];
5576 		if (netif_msg_drv(bp)) {
5577 			bnxt_dump_tx_sw_state(bnapi);
5578 			bnxt_dump_rx_sw_state(bnapi);
5579 			bnxt_dump_cp_sw_state(bnapi);
5580 		}
5581 	}
5582 }
5583 
5584 static void bnxt_reset_task(struct bnxt *bp)
5585 {
5586 	bnxt_dbg_dump_states(bp);
5587 	if (netif_running(bp->dev)) {
5588 		bnxt_close_nic(bp, false, false);
5589 		bnxt_open_nic(bp, false, false);
5590 	}
5591 }
5592 
5593 static void bnxt_tx_timeout(struct net_device *dev)
5594 {
5595 	struct bnxt *bp = netdev_priv(dev);
5596 
5597 	netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
5598 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5599 	schedule_work(&bp->sp_task);
5600 }
5601 
5602 #ifdef CONFIG_NET_POLL_CONTROLLER
5603 static void bnxt_poll_controller(struct net_device *dev)
5604 {
5605 	struct bnxt *bp = netdev_priv(dev);
5606 	int i;
5607 
5608 	for (i = 0; i < bp->cp_nr_rings; i++) {
5609 		struct bnxt_irq *irq = &bp->irq_tbl[i];
5610 
5611 		disable_irq(irq->vector);
5612 		irq->handler(irq->vector, bp->bnapi[i]);
5613 		enable_irq(irq->vector);
5614 	}
5615 }
5616 #endif
5617 
5618 static void bnxt_timer(unsigned long data)
5619 {
5620 	struct bnxt *bp = (struct bnxt *)data;
5621 	struct net_device *dev = bp->dev;
5622 
5623 	if (!netif_running(dev))
5624 		return;
5625 
5626 	if (atomic_read(&bp->intr_sem) != 0)
5627 		goto bnxt_restart_timer;
5628 
5629 	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
5630 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
5631 		schedule_work(&bp->sp_task);
5632 	}
5633 bnxt_restart_timer:
5634 	mod_timer(&bp->timer, jiffies + bp->current_interval);
5635 }
5636 
5637 static void bnxt_cfg_ntp_filters(struct bnxt *);
5638 
5639 static void bnxt_sp_task(struct work_struct *work)
5640 {
5641 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5642 	int rc;
5643 
5644 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5645 	smp_mb__after_atomic();
5646 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5647 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5648 		return;
5649 	}
5650 
5651 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5652 		bnxt_cfg_rx_mode(bp);
5653 
5654 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5655 		bnxt_cfg_ntp_filters(bp);
5656 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5657 		rc = bnxt_update_link(bp, true);
5658 		if (rc)
5659 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5660 				   rc);
5661 	}
5662 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5663 		bnxt_hwrm_exec_fwd_req(bp);
5664 	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5665 		bnxt_hwrm_tunnel_dst_port_alloc(
5666 			bp, bp->vxlan_port,
5667 			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5668 	}
5669 	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5670 		bnxt_hwrm_tunnel_dst_port_free(
5671 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5672 	}
5673 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5674 		/* bnxt_reset_task() calls bnxt_close_nic() which waits
5675 		 * for BNXT_STATE_IN_SP_TASK to clear.
5676 		 */
5677 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5678 		rtnl_lock();
5679 		bnxt_reset_task(bp);
5680 		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5681 		rtnl_unlock();
5682 	}
5683 
5684 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
5685 		bnxt_get_port_module_status(bp);
5686 
5687 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
5688 		bnxt_hwrm_port_qstats(bp);
5689 
5690 	smp_mb__before_atomic();
5691 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5692 }
5693 
5694 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5695 {
5696 	int rc;
5697 	struct bnxt *bp = netdev_priv(dev);
5698 
5699 	SET_NETDEV_DEV(dev, &pdev->dev);
5700 
5701 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
5702 	rc = pci_enable_device(pdev);
5703 	if (rc) {
5704 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5705 		goto init_err;
5706 	}
5707 
5708 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5709 		dev_err(&pdev->dev,
5710 			"Cannot find PCI device base address, aborting\n");
5711 		rc = -ENODEV;
5712 		goto init_err_disable;
5713 	}
5714 
5715 	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5716 	if (rc) {
5717 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5718 		goto init_err_disable;
5719 	}
5720 
5721 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5722 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5723 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
5724 		goto init_err_disable;
5725 	}
5726 
5727 	pci_set_master(pdev);
5728 
5729 	bp->dev = dev;
5730 	bp->pdev = pdev;
5731 
5732 	bp->bar0 = pci_ioremap_bar(pdev, 0);
5733 	if (!bp->bar0) {
5734 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5735 		rc = -ENOMEM;
5736 		goto init_err_release;
5737 	}
5738 
5739 	bp->bar1 = pci_ioremap_bar(pdev, 2);
5740 	if (!bp->bar1) {
5741 		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5742 		rc = -ENOMEM;
5743 		goto init_err_release;
5744 	}
5745 
5746 	bp->bar2 = pci_ioremap_bar(pdev, 4);
5747 	if (!bp->bar2) {
5748 		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5749 		rc = -ENOMEM;
5750 		goto init_err_release;
5751 	}
5752 
5753 	pci_enable_pcie_error_reporting(pdev);
5754 
5755 	INIT_WORK(&bp->sp_task, bnxt_sp_task);
5756 
5757 	spin_lock_init(&bp->ntp_fltr_lock);
5758 
5759 	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5760 	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5761 
5762 	/* tick values in micro seconds */
5763 	bp->rx_coal_ticks = 12;
5764 	bp->rx_coal_bufs = 30;
5765 	bp->rx_coal_ticks_irq = 1;
5766 	bp->rx_coal_bufs_irq = 2;
5767 
5768 	bp->tx_coal_ticks = 25;
5769 	bp->tx_coal_bufs = 30;
5770 	bp->tx_coal_ticks_irq = 2;
5771 	bp->tx_coal_bufs_irq = 2;
5772 
5773 	init_timer(&bp->timer);
5774 	bp->timer.data = (unsigned long)bp;
5775 	bp->timer.function = bnxt_timer;
5776 	bp->current_interval = BNXT_TIMER_INTERVAL;
5777 
5778 	clear_bit(BNXT_STATE_OPEN, &bp->state);
5779 
5780 	return 0;
5781 
5782 init_err_release:
5783 	if (bp->bar2) {
5784 		pci_iounmap(pdev, bp->bar2);
5785 		bp->bar2 = NULL;
5786 	}
5787 
5788 	if (bp->bar1) {
5789 		pci_iounmap(pdev, bp->bar1);
5790 		bp->bar1 = NULL;
5791 	}
5792 
5793 	if (bp->bar0) {
5794 		pci_iounmap(pdev, bp->bar0);
5795 		bp->bar0 = NULL;
5796 	}
5797 
5798 	pci_release_regions(pdev);
5799 
5800 init_err_disable:
5801 	pci_disable_device(pdev);
5802 
5803 init_err:
5804 	return rc;
5805 }
5806 
5807 /* rtnl_lock held */
5808 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5809 {
5810 	struct sockaddr *addr = p;
5811 	struct bnxt *bp = netdev_priv(dev);
5812 	int rc = 0;
5813 
5814 	if (!is_valid_ether_addr(addr->sa_data))
5815 		return -EADDRNOTAVAIL;
5816 
5817 	rc = bnxt_approve_mac(bp, addr->sa_data);
5818 	if (rc)
5819 		return rc;
5820 
5821 	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5822 		return 0;
5823 
5824 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5825 	if (netif_running(dev)) {
5826 		bnxt_close_nic(bp, false, false);
5827 		rc = bnxt_open_nic(bp, false, false);
5828 	}
5829 
5830 	return rc;
5831 }
5832 
5833 /* rtnl_lock held */
5834 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5835 {
5836 	struct bnxt *bp = netdev_priv(dev);
5837 
5838 	if (new_mtu < 60 || new_mtu > 9000)
5839 		return -EINVAL;
5840 
5841 	if (netif_running(dev))
5842 		bnxt_close_nic(bp, false, false);
5843 
5844 	dev->mtu = new_mtu;
5845 	bnxt_set_ring_params(bp);
5846 
5847 	if (netif_running(dev))
5848 		return bnxt_open_nic(bp, false, false);
5849 
5850 	return 0;
5851 }
5852 
5853 static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
5854 			 struct tc_to_netdev *ntc)
5855 {
5856 	struct bnxt *bp = netdev_priv(dev);
5857 	u8 tc;
5858 
5859 	if (ntc->type != TC_SETUP_MQPRIO)
5860 		return -EINVAL;
5861 
5862 	tc = ntc->tc;
5863 
5864 	if (tc > bp->max_tc) {
5865 		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
5866 			   tc, bp->max_tc);
5867 		return -EINVAL;
5868 	}
5869 
5870 	if (netdev_get_num_tc(dev) == tc)
5871 		return 0;
5872 
5873 	if (tc) {
5874 		int max_rx_rings, max_tx_rings, rc;
5875 		bool sh = false;
5876 
5877 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5878 			sh = true;
5879 
5880 		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
5881 		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5882 			return -ENOMEM;
5883 	}
5884 
5885 	/* Needs to close the device and do hw resource re-allocations */
5886 	if (netif_running(bp->dev))
5887 		bnxt_close_nic(bp, true, false);
5888 
5889 	if (tc) {
5890 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5891 		netdev_set_num_tc(dev, tc);
5892 	} else {
5893 		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5894 		netdev_reset_tc(dev);
5895 	}
5896 	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5897 	bp->num_stat_ctxs = bp->cp_nr_rings;
5898 
5899 	if (netif_running(bp->dev))
5900 		return bnxt_open_nic(bp, true, false);
5901 
5902 	return 0;
5903 }
5904 
5905 #ifdef CONFIG_RFS_ACCEL
5906 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5907 			    struct bnxt_ntuple_filter *f2)
5908 {
5909 	struct flow_keys *keys1 = &f1->fkeys;
5910 	struct flow_keys *keys2 = &f2->fkeys;
5911 
5912 	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5913 	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5914 	    keys1->ports.ports == keys2->ports.ports &&
5915 	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
5916 	    keys1->basic.n_proto == keys2->basic.n_proto &&
5917 	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5918 		return true;
5919 
5920 	return false;
5921 }
5922 
5923 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5924 			      u16 rxq_index, u32 flow_id)
5925 {
5926 	struct bnxt *bp = netdev_priv(dev);
5927 	struct bnxt_ntuple_filter *fltr, *new_fltr;
5928 	struct flow_keys *fkeys;
5929 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
5930 	int rc = 0, idx, bit_id;
5931 	struct hlist_head *head;
5932 
5933 	if (skb->encapsulation)
5934 		return -EPROTONOSUPPORT;
5935 
5936 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5937 	if (!new_fltr)
5938 		return -ENOMEM;
5939 
5940 	fkeys = &new_fltr->fkeys;
5941 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5942 		rc = -EPROTONOSUPPORT;
5943 		goto err_free;
5944 	}
5945 
5946 	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5947 	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5948 	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5949 		rc = -EPROTONOSUPPORT;
5950 		goto err_free;
5951 	}
5952 
5953 	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5954 
5955 	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5956 	head = &bp->ntp_fltr_hash_tbl[idx];
5957 	rcu_read_lock();
5958 	hlist_for_each_entry_rcu(fltr, head, hash) {
5959 		if (bnxt_fltr_match(fltr, new_fltr)) {
5960 			rcu_read_unlock();
5961 			rc = 0;
5962 			goto err_free;
5963 		}
5964 	}
5965 	rcu_read_unlock();
5966 
5967 	spin_lock_bh(&bp->ntp_fltr_lock);
5968 	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5969 					 BNXT_NTP_FLTR_MAX_FLTR, 0);
5970 	if (bit_id < 0) {
5971 		spin_unlock_bh(&bp->ntp_fltr_lock);
5972 		rc = -ENOMEM;
5973 		goto err_free;
5974 	}
5975 
5976 	new_fltr->sw_id = (u16)bit_id;
5977 	new_fltr->flow_id = flow_id;
5978 	new_fltr->rxq = rxq_index;
5979 	hlist_add_head_rcu(&new_fltr->hash, head);
5980 	bp->ntp_fltr_count++;
5981 	spin_unlock_bh(&bp->ntp_fltr_lock);
5982 
5983 	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5984 	schedule_work(&bp->sp_task);
5985 
5986 	return new_fltr->sw_id;
5987 
5988 err_free:
5989 	kfree(new_fltr);
5990 	return rc;
5991 }
5992 
5993 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5994 {
5995 	int i;
5996 
5997 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5998 		struct hlist_head *head;
5999 		struct hlist_node *tmp;
6000 		struct bnxt_ntuple_filter *fltr;
6001 		int rc;
6002 
6003 		head = &bp->ntp_fltr_hash_tbl[i];
6004 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
6005 			bool del = false;
6006 
6007 			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
6008 				if (rps_may_expire_flow(bp->dev, fltr->rxq,
6009 							fltr->flow_id,
6010 							fltr->sw_id)) {
6011 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
6012 									 fltr);
6013 					del = true;
6014 				}
6015 			} else {
6016 				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
6017 								       fltr);
6018 				if (rc)
6019 					del = true;
6020 				else
6021 					set_bit(BNXT_FLTR_VALID, &fltr->state);
6022 			}
6023 
6024 			if (del) {
6025 				spin_lock_bh(&bp->ntp_fltr_lock);
6026 				hlist_del_rcu(&fltr->hash);
6027 				bp->ntp_fltr_count--;
6028 				spin_unlock_bh(&bp->ntp_fltr_lock);
6029 				synchronize_rcu();
6030 				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
6031 				kfree(fltr);
6032 			}
6033 		}
6034 	}
6035 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
6036 		netdev_info(bp->dev, "Receive PF driver unload event!");
6037 }
6038 
6039 #else
6040 
6041 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
6042 {
6043 }
6044 
6045 #endif /* CONFIG_RFS_ACCEL */
6046 
6047 static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
6048 				__be16 port)
6049 {
6050 	struct bnxt *bp = netdev_priv(dev);
6051 
6052 	if (!netif_running(dev))
6053 		return;
6054 
6055 	if (sa_family != AF_INET6 && sa_family != AF_INET)
6056 		return;
6057 
6058 	if (bp->vxlan_port_cnt && bp->vxlan_port != port)
6059 		return;
6060 
6061 	bp->vxlan_port_cnt++;
6062 	if (bp->vxlan_port_cnt == 1) {
6063 		bp->vxlan_port = port;
6064 		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
6065 		schedule_work(&bp->sp_task);
6066 	}
6067 }
6068 
6069 static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
6070 				__be16 port)
6071 {
6072 	struct bnxt *bp = netdev_priv(dev);
6073 
6074 	if (!netif_running(dev))
6075 		return;
6076 
6077 	if (sa_family != AF_INET6 && sa_family != AF_INET)
6078 		return;
6079 
6080 	if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
6081 		bp->vxlan_port_cnt--;
6082 
6083 		if (bp->vxlan_port_cnt == 0) {
6084 			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
6085 			schedule_work(&bp->sp_task);
6086 		}
6087 	}
6088 }
6089 
6090 static const struct net_device_ops bnxt_netdev_ops = {
6091 	.ndo_open		= bnxt_open,
6092 	.ndo_start_xmit		= bnxt_start_xmit,
6093 	.ndo_stop		= bnxt_close,
6094 	.ndo_get_stats64	= bnxt_get_stats64,
6095 	.ndo_set_rx_mode	= bnxt_set_rx_mode,
6096 	.ndo_do_ioctl		= bnxt_ioctl,
6097 	.ndo_validate_addr	= eth_validate_addr,
6098 	.ndo_set_mac_address	= bnxt_change_mac_addr,
6099 	.ndo_change_mtu		= bnxt_change_mtu,
6100 	.ndo_fix_features	= bnxt_fix_features,
6101 	.ndo_set_features	= bnxt_set_features,
6102 	.ndo_tx_timeout		= bnxt_tx_timeout,
6103 #ifdef CONFIG_BNXT_SRIOV
6104 	.ndo_get_vf_config	= bnxt_get_vf_config,
6105 	.ndo_set_vf_mac		= bnxt_set_vf_mac,
6106 	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
6107 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
6108 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
6109 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
6110 #endif
6111 #ifdef CONFIG_NET_POLL_CONTROLLER
6112 	.ndo_poll_controller	= bnxt_poll_controller,
6113 #endif
6114 	.ndo_setup_tc           = bnxt_setup_tc,
6115 #ifdef CONFIG_RFS_ACCEL
6116 	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
6117 #endif
6118 	.ndo_add_vxlan_port	= bnxt_add_vxlan_port,
6119 	.ndo_del_vxlan_port	= bnxt_del_vxlan_port,
6120 #ifdef CONFIG_NET_RX_BUSY_POLL
6121 	.ndo_busy_poll		= bnxt_busy_poll,
6122 #endif
6123 };
6124 
6125 static void bnxt_remove_one(struct pci_dev *pdev)
6126 {
6127 	struct net_device *dev = pci_get_drvdata(pdev);
6128 	struct bnxt *bp = netdev_priv(dev);
6129 
6130 	if (BNXT_PF(bp))
6131 		bnxt_sriov_disable(bp);
6132 
6133 	pci_disable_pcie_error_reporting(pdev);
6134 	unregister_netdev(dev);
6135 	cancel_work_sync(&bp->sp_task);
6136 	bp->sp_event = 0;
6137 
6138 	bnxt_hwrm_func_drv_unrgtr(bp);
6139 	bnxt_free_hwrm_resources(bp);
6140 	pci_iounmap(pdev, bp->bar2);
6141 	pci_iounmap(pdev, bp->bar1);
6142 	pci_iounmap(pdev, bp->bar0);
6143 	free_netdev(dev);
6144 
6145 	pci_release_regions(pdev);
6146 	pci_disable_device(pdev);
6147 }
6148 
6149 static int bnxt_probe_phy(struct bnxt *bp)
6150 {
6151 	int rc = 0;
6152 	struct bnxt_link_info *link_info = &bp->link_info;
6153 
6154 	rc = bnxt_hwrm_phy_qcaps(bp);
6155 	if (rc) {
6156 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
6157 			   rc);
6158 		return rc;
6159 	}
6160 
6161 	rc = bnxt_update_link(bp, false);
6162 	if (rc) {
6163 		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
6164 			   rc);
6165 		return rc;
6166 	}
6167 
6168 	/*initialize the ethool setting copy with NVM settings */
6169 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
6170 		link_info->autoneg = BNXT_AUTONEG_SPEED;
6171 		if (bp->hwrm_spec_code >= 0x10201) {
6172 			if (link_info->auto_pause_setting &
6173 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
6174 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6175 		} else {
6176 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
6177 		}
6178 		link_info->advertising = link_info->auto_link_speeds;
6179 	} else {
6180 		link_info->req_link_speed = link_info->force_link_speed;
6181 		link_info->req_duplex = link_info->duplex_setting;
6182 	}
6183 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
6184 		link_info->req_flow_ctrl =
6185 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
6186 	else
6187 		link_info->req_flow_ctrl = link_info->force_pause_setting;
6188 	return rc;
6189 }
6190 
6191 static int bnxt_get_max_irq(struct pci_dev *pdev)
6192 {
6193 	u16 ctrl;
6194 
6195 	if (!pdev->msix_cap)
6196 		return 1;
6197 
6198 	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
6199 	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
6200 }
6201 
6202 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
6203 				int *max_cp)
6204 {
6205 	int max_ring_grps = 0;
6206 
6207 #ifdef CONFIG_BNXT_SRIOV
6208 	if (!BNXT_PF(bp)) {
6209 		*max_tx = bp->vf.max_tx_rings;
6210 		*max_rx = bp->vf.max_rx_rings;
6211 		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
6212 		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
6213 		max_ring_grps = bp->vf.max_hw_ring_grps;
6214 	} else
6215 #endif
6216 	{
6217 		*max_tx = bp->pf.max_tx_rings;
6218 		*max_rx = bp->pf.max_rx_rings;
6219 		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
6220 		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
6221 		max_ring_grps = bp->pf.max_hw_ring_grps;
6222 	}
6223 
6224 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6225 		*max_rx >>= 1;
6226 	*max_rx = min_t(int, *max_rx, max_ring_grps);
6227 }
6228 
6229 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
6230 {
6231 	int rx, tx, cp;
6232 
6233 	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
6234 	if (!rx || !tx || !cp)
6235 		return -ENOMEM;
6236 
6237 	*max_rx = rx;
6238 	*max_tx = tx;
6239 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
6240 }
6241 
6242 static int bnxt_set_dflt_rings(struct bnxt *bp)
6243 {
6244 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
6245 	bool sh = true;
6246 
6247 	if (sh)
6248 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
6249 	dflt_rings = netif_get_num_default_rss_queues();
6250 	rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6251 	if (rc)
6252 		return rc;
6253 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
6254 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
6255 	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6256 	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6257 			       bp->tx_nr_rings + bp->rx_nr_rings;
6258 	bp->num_stat_ctxs = bp->cp_nr_rings;
6259 	return rc;
6260 }
6261 
6262 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
6263 {
6264 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
6265 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
6266 
6267 	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
6268 	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
6269 		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
6270 	else
6271 		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
6272 			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
6273 			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
6274 			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
6275 			    "Unknown", width);
6276 }
6277 
6278 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6279 {
6280 	static int version_printed;
6281 	struct net_device *dev;
6282 	struct bnxt *bp;
6283 	int rc, max_irqs;
6284 
6285 	if (version_printed++ == 0)
6286 		pr_info("%s", version);
6287 
6288 	max_irqs = bnxt_get_max_irq(pdev);
6289 	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
6290 	if (!dev)
6291 		return -ENOMEM;
6292 
6293 	bp = netdev_priv(dev);
6294 
6295 	if (bnxt_vf_pciid(ent->driver_data))
6296 		bp->flags |= BNXT_FLAG_VF;
6297 
6298 	if (pdev->msix_cap)
6299 		bp->flags |= BNXT_FLAG_MSIX_CAP;
6300 
6301 	rc = bnxt_init_board(pdev, dev);
6302 	if (rc < 0)
6303 		goto init_err_free;
6304 
6305 	dev->netdev_ops = &bnxt_netdev_ops;
6306 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
6307 	dev->ethtool_ops = &bnxt_ethtool_ops;
6308 
6309 	pci_set_drvdata(pdev, dev);
6310 
6311 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6312 			   NETIF_F_TSO | NETIF_F_TSO6 |
6313 			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6314 			   NETIF_F_GSO_IPXIP4 |
6315 			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
6316 			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
6317 			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
6318 
6319 	dev->hw_enc_features =
6320 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
6321 			NETIF_F_TSO | NETIF_F_TSO6 |
6322 			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
6323 			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
6324 			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
6325 	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
6326 				    NETIF_F_GSO_GRE_CSUM;
6327 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
6328 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
6329 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
6330 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
6331 	dev->priv_flags |= IFF_UNICAST_FLT;
6332 
6333 #ifdef CONFIG_BNXT_SRIOV
6334 	init_waitqueue_head(&bp->sriov_cfg_wait);
6335 #endif
6336 	rc = bnxt_alloc_hwrm_resources(bp);
6337 	if (rc)
6338 		goto init_err;
6339 
6340 	mutex_init(&bp->hwrm_cmd_lock);
6341 	bnxt_hwrm_ver_get(bp);
6342 
6343 	rc = bnxt_hwrm_func_drv_rgtr(bp);
6344 	if (rc)
6345 		goto init_err;
6346 
6347 	/* Get the MAX capabilities for this function */
6348 	rc = bnxt_hwrm_func_qcaps(bp);
6349 	if (rc) {
6350 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
6351 			   rc);
6352 		rc = -1;
6353 		goto init_err;
6354 	}
6355 
6356 	rc = bnxt_hwrm_queue_qportcfg(bp);
6357 	if (rc) {
6358 		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
6359 			   rc);
6360 		rc = -1;
6361 		goto init_err;
6362 	}
6363 
6364 	bnxt_set_tpa_flags(bp);
6365 	bnxt_set_ring_params(bp);
6366 	if (BNXT_PF(bp))
6367 		bp->pf.max_irqs = max_irqs;
6368 #if defined(CONFIG_BNXT_SRIOV)
6369 	else
6370 		bp->vf.max_irqs = max_irqs;
6371 #endif
6372 	bnxt_set_dflt_rings(bp);
6373 
6374 	if (BNXT_PF(bp)) {
6375 		dev->hw_features |= NETIF_F_NTUPLE;
6376 		if (bnxt_rfs_capable(bp)) {
6377 			bp->flags |= BNXT_FLAG_RFS;
6378 			dev->features |= NETIF_F_NTUPLE;
6379 		}
6380 	}
6381 
6382 	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
6383 		bp->flags |= BNXT_FLAG_STRIP_VLAN;
6384 
6385 	rc = bnxt_probe_phy(bp);
6386 	if (rc)
6387 		goto init_err;
6388 
6389 	rc = register_netdev(dev);
6390 	if (rc)
6391 		goto init_err;
6392 
6393 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
6394 		    board_info[ent->driver_data].name,
6395 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
6396 
6397 	bnxt_parse_log_pcie_link(bp);
6398 
6399 	return 0;
6400 
6401 init_err:
6402 	pci_iounmap(pdev, bp->bar0);
6403 	pci_release_regions(pdev);
6404 	pci_disable_device(pdev);
6405 
6406 init_err_free:
6407 	free_netdev(dev);
6408 	return rc;
6409 }
6410 
6411 /**
6412  * bnxt_io_error_detected - called when PCI error is detected
6413  * @pdev: Pointer to PCI device
6414  * @state: The current pci connection state
6415  *
6416  * This function is called after a PCI bus error affecting
6417  * this device has been detected.
6418  */
6419 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
6420 					       pci_channel_state_t state)
6421 {
6422 	struct net_device *netdev = pci_get_drvdata(pdev);
6423 
6424 	netdev_info(netdev, "PCI I/O error detected\n");
6425 
6426 	rtnl_lock();
6427 	netif_device_detach(netdev);
6428 
6429 	if (state == pci_channel_io_perm_failure) {
6430 		rtnl_unlock();
6431 		return PCI_ERS_RESULT_DISCONNECT;
6432 	}
6433 
6434 	if (netif_running(netdev))
6435 		bnxt_close(netdev);
6436 
6437 	pci_disable_device(pdev);
6438 	rtnl_unlock();
6439 
6440 	/* Request a slot slot reset. */
6441 	return PCI_ERS_RESULT_NEED_RESET;
6442 }
6443 
6444 /**
6445  * bnxt_io_slot_reset - called after the pci bus has been reset.
6446  * @pdev: Pointer to PCI device
6447  *
6448  * Restart the card from scratch, as if from a cold-boot.
6449  * At this point, the card has exprienced a hard reset,
6450  * followed by fixups by BIOS, and has its config space
6451  * set up identically to what it was at cold boot.
6452  */
6453 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
6454 {
6455 	struct net_device *netdev = pci_get_drvdata(pdev);
6456 	struct bnxt *bp = netdev_priv(netdev);
6457 	int err = 0;
6458 	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6459 
6460 	netdev_info(bp->dev, "PCI Slot Reset\n");
6461 
6462 	rtnl_lock();
6463 
6464 	if (pci_enable_device(pdev)) {
6465 		dev_err(&pdev->dev,
6466 			"Cannot re-enable PCI device after reset.\n");
6467 	} else {
6468 		pci_set_master(pdev);
6469 
6470 		if (netif_running(netdev))
6471 			err = bnxt_open(netdev);
6472 
6473 		if (!err)
6474 			result = PCI_ERS_RESULT_RECOVERED;
6475 	}
6476 
6477 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
6478 		dev_close(netdev);
6479 
6480 	rtnl_unlock();
6481 
6482 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
6483 	if (err) {
6484 		dev_err(&pdev->dev,
6485 			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
6486 			 err); /* non-fatal, continue */
6487 	}
6488 
6489 	return PCI_ERS_RESULT_RECOVERED;
6490 }
6491 
6492 /**
6493  * bnxt_io_resume - called when traffic can start flowing again.
6494  * @pdev: Pointer to PCI device
6495  *
6496  * This callback is called when the error recovery driver tells
6497  * us that its OK to resume normal operation.
6498  */
6499 static void bnxt_io_resume(struct pci_dev *pdev)
6500 {
6501 	struct net_device *netdev = pci_get_drvdata(pdev);
6502 
6503 	rtnl_lock();
6504 
6505 	netif_device_attach(netdev);
6506 
6507 	rtnl_unlock();
6508 }
6509 
6510 static const struct pci_error_handlers bnxt_err_handler = {
6511 	.error_detected	= bnxt_io_error_detected,
6512 	.slot_reset	= bnxt_io_slot_reset,
6513 	.resume		= bnxt_io_resume
6514 };
6515 
6516 static struct pci_driver bnxt_pci_driver = {
6517 	.name		= DRV_MODULE_NAME,
6518 	.id_table	= bnxt_pci_tbl,
6519 	.probe		= bnxt_init_one,
6520 	.remove		= bnxt_remove_one,
6521 	.err_handler	= &bnxt_err_handler,
6522 #if defined(CONFIG_BNXT_SRIOV)
6523 	.sriov_configure = bnxt_sriov_configure,
6524 #endif
6525 };
6526 
6527 module_pci_driver(bnxt_pci_driver);
6528