/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

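/* Note (not in the original source): this static key appears to be
 * enabled by the core driver when there are fewer XDP TX rings than
 * possible CPUs, i.e. when a ring may be shared between CPUs;
 * bnxt_xdp_xmit() below then serializes ring access with
 * txr->xdp_tx_lock.
 */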
DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);

struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len,
				   struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	int num_frags = 0;
	u32 flags;
	u16 prod;
	int i;

	if (xdp && xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		num_frags = sinfo->nr_frags;
	}

	/* fill up the first buffer */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->nr_frags = num_frags;
	if (xdp)
		tx_buf->page = virt_to_head_page(xdp->data);

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) |
		((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
		bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	/* now fill the frags into the next buffers */
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];
		struct bnxt_sw_tx_bd *frag_tx_buf;
		dma_addr_t frag_mapping;
		int frag_len;

		prod = NEXT_TX(prod);
		WRITE_ONCE(txr->tx_prod, prod);

		/* fill the next buffer with this fragment */
		frag_tx_buf = &txr->tx_buf_ring[prod];
		frag_tx_buf->page = skb_frag_page(frag);

		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		frag_len = skb_frag_size(frag);
		flags = frag_len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
		frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
			       skb_frag_off(frag);
		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);

		len = frag_len;
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type = cpu_to_le32((len << TX_BD_LEN_SHIFT) | flags |
			TX_BD_FLAGS_PACKET_END);
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	return tx_buf;
}

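/* Queue an XDP_TX packet.  The current RX producer index is stashed in
 * the TX buffer so that bnxt_tx_int_xdp() can ring the RX doorbell only
 * once the BDs that still reference the RX pages have completed.
 */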
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod,
			    struct xdp_buff *xdp)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

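/* Queue an XDP_REDIRECT frame.  Unlike XDP_TX buffers, the frame data
 * was mapped with dma_map_single() by the caller (see bnxt_xdp_xmit()
 * below), so the unmap handle is recorded here for TX completion.
 */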
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}

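/* TX completion for XDP rings: unmap and free XDP_REDIRECT frames,
 * recycle XDP_TX pages (head page plus any frag pages) back to the RX
 * page pool, and finally write the deferred RX doorbell if needed.
 */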
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	int nr_pkts = bnapi->tx_pkts;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i, j, frags;

	if (!budget)
		return;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			tx_buf->action = 0;
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;

			frags = tx_buf->nr_frags;
			for (j = 0; j < frags; j++) {
				tx_cons = NEXT_TX(tx_cons);
				tx_buf = &txr->tx_buf_ring[tx_cons];
				page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
			}
		} else {
			bnxt_sched_reset_txr(bp, txr, i);
			return;
		}
		tx_cons = NEXT_TX(tx_cons);
	}

	bnapi->tx_pkts = 0;
	WRITE_ONCE(txr->tx_cons, tx_cons);
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);

	return !!xdp_prog;
}

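/* Initialize an xdp_buff over the head RX buffer and sync the packet
 * data for CPU access.  Only BNXT_RX_PAGE_SIZE bytes are described
 * here; for multi-buffer packets the aggregation path (in bnxt.c, not
 * shown in this file) attaches the remaining pages as frags before the
 * program runs.
 */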
void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			u16 cons, u8 *data_ptr, unsigned int len,
			struct xdp_buff *xdp)
{
	u32 buflen = BNXT_RX_PAGE_SIZE;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 offset;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);

	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}

void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
			      struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	int i;

	if (!xdp || !xdp_buff_has_frags(xdp))
		return;
	shinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < shinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&shinfo->frags[i]);

		page_pool_recycle_direct(rxr->page_pool, page);
	}
	shinfo->nr_frags = 0;
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
		 unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	dma_addr_t mapping;
	u32 tx_needed = 1;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	offset = bp->rx_offset;

	txr = rxr->bnapi->tx_ring;
	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the TX ring is not completely available, some XDP_TX BDs are
	 * still in flight and may reference RX buffers, so we must not
	 * update the RX producer yet.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}

	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		*event = 0;

		if (unlikely(xdp_buff_has_frags(&xdp))) {
			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);

			tx_needed += sinfo->nr_frags;
			*event = BNXT_AGG_EVENT;
		}

		if (tx_avail < tx_needed) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);

		*event |= BNXT_TX_EVENT;
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod), &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		rx_buf = &rxr->rx_buf_ring[cons];
		mapping = rx_buf->mapping - bp->rx_dma_offset;
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_xdp_buff_frags_free(rxr, &xdp);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_xdp_buff_frags_free(rxr, &xdp);
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
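
/* A rough sketch of how the RX path consumes the helpers above,
 * simplified from bnxt_rx_pkt() in bnxt.c (error handling and the
 * aggregation-frag setup omitted):
 *
 *	if (bnxt_xdp_attached(bp, rxr)) {
 *		bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
 *		if (bnxt_rx_xdp(bp, rxr, cons, xdp, page, &data_ptr,
 *				&len, &event))
 *			goto next_rx;	(packet consumed by XDP)
 *	}
 */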
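/* .ndo_xdp_xmit handler.  Returns the number of frames actually queued;
 * per the ndo_xdp_xmit contract the caller is responsible for freeing
 * any frames beyond the returned count.
 */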
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int nxmit = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
		return -EINVAL;

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!bnxt_tx_avail(bp, txr))
			break;

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping))
			break;

		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	if (static_branch_unlikely(&bnxt_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);

	return nxmit;
}

/* Under rtnl_lock.  Attaching or detaching a program changes the ring
 * layout (one XDP TX ring is reserved per RX ring), so the NIC is
 * closed and reopened if it is running.
 */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && !prog->aux->xdp_has_frags &&
	    bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
		xdp_features_set_redirect_target(dev, true);
	} else {
		int rx, tx;

		xdp_features_clear_redirect_target(dev);
		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}

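/* Attach the xdp_buff frags to the skb built from the head buffer and
 * propagate the hardware checksum status.  Used for multi-buffer
 * packets that XDP passed up to the stack.
 */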
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
		   struct page_pool *pool, struct xdp_buff *xdp,
		   struct rx_cmp_ext *rxcmp1)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (!skb)
		return NULL;
	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (bp->dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}
	xdp_update_skb_shared_info(skb, num_frags,
				   sinfo->xdp_frags_size,
				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
				   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}