// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include "nfp_app.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

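	/* Note: when an XDP program is attached each buffer is a full order-0
	 * page rather than a page frag (see the branch below), so the XDP
	 * program sees the packet in a single page it can own (e.g. for
	 * XDP_TX recycling).
	 */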
	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @dp:	      NFP Net data path struct
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 * @is_xdp:   Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

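	/* Map the ring onto its queue controller peripheral (QCP) queue:
	 * TX queues are spaced nn->stride_tx apart in the TX BAR.
	 */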
	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
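	/* Illustration (hypothetical numbers): with cnt == 4 and wr_p == 6,
	 * wr_idx is 2 -- the one slot currently without a buffer.  The buffer
	 * parked at last_idx == 3 is copied into slot 2 and slot 3 is
	 * cleared, so the single empty slot always ends up last.
	 */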
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

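	/* Rings backed by an AF_XDP pool keep their buffers in xsk_rxbufs and
	 * return them to the pool via the XSK-specific teardown path, so
	 * there is nothing to unmap or free here.
	 */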
	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL skb can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
		 * already allocated ones.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to fill with buffers
 *
 * Return: 0 on success, -ENOMEM if buffer allocation fails.
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

	if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
		dp->txrwb = dma_alloc_coherent(dp->dev,
					       dp->num_tx_rings * sizeof(u64),
					       &dp->txrwb_dma, GFP_KERNEL);
		if (!dp->txrwb)
			goto err_free_rings;
	}

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

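		/* Rings past the stack TX rings are XDP TX rings: a non-zero
		 * bias marks the ring as XDP and maps it back onto the r_vec
		 * of the stack ring with the same relative index.
		 */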
		nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
				     &nn->r_vecs[r - bias], r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

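	/* Unwind in reverse.  A jump to err_free_ring lands inside the loop
	 * body, so the ring whose buffer allocation failed only has its
	 * descriptor memory freed; the while loop then releases buffers and
	 * descriptors of all previously initialized rings.
	 */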
err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}
	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
err_free_rings:
	kfree(dp->tx_rings);
	return -ENOMEM;
}

void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}

	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
	kfree(dp->tx_rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring:  RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp:	      NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
		if (err)
			goto err_alloc;
	}

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}
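
/* The helpers above are meant to be used in prepare/free pairs while a new
 * data path configuration is built.  A rough, illustrative sketch of a
 * caller (not a verbatim copy of the real sequencing, which lives with the
 * ring reconfiguration code in nfp_net_common.c; "new_dp" is a hypothetical
 * name here):
 *
 *	if (nfp_net_tx_rings_prepare(nn, new_dp))
 *		return -ENOMEM;
 *	if (nfp_net_rx_rings_prepare(nn, new_dp)) {
 *		nfp_net_tx_rings_free(new_dp);
 *		return -ENOMEM;
 *	}
 *	// ...swap new_dp in, then free the rings of the old data path...
 */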

void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
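	/* TX pointer write back: the device writes the ring's completion
	 * (read) pointer into this slot in host memory, saving the driver a
	 * read of the QCP queue.  Clear it first so a stale value from a
	 * previous configuration is never consumed.
	 */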
	if (tx_ring->txrwb) {
		*tx_ring->txrwb = 0;
		nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
			  nn->dp.txrwb_dma + idx * sizeof(u64));
	}
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

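/* ndo_start_xmit entry point: dispatch to the xmit implementation of the
 * active datapath (NFD3 or NFDK) through the per-datapath ops.
 */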
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nn->dp.ops->xmit(skb, netdev);
}

bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

	return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}

bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
	bool ret;

	spin_lock_bh(&r_vec->lock);
	ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
	spin_unlock_bh(&r_vec->lock);

	return ret;
}

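/**
 * nfp_net_vlan_strip() - Pass stripped VLAN tag info to the stack
 * @skb:	SKB the tag belongs to
 * @rxd:	RX descriptor for the packet
 * @meta:	Parsed packet metadata
 *
 * The tag can come either from the RX descriptor flags (always a C-TAG) or
 * from the metadata when the device reported VLAN stripping there.
 *
 * Return: false if the metadata carries an unrecognized TPID (the caller is
 * expected to drop the frame), true otherwise.
 */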
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta)
{
	u16 tpid = 0, tci = 0;

	if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
		tpid = ETH_P_8021Q;
		tci = le16_to_cpu(rxd->rxd.vlan);
	} else if (meta->vlan.stripped) {
		if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
			tpid = ETH_P_8021Q;
		else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
			tpid = ETH_P_8021AD;
		else
			return false;

		tci = meta->vlan.tci;
	}
	if (tpid)
		__vlan_hwaccel_put_tag(skb, htons(tpid), tci);

	return true;
}