xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/nfp_net_dp.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
162d03330SJakub Kicinski /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
262d03330SJakub Kicinski /* Copyright (C) 2019 Netronome Systems, Inc. */
362d03330SJakub Kicinski 
462d03330SJakub Kicinski #ifndef _NFP_NET_DP_
562d03330SJakub Kicinski #define _NFP_NET_DP_
662d03330SJakub Kicinski 
762d03330SJakub Kicinski #include "nfp_net.h"
862d03330SJakub Kicinski 
nfp_net_dma_map_rx(struct nfp_net_dp * dp,void * frag)962d03330SJakub Kicinski static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
1062d03330SJakub Kicinski {
1162d03330SJakub Kicinski 	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
1262d03330SJakub Kicinski 				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
1362d03330SJakub Kicinski 				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
1462d03330SJakub Kicinski }
1562d03330SJakub Kicinski 
1662d03330SJakub Kicinski static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp * dp,dma_addr_t dma_addr)1762d03330SJakub Kicinski nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
1862d03330SJakub Kicinski {
1962d03330SJakub Kicinski 	dma_sync_single_for_device(dp->dev, dma_addr,
2062d03330SJakub Kicinski 				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
2162d03330SJakub Kicinski 				   dp->rx_dma_dir);
2262d03330SJakub Kicinski }
2362d03330SJakub Kicinski 
/* Undo nfp_net_dma_map_rx(); skip the CPU sync to mirror the mapping. */
static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
					dma_addr_t dma_addr)
{
	unsigned int len = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;

	dma_unmap_single_attrs(dp->dev, dma_addr, len, dp->rx_dma_dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}
3162d03330SJakub Kicinski 
/* Sync @len bytes of a received buffer back to the CPU.
 * NOTE(review): @dma_addr appears to point NFP_NET_RX_BUF_HEADROOM past
 * the start of the mapped region, hence the back-off — confirm against
 * the callers that store the mapped address.
 */
static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
					   dma_addr_t dma_addr,
					   unsigned int len)
{
	dma_addr_t start = dma_addr - NFP_NET_RX_BUF_HEADROOM;

	dma_sync_single_for_cpu(dp->dev, start, len, dp->rx_dma_dir);
}
3962d03330SJakub Kicinski 
4062d03330SJakub Kicinski /**
4162d03330SJakub Kicinski  * nfp_net_tx_full() - check if the TX ring is full
4262d03330SJakub Kicinski  * @tx_ring: TX ring to check
4362d03330SJakub Kicinski  * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
4462d03330SJakub Kicinski  *
4562d03330SJakub Kicinski  * This function checks, based on the *host copy* of read/write
4662d03330SJakub Kicinski  * pointer if a given TX ring is full.  The real TX queue may have
4762d03330SJakub Kicinski  * some newly made available slots.
4862d03330SJakub Kicinski  *
4962d03330SJakub Kicinski  * Return: True if the ring is full.
5062d03330SJakub Kicinski  */
nfp_net_tx_full(struct nfp_net_tx_ring * tx_ring,int dcnt)5162d03330SJakub Kicinski static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
5262d03330SJakub Kicinski {
5362d03330SJakub Kicinski 	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
5462d03330SJakub Kicinski }
5562d03330SJakub Kicinski 
/* Ring the TX doorbell for every descriptor queued since the last kick.
 * The wmb() makes the descriptor writes visible in memory before the
 * queue controller write pointer is advanced; wr_ptr_add accumulates
 * deferred kicks (xmit_more batching) and is reset once published.
 */
static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb(); /* drain writebuffer */
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}
6262d03330SJakub Kicinski 
630dcf7f50SJakub Kicinski static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring * tx_ring,struct nfp_net_dp * dp)640dcf7f50SJakub Kicinski nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
650dcf7f50SJakub Kicinski {
660dcf7f50SJakub Kicinski 	if (tx_ring->txrwb)
670dcf7f50SJakub Kicinski 		return *tx_ring->txrwb;
680dcf7f50SJakub Kicinski 	return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
690dcf7f50SJakub Kicinski }
700dcf7f50SJakub Kicinski 
/* Release an RX fragment: XDP buffers are freed as full pages, the
 * regular path uses the page-fragment allocator.
 */
static inline void nfp_net_free_frag(void *frag, bool xdp)
{
	if (xdp)
		__free_page(virt_to_page(frag));
	else
		skb_free_frag(frag);
}
7862d03330SJakub Kicinski 
/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.  nn_pci_flush() then forces the
 * posted write out to the device (presumably via a read-back) so the
 * vector is really unmasked by the time we return.
 */
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
9162d03330SJakub Kicinski 
struct seq_file;

/* Common — implemented once and shared by all datapath (NFD) versions */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);

void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta);
11162d03330SJakub Kicinski 
/* Datapath (NFP firmware descriptor format) generations */
enum nfp_nfd_version {
	NFP_NFD_VER_NFD3,
	NFP_NFD_VER_NFDK,
};
1166fd86efaSJakub Kicinski 
/**
 * struct nfp_dp_ops - Hooks to wrap different implementation of different dp
 * @version:			Indicate dp type
 * @tx_min_desc_per_pkt:	Minimal TX descs needed for each packet
 * @cap_mask:			Mask of supported features
 * @dma_mask:			DMA addressing capability
 * @poll:			Napi poll for normal rx/tx
 * @xsk_poll:			Napi poll when xsk is enabled
 * @ctrl_poll:			Tasklet poll for ctrl rx/tx
 * @xmit:			Xmit for normal path
 * @ctrl_tx_one:		Xmit for ctrl path
 * @rx_ring_fill_freelist:	Give buffers from the ring to FW
 * @tx_ring_alloc:		Allocate resource for a TX ring
 * @tx_ring_reset:		Free any untransmitted buffers and reset pointers
 * @tx_ring_free:		Free resources allocated to a TX ring
 * @tx_ring_bufs_alloc:		Allocate resource for each TX buffer
 * @tx_ring_bufs_free:		Free resources allocated to each TX buffer
 * @print_tx_descs:		Show TX ring's info for debug purpose
 *
 * One constant instance of this vtable exists per descriptor version
 * (see nfp_nfd3_ops / nfp_nfdk_ops declared at the end of this header).
 */
struct nfp_dp_ops {
	enum nfp_nfd_version version;
	unsigned int tx_min_desc_per_pkt;
	u32 cap_mask;
	u64 dma_mask;

	int (*poll)(struct napi_struct *napi, int budget);
	int (*xsk_poll)(struct napi_struct *napi, int budget);
	void (*ctrl_poll)(struct tasklet_struct *t);
	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
	bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct sk_buff *skb, bool old);
	void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
				      struct nfp_net_rx_ring *rx_ring);
	int (*tx_ring_alloc)(struct nfp_net_dp *dp,
			     struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_reset)(struct nfp_net_dp *dp,
			      struct nfp_net_tx_ring *tx_ring);
	/* tx_ring_free deliberately takes no dp — the ring is self-contained */
	void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
	int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);
	void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
				  struct nfp_net_tx_ring *tx_ring);

	void (*print_tx_descs)(struct seq_file *file,
			       struct nfp_net_r_vector *r_vec,
			       struct nfp_net_tx_ring *tx_ring,
			       u32 d_rd_p, u32 d_wr_p);
};
1656fd86efaSJakub Kicinski 
1666fd86efaSJakub Kicinski static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp * dp,struct nfp_net_tx_ring * tx_ring)1676fd86efaSJakub Kicinski nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1686fd86efaSJakub Kicinski {
1696fd86efaSJakub Kicinski 	return dp->ops->tx_ring_reset(dp, tx_ring);
1706fd86efaSJakub Kicinski }
1716fd86efaSJakub Kicinski 
1726fd86efaSJakub Kicinski static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp * dp,struct nfp_net_rx_ring * rx_ring)1736fd86efaSJakub Kicinski nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1746fd86efaSJakub Kicinski 			      struct nfp_net_rx_ring *rx_ring)
1756fd86efaSJakub Kicinski {
1766fd86efaSJakub Kicinski 	dp->ops->rx_ring_fill_freelist(dp, rx_ring);
1776fd86efaSJakub Kicinski }
1786fd86efaSJakub Kicinski 
1796fd86efaSJakub Kicinski static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp * dp,struct nfp_net_tx_ring * tx_ring)1806fd86efaSJakub Kicinski nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1816fd86efaSJakub Kicinski {
1826fd86efaSJakub Kicinski 	return dp->ops->tx_ring_alloc(dp, tx_ring);
1836fd86efaSJakub Kicinski }
1846fd86efaSJakub Kicinski 
1856fd86efaSJakub Kicinski static inline void
nfp_net_tx_ring_free(struct nfp_net_dp * dp,struct nfp_net_tx_ring * tx_ring)1866fd86efaSJakub Kicinski nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1876fd86efaSJakub Kicinski {
1886fd86efaSJakub Kicinski 	dp->ops->tx_ring_free(tx_ring);
1896fd86efaSJakub Kicinski }
1906fd86efaSJakub Kicinski 
1916fd86efaSJakub Kicinski static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp * dp,struct nfp_net_tx_ring * tx_ring)1926fd86efaSJakub Kicinski nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
1936fd86efaSJakub Kicinski 			   struct nfp_net_tx_ring *tx_ring)
1946fd86efaSJakub Kicinski {
1956fd86efaSJakub Kicinski 	return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
1966fd86efaSJakub Kicinski }
1976fd86efaSJakub Kicinski 
1986fd86efaSJakub Kicinski static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp * dp,struct nfp_net_tx_ring * tx_ring)1996fd86efaSJakub Kicinski nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2006fd86efaSJakub Kicinski 			  struct nfp_net_tx_ring *tx_ring)
2016fd86efaSJakub Kicinski {
2026fd86efaSJakub Kicinski 	dp->ops->tx_ring_bufs_free(dp, tx_ring);
2036fd86efaSJakub Kicinski }
2046fd86efaSJakub Kicinski 
2056fd86efaSJakub Kicinski static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file * file,struct nfp_net_dp * dp,struct nfp_net_r_vector * r_vec,struct nfp_net_tx_ring * tx_ring,u32 d_rd_p,u32 d_wr_p)2066fd86efaSJakub Kicinski nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
2076fd86efaSJakub Kicinski 			       struct nfp_net_r_vector *r_vec,
2086fd86efaSJakub Kicinski 			       struct nfp_net_tx_ring *tx_ring,
2096fd86efaSJakub Kicinski 			       u32 d_rd_p, u32 d_wr_p)
2106fd86efaSJakub Kicinski {
2116fd86efaSJakub Kicinski 	dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
2126fd86efaSJakub Kicinski }
2136fd86efaSJakub Kicinski 
/* One ops table per NFD descriptor version */
extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;

netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
2186fd86efaSJakub Kicinski 
21962d03330SJakub Kicinski #endif /* _NFP_NET_DP_ */
220