/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#ifndef OTX2_TXRX_H
#define OTX2_TXRX_H

#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <linux/dim.h>	/* struct dim, embedded in struct otx2_cq_poll below */
#include <net/xdp.h>

#define LBK_CHAN_BASE	0x000
#define SDP_CHAN_BASE	0x700
#define CGX_CHAN_BASE	0x800

#define OTX2_DATA_ALIGN(X)	ALIGN(X, OTX2_ALIGN)
#define OTX2_HEAD_ROOM		OTX2_ALIGN

#define	OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
#define	OTX2_MIN_MTU		60

#define OTX2_PAGE_POOL_SZ	2048

#define OTX2_MAX_GSO_SEGS	255
#define OTX2_MAX_FRAGS_IN_SQE	9

#define MAX_XDP_MTU	(1530 - OTX2_ETH_HLEN)

/* Rx buffer size should be in multiples of 128 bytes */
#define RCV_FRAG_LEN1(x)				\
		((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
		OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* Prefer 2048 byte buffers for better last level cache
 * utilization or data distribution across regions.
 */
#define RCV_FRAG_LEN(x)	\
		((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
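
/* Worked example, assuming OTX2_ALIGN is 128 (as defined in
 * otx2_common.h) and a struct skb_shared_info of roughly 320 bytes on a
 * 64-bit build (so it aligns up to 384):
 *
 *	RCV_FRAG_LEN1(1536) = 128 + 1536 + 384 = 2048
 *	RCV_FRAG_LEN1(512)  = 128 + 512  + 384 = 1024, below 2048,
 *			      so RCV_FRAG_LEN(512) rounds up to 2048.
 */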

#define DMA_BUFFER_LEN(x)	((x) - OTX2_HEAD_ROOM)
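/* e.g., assuming OTX2_ALIGN is 128 as above, a 2048 byte receive buffer
 * leaves DMA_BUFFER_LEN(2048) = 1920 bytes for packet data.
 */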

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is equal to this value.
 */
#define CQ_CQE_THRESH_DEFAULT	10

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is nonzero and this much time elapses after that.
 */
#define CQ_TIMER_THRESH_DEFAULT	1  /* 1 usec */
#define CQ_TIMER_THRESH_MAX     25 /* 25 usec */
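
/* Net effect of the two defaults above: the completion interrupt fires
 * either once 10 CQEs have accumulated, or 1 usec after the first CQE
 * arrives, whichever happens first.
 */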

/* Min number of CQs (of the ones mapped to this CINT)
 * with valid CQEs.
 */
#define CQ_QCOUNT_DEFAULT	1

#define CQ_OP_STAT_OP_ERR       63
#define CQ_OP_STAT_CQ_ERR       46
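
/* The two values above are bit positions in the 64-bit status returned
 * by a NIX_LF_CQ_OP_STATUS read; the CQE handling code tests them
 * (e.g. with BIT_ULL()) to detect CQ operation errors.
 */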

struct queue_stats {
	u64	bytes;
	u64	pkts;
};

struct otx2_rcv_queue {
	struct queue_stats	stats;
};

struct sg_list {
	u16	num_segs;
	u64	skb;
	u64	size[OTX2_MAX_FRAGS_IN_SQE];
	u64	dma_addr[OTX2_MAX_FRAGS_IN_SQE];
};
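
/* One sg_list is kept per SQE so that, on TX completion, the driver can
 * recover the skb pointer and DMA-unmap each of the num_segs fragments
 * it mapped when the SQE was built.
 */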

struct otx2_snd_queue {
	u8			aura_id;
	u16			head;
	u16			cons_head;
	u16			sqe_size;
	u32			sqe_cnt;
	u16			num_sqbs;
	u16			sqe_thresh;
	u8			sqe_per_sqb;
	u64			io_addr;
	u64			*aura_fc_addr;
	u64			*lmt_addr;
	void			*sqe_base;
	struct qmem		*sqe;
	struct qmem		*tso_hdrs;
	struct sg_list		*sg;
	struct qmem		*timestamps;
	struct queue_stats	stats;
	u16			sqb_count;
	u64			*sqb_ptrs;
} ____cacheline_aligned_in_smp;
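
/* head and cons_head index a power-of-two ring of sqe_cnt SQEs.  A
 * sketch of the occupancy check a transmit path would do before
 * appending (names from this header; the compare against sqe_thresh is
 * an assumption about the caller):
 *
 *	free = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) &
 *	       (sq->sqe_cnt - 1);
 *	if (free < sq->sqe_thresh)
 *		stop the queue;
 */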

enum cq_type {
	CQ_RX,
	CQ_TX,
	CQ_XDP,
	CQ_QOS,
	CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
};

struct otx2_cq_poll {
	void			*dev;
#define CINT_INVALID_CQ		255
	u8			cint_idx;
	u8			cq_ids[CQS_PER_CINT];
	struct dim		dim;
	struct napi_struct	napi;
};
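
/* The embedded struct dim is the state block for the kernel's generic
 * dynamic interrupt moderation library (linux/dim.h); presumably the
 * NAPI handler feeds it per-poll byte/packet samples via net_dim() so
 * the CQE/timer thresholds can be retuned when adaptive coalescing is
 * enabled.
 */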

struct otx2_pool {
	struct qmem		*stack;
	struct qmem		*fc_addr;
	struct page_pool	*page_pool;
	u16			rbsize;
};

struct otx2_cq_queue {
	u8			cq_idx;
	u8			cq_type;
	u8			cint_idx; /* CQ interrupt id */
	u8			refill_task_sched;
	u16			cqe_size;
	u16			pool_ptrs;
	u32			cqe_cnt;
	u32			cq_head;
	u32			cq_tail;
	u32			pend_cqe;
	void			*cqe_base;
	struct qmem		*cqe;
	struct otx2_pool	*rbpool;
	struct xdp_rxq_info	xdp_rxq;
} ____cacheline_aligned_in_smp;

struct otx2_qset {
	u32			rqe_cnt;
	u32			sqe_cnt; /* Keep these two at top */
#define OTX2_MAX_CQ_CNT		64
	u16			cq_cnt;
	u16			xqe_size;
	struct otx2_pool	*pool;
	struct otx2_cq_poll	*napi;
	struct otx2_cq_queue	*cq;
	struct otx2_snd_queue	*sq;
	struct otx2_rcv_queue	*rq;
};

/* Translate IOVA to physical address */
static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (likely(iommu_domain))
		return iommu_iova_to_phys(iommu_domain, dma_addr);
	return dma_addr;
}
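
/* Illustrative call, with hypothetical local names (the driver would
 * pass its cached IOMMU domain pointer, or NULL when no IOMMU is in
 * use, in which case the IOVA already is the physical address):
 *
 *	u64 pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
 */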

int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		     int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx);
int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
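
/* The otx2_ and cn10k_ flush/refill prototypes above are per-silicon
 * variants of the same operations; the driver presumably binds one set
 * through its hw_ops table at probe time rather than calling either
 * symbol directly.
 */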
#endif /* OTX2_TXRX_H */