/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>

#include "xdp_umem_props.h"

#define RX_BATCH_SIZE 16

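/* The shared producer and consumer indices live on separate cache
 * lines: in this single-producer/single-consumer scheme each side
 * writes only its own index, so keeping them apart avoids false
 * sharing between the kernel and the user-space peer.
 */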
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u32 desc[] ____cacheline_aligned_in_smp;
};

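/* prod_head/prod_tail and cons_head/cons_tail are free-running local
 * copies of the shared ring->producer and ring->consumer indices.
 * prod_head runs ahead of prod_tail while entries are reserved but not
 * yet published; cons_head runs ahead of cons_tail while a peeked
 * batch is being consumed.  The shared indices are only touched via
 * READ_ONCE()/WRITE_ONCE(), and all indices are masked with ring_mask
 * when addressing the descriptor array.
 */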
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating on both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

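/* Return up to dcnt available entries.  The shared producer index is
 * re-read only when the cached copy says the ring is empty, keeping
 * traffic on the shared cache line to a minimum.
 */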
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the cached producer index */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

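/* Return the number of free entries, re-reading the shared consumer
 * index only when the cached copy is too stale to satisfy dcnt.  The
 * indices are free-running u32s, so the subtraction stays correct
 * across wrap-around: e.g. with nentries == 8, producer == 2 and
 * cons_tail == 0xfffffffe, producer - cons_tail == 4, leaving
 * 8 - 4 == 4 free entries.
 */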
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the cached consumer index */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
{
	if (unlikely(idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

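/* Walk the peeked batch from cons_tail towards cons_head and return a
 * pointer to the first valid id.  Invalid ids are accounted in
 * invalid_descs and skipped by advancing cons_tail.
 */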
static inline u32 *xskq_validate_id(struct xsk_queue *q)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_id(q, ring->desc[idx]))
			return &ring->desc[idx];

		q->cons_tail++;
	}

	return NULL;
}

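/* Consumption follows a peek/discard pattern: peek returns a pointer
 * to the next valid id (refilling cons_head with a fresh batch when
 * the previous one is exhausted), and discard advances past it.  A
 * minimal sketch, assuming a single consumer ("use_frame" is a
 * hypothetical callback, not part of this header):
 *
 *	u32 *id;
 *
 *	while ((id = xskq_peek_id(q))) {
 *		use_frame(*id);
 *		xskq_discard_id(q);
 *	}
 */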
static inline u32 *xskq_peek_id(struct xsk_queue *q)
{
	struct xdp_umem_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_id(q);
	}

	ring = (struct xdp_umem_ring *)q->ring;
	return &ring->desc[q->cons_tail & q->ring_mask];
}

static inline void xskq_discard_id(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_id(q);
}

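/* Production is split in two so that a slot can be claimed before the
 * actual id is known: xskq_reserve_id() advances the local prod_head,
 * and xskq_produce_id() later fills a slot and publishes it.  A
 * sketch, assuming a single producer (error handling elided):
 *
 *	if (xskq_reserve_id(q))
 *		return -ENOSPC;
 *	... obtain the frame id, e.g. once the frame completes ...
 *	xskq_produce_id(q, id);
 */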
static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = id;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

static inline int xskq_reserve_id(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	u32 buff_len;

	if (unlikely(d->idx >= q->umem_props.nframes)) {
		q->invalid_descs++;
		return false;
	}

	buff_len = q->umem_props.frame_size;
	if (unlikely(d->len > buff_len || d->len == 0 ||
		     d->offset > buff_len || d->offset + d->len > buff_len)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

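/* Same skip-invalid walk as xskq_validate_id(), but a valid descriptor
 * is copied out through *desc (when desc is non-NULL) rather than
 * returned by pointer into the ring.
 */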
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		if (xskq_is_valid_desc(q, &ring->desc[idx])) {
			if (desc)
				*desc = ring->desc[idx];
			return desc;
		}

		q->cons_tail++;
	}

	return NULL;
}

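/* Peek/discard consumption for Rx/Tx descriptors, mirroring the umem
 * id variant above.  A minimal sketch, assuming a single consumer
 * ("xmit_one" is a hypothetical transmit helper, not part of this
 * header):
 *
 *	struct xdp_desc desc;
 *
 *	while (xskq_peek_desc(q, &desc)) {
 *		xmit_one(&desc);
 *		xskq_discard_desc(q);
 *	}
 */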
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	struct xdp_rxtx_ring *ring;

	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();

		return xskq_validate_desc(q, desc);
	}

	ring = (struct xdp_rxtx_ring *)q->ring;
	*desc = ring->desc[q->cons_tail & q->ring_mask];
	return desc;
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
	(void)xskq_validate_desc(q, NULL);
}

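/* The producer side batches: xskq_produce_batch_desc() fills entries
 * and advances only the local prod_head, and a single
 * xskq_produce_flush_desc() then publishes the whole batch, paying for
 * the barrier and the shared producer-index write once per batch.  A
 * sketch, assuming descs[] holds n prepared descriptors:
 *
 *	for (i = 0; i < n; i++)
 *		if (xskq_produce_batch_desc(q, descs[i].idx,
 *					    descs[i].len, descs[i].offset))
 *			break;
 *	xskq_produce_flush_desc(q);
 */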
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u32 id, u32 len, u16 offset)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].idx = id;
	ring->desc[idx].len = len;
	ring->desc[idx].offset = offset;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */