/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer if the producer pointer is touched and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion rings, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B): it ensures that $data is written before the
 * producer pointer is updated. If this barrier were missing, the
 * consumer could observe the producer pointer being set and load the
 * data before the producer has written the new data, and would in
 * that case read stale data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, since speculative loads are
 * not discarded when the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. The
 * dependency orders both of the stores after the load, so no explicit
 * barrier is needed.
 *
 * (D) ensures that the load of the data is completed before the store
 * of the consumer pointer can be observed. Without this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
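
/* Illustrative sketch (not part of this header): the (A)-(D) pairing above,
 * expressed with the kernel primitives used by the helpers below. The prod,
 * cons, mask, nentries and process() names are placeholders for this
 * example only.
 *
 *	producer:
 *		cons = READ_ONCE(ring->consumer);		    (A)
 *		if (prod - cons < nentries) {
 *			ring->desc[prod & mask] = new_entry;
 *			smp_store_release(&ring->producer, ++prod); (B)
 *		}
 *
 *	consumer:
 *		prod = smp_load_acquire(&ring->producer);	    (C)
 *		while (cons != prod)
 *			process(ring->desc[cons++ & mask]);
 *		smp_store_release(&ring->consumer, cons);	    (D)
 */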

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
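
/* Illustrative sketch (not part of this header): how these operations map
 * onto the helpers defined below. Error handling, locking and the
 * surrounding NAPI context are omitted; send() is a placeholder.
 *
 *	producer (kernel filling an Rx ring):
 *		if (!xskq_prod_reserve_desc(rx, addr, len, 0))	RESERVE + WRITE
 *			xskq_prod_submit(rx);			SUBMIT
 *
 *	consumer (kernel draining a Tx ring):
 *		while (xskq_cons_peek_desc(tx, &desc, pool)) {	PEEK + READ
 *			send(&desc);
 *			xskq_cons_release(tx);			RELEASE
 *		}
 *		__xskq_cons_release(tx);
 */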

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~XDP_PKT_CONTD;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 offset = desc->addr & (pool->chunk_size - 1);

	if (offset + desc->len > pool->chunk_size)
		return false;

	if (desc->addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}
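
/* Worked example (illustration only): with chunk_size = 2048 (0x800), a
 * descriptor with addr = 0x3100 has offset = 0x3100 & 0x7ff = 0x100 = 256.
 * A len of 1500 fits within the chunk (256 + 1500 <= 2048), whereas a len
 * of 1900 would cross the chunk boundary (256 + 1900 > 2048) and the
 * descriptor would be rejected.
 */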

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + desc->len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
					    u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;

	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;

		descs[nb_entries] = ring->desc[idx];
		if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
			/* Skip the entry */
			cached_cons++;
			continue;
		}

		nb_entries++;
		cached_cons++;
	}

	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return nb_entries;
}
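
/* Usage sketch (illustration only): a batched Tx drain along the lines of
 * the zero-copy Tx path. "budget", "i" and "xmit()" are placeholders.
 *
 *	nb_pkts = xskq_cons_nb_entries(q, budget);
 *	nb_pkts = xskq_cons_read_desc_batch(q, pool, nb_pkts);
 *	for (i = 0; i < nb_pkts; i++)
 *		xmit(&pool->tx_descs[i]);
 *	__xskq_cons_release(q);
 */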

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}
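
/* Usage sketch (illustration only): posting completed Tx buffer addresses to
 * a completion ring. "cq", "addrs", "n" and "i" are placeholders.
 *
 *	for (i = 0; i < n; i++)
 *		if (xskq_prod_reserve_addr(cq, addrs[i]))
 *			break;
 *	xskq_prod_submit(cq);
 */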

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */