/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched, and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded when the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. The dependency
 * will order both of the stores after the loads. So no barrier is
 * needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
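
/* As a hedged illustration (an editor's sketch, not code used by the
 * kernel), the four points above map roughly onto the primitives used
 * further down in this file; the descriptor type and the fullness check
 * are simplified:
 *
 *	// producer side (e.g. the kernel filling an Rx ring)
 *	if (q->cached_prod - READ_ONCE(q->ring->consumer) < q->nentries) {   // (A)
 *		ring->desc[q->cached_prod++ & q->ring_mask] = data;          // STORE $data
 *		smp_store_release(&q->ring->producer, q->cached_prod);       // (B)
 *	}
 *
 *	// consumer side (e.g. user space draining that ring)
 *	if (q->cached_cons != smp_load_acquire(&q->ring->producer)) {        // (C)
 *		data = ring->desc[q->cached_cons++ & q->ring_mask];          // LOAD $data
 *		smp_store_release(&q->ring->consumer, q->cached_cons);       // (D)
 *	}
 */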

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
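
/* A minimal usage sketch (illustration only; the helpers are the ones
 * defined below in this file, while error handling and the Rx/Tx split
 * between kernel and user space are glossed over):
 *
 *	// producer: RESERVE + WRITE one descriptor, then SUBMIT it
 *	if (!xskq_prod_reserve_desc(q, addr, len, 0))
 *		xskq_prod_submit(q);
 *
 *	// consumer: PEEK + READ entries, RELEASE them as they are consumed
 *	struct xdp_desc desc;
 *
 *	while (xskq_cons_peek_desc(q, &desc, pool)) {
 *		// ... use desc.addr and desc.len ...
 *		xskq_cons_release(q);		// updates local state only
 *	}
 *	__xskq_cons_release(q);			// reflect releases to the shared ring
 */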

/* Functions that read and validate content from consumer rings. */

static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~XDP_PKT_CONTD;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 offset = desc->addr & (pool->chunk_size - 1);

	if (!desc->len)
		return false;

	if (offset + desc->len > pool->chunk_size)
		return false;

	if (desc->addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (!desc->len)
		return false;

	if (desc->len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + desc->len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* Track the first entry; if we stumble upon *any* invalid
	 * descriptor, rewind the current packet (which may consist of
	 * multiple frags) and stop the processing.
	 */
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->netdev->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */