xref: /openbmc/linux/net/xdp/xsk_queue.c (revision 6491d698)
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"

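/* Record the umem size and chunk mask on an already allocated queue.
 * A NULL queue is tolerated so callers need not check first.
 */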
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}

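/* Bytes needed for a umem (fill/completion) ring: the ring header plus
 * one u64 address per entry.
 */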
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}

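/* Bytes needed for an Rx/Tx ring: the ring header plus one
 * struct xdp_desc per entry.
 */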
static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}

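/* Allocate a queue and its backing ring. nentries is expected to be a
 * power of two so that ring_mask (nentries - 1) can wrap ring indices.
 * The ring itself is allocated as zeroed, physically contiguous pages.
 */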
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP  | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}

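/* Release the ring pages and the queue itself. A NULL queue is a no-op. */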
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}

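/* Allocate a fill-queue reuse array with room for nentries handles,
 * rounded up to the next power of two. Only the header is zeroed; the
 * handle slots are left uninitialized. Returns NULL if the rounding
 * overflows u32 or the allocation fails.
 */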
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);

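/* Install newq as the umem's reuse queue, carrying over any handles saved
 * in the old one. Returns the old queue for the caller to free, NULL if
 * there was no old queue, or newq itself (not installed) when it is too
 * small to hold the old entries.
 */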
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);

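/* Free a reuse queue allocated with xsk_reuseq_prepare(). */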
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);

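/* Drop the umem's reuse queue, if any, and clear the pointer. */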
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}