// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"

static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
{
	struct xdp_umem_ring *umem_ring;
	struct xdp_rxtx_ring *rxtx_ring;

	if (umem_queue)
		return struct_size(umem_ring, desc, q->nentries);
	return struct_size(rxtx_ring, desc, q->nentries);
}

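/* Illustrative sketch, not part of the kernel build: struct_size() from
 * <linux/overflow.h> evaluates to sizeof(*ring) plus nentries trailing
 * elements of the flexible desc[] array, and it saturates at SIZE_MAX on
 * arithmetic overflow instead of wrapping, which xskq_create() below
 * relies on:
 *
 *	struct xdp_umem_ring *ring = NULL;	// used for sizeof() only
 *	size_t sz = struct_size(ring, desc, 8);
 *	// sz == sizeof(*ring) + 8 * sizeof(ring->desc[0])
 *	// on overflow: sz == SIZE_MAX
 */
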
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	size = xskq_get_ring_size(q, umem_queue);

	/* A size within one page of SIZE_MAX would become 0 in PAGE_ALIGN().
	 * Checking for SIZE_MAX alone is enough: struct_size() saturates to
	 * SIZE_MAX on overflow, and the caller's earlier is_power_of_2()
	 * check on nentries rules out the remaining near-SIZE_MAX values.
	 * Any other oversized value is rejected by vmalloc_user().
	 */
	if (unlikely(size == SIZE_MAX)) {
		kfree(q);
		return NULL;
	}
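
	/* Worked example of the wrap-around the check above prevents,
	 * assuming PAGE_SIZE == 4096:
	 *
	 *	PAGE_ALIGN(SIZE_MAX) == (SIZE_MAX + 4095) & ~4095 == 0
	 *
	 * and passing the resulting 0 to vmalloc_user() can trigger a
	 * zero-size allocation warning instead of a clean failure.
	 */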

	size = PAGE_ALIGN(size);

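	/* vmalloc_user() returns zeroed, page-aligned memory suitable for
	 * mapping into user space; the rings are later exposed to the
	 * application via mmap() on the AF_XDP socket.
	 */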
	q->ring = vmalloc_user(size);
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	q->ring_vmalloc_size = size;
	return q;
}

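/* Minimal usage sketch (hypothetical caller, illustration only): nentries
 * must be validated as a nonzero power of two before calling xskq_create(),
 * which the AF_XDP socket layer does when an application configures a ring
 * size; that is the is_power_of_2() check the comment above refers to.
 *
 *	struct xsk_queue *q = xskq_create(2048, false);	// Rx/Tx ring
 *
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	xskq_destroy(q);
 */
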
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	vfree(q->ring);
	kfree(q);
}