// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};
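
/*
 * Both map types share one backing store: a circular buffer of qs->size
 * slots, each qs->map.value_size bytes, laid out in elements[].  One slot
 * is always left unused (size = max_entries + 1) so the two boundary
 * conditions stay distinguishable:
 *
 *	head == tail			-> empty
 *	(head + 1) % size == tail	-> full
 *
 * head is the slot the next push writes to, tail is the oldest live
 * element.  A queue consumes from tail (FIFO), a stack consumes from
 * head - 1 (LIFO).
 */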

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;
	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
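
/*
 * Illustrative only (not part of this file): with libbpf, a queue map that
 * passes the checks above could be created roughly like this; the map name
 * and entry count are made up for the example.
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "sample_queue",
 *				0,		// key_size must be 0
 *				sizeof(__u32),	// value_size
 *				64,		// max_entries
 *				NULL);
 *
 * BPF_MAP_TYPE_STACK takes the same attributes; only the pop/peek order
 * differs.
 */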

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}
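
/*
 * The map header and the element ring live in one bpf_map_area_alloc()
 * allocation; slot i starts at elements[i * value_size].  Nothing is
 * allocated per element, so teardown below is a single area free.
 */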

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
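
/*
 * Queue semantics (FIFO): the element copied out is the one at tail, i.e.
 * the oldest push still in the ring.  On a pop (delete == true) tail is
 * advanced with wraparound; on a peek it is left alone.  The value buffer
 * is zeroed on -ENOENT so callers never see stale data.
 */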

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
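
/*
 * Stack semantics (LIFO): the element copied out is the one just below
 * head, i.e. the most recent push.  head - 1 underflows to U32_MAX when
 * head == 0, which the index >= qs->size check folds back to the last
 * slot, so the wraparound case needs no special handling.  A pop simply
 * moves head down to the slot that was just read.
 */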

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
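
/*
 * Rough usage sketch (illustrative, not defined in this file): from a BPF
 * program these ops are reached through the generic map helpers, e.g.
 *
 *	__u32 v = 42;
 *	bpf_map_push_elem(&my_queue, &v, BPF_EXIST);	// overwrite oldest if full
 *	if (bpf_map_pop_elem(&my_queue, &v) == 0)
 *		;	// v now holds the oldest (queue) / newest (stack) element
 *
 * From user space the same ops back the key-less syscall paths:
 * BPF_MAP_UPDATE_ELEM pushes, BPF_MAP_LOOKUP_AND_DELETE_ELEM pops and
 * BPF_MAP_LOOKUP_ELEM peeks, each invoked with a NULL key.  "my_queue" is
 * a made-up map name used only for the example.
 */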

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}
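
/*
 * Queue and stack maps have no keys, so the standard key-based
 * lookup/update/delete/get_next_key callbacks are stubbed out; all real
 * traffic goes through the push/pop/peek callbacks wired up below.
 */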

static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &queue_map_btf_id,
};
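
/*
 * stack_map_ops differs from queue_map_ops only in the pop/peek callbacks,
 * which read the newest element instead of the oldest.
 */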

static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_name = "bpf_queue_stack",
	.map_btf_id = &stack_map_btf_id,
};