// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
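
/*
 * A queue map pops elements in FIFO order and a stack map in LIFO order;
 * both share this implementation. Elements carry no key, only a value,
 * and live in a fixed-size ring buffer guarded by a raw spinlock.
 *
 * Rough usage sketch from a BPF program, using the uapi helpers that were
 * introduced together with these map types ("queue" here stands for any
 * map of one of these types, defined elsewhere):
 *
 *	__u32 val = 42;
 *
 *	bpf_map_push_elem(&queue, &val, BPF_ANY);	// enqueue / push
 *	bpf_map_peek_elem(&queue, &val);		// copy out, keep element
 *	bpf_map_pop_elem(&queue, &val);			// copy out and remove
 */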

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[0] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

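/*
 * One ring-buffer slot is deliberately kept unused: with size ==
 * max_entries + 1, head == tail means "empty" and head + 1 == tail
 * (mod size) means "full", so no separate element count is needed.
 */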
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
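	/*
	 * The whole backing store is preallocated at map creation time,
	 * so creating one of these maps is reserved to CAP_SYS_ADMIN.
	 */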
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size were any bigger, userspace would not be
		 * able to access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

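	/*
	 * Size the buffer in u64 so a large max_entries or value_size
	 * cannot overflow 32-bit arithmetic; the check against
	 * U32_MAX - PAGE_SIZE rejects anything that would wrap the
	 * page-granular accounting below.
	 */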
	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->map.pages = cost;
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* At this point bpf_prog->aux->refcnt == 0 and map->refcnt == 0,
	 * so all programs that used this map (there can be more than one)
	 * have been disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete.
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

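/*
 * FIFO side: peek/pop read the oldest element, at tail; push (further
 * below) writes the newest one at head.
 */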
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

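/*
 * LIFO side: the newest element sits at head - 1. The indices are
 * unsigned, so when head == 0 the subtraction wraps around and the
 * index >= qs->size check folds it back to the last slot, size - 1.
 */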
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST forces room for the new element when the map is full,
	 * by evicting the oldest one.
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags: only BPF_ANY (zero) and BPF_EXIST make
	 * sense for a map without keys.
	 */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

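/*
 * These maps have no keys, so the generic keyed operations below are
 * stubs: lookup yields NULL, update/delete/get_next_key yield -EINVAL.
 * The syscall layer instead routes BPF_MAP_LOOKUP_ELEM,
 * BPF_MAP_UPDATE_ELEM and BPF_MAP_LOOKUP_AND_DELETE_ELEM to the peek,
 * push and pop callbacks for these map types.
 */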
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};