// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};
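
/*
 * The map body is a circular buffer of qs->size slots of value_size bytes
 * each. One slot is deliberately left unused (size = max_entries + 1) so
 * that head == tail unambiguously means "empty"; without the spare slot,
 * empty and full would be indistinguishable. head is the next slot to be
 * written, tail the next slot to be consumed (queue semantics).
 */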

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
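
/*
 * Worked example: with max_entries = 2, size = 3. Starting empty
 * (head == tail == 0), two pushes advance head to 2; is_full() then
 * computes (2 + 1) % 3 == 0 == tail and reports full, so at most
 * max_entries elements are ever stored.
 */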

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
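
/*
 * Queue/stack maps are keyless: key_size must be 0, and element access
 * goes through the push/pop/peek callbacks below rather than keyed
 * lookups. A minimal userspace creation sketch (assuming libbpf's
 * bpf_map_create(), available since libbpf 0.7):
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, "my_queue",
 *				0,             // key_size must be 0
 *				sizeof(__u64), // value_size
 *				128,           // max_entries
 *				NULL);
 */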

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}
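
/*
 * Note the size arithmetic above is done in u64: max_entries and
 * value_size are both u32, so size * attr->value_size could overflow a
 * 32-bit multiply; widening first keeps both the charged cost and the
 * allocation size honest.
 */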

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
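
/*
 * Queue (FIFO) reads consume from tail, the oldest element; a pop simply
 * advances tail with wrap-around, so the element bytes are never cleared,
 * only logically released.
 */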

static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
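
/*
 * Stack (LIFO) reads take the most recently pushed element at head - 1.
 * head is u32, so when head == 0 the subtraction wraps to 0xffffffff;
 * the index >= size check catches that and redirects to the last slot,
 * size - 1, which is where the newest element lives after a wrap.
 */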

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
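
/*
 * From a BPF program, push/pop/peek are reached through the map helpers.
 * A minimal sketch (assuming a BPF_MAP_TYPE_QUEUE map named "events" is
 * declared elsewhere in that program):
 *
 *	__u64 v = 42;
 *
 *	bpf_map_push_elem(&events, &v, BPF_EXIST); // overwrite oldest if full
 *	if (bpf_map_pop_elem(&events, &v) == 0)    // v now holds oldest entry
 *		...;
 */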

/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};
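
/*
 * From user space, the syscall mapping for queue/stack maps is: push via
 * BPF_MAP_UPDATE_ELEM with a NULL key, peek via BPF_MAP_LOOKUP_ELEM, and
 * pop via BPF_MAP_LOOKUP_AND_DELETE_ELEM. A minimal libbpf sketch,
 * assuming the map fd from the creation example above:
 *
 *	__u64 v = 42;
 *
 *	bpf_map_update_elem(fd, NULL, &v, 0);         // push
 *	bpf_map_lookup_elem(fd, NULL, &v);            // peek (v = oldest)
 *	bpf_map_lookup_and_delete_elem(fd, NULL, &v); // pop
 */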