// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;	/* head: next slot to push to; tail: oldest element */
	u32 size; /* max_entries + 1; one slot is kept spare to tell full from empty */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}
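
/* Worked example of the ring arithmetic above: with max_entries = 2, size
 * is 3 and the ring has slots 0..2.  Empty means head == tail.  After two
 * pushes with tail == 0, head == 2 and the map is full, because advancing
 * head once more would wrap it to 0 == tail.  Keeping one slot spare is
 * what lets head == tail unambiguously mean "empty".
 */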

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* Check sanity of attributes: queue/stack maps are keyless, so
	 * key_size must be 0.
	 */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* If value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	/* One slot beyond max_entries, see queue_stack_map_is_full() */
	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&qs->map, attr);

	/* bpf_map_area_alloc() zeroes the area, so head == tail == 0 (empty) */
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	/* FIFO: the queue consumes from tail, the oldest element */
	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	/* LIFO: the stack consumes from head - 1, the newest element.
	 * When head is 0 the u32 subtraction wraps, so the >= size check
	 * below also handles the wrap back to the last slot.
	 */
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static long queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static long stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static long queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static long stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}
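
/* Usage sketch (caller-side, not part of this file): from a BPF program
 * these maps are driven through the bpf_map_push_elem(), bpf_map_pop_elem()
 * and bpf_map_peek_elem() helpers, e.g.:
 *
 *	struct event e = {};
 *	bpf_map_push_elem(&my_queue, &e, BPF_ANY);	// enqueue
 *	if (bpf_map_pop_elem(&my_queue, &e) == 0)	// dequeue oldest
 *		...;
 *
 * From user space, bpf_map_update_elem() with a NULL key pushes and
 * bpf_map_lookup_and_delete_elem() pops.  "my_queue" above is a
 * hypothetical map name used only for illustration.
 */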

/* Called from syscall or from eBPF program */
static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
				      u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case
	 * the map is full.
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps: only BPF_ANY and
	 * BPF_EXIST are accepted.
	 */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* Advance tail pointer to overwrite the oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}
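
/* Note on BPF_EXIST, derived from the logic above: pushing with BPF_EXIST
 * turns a full queue into a circular buffer that silently drops the oldest
 * element, a common way to keep the last max_entries samples, e.g.:
 *
 *	bpf_map_push_elem(&recent_samples, &sample, BPF_EXIST);
 *
 * where "recent_samples" is a hypothetical queue map.
 */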

/* Called from syscall or from eBPF program.  Queue/stack maps are keyless,
 * so per-key lookup is unsupported.
 */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static long queue_stack_map_update_elem(struct bpf_map *map, void *key,
					void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static long queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

static u64 queue_stack_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_queue_stack);

	usage += ((u64)map->max_entries + 1) * map->value_size;
	return usage;
}
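
/* For example, a queue with max_entries = 4 and value_size = 8 reports
 * sizeof(struct bpf_queue_stack) + 5 * 8 bytes, mirroring the allocation
 * in queue_stack_map_alloc().
 */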

BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_mem_usage = queue_stack_map_mem_usage,
	.map_btf_id = &queue_map_btf_ids[0],
};

/* The stack shares everything with the queue except the LIFO pop/peek */
const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_mem_usage = queue_stack_map_mem_usage,
	.map_btf_id = &queue_map_btf_ids[0],
};