xref: /openbmc/linux/kernel/bpf/queue_stack_maps.c (revision 81fa7a69c2174ed8de314b9c231ef30a8718e5e1)
// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

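/*
 * Elements live in a single contiguous ring buffer of size = max_entries + 1
 * slots. One slot is always kept unused so that head == tail means "empty"
 * and (head + 1) % size == tail means "full". Queue pops consume from tail,
 * stack pops consume from head - 1; pushes for both map types append at head.
 */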
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[0] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

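/*
 * The map and its element storage are allocated as one contiguous area:
 * struct bpf_queue_stack followed by (max_entries + 1) slots of value_size
 * bytes each. The rounded-up page count is pre-checked against the caller's
 * memlock limit and recorded in map->pages for later accounting.
 */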
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u32 size, value_size;
	u64 queue_size, cost;

	size = attr->max_entries + 1;
	value_size = attr->value_size;

	queue_size = sizeof(*qs) + (u64) value_size * size;

	cost = queue_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->map.pages = cost;
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

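/*
 * Copy out the oldest element (at tail); when @delete is set the tail index
 * is advanced, i.e. the element is dequeued in FIFO order.
 */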
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
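/*
 * Copy out the most recently pushed element (at head - 1, wrapping back to
 * size - 1 when head is 0); when @delete is set head is moved back, i.e.
 * the element is popped in LIFO order.
 */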
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

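/*
 * Push is shared by queue and stack maps: new elements are always appended
 * at head. With BPF_EXIST set, a full map makes room by dropping the oldest
 * element (advancing tail) instead of failing.
 */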
/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

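/*
 * Queue and stack maps have no keys, so the generic per-key operations
 * below are not supported: lookup returns NULL and update/delete/
 * get_next_key fail with -EINVAL.
 */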
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};