/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

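/* Allocates one per-cpu region per element. An 8-byte alignment is
 * sufficient because elem_size is already round_up(value_size, 8), and
 * __GFP_NOWARN keeps user-sized allocation failures quiet since they
 * are reported to user space as -ENOMEM anyway.
 */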
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}

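/* The element storage sits directly behind struct bpf_array: for the
 * plain array map, max_entries blocks of elem_size bytes starting at
 * array->value; for the per-cpu flavor, max_entries pointers in
 * array->pptrs. Lookup is therefore simple pointer arithmetic behind a
 * single bounds check.
 */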
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
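/* For example, with max_entries == 256 and elem_size == 8 the sequence
 * above unrolls roughly to (r1 = map pointer, r2 = pointer to key):
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// load index
 *	if r0 >= 256 goto miss		// JGE skips the next 3 insns
 *	r0 <<= 3			// index * elem_size
 *	r0 += r1			// &array->value[index * elem_size]
 *	goto out			// JA skips the next insn
 * miss:
 *	r0 = 0				// NULL
 * out:
 */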

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

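/* Called from syscall: gathers the value from every possible CPU into
 * one flat buffer, so the destination must provide at least
 * round_up(value_size, 8) * num_possible_cpus() bytes.
 */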
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
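/* Hence a user-space walk starts with an absent (or out-of-range) key
 * to obtain index 0, then feeds each returned key back in until
 * -ENOENT signals that the last element was reached.
 */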

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}
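/* Note: the memcpy() above is not atomic with respect to concurrent
 * lookups; a reader on another CPU may observe a partially written
 * value. Callers that need stronger guarantees must arrange them
 * themselves.
 */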

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros which percpu_alloc zero-filled,
	 * so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
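/* This is the mirror image of bpf_percpu_array_copy(): the value
 * buffer carries one round_up(value_size, 8) sized block per possible
 * CPU, in for_each_possible_cpu() order.
 */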

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so all programs that used this map (there can be more than one)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};
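/* Note that percpu_array_map_ops has no .map_gen_lookup: the per-cpu
 * lookup needs this_cpu_ptr() at run time and cannot be expressed as
 * the simple inline instruction sequence used for the plain array map.
 */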

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

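/* Lookup from an eBPF program always misses: the stored kernel
 * pointers must not be exposed to the program. The fd-based map types
 * consume the entries through their own helpers instead.
 */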
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
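/* The xchg() above makes the replacement atomic: concurrent RCU
 * readers see either the old or the new pointer, never a mix, and the
 * old object is only put after it has been swapped out.
 */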

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};
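/* Program arrays are the targets of bpf_tail_call(). There is
 * deliberately no .map_update_elem here: an update must translate a
 * program fd into a bpf_prog pointer and therefore goes through
 * bpf_fd_array_map_update_elem() on the syscall path.
 */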

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

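/* RCU callback: dropping the perf file reference is deferred by a full
 * grace period, so a program that raced with deletion can keep using
 * ee->event until it leaves its RCU read-side critical section.
 */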
static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

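/* Only perf events a program can safely deal with may be stored:
 * hardware and raw events, plus software events of the
 * PERF_COUNT_SW_BPF_OUTPUT kind; events with attr->inherit set are
 * rejected.
 */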
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

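/* Array-of-maps: each value is the fd of another map. A template
 * (inner_map_meta) records the inner map's type and sizes so the
 * verifier can type-check lookups into whichever inner map is
 * installed at runtime.
 */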
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
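/* The READ_ONCE() above pairs with the xchg() in
 * bpf_fd_array_map_update_elem(): a program sees either the old inner
 * map or the new one, never a torn pointer.
 */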

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
};
602