xref: /openbmc/linux/kernel/bpf/arraymap.c (revision c9933d49)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016,2017 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/err.h>
8 #include <linux/slab.h>
9 #include <linux/mm.h>
10 #include <linux/filter.h>
11 #include <linux/perf_event.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/rcupdate_trace.h>
14 #include <linux/btf_ids.h>
15 
16 #include "map_in_map.h"
17 
18 #define ARRAY_CREATE_FLAG_MASK \
19 	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
20 	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
21 
22 static void bpf_array_free_percpu(struct bpf_array *array)
23 {
24 	int i;
25 
26 	for (i = 0; i < array->map.max_entries; i++) {
27 		free_percpu(array->pptrs[i]);
28 		cond_resched();
29 	}
30 }
31 
32 static int bpf_array_alloc_percpu(struct bpf_array *array)
33 {
34 	void __percpu *ptr;
35 	int i;
36 
37 	for (i = 0; i < array->map.max_entries; i++) {
38 		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
39 					   GFP_USER | __GFP_NOWARN);
40 		if (!ptr) {
41 			bpf_array_free_percpu(array);
42 			return -ENOMEM;
43 		}
44 		array->pptrs[i] = ptr;
45 		cond_resched();
46 	}
47 
48 	return 0;
49 }
50 
51 /* Called from syscall */
52 int array_map_alloc_check(union bpf_attr *attr)
53 {
54 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 	int numa_node = bpf_map_attr_numa_node(attr);
56 
57 	/* check sanity of attributes */
58 	if (attr->max_entries == 0 || attr->key_size != 4 ||
59 	    attr->value_size == 0 ||
60 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
61 	    !bpf_map_flags_access_ok(attr->map_flags) ||
62 	    (percpu && numa_node != NUMA_NO_NODE))
63 		return -EINVAL;
64 
65 	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
66 	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
67 		return -EINVAL;
68 
69 	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
70 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
71 		return -EINVAL;
72 
73 	if (attr->value_size > KMALLOC_MAX_SIZE)
74 		/* if value_size is bigger, user space won't be able to
75 		 * access the elements.
76 		 */
77 		return -E2BIG;
78 
79 	return 0;
80 }
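
/* Illustrative only, not part of this file: a typical libbpf-style map
 * definition on the BPF program side that satisfies the checks above
 * (the map name "my_array" and the value type are assumptions):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_array SEC(".maps");
 *
 * key_size is fixed at 4 bytes (a u32 index) and value_size must be
 * non-zero and no larger than KMALLOC_MAX_SIZE.
 */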
81 
82 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
83 {
84 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
85 	int numa_node = bpf_map_attr_numa_node(attr);
86 	u32 elem_size, index_mask, max_entries;
87 	bool bypass_spec_v1 = bpf_bypass_spec_v1();
88 	u64 array_size, mask64;
89 	struct bpf_array *array;
90 
91 	elem_size = round_up(attr->value_size, 8);
92 
93 	max_entries = attr->max_entries;
94 
95 	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
96 	 * the uppermost bit set in u32 space is undefined behavior due to
97 	 * the resulting 1U << 32, so do it manually here in u64 space.
98 	 */
99 	mask64 = fls_long(max_entries - 1);
100 	mask64 = 1ULL << mask64;
101 	mask64 -= 1;
102 
103 	index_mask = mask64;
104 	if (!bypass_spec_v1) {
105 		/* round up array size to nearest power of 2,
106 		 * since cpu will speculate within index_mask limits
107 		 */
108 		max_entries = index_mask + 1;
109 		/* Check for overflows. */
110 		if (max_entries < attr->max_entries)
111 			return ERR_PTR(-E2BIG);
112 	}
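
	/* Worked example (illustrative): for attr->max_entries == 5,
	 * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7 and
	 * index_mask == 7; with the Spectre v1 mitigation active the
	 * array is then sized for max_entries == 8, so any speculated
	 * access masked with index_mask stays inside the allocation.
	 */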
113 
114 	array_size = sizeof(*array);
115 	if (percpu) {
116 		array_size += (u64) max_entries * sizeof(void *);
117 	} else {
118 		/* rely on vmalloc() to return page-aligned memory and
119 		 * ensure array->value is exactly page-aligned
120 		 */
121 		if (attr->map_flags & BPF_F_MMAPABLE) {
122 			array_size = PAGE_ALIGN(array_size);
123 			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
124 		} else {
125 			array_size += (u64) max_entries * elem_size;
126 		}
127 	}
128 
129 	/* allocate all map elements and zero-initialize them */
130 	if (attr->map_flags & BPF_F_MMAPABLE) {
131 		void *data;
132 
133 		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
134 		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
135 		if (!data)
136 			return ERR_PTR(-ENOMEM);
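		/* Place array so that array->value lands exactly at
		 * data + PAGE_ALIGN(sizeof(struct bpf_array)), i.e. on the
		 * first page boundary after the struct header; this is what
		 * array_map_mmap() and array_map_vmalloc_addr() rely on.
		 */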
137 		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
138 			- offsetof(struct bpf_array, value);
139 	} else {
140 		array = bpf_map_area_alloc(array_size, numa_node);
141 	}
142 	if (!array)
143 		return ERR_PTR(-ENOMEM);
144 	array->index_mask = index_mask;
145 	array->map.bypass_spec_v1 = bypass_spec_v1;
146 
147 	/* copy mandatory map attributes */
148 	bpf_map_init_from_attr(&array->map, attr);
149 	array->elem_size = elem_size;
150 
151 	if (percpu && bpf_array_alloc_percpu(array)) {
152 		bpf_map_area_free(array);
153 		return ERR_PTR(-ENOMEM);
154 	}
155 
156 	return &array->map;
157 }
158 
159 /* Called from syscall or from eBPF program */
160 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
161 {
162 	struct bpf_array *array = container_of(map, struct bpf_array, map);
163 	u32 index = *(u32 *)key;
164 
165 	if (unlikely(index >= array->map.max_entries))
166 		return NULL;
167 
168 	return array->value + array->elem_size * (index & array->index_mask);
169 }
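
/* Illustrative only, not part of this file: from a BPF program the path
 * above is reached through the bpf_map_lookup_elem() helper, e.g.
 * (assuming the "my_array" map from the earlier sketch):
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&my_array, &key);
 *
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */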
170 
171 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
172 				       u32 off)
173 {
174 	struct bpf_array *array = container_of(map, struct bpf_array, map);
175 
176 	if (map->max_entries != 1)
177 		return -ENOTSUPP;
178 	if (off >= map->value_size)
179 		return -EINVAL;
180 
181 	*imm = (unsigned long)array->value;
182 	return 0;
183 }
184 
185 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
186 				       u32 *off)
187 {
188 	struct bpf_array *array = container_of(map, struct bpf_array, map);
189 	u64 base = (unsigned long)array->value;
190 	u64 range = array->elem_size;
191 
192 	if (map->max_entries != 1)
193 		return -ENOTSUPP;
194 	if (imm < base || imm >= base + range)
195 		return -ENOENT;
196 
197 	*off = imm - base;
198 	return 0;
199 }
200 
201 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
202 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
203 {
204 	struct bpf_array *array = container_of(map, struct bpf_array, map);
205 	struct bpf_insn *insn = insn_buf;
206 	u32 elem_size = round_up(map->value_size, 8);
207 	const int ret = BPF_REG_0;
208 	const int map_ptr = BPF_REG_1;
209 	const int index = BPF_REG_2;
210 
211 	if (map->map_flags & BPF_F_INNER_MAP)
212 		return -EOPNOTSUPP;
213 
214 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
215 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
216 	if (!map->bypass_spec_v1) {
217 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
218 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
219 	} else {
220 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
221 	}
222 
223 	if (is_power_of_2(elem_size)) {
224 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
225 	} else {
226 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
227 	}
228 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
229 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
230 	*insn++ = BPF_MOV64_IMM(ret, 0);
231 	return insn - insn_buf;
232 }
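
/* For reference, the sequence emitted above corresponds roughly to:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)
 *	if r0 >= max_entries goto out
 *	r0 &= index_mask		(only if !bypass_spec_v1)
 *	r0 <<= ilog2(elem_size)		(or r0 *= elem_size)
 *	r0 += r1
 *	goto +1
 * out:	r0 = 0
 *
 * leaving r0 holding either the element pointer or NULL, matching
 * array_map_lookup_elem().
 */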
233 
234 /* Called from eBPF program */
235 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
236 {
237 	struct bpf_array *array = container_of(map, struct bpf_array, map);
238 	u32 index = *(u32 *)key;
239 
240 	if (unlikely(index >= array->map.max_entries))
241 		return NULL;
242 
243 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
244 }
245 
246 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
247 {
248 	struct bpf_array *array = container_of(map, struct bpf_array, map);
249 	u32 index = *(u32 *)key;
250 	void __percpu *pptr;
251 	int cpu, off = 0;
252 	u32 size;
253 
254 	if (unlikely(index >= array->map.max_entries))
255 		return -ENOENT;
256 
257 	/* per_cpu areas are zero-filled and bpf programs can only
258 	 * access 'value_size' of them, so copying rounded areas
259 	 * will not leak any kernel data
260 	 */
261 	size = round_up(map->value_size, 8);
262 	rcu_read_lock();
263 	pptr = array->pptrs[index & array->index_mask];
264 	for_each_possible_cpu(cpu) {
265 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
266 		off += size;
267 	}
268 	rcu_read_unlock();
269 	return 0;
270 }
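
/* Illustrative only: the copy above backs BPF_MAP_LOOKUP_ELEM on a per-cpu
 * array from user space. With libbpf the caller is expected to supply a
 * buffer covering every possible CPU, e.g. (map_fd and the __u64 value
 * type are assumptions):
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(*vals));
 *	__u64 total = 0;
 *	__u32 key = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			total += vals[cpu];
 */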
271 
272 /* Called from syscall */
273 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
274 {
275 	struct bpf_array *array = container_of(map, struct bpf_array, map);
276 	u32 index = key ? *(u32 *)key : U32_MAX;
277 	u32 *next = (u32 *)next_key;
278 
279 	if (index >= array->map.max_entries) {
280 		*next = 0;
281 		return 0;
282 	}
283 
284 	if (index == array->map.max_entries - 1)
285 		return -ENOENT;
286 
287 	*next = index + 1;
288 	return 0;
289 }
290 
291 static void check_and_free_fields(struct bpf_array *arr, void *val)
292 {
293 	if (map_value_has_timer(&arr->map))
294 		bpf_timer_cancel_and_free(val + arr->map.timer_off);
295 	if (map_value_has_kptrs(&arr->map))
296 		bpf_map_free_kptrs(&arr->map, val);
297 }
298 
299 /* Called from syscall or from eBPF program */
300 static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
301 				 u64 map_flags)
302 {
303 	struct bpf_array *array = container_of(map, struct bpf_array, map);
304 	u32 index = *(u32 *)key;
305 	char *val;
306 
307 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
308 		/* unknown flags */
309 		return -EINVAL;
310 
311 	if (unlikely(index >= array->map.max_entries))
312 		/* all elements were pre-allocated, cannot insert a new one */
313 		return -E2BIG;
314 
315 	if (unlikely(map_flags & BPF_NOEXIST))
316 		/* all elements already exist */
317 		return -EEXIST;
318 
319 	if (unlikely((map_flags & BPF_F_LOCK) &&
320 		     !map_value_has_spin_lock(map)))
321 		return -EINVAL;
322 
323 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
324 		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
325 		       value, map->value_size);
326 	} else {
327 		val = array->value +
328 			array->elem_size * (index & array->index_mask);
329 		if (map_flags & BPF_F_LOCK)
330 			copy_map_value_locked(map, val, value, false);
331 		else
332 			copy_map_value(map, val, value);
333 		check_and_free_fields(array, val);
334 	}
335 	return 0;
336 }
337 
338 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
339 			    u64 map_flags)
340 {
341 	struct bpf_array *array = container_of(map, struct bpf_array, map);
342 	u32 index = *(u32 *)key;
343 	void __percpu *pptr;
344 	int cpu, off = 0;
345 	u32 size;
346 
347 	if (unlikely(map_flags > BPF_EXIST))
348 		/* unknown flags */
349 		return -EINVAL;
350 
351 	if (unlikely(index >= array->map.max_entries))
352 		/* all elements were pre-allocated, cannot insert a new one */
353 		return -E2BIG;
354 
355 	if (unlikely(map_flags == BPF_NOEXIST))
356 		/* all elements already exist */
357 		return -EEXIST;
358 
359 	/* user space will provide round_up(value_size, 8) bytes that
360 	 * will be copied into the per-cpu area. bpf programs can only
361 	 * access value_size of it. During lookup the same extra bytes
362 	 * will be returned, either as data written earlier or as zeros
363 	 * left by percpu_alloc, so no kernel data can leak.
364 	 */
365 	size = round_up(map->value_size, 8);
366 	rcu_read_lock();
367 	pptr = array->pptrs[index & array->index_mask];
368 	for_each_possible_cpu(cpu) {
369 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
370 		off += size;
371 	}
372 	rcu_read_unlock();
373 	return 0;
374 }
375 
376 /* Called from syscall or from eBPF program */
377 static int array_map_delete_elem(struct bpf_map *map, void *key)
378 {
379 	return -EINVAL;
380 }
381 
382 static void *array_map_vmalloc_addr(struct bpf_array *array)
383 {
384 	return (void *)round_down((unsigned long)array, PAGE_SIZE);
385 }
386 
387 static void array_map_free_timers(struct bpf_map *map)
388 {
389 	struct bpf_array *array = container_of(map, struct bpf_array, map);
390 	int i;
391 
392 	/* We don't reset or free kptr on uref dropping to zero. */
393 	if (!map_value_has_timer(map))
394 		return;
395 
396 	for (i = 0; i < array->map.max_entries; i++)
397 		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
398 					  map->timer_off);
399 }
400 
401 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
402 static void array_map_free(struct bpf_map *map)
403 {
404 	struct bpf_array *array = container_of(map, struct bpf_array, map);
405 	int i;
406 
407 	if (map_value_has_kptrs(map)) {
408 		for (i = 0; i < array->map.max_entries; i++)
409 			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
410 		bpf_map_free_kptr_off_tab(map);
411 	}
412 
413 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
414 		bpf_array_free_percpu(array);
415 
416 	if (array->map.map_flags & BPF_F_MMAPABLE)
417 		bpf_map_area_free(array_map_vmalloc_addr(array));
418 	else
419 		bpf_map_area_free(array);
420 }
421 
422 static void array_map_seq_show_elem(struct bpf_map *map, void *key,
423 				    struct seq_file *m)
424 {
425 	void *value;
426 
427 	rcu_read_lock();
428 
429 	value = array_map_lookup_elem(map, key);
430 	if (!value) {
431 		rcu_read_unlock();
432 		return;
433 	}
434 
435 	if (map->btf_key_type_id)
436 		seq_printf(m, "%u: ", *(u32 *)key);
437 	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
438 	seq_puts(m, "\n");
439 
440 	rcu_read_unlock();
441 }
442 
443 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
444 					   struct seq_file *m)
445 {
446 	struct bpf_array *array = container_of(map, struct bpf_array, map);
447 	u32 index = *(u32 *)key;
448 	void __percpu *pptr;
449 	int cpu;
450 
451 	rcu_read_lock();
452 
453 	seq_printf(m, "%u: {\n", *(u32 *)key);
454 	pptr = array->pptrs[index & array->index_mask];
455 	for_each_possible_cpu(cpu) {
456 		seq_printf(m, "\tcpu%d: ", cpu);
457 		btf_type_seq_show(map->btf, map->btf_value_type_id,
458 				  per_cpu_ptr(pptr, cpu), m);
459 		seq_puts(m, "\n");
460 	}
461 	seq_puts(m, "}\n");
462 
463 	rcu_read_unlock();
464 }
465 
466 static int array_map_check_btf(const struct bpf_map *map,
467 			       const struct btf *btf,
468 			       const struct btf_type *key_type,
469 			       const struct btf_type *value_type)
470 {
471 	u32 int_data;
472 
473 	/* One exception for keyless BTF: .bss/.data/.rodata map */
474 	if (btf_type_is_void(key_type)) {
475 		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
476 		    map->max_entries != 1)
477 			return -EINVAL;
478 
479 		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
480 			return -EINVAL;
481 
482 		return 0;
483 	}
484 
485 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
486 		return -EINVAL;
487 
488 	int_data = *(u32 *)(key_type + 1);
489 	/* bpf array can only take a u32 key. This check makes sure
490 	 * that the btf matches the attr used during map_create.
491 	 */
492 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
493 		return -EINVAL;
494 
495 	return 0;
496 }
497 
498 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
499 {
500 	struct bpf_array *array = container_of(map, struct bpf_array, map);
501 	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
502 
503 	if (!(map->map_flags & BPF_F_MMAPABLE))
504 		return -EINVAL;
505 
506 	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
507 	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
508 		return -EINVAL;
509 
510 	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
511 				   vma->vm_pgoff + pgoff);
512 }
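
/* Illustrative only: user space can map a BPF_F_MMAPABLE array directly,
 * e.g. (map_fd and len are assumptions, error handling omitted):
 *
 *	void *data = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  map_fd, 0);
 *
 * where len must stay within the page-aligned max_entries * elem_size
 * range checked above; the mapping then aliases the array elements
 * without any further bpf(2) syscalls.
 */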
513 
514 static bool array_map_meta_equal(const struct bpf_map *meta0,
515 				 const struct bpf_map *meta1)
516 {
517 	if (!bpf_map_meta_equal(meta0, meta1))
518 		return false;
519 	return meta0->map_flags & BPF_F_INNER_MAP ? true :
520 	       meta0->max_entries == meta1->max_entries;
521 }
522 
523 struct bpf_iter_seq_array_map_info {
524 	struct bpf_map *map;
525 	void *percpu_value_buf;
526 	u32 index;
527 };
528 
529 static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
530 {
531 	struct bpf_iter_seq_array_map_info *info = seq->private;
532 	struct bpf_map *map = info->map;
533 	struct bpf_array *array;
534 	u32 index;
535 
536 	if (info->index >= map->max_entries)
537 		return NULL;
538 
539 	if (*pos == 0)
540 		++*pos;
541 	array = container_of(map, struct bpf_array, map);
542 	index = info->index & array->index_mask;
543 	if (info->percpu_value_buf)
544 		return array->pptrs[index];
545 	return array->value + array->elem_size * index;
546 }
547 
548 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
549 {
550 	struct bpf_iter_seq_array_map_info *info = seq->private;
551 	struct bpf_map *map = info->map;
552 	struct bpf_array *array;
553 	u32 index;
554 
555 	++*pos;
556 	++info->index;
557 	if (info->index >= map->max_entries)
558 		return NULL;
559 
560 	array = container_of(map, struct bpf_array, map);
561 	index = info->index & array->index_mask;
562 	if (info->percpu_value_buf)
563 		return array->pptrs[index];
564 	return array->value + array->elem_size * index;
565 }
566 
567 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
568 {
569 	struct bpf_iter_seq_array_map_info *info = seq->private;
570 	struct bpf_iter__bpf_map_elem ctx = {};
571 	struct bpf_map *map = info->map;
572 	struct bpf_iter_meta meta;
573 	struct bpf_prog *prog;
574 	int off = 0, cpu = 0;
575 	void __percpu **pptr;
576 	u32 size;
577 
578 	meta.seq = seq;
579 	prog = bpf_iter_get_info(&meta, v == NULL);
580 	if (!prog)
581 		return 0;
582 
583 	ctx.meta = &meta;
584 	ctx.map = info->map;
585 	if (v) {
586 		ctx.key = &info->index;
587 
588 		if (!info->percpu_value_buf) {
589 			ctx.value = v;
590 		} else {
591 			pptr = v;
592 			size = round_up(map->value_size, 8);
593 			for_each_possible_cpu(cpu) {
594 				bpf_long_memcpy(info->percpu_value_buf + off,
595 						per_cpu_ptr(pptr, cpu),
596 						size);
597 				off += size;
598 			}
599 			ctx.value = info->percpu_value_buf;
600 		}
601 	}
602 
603 	return bpf_iter_run_prog(prog, &ctx);
604 }
605 
606 static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
607 {
608 	return __bpf_array_map_seq_show(seq, v);
609 }
610 
611 static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
612 {
613 	if (!v)
614 		(void)__bpf_array_map_seq_show(seq, NULL);
615 }
616 
617 static int bpf_iter_init_array_map(void *priv_data,
618 				   struct bpf_iter_aux_info *aux)
619 {
620 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
621 	struct bpf_map *map = aux->map;
622 	void *value_buf;
623 	u32 buf_size;
624 
625 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
626 		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
627 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
628 		if (!value_buf)
629 			return -ENOMEM;
630 
631 		seq_info->percpu_value_buf = value_buf;
632 	}
633 
634 	seq_info->map = map;
635 	return 0;
636 }
637 
638 static void bpf_iter_fini_array_map(void *priv_data)
639 {
640 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
641 
642 	kfree(seq_info->percpu_value_buf);
643 }
644 
645 static const struct seq_operations bpf_array_map_seq_ops = {
646 	.start	= bpf_array_map_seq_start,
647 	.next	= bpf_array_map_seq_next,
648 	.stop	= bpf_array_map_seq_stop,
649 	.show	= bpf_array_map_seq_show,
650 };
651 
652 static const struct bpf_iter_seq_info iter_seq_info = {
653 	.seq_ops		= &bpf_array_map_seq_ops,
654 	.init_seq_private	= bpf_iter_init_array_map,
655 	.fini_seq_private	= bpf_iter_fini_array_map,
656 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
657 };
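
/* Illustrative only: the seq_ops above back "iter/bpf_map_elem" iterator
 * programs attached to an array map, e.g. a program of the form
 * (the function name is an assumption, the section and context type are
 * the usual libbpf ones):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (key && val)
 *			bpf_seq_write(ctx->meta->seq, val, sizeof(*val));
 *		return 0;
 *	}
 *
 * which __bpf_array_map_seq_show() invokes once per element.
 */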
658 
659 static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
660 				   void *callback_ctx, u64 flags)
661 {
662 	u32 i, key, num_elems = 0;
663 	struct bpf_array *array;
664 	bool is_percpu;
665 	u64 ret = 0;
666 	void *val;
667 
668 	if (flags != 0)
669 		return -EINVAL;
670 
671 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
672 	array = container_of(map, struct bpf_array, map);
673 	if (is_percpu)
674 		migrate_disable();
675 	for (i = 0; i < map->max_entries; i++) {
676 		if (is_percpu)
677 			val = this_cpu_ptr(array->pptrs[i]);
678 		else
679 			val = array->value + array->elem_size * i;
680 		num_elems++;
681 		key = i;
682 		ret = callback_fn((u64)(long)map, (u64)(long)&key,
683 				  (u64)(long)val, (u64)(long)callback_ctx, 0);
684 		/* return value: 0 - continue, 1 - stop and return */
685 		if (ret)
686 			break;
687 	}
688 
689 	if (is_percpu)
690 		migrate_enable();
691 	return num_elems;
692 }
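
/* Illustrative only: the loop above services the bpf_for_each_map_elem()
 * helper, e.g. from a BPF program (callback and context names are
 * assumptions):
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val,
 *			   void *ctx)
 *	{
 *		*(__u64 *)ctx += *val;
 *		return 0;
 *	}
 *
 *	__u64 sum = 0;
 *	bpf_for_each_map_elem(&my_array, sum_cb, &sum, 0);
 */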
693 
694 BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
695 const struct bpf_map_ops array_map_ops = {
696 	.map_meta_equal = array_map_meta_equal,
697 	.map_alloc_check = array_map_alloc_check,
698 	.map_alloc = array_map_alloc,
699 	.map_free = array_map_free,
700 	.map_get_next_key = array_map_get_next_key,
701 	.map_release_uref = array_map_free_timers,
702 	.map_lookup_elem = array_map_lookup_elem,
703 	.map_update_elem = array_map_update_elem,
704 	.map_delete_elem = array_map_delete_elem,
705 	.map_gen_lookup = array_map_gen_lookup,
706 	.map_direct_value_addr = array_map_direct_value_addr,
707 	.map_direct_value_meta = array_map_direct_value_meta,
708 	.map_mmap = array_map_mmap,
709 	.map_seq_show_elem = array_map_seq_show_elem,
710 	.map_check_btf = array_map_check_btf,
711 	.map_lookup_batch = generic_map_lookup_batch,
712 	.map_update_batch = generic_map_update_batch,
713 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
714 	.map_for_each_callback = bpf_for_each_array_elem,
715 	.map_btf_id = &array_map_btf_ids[0],
716 	.iter_seq_info = &iter_seq_info,
717 };
718 
719 const struct bpf_map_ops percpu_array_map_ops = {
720 	.map_meta_equal = bpf_map_meta_equal,
721 	.map_alloc_check = array_map_alloc_check,
722 	.map_alloc = array_map_alloc,
723 	.map_free = array_map_free,
724 	.map_get_next_key = array_map_get_next_key,
725 	.map_lookup_elem = percpu_array_map_lookup_elem,
726 	.map_update_elem = array_map_update_elem,
727 	.map_delete_elem = array_map_delete_elem,
728 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
729 	.map_check_btf = array_map_check_btf,
730 	.map_lookup_batch = generic_map_lookup_batch,
731 	.map_update_batch = generic_map_update_batch,
732 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
733 	.map_for_each_callback = bpf_for_each_array_elem,
734 	.map_btf_id = &array_map_btf_ids[0],
735 	.iter_seq_info = &iter_seq_info,
736 };
737 
738 static int fd_array_map_alloc_check(union bpf_attr *attr)
739 {
740 	/* only file descriptors can be stored in this type of map */
741 	if (attr->value_size != sizeof(u32))
742 		return -EINVAL;
743 	/* Program read-only/write-only not supported for special maps yet. */
744 	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
745 		return -EINVAL;
746 	return array_map_alloc_check(attr);
747 }
748 
749 static void fd_array_map_free(struct bpf_map *map)
750 {
751 	struct bpf_array *array = container_of(map, struct bpf_array, map);
752 	int i;
753 
754 	/* make sure it's empty */
755 	for (i = 0; i < array->map.max_entries; i++)
756 		BUG_ON(array->ptrs[i] != NULL);
757 
758 	bpf_map_area_free(array);
759 }
760 
761 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
762 {
763 	return ERR_PTR(-EOPNOTSUPP);
764 }
765 
766 /* only called from syscall */
767 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
768 {
769 	void **elem, *ptr;
770 	int ret = 0;
771 
772 	if (!map->ops->map_fd_sys_lookup_elem)
773 		return -ENOTSUPP;
774 
775 	rcu_read_lock();
776 	elem = array_map_lookup_elem(map, key);
777 	if (elem && (ptr = READ_ONCE(*elem)))
778 		*value = map->ops->map_fd_sys_lookup_elem(ptr);
779 	else
780 		ret = -ENOENT;
781 	rcu_read_unlock();
782 
783 	return ret;
784 }
785 
786 /* only called from syscall */
787 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
788 				 void *key, void *value, u64 map_flags)
789 {
790 	struct bpf_array *array = container_of(map, struct bpf_array, map);
791 	void *new_ptr, *old_ptr;
792 	u32 index = *(u32 *)key, ufd;
793 
794 	if (map_flags != BPF_ANY)
795 		return -EINVAL;
796 
797 	if (index >= array->map.max_entries)
798 		return -E2BIG;
799 
800 	ufd = *(u32 *)value;
801 	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
802 	if (IS_ERR(new_ptr))
803 		return PTR_ERR(new_ptr);
804 
805 	if (map->ops->map_poke_run) {
806 		mutex_lock(&array->aux->poke_mutex);
807 		old_ptr = xchg(array->ptrs + index, new_ptr);
808 		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
809 		mutex_unlock(&array->aux->poke_mutex);
810 	} else {
811 		old_ptr = xchg(array->ptrs + index, new_ptr);
812 	}
813 
814 	if (old_ptr)
815 		map->ops->map_fd_put_ptr(old_ptr);
816 	return 0;
817 }
818 
819 static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
820 {
821 	struct bpf_array *array = container_of(map, struct bpf_array, map);
822 	void *old_ptr;
823 	u32 index = *(u32 *)key;
824 
825 	if (index >= array->map.max_entries)
826 		return -E2BIG;
827 
828 	if (map->ops->map_poke_run) {
829 		mutex_lock(&array->aux->poke_mutex);
830 		old_ptr = xchg(array->ptrs + index, NULL);
831 		map->ops->map_poke_run(map, index, old_ptr, NULL);
832 		mutex_unlock(&array->aux->poke_mutex);
833 	} else {
834 		old_ptr = xchg(array->ptrs + index, NULL);
835 	}
836 
837 	if (old_ptr) {
838 		map->ops->map_fd_put_ptr(old_ptr);
839 		return 0;
840 	} else {
841 		return -ENOENT;
842 	}
843 }
844 
845 static void *prog_fd_array_get_ptr(struct bpf_map *map,
846 				   struct file *map_file, int fd)
847 {
848 	struct bpf_prog *prog = bpf_prog_get(fd);
849 
850 	if (IS_ERR(prog))
851 		return prog;
852 
853 	if (!bpf_prog_map_compatible(map, prog)) {
854 		bpf_prog_put(prog);
855 		return ERR_PTR(-EINVAL);
856 	}
857 
858 	return prog;
859 }
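
/* Illustrative only: entries installed through the path above are consumed
 * from BPF programs via the bpf_tail_call() helper, e.g. (prog_array and
 * ctx are assumptions):
 *
 *	bpf_tail_call(ctx, &prog_array, index);
 *
 * Control only falls through when the tail call fails (empty slot or
 * out-of-range index). bpf_prog_map_compatible() above guarantees that
 * every program placed in the map matches the program type the map was
 * first used with.
 */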
860 
861 static void prog_fd_array_put_ptr(void *ptr)
862 {
863 	bpf_prog_put(ptr);
864 }
865 
866 static u32 prog_fd_array_sys_lookup_elem(void *ptr)
867 {
868 	return ((struct bpf_prog *)ptr)->aux->id;
869 }
870 
871 /* decrement refcnt of all bpf_progs that are stored in this map */
872 static void bpf_fd_array_map_clear(struct bpf_map *map)
873 {
874 	struct bpf_array *array = container_of(map, struct bpf_array, map);
875 	int i;
876 
877 	for (i = 0; i < array->map.max_entries; i++)
878 		fd_array_map_delete_elem(map, &i);
879 }
880 
881 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
882 					 struct seq_file *m)
883 {
884 	void **elem, *ptr;
885 	u32 prog_id;
886 
887 	rcu_read_lock();
888 
889 	elem = array_map_lookup_elem(map, key);
890 	if (elem) {
891 		ptr = READ_ONCE(*elem);
892 		if (ptr) {
893 			seq_printf(m, "%u: ", *(u32 *)key);
894 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
895 			btf_type_seq_show(map->btf, map->btf_value_type_id,
896 					  &prog_id, m);
897 			seq_puts(m, "\n");
898 		}
899 	}
900 
901 	rcu_read_unlock();
902 }
903 
904 struct prog_poke_elem {
905 	struct list_head list;
906 	struct bpf_prog_aux *aux;
907 };
908 
909 static int prog_array_map_poke_track(struct bpf_map *map,
910 				     struct bpf_prog_aux *prog_aux)
911 {
912 	struct prog_poke_elem *elem;
913 	struct bpf_array_aux *aux;
914 	int ret = 0;
915 
916 	aux = container_of(map, struct bpf_array, map)->aux;
917 	mutex_lock(&aux->poke_mutex);
918 	list_for_each_entry(elem, &aux->poke_progs, list) {
919 		if (elem->aux == prog_aux)
920 			goto out;
921 	}
922 
923 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
924 	if (!elem) {
925 		ret = -ENOMEM;
926 		goto out;
927 	}
928 
929 	INIT_LIST_HEAD(&elem->list);
930 	/* We must track the program's aux info at this point in time
931 	 * since the program pointer itself may not be stable yet, see
932 	 * also comment in prog_array_map_poke_run().
933 	 */
934 	elem->aux = prog_aux;
935 
936 	list_add_tail(&elem->list, &aux->poke_progs);
937 out:
938 	mutex_unlock(&aux->poke_mutex);
939 	return ret;
940 }
941 
942 static void prog_array_map_poke_untrack(struct bpf_map *map,
943 					struct bpf_prog_aux *prog_aux)
944 {
945 	struct prog_poke_elem *elem, *tmp;
946 	struct bpf_array_aux *aux;
947 
948 	aux = container_of(map, struct bpf_array, map)->aux;
949 	mutex_lock(&aux->poke_mutex);
950 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
951 		if (elem->aux == prog_aux) {
952 			list_del_init(&elem->list);
953 			kfree(elem);
954 			break;
955 		}
956 	}
957 	mutex_unlock(&aux->poke_mutex);
958 }
959 
960 static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
961 				    struct bpf_prog *old,
962 				    struct bpf_prog *new)
963 {
964 	u8 *old_addr, *new_addr, *old_bypass_addr;
965 	struct prog_poke_elem *elem;
966 	struct bpf_array_aux *aux;
967 
968 	aux = container_of(map, struct bpf_array, map)->aux;
969 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
970 
971 	list_for_each_entry(elem, &aux->poke_progs, list) {
972 		struct bpf_jit_poke_descriptor *poke;
973 		int i, ret;
974 
975 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
976 			poke = &elem->aux->poke_tab[i];
977 
978 			/* Few things to be aware of:
979 			 *
980 			 * 1) We can only ever access aux in this context, but
981 			 *    not aux->prog since it might not be stable yet and
982 			 *    there could be danger of use after free otherwise.
983 			 * 2) Initially when we start tracking aux, the program
984 			 *    is not JITed yet and also does not have a kallsyms
985 			 *    entry. We skip these as poke->tailcall_target_stable
986 			 *    is not active yet. The JIT will do the final fixup
987 			 *    before setting it stable. The various
988 			 *    poke->tailcall_target_stable are successively
989 			 *    activated, so tail call updates can arrive from here
990 			 *    while JIT is still finishing its final fixup for
991 			 *    non-activated poke entries.
992 			 * 3) On program teardown, the program's kallsyms entry gets
993 			 *    removed from an RCU callback, but we can only untrack
994 			 *    from sleepable context, therefore bpf_arch_text_poke()
995 			 *    might not see that this is in the BPF text section and
996 			 *    bails out with -EINVAL. As these are unreachable since
997 			 *    the RCU grace period already passed, we simply skip them.
998 			 * 4) Programs reaching a refcount of zero while patching is
999 			 *    in progress are also fine, since we're protected under
1000 			 *    poke_mutex and untrack the programs before the JIT
1001 			 *    buffer is freed. When we're still in the middle of
1002 			 *    patching and the kallsyms entry of the program suddenly
1003 			 *    gets evicted, we just skip the rest, which is fine due
1004 			 *    to point 3).
1005 			 * 5) Any other error happening below from bpf_arch_text_poke()
1006 			 *    is an unexpected bug.
1007 			 */
1008 			if (!READ_ONCE(poke->tailcall_target_stable))
1009 				continue;
1010 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1011 				continue;
1012 			if (poke->tail_call.map != map ||
1013 			    poke->tail_call.key != key)
1014 				continue;
1015 
1016 			old_bypass_addr = old ? NULL : poke->bypass_addr;
1017 			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
1018 			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
1019 
1020 			if (new) {
1021 				ret = bpf_arch_text_poke(poke->tailcall_target,
1022 							 BPF_MOD_JUMP,
1023 							 old_addr, new_addr);
1024 				BUG_ON(ret < 0 && ret != -EINVAL);
1025 				if (!old) {
1026 					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1027 								 BPF_MOD_JUMP,
1028 								 poke->bypass_addr,
1029 								 NULL);
1030 					BUG_ON(ret < 0 && ret != -EINVAL);
1031 				}
1032 			} else {
1033 				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1034 							 BPF_MOD_JUMP,
1035 							 old_bypass_addr,
1036 							 poke->bypass_addr);
1037 				BUG_ON(ret < 0 && ret != -EINVAL);
1038 				/* let other CPUs finish executing the program
1039 				 * so that they cannot be exposed to an invalid
1040 				 * nop, stack unwind or nop state
1041 				 */
1042 				if (!ret)
1043 					synchronize_rcu();
1044 				ret = bpf_arch_text_poke(poke->tailcall_target,
1045 							 BPF_MOD_JUMP,
1046 							 old_addr, NULL);
1047 				BUG_ON(ret < 0 && ret != -EINVAL);
1048 			}
1049 		}
1050 	}
1051 }
1052 
1053 static void prog_array_map_clear_deferred(struct work_struct *work)
1054 {
1055 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1056 					   work)->map;
1057 	bpf_fd_array_map_clear(map);
1058 	bpf_map_put(map);
1059 }
1060 
1061 static void prog_array_map_clear(struct bpf_map *map)
1062 {
1063 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1064 						 map)->aux;
1065 	bpf_map_inc(map);
1066 	schedule_work(&aux->work);
1067 }
1068 
1069 static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1070 {
1071 	struct bpf_array_aux *aux;
1072 	struct bpf_map *map;
1073 
1074 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1075 	if (!aux)
1076 		return ERR_PTR(-ENOMEM);
1077 
1078 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1079 	INIT_LIST_HEAD(&aux->poke_progs);
1080 	mutex_init(&aux->poke_mutex);
1081 
1082 	map = array_map_alloc(attr);
1083 	if (IS_ERR(map)) {
1084 		kfree(aux);
1085 		return map;
1086 	}
1087 
1088 	container_of(map, struct bpf_array, map)->aux = aux;
1089 	aux->map = map;
1090 
1091 	return map;
1092 }
1093 
1094 static void prog_array_map_free(struct bpf_map *map)
1095 {
1096 	struct prog_poke_elem *elem, *tmp;
1097 	struct bpf_array_aux *aux;
1098 
1099 	aux = container_of(map, struct bpf_array, map)->aux;
1100 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1101 		list_del_init(&elem->list);
1102 		kfree(elem);
1103 	}
1104 	kfree(aux);
1105 	fd_array_map_free(map);
1106 }
1107 
1108 /* prog_array->aux->{type,jited} is a runtime binding.
1109  * A static check alone in the verifier is not enough.
1110  * Thus, prog_array_map cannot be used as an inner_map
1111  * and map_meta_equal is not implemented.
1112  */
1113 const struct bpf_map_ops prog_array_map_ops = {
1114 	.map_alloc_check = fd_array_map_alloc_check,
1115 	.map_alloc = prog_array_map_alloc,
1116 	.map_free = prog_array_map_free,
1117 	.map_poke_track = prog_array_map_poke_track,
1118 	.map_poke_untrack = prog_array_map_poke_untrack,
1119 	.map_poke_run = prog_array_map_poke_run,
1120 	.map_get_next_key = array_map_get_next_key,
1121 	.map_lookup_elem = fd_array_map_lookup_elem,
1122 	.map_delete_elem = fd_array_map_delete_elem,
1123 	.map_fd_get_ptr = prog_fd_array_get_ptr,
1124 	.map_fd_put_ptr = prog_fd_array_put_ptr,
1125 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1126 	.map_release_uref = prog_array_map_clear,
1127 	.map_seq_show_elem = prog_array_map_seq_show_elem,
1128 	.map_btf_id = &array_map_btf_ids[0],
1129 };
1130 
1131 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1132 						   struct file *map_file)
1133 {
1134 	struct bpf_event_entry *ee;
1135 
1136 	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1137 	if (ee) {
1138 		ee->event = perf_file->private_data;
1139 		ee->perf_file = perf_file;
1140 		ee->map_file = map_file;
1141 	}
1142 
1143 	return ee;
1144 }
1145 
1146 static void __bpf_event_entry_free(struct rcu_head *rcu)
1147 {
1148 	struct bpf_event_entry *ee;
1149 
1150 	ee = container_of(rcu, struct bpf_event_entry, rcu);
1151 	fput(ee->perf_file);
1152 	kfree(ee);
1153 }
1154 
1155 static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1156 {
1157 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1158 }
1159 
1160 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1161 					 struct file *map_file, int fd)
1162 {
1163 	struct bpf_event_entry *ee;
1164 	struct perf_event *event;
1165 	struct file *perf_file;
1166 	u64 value;
1167 
1168 	perf_file = perf_event_get(fd);
1169 	if (IS_ERR(perf_file))
1170 		return perf_file;
1171 
1172 	ee = ERR_PTR(-EOPNOTSUPP);
1173 	event = perf_file->private_data;
1174 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1175 		goto err_out;
1176 
1177 	ee = bpf_event_entry_gen(perf_file, map_file);
1178 	if (ee)
1179 		return ee;
1180 	ee = ERR_PTR(-ENOMEM);
1181 err_out:
1182 	fput(perf_file);
1183 	return ee;
1184 }
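
/* Illustrative only: perf event slots installed above are typically written
 * to from BPF programs with the bpf_perf_event_output() helper, e.g. (the
 * "events" map and the sample struct are assumptions):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 *
 * while user space reads the corresponding per-CPU perf ring buffers.
 */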
1185 
1186 static void perf_event_fd_array_put_ptr(void *ptr)
1187 {
1188 	bpf_event_entry_free_rcu(ptr);
1189 }
1190 
1191 static void perf_event_fd_array_release(struct bpf_map *map,
1192 					struct file *map_file)
1193 {
1194 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1195 	struct bpf_event_entry *ee;
1196 	int i;
1197 
1198 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1199 		return;
1200 
1201 	rcu_read_lock();
1202 	for (i = 0; i < array->map.max_entries; i++) {
1203 		ee = READ_ONCE(array->ptrs[i]);
1204 		if (ee && ee->map_file == map_file)
1205 			fd_array_map_delete_elem(map, &i);
1206 	}
1207 	rcu_read_unlock();
1208 }
1209 
1210 static void perf_event_fd_array_map_free(struct bpf_map *map)
1211 {
1212 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1213 		bpf_fd_array_map_clear(map);
1214 	fd_array_map_free(map);
1215 }
1216 
1217 const struct bpf_map_ops perf_event_array_map_ops = {
1218 	.map_meta_equal = bpf_map_meta_equal,
1219 	.map_alloc_check = fd_array_map_alloc_check,
1220 	.map_alloc = array_map_alloc,
1221 	.map_free = perf_event_fd_array_map_free,
1222 	.map_get_next_key = array_map_get_next_key,
1223 	.map_lookup_elem = fd_array_map_lookup_elem,
1224 	.map_delete_elem = fd_array_map_delete_elem,
1225 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1226 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1227 	.map_release = perf_event_fd_array_release,
1228 	.map_check_btf = map_check_no_btf,
1229 	.map_btf_id = &array_map_btf_ids[0],
1230 };
1231 
1232 #ifdef CONFIG_CGROUPS
1233 static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1234 				     struct file *map_file /* not used */,
1235 				     int fd)
1236 {
1237 	return cgroup_get_from_fd(fd);
1238 }
1239 
1240 static void cgroup_fd_array_put_ptr(void *ptr)
1241 {
1242 	/* cgroup_put() frees the cgrp after an RCU grace period */
1243 	cgroup_put(ptr);
1244 }
1245 
1246 static void cgroup_fd_array_free(struct bpf_map *map)
1247 {
1248 	bpf_fd_array_map_clear(map);
1249 	fd_array_map_free(map);
1250 }
1251 
1252 const struct bpf_map_ops cgroup_array_map_ops = {
1253 	.map_meta_equal = bpf_map_meta_equal,
1254 	.map_alloc_check = fd_array_map_alloc_check,
1255 	.map_alloc = array_map_alloc,
1256 	.map_free = cgroup_fd_array_free,
1257 	.map_get_next_key = array_map_get_next_key,
1258 	.map_lookup_elem = fd_array_map_lookup_elem,
1259 	.map_delete_elem = fd_array_map_delete_elem,
1260 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1261 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1262 	.map_check_btf = map_check_no_btf,
1263 	.map_btf_id = &array_map_btf_ids[0],
1264 };
1265 #endif
1266 
1267 static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1268 {
1269 	struct bpf_map *map, *inner_map_meta;
1270 
1271 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1272 	if (IS_ERR(inner_map_meta))
1273 		return inner_map_meta;
1274 
1275 	map = array_map_alloc(attr);
1276 	if (IS_ERR(map)) {
1277 		bpf_map_meta_free(inner_map_meta);
1278 		return map;
1279 	}
1280 
1281 	map->inner_map_meta = inner_map_meta;
1282 
1283 	return map;
1284 }
1285 
1286 static void array_of_map_free(struct bpf_map *map)
1287 {
1288 	/* map->inner_map_meta is only accessed by the syscall path,
1289 	 * which is protected by fdget/fdput.
1290 	 */
1291 	bpf_map_meta_free(map->inner_map_meta);
1292 	bpf_fd_array_map_clear(map);
1293 	fd_array_map_free(map);
1294 }
1295 
1296 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1297 {
1298 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1299 
1300 	if (!inner_map)
1301 		return NULL;
1302 
1303 	return READ_ONCE(*inner_map);
1304 }
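
/* Illustrative only: from a BPF program an array-of-maps is dereferenced
 * with a nested lookup, e.g. (outer/inner map and key names are
 * assumptions):
 *
 *	void *inner = bpf_map_lookup_elem(&outer_array, &outer_key);
 *	void *val = NULL;
 *
 *	if (inner)
 *		val = bpf_map_lookup_elem(inner, &inner_key);
 *
 * array_of_map_gen_lookup() below inlines exactly this outer lookup.
 */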
1305 
1306 static int array_of_map_gen_lookup(struct bpf_map *map,
1307 				   struct bpf_insn *insn_buf)
1308 {
1309 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1310 	u32 elem_size = round_up(map->value_size, 8);
1311 	struct bpf_insn *insn = insn_buf;
1312 	const int ret = BPF_REG_0;
1313 	const int map_ptr = BPF_REG_1;
1314 	const int index = BPF_REG_2;
1315 
1316 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1317 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1318 	if (!map->bypass_spec_v1) {
1319 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1320 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1321 	} else {
1322 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1323 	}
1324 	if (is_power_of_2(elem_size))
1325 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1326 	else
1327 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1328 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1329 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1330 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1331 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1332 	*insn++ = BPF_MOV64_IMM(ret, 0);
1333 
1334 	return insn - insn_buf;
1335 }
1336 
1337 const struct bpf_map_ops array_of_maps_map_ops = {
1338 	.map_alloc_check = fd_array_map_alloc_check,
1339 	.map_alloc = array_of_map_alloc,
1340 	.map_free = array_of_map_free,
1341 	.map_get_next_key = array_map_get_next_key,
1342 	.map_lookup_elem = array_of_map_lookup_elem,
1343 	.map_delete_elem = fd_array_map_delete_elem,
1344 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1345 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1346 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1347 	.map_gen_lookup = array_of_map_gen_lookup,
1348 	.map_check_btf = map_check_no_btf,
1349 	.map_btf_id = &array_map_btf_ids[0],
1350 };
1351