xref: /openbmc/linux/kernel/bpf/arraymap.c (revision 747f7a29)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016,2017 Facebook
4  */
5 #include <linux/bpf.h>
6 #include <linux/btf.h>
7 #include <linux/err.h>
8 #include <linux/slab.h>
9 #include <linux/mm.h>
10 #include <linux/filter.h>
11 #include <linux/perf_event.h>
12 #include <uapi/linux/btf.h>
13 #include <linux/rcupdate_trace.h>
14 #include <linux/btf_ids.h>
15 
16 #include "map_in_map.h"
17 
18 #define ARRAY_CREATE_FLAG_MASK \
19 	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
20 	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
21 
22 static void bpf_array_free_percpu(struct bpf_array *array)
23 {
24 	int i;
25 
26 	for (i = 0; i < array->map.max_entries; i++) {
27 		free_percpu(array->pptrs[i]);
28 		cond_resched();
29 	}
30 }
31 
32 static int bpf_array_alloc_percpu(struct bpf_array *array)
33 {
34 	void __percpu *ptr;
35 	int i;
36 
37 	for (i = 0; i < array->map.max_entries; i++) {
38 		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
39 					   GFP_USER | __GFP_NOWARN);
40 		if (!ptr) {
41 			bpf_array_free_percpu(array);
42 			return -ENOMEM;
43 		}
44 		array->pptrs[i] = ptr;
45 		cond_resched();
46 	}
47 
48 	return 0;
49 }
50 
51 /* Called from syscall */
52 int array_map_alloc_check(union bpf_attr *attr)
53 {
54 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
55 	int numa_node = bpf_map_attr_numa_node(attr);
56 
57 	/* check sanity of attributes */
58 	if (attr->max_entries == 0 || attr->key_size != 4 ||
59 	    attr->value_size == 0 ||
60 	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
61 	    !bpf_map_flags_access_ok(attr->map_flags) ||
62 	    (percpu && numa_node != NUMA_NO_NODE))
63 		return -EINVAL;
64 
65 	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
66 	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
67 		return -EINVAL;
68 
69 	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
70 	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
71 		return -EINVAL;
72 
73 	/* avoid overflow on round_up(map->value_size) */
74 	if (attr->value_size > INT_MAX)
75 		return -E2BIG;
76 
77 	return 0;
78 }
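
/* Illustrative sketch, not part of the original file: a minimal set of
 * union bpf_attr fields that passes the checks above for a plain
 * BPF_MAP_TYPE_ARRAY (the key must be a 4-byte index, value size and entry
 * count must be non-zero, and no unsupported flags may be set):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 64,
 *		.max_entries = 128,
 *	};
 */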
79 
80 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
81 {
82 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
83 	int numa_node = bpf_map_attr_numa_node(attr);
84 	u32 elem_size, index_mask, max_entries;
85 	bool bypass_spec_v1 = bpf_bypass_spec_v1();
86 	u64 array_size, mask64;
87 	struct bpf_array *array;
88 
89 	elem_size = round_up(attr->value_size, 8);
90 
91 	max_entries = attr->max_entries;
92 
93 	/* On 32-bit archs roundup_pow_of_two() with max_entries that has
94 	 * the uppermost bit set in u32 space is undefined behavior due to
95 	 * the resulting 1U << 32, so do it manually here in u64 space.
96 	 */
97 	mask64 = fls_long(max_entries - 1);
98 	mask64 = 1ULL << mask64;
99 	mask64 -= 1;
100 
101 	index_mask = mask64;
102 	if (!bypass_spec_v1) {
103 		/* round up array size to nearest power of 2,
104 		 * since cpu will speculate within index_mask limits
105 		 */
106 		max_entries = index_mask + 1;
107 		/* Check for overflows. */
108 		if (max_entries < attr->max_entries)
109 			return ERR_PTR(-E2BIG);
110 	}
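	/* Worked example (illustrative, not from the original source): for
	 * attr->max_entries == 5, fls_long(4) == 3, so mask64 becomes 7 and
	 * index_mask == 7; unless Spectre v1 checks are bypassed, max_entries
	 * is then rounded up to index_mask + 1 == 8.
	 */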
111 
112 	array_size = sizeof(*array);
113 	if (percpu) {
114 		array_size += (u64) max_entries * sizeof(void *);
115 	} else {
116 		/* rely on vmalloc() to return page-aligned memory and
117 		 * ensure array->value is exactly page-aligned
118 		 */
119 		if (attr->map_flags & BPF_F_MMAPABLE) {
120 			array_size = PAGE_ALIGN(array_size);
121 			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
122 		} else {
123 			array_size += (u64) max_entries * elem_size;
124 		}
125 	}
126 
127 	/* allocate all map elements and zero-initialize them */
128 	if (attr->map_flags & BPF_F_MMAPABLE) {
129 		void *data;
130 
131 		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
132 		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
133 		if (!data)
134 			return ERR_PTR(-ENOMEM);
135 		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
136 			- offsetof(struct bpf_array, value);
137 	} else {
138 		array = bpf_map_area_alloc(array_size, numa_node);
139 	}
140 	if (!array)
141 		return ERR_PTR(-ENOMEM);
142 	array->index_mask = index_mask;
143 	array->map.bypass_spec_v1 = bypass_spec_v1;
144 
145 	/* copy mandatory map attributes */
146 	bpf_map_init_from_attr(&array->map, attr);
147 	array->elem_size = elem_size;
148 
149 	if (percpu && bpf_array_alloc_percpu(array)) {
150 		bpf_map_area_free(array);
151 		return ERR_PTR(-ENOMEM);
152 	}
153 
154 	return &array->map;
155 }
156 
157 static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
158 {
159 	return array->value + (u64)array->elem_size * index;
160 }
161 
162 /* Called from syscall or from eBPF program */
163 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
164 {
165 	struct bpf_array *array = container_of(map, struct bpf_array, map);
166 	u32 index = *(u32 *)key;
167 
168 	if (unlikely(index >= array->map.max_entries))
169 		return NULL;
170 
171 	return array->value + (u64)array->elem_size * (index & array->index_mask);
172 }
173 
174 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
175 				       u32 off)
176 {
177 	struct bpf_array *array = container_of(map, struct bpf_array, map);
178 
179 	if (map->max_entries != 1)
180 		return -ENOTSUPP;
181 	if (off >= map->value_size)
182 		return -EINVAL;
183 
184 	*imm = (unsigned long)array->value;
185 	return 0;
186 }
187 
188 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
189 				       u32 *off)
190 {
191 	struct bpf_array *array = container_of(map, struct bpf_array, map);
192 	u64 base = (unsigned long)array->value;
193 	u64 range = array->elem_size;
194 
195 	if (map->max_entries != 1)
196 		return -ENOTSUPP;
197 	if (imm < base || imm >= base + range)
198 		return -ENOENT;
199 
200 	*off = imm - base;
201 	return 0;
202 }
203 
204 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
205 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
206 {
207 	struct bpf_array *array = container_of(map, struct bpf_array, map);
208 	struct bpf_insn *insn = insn_buf;
209 	u32 elem_size = array->elem_size;
210 	const int ret = BPF_REG_0;
211 	const int map_ptr = BPF_REG_1;
212 	const int index = BPF_REG_2;
213 
214 	if (map->map_flags & BPF_F_INNER_MAP)
215 		return -EOPNOTSUPP;
216 
217 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
218 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
219 	if (!map->bypass_spec_v1) {
220 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
221 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
222 	} else {
223 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
224 	}
225 
226 	if (is_power_of_2(elem_size)) {
227 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
228 	} else {
229 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
230 	}
231 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
232 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
233 	*insn++ = BPF_MOV64_IMM(ret, 0);
234 	return insn - insn_buf;
235 }
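
/* Roughly, the emitted sequence computes the following C (an illustrative
 * sketch, not part of the original file); the AND with index_mask is only
 * emitted when Spectre v1 mitigations are in effect:
 *
 *	u32 index = *(u32 *)key;
 *
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;
 *	return array->value + (u64)elem_size * index;
 */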
236 
237 /* Called from eBPF program */
238 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
239 {
240 	struct bpf_array *array = container_of(map, struct bpf_array, map);
241 	u32 index = *(u32 *)key;
242 
243 	if (unlikely(index >= array->map.max_entries))
244 		return NULL;
245 
246 	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
247 }
248 
249 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
250 {
251 	struct bpf_array *array = container_of(map, struct bpf_array, map);
252 	u32 index = *(u32 *)key;
253 
254 	if (cpu >= nr_cpu_ids)
255 		return NULL;
256 
257 	if (unlikely(index >= array->map.max_entries))
258 		return NULL;
259 
260 	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
261 }
262 
263 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
264 {
265 	struct bpf_array *array = container_of(map, struct bpf_array, map);
266 	u32 index = *(u32 *)key;
267 	void __percpu *pptr;
268 	int cpu, off = 0;
269 	u32 size;
270 
271 	if (unlikely(index >= array->map.max_entries))
272 		return -ENOENT;
273 
274 	/* per_cpu areas are zero-filled and bpf programs can only
275 	 * access 'value_size' of them, so copying rounded areas
276 	 * will not leak any kernel data
277 	 */
278 	size = array->elem_size;
279 	rcu_read_lock();
280 	pptr = array->pptrs[index & array->index_mask];
281 	for_each_possible_cpu(cpu) {
282 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
283 		off += size;
284 	}
285 	rcu_read_unlock();
286 	return 0;
287 }
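
/* Illustrative note, not part of the original file: the 'value' buffer passed
 * in by the syscall layer is expected to hold one round_up(value_size, 8)
 * sized slot per possible CPU, packed back to back:
 *
 *	[cpu 0 slot][cpu 1 slot] ... [cpu N-1 slot]
 */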
288 
289 /* Called from syscall */
290 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
291 {
292 	struct bpf_array *array = container_of(map, struct bpf_array, map);
293 	u32 index = key ? *(u32 *)key : U32_MAX;
294 	u32 *next = (u32 *)next_key;
295 
296 	if (index >= array->map.max_entries) {
297 		*next = 0;
298 		return 0;
299 	}
300 
301 	if (index == array->map.max_entries - 1)
302 		return -ENOENT;
303 
304 	*next = index + 1;
305 	return 0;
306 }
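
/* Illustrative sketch, not part of the original file, assuming libbpf's
 * bpf_map_get_next_key() wrapper: the semantics above yield the usual user
 * space iteration pattern, where a NULL or out-of-range key restarts at
 * index 0 and the last index returns -ENOENT:
 *
 *	__u32 key, next;
 *
 *	if (!bpf_map_get_next_key(map_fd, NULL, &next)) {
 *		do {
 *			key = next;
 *			// process 'key' here
 *		} while (!bpf_map_get_next_key(map_fd, &key, &next));
 *	}
 */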
307 
308 static void check_and_free_fields(struct bpf_array *arr, void *val)
309 {
310 	if (map_value_has_timer(&arr->map))
311 		bpf_timer_cancel_and_free(val + arr->map.timer_off);
312 	if (map_value_has_kptrs(&arr->map))
313 		bpf_map_free_kptrs(&arr->map, val);
314 }
315 
316 /* Called from syscall or from eBPF program */
317 static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
318 				 u64 map_flags)
319 {
320 	struct bpf_array *array = container_of(map, struct bpf_array, map);
321 	u32 index = *(u32 *)key;
322 	char *val;
323 
324 	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
325 		/* unknown flags */
326 		return -EINVAL;
327 
328 	if (unlikely(index >= array->map.max_entries))
329 		/* all elements were pre-allocated, cannot insert a new one */
330 		return -E2BIG;
331 
332 	if (unlikely(map_flags & BPF_NOEXIST))
333 		/* all elements already exist */
334 		return -EEXIST;
335 
336 	if (unlikely((map_flags & BPF_F_LOCK) &&
337 		     !map_value_has_spin_lock(map)))
338 		return -EINVAL;
339 
340 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
341 		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
342 		       value, map->value_size);
343 	} else {
344 		val = array->value +
345 			(u64)array->elem_size * (index & array->index_mask);
346 		if (map_flags & BPF_F_LOCK)
347 			copy_map_value_locked(map, val, value, false);
348 		else
349 			copy_map_value(map, val, value);
350 		check_and_free_fields(array, val);
351 	}
352 	return 0;
353 }
354 
355 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
356 			    u64 map_flags)
357 {
358 	struct bpf_array *array = container_of(map, struct bpf_array, map);
359 	u32 index = *(u32 *)key;
360 	void __percpu *pptr;
361 	int cpu, off = 0;
362 	u32 size;
363 
364 	if (unlikely(map_flags > BPF_EXIST))
365 		/* unknown flags */
366 		return -EINVAL;
367 
368 	if (unlikely(index >= array->map.max_entries))
369 		/* all elements were pre-allocated, cannot insert a new one */
370 		return -E2BIG;
371 
372 	if (unlikely(map_flags == BPF_NOEXIST))
373 		/* all elements already exist */
374 		return -EEXIST;
375 
376 	/* user space will provide round_up(value_size, 8) bytes that
377 	 * will be copied into the per-cpu area. bpf programs can only access
378 	 * value_size of it. During lookup the same extra bytes will be
379 	 * returned, or zeros which were zero-filled by percpu_alloc,
380 	 * so no kernel data can leak
381 	 */
382 	size = array->elem_size;
383 	rcu_read_lock();
384 	pptr = array->pptrs[index & array->index_mask];
385 	for_each_possible_cpu(cpu) {
386 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
387 		off += size;
388 	}
389 	rcu_read_unlock();
390 	return 0;
391 }
392 
393 /* Called from syscall or from eBPF program */
394 static int array_map_delete_elem(struct bpf_map *map, void *key)
395 {
396 	return -EINVAL;
397 }
398 
399 static void *array_map_vmalloc_addr(struct bpf_array *array)
400 {
401 	return (void *)round_down((unsigned long)array, PAGE_SIZE);
402 }
403 
404 static void array_map_free_timers(struct bpf_map *map)
405 {
406 	struct bpf_array *array = container_of(map, struct bpf_array, map);
407 	int i;
408 
409 	/* We don't reset or free kptr on uref dropping to zero. */
410 	if (!map_value_has_timer(map))
411 		return;
412 
413 	for (i = 0; i < array->map.max_entries; i++)
414 		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
415 }
416 
417 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
418 static void array_map_free(struct bpf_map *map)
419 {
420 	struct bpf_array *array = container_of(map, struct bpf_array, map);
421 	int i;
422 
423 	if (map_value_has_kptrs(map)) {
424 		for (i = 0; i < array->map.max_entries; i++)
425 			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
426 		bpf_map_free_kptr_off_tab(map);
427 	}
428 
429 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
430 		bpf_array_free_percpu(array);
431 
432 	if (array->map.map_flags & BPF_F_MMAPABLE)
433 		bpf_map_area_free(array_map_vmalloc_addr(array));
434 	else
435 		bpf_map_area_free(array);
436 }
437 
438 static void array_map_seq_show_elem(struct bpf_map *map, void *key,
439 				    struct seq_file *m)
440 {
441 	void *value;
442 
443 	rcu_read_lock();
444 
445 	value = array_map_lookup_elem(map, key);
446 	if (!value) {
447 		rcu_read_unlock();
448 		return;
449 	}
450 
451 	if (map->btf_key_type_id)
452 		seq_printf(m, "%u: ", *(u32 *)key);
453 	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
454 	seq_puts(m, "\n");
455 
456 	rcu_read_unlock();
457 }
458 
459 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
460 					   struct seq_file *m)
461 {
462 	struct bpf_array *array = container_of(map, struct bpf_array, map);
463 	u32 index = *(u32 *)key;
464 	void __percpu *pptr;
465 	int cpu;
466 
467 	rcu_read_lock();
468 
469 	seq_printf(m, "%u: {\n", *(u32 *)key);
470 	pptr = array->pptrs[index & array->index_mask];
471 	for_each_possible_cpu(cpu) {
472 		seq_printf(m, "\tcpu%d: ", cpu);
473 		btf_type_seq_show(map->btf, map->btf_value_type_id,
474 				  per_cpu_ptr(pptr, cpu), m);
475 		seq_puts(m, "\n");
476 	}
477 	seq_puts(m, "}\n");
478 
479 	rcu_read_unlock();
480 }
481 
482 static int array_map_check_btf(const struct bpf_map *map,
483 			       const struct btf *btf,
484 			       const struct btf_type *key_type,
485 			       const struct btf_type *value_type)
486 {
487 	u32 int_data;
488 
489 	/* One exception for keyless BTF: .bss/.data/.rodata map */
490 	if (btf_type_is_void(key_type)) {
491 		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
492 		    map->max_entries != 1)
493 			return -EINVAL;
494 
495 		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
496 			return -EINVAL;
497 
498 		return 0;
499 	}
500 
501 	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
502 		return -EINVAL;
503 
504 	int_data = *(u32 *)(key_type + 1);
505 	/* bpf array can only take a u32 key. This check makes sure
506 	 * that the btf matches the attr used during map_create.
507 	 */
508 	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
509 		return -EINVAL;
510 
511 	return 0;
512 }
513 
514 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
515 {
516 	struct bpf_array *array = container_of(map, struct bpf_array, map);
517 	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
518 
519 	if (!(map->map_flags & BPF_F_MMAPABLE))
520 		return -EINVAL;
521 
522 	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
523 	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
524 		return -EINVAL;
525 
526 	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
527 				   vma->vm_pgoff + pgoff);
528 }
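
/* Illustrative sketch, not part of the original file: a BPF_F_MMAPABLE array
 * can be mapped from user space roughly as below, where offset 0 of the
 * mapping corresponds to array->value (element 0) and 'len' may cover up to
 * PAGE_ALIGN(max_entries * round_up(value_size, 8)) bytes:
 *
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  map_fd, 0);
 */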
529 
530 static bool array_map_meta_equal(const struct bpf_map *meta0,
531 				 const struct bpf_map *meta1)
532 {
533 	if (!bpf_map_meta_equal(meta0, meta1))
534 		return false;
535 	return meta0->map_flags & BPF_F_INNER_MAP ? true :
536 	       meta0->max_entries == meta1->max_entries;
537 }
538 
539 struct bpf_iter_seq_array_map_info {
540 	struct bpf_map *map;
541 	void *percpu_value_buf;
542 	u32 index;
543 };
544 
545 static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
546 {
547 	struct bpf_iter_seq_array_map_info *info = seq->private;
548 	struct bpf_map *map = info->map;
549 	struct bpf_array *array;
550 	u32 index;
551 
552 	if (info->index >= map->max_entries)
553 		return NULL;
554 
555 	if (*pos == 0)
556 		++*pos;
557 	array = container_of(map, struct bpf_array, map);
558 	index = info->index & array->index_mask;
559 	if (info->percpu_value_buf)
560 		return array->pptrs[index];
561 	return array_map_elem_ptr(array, index);
562 }
563 
564 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
565 {
566 	struct bpf_iter_seq_array_map_info *info = seq->private;
567 	struct bpf_map *map = info->map;
568 	struct bpf_array *array;
569 	u32 index;
570 
571 	++*pos;
572 	++info->index;
573 	if (info->index >= map->max_entries)
574 		return NULL;
575 
576 	array = container_of(map, struct bpf_array, map);
577 	index = info->index & array->index_mask;
578 	if (info->percpu_value_buf)
579 		return array->pptrs[index];
580 	return array_map_elem_ptr(array, index);
581 }
582 
583 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
584 {
585 	struct bpf_iter_seq_array_map_info *info = seq->private;
586 	struct bpf_iter__bpf_map_elem ctx = {};
587 	struct bpf_map *map = info->map;
588 	struct bpf_array *array = container_of(map, struct bpf_array, map);
589 	struct bpf_iter_meta meta;
590 	struct bpf_prog *prog;
591 	int off = 0, cpu = 0;
592 	void __percpu **pptr;
593 	u32 size;
594 
595 	meta.seq = seq;
596 	prog = bpf_iter_get_info(&meta, v == NULL);
597 	if (!prog)
598 		return 0;
599 
600 	ctx.meta = &meta;
601 	ctx.map = info->map;
602 	if (v) {
603 		ctx.key = &info->index;
604 
605 		if (!info->percpu_value_buf) {
606 			ctx.value = v;
607 		} else {
608 			pptr = v;
609 			size = array->elem_size;
610 			for_each_possible_cpu(cpu) {
611 				bpf_long_memcpy(info->percpu_value_buf + off,
612 						per_cpu_ptr(pptr, cpu),
613 						size);
614 				off += size;
615 			}
616 			ctx.value = info->percpu_value_buf;
617 		}
618 	}
619 
620 	return bpf_iter_run_prog(prog, &ctx);
621 }
622 
623 static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
624 {
625 	return __bpf_array_map_seq_show(seq, v);
626 }
627 
628 static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
629 {
630 	if (!v)
631 		(void)__bpf_array_map_seq_show(seq, NULL);
632 }
633 
634 static int bpf_iter_init_array_map(void *priv_data,
635 				   struct bpf_iter_aux_info *aux)
636 {
637 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
638 	struct bpf_map *map = aux->map;
639 	struct bpf_array *array = container_of(map, struct bpf_array, map);
640 	void *value_buf;
641 	u32 buf_size;
642 
643 	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
644 		buf_size = array->elem_size * num_possible_cpus();
645 		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
646 		if (!value_buf)
647 			return -ENOMEM;
648 
649 		seq_info->percpu_value_buf = value_buf;
650 	}
651 
652 	seq_info->map = map;
653 	return 0;
654 }
655 
656 static void bpf_iter_fini_array_map(void *priv_data)
657 {
658 	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
659 
660 	kfree(seq_info->percpu_value_buf);
661 }
662 
663 static const struct seq_operations bpf_array_map_seq_ops = {
664 	.start	= bpf_array_map_seq_start,
665 	.next	= bpf_array_map_seq_next,
666 	.stop	= bpf_array_map_seq_stop,
667 	.show	= bpf_array_map_seq_show,
668 };
669 
670 static const struct bpf_iter_seq_info iter_seq_info = {
671 	.seq_ops		= &bpf_array_map_seq_ops,
672 	.init_seq_private	= bpf_iter_init_array_map,
673 	.fini_seq_private	= bpf_iter_fini_array_map,
674 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
675 };
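
/* Illustrative sketch, not part of the original file (names are made up):
 * the seq_info above backs "iter/bpf_map_elem" iterator programs, whose
 * context mirrors the ctx assembled in __bpf_array_map_seq_show():
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (!key || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 */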
676 
677 static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
678 				   void *callback_ctx, u64 flags)
679 {
680 	u32 i, key, num_elems = 0;
681 	struct bpf_array *array;
682 	bool is_percpu;
683 	u64 ret = 0;
684 	void *val;
685 
686 	if (flags != 0)
687 		return -EINVAL;
688 
689 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
690 	array = container_of(map, struct bpf_array, map);
691 	if (is_percpu)
692 		migrate_disable();
693 	for (i = 0; i < map->max_entries; i++) {
694 		if (is_percpu)
695 			val = this_cpu_ptr(array->pptrs[i]);
696 		else
697 			val = array_map_elem_ptr(array, i);
698 		num_elems++;
699 		key = i;
700 		ret = callback_fn((u64)(long)map, (u64)(long)&key,
701 				  (u64)(long)val, (u64)(long)callback_ctx, 0);
702 		/* return value: 0 - continue, 1 - stop and return */
703 		if (ret)
704 			break;
705 	}
706 
707 	if (is_percpu)
708 		migrate_enable();
709 	return num_elems;
710 }
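
/* Illustrative sketch, not part of the original file (names are made up):
 * from a BPF program this path is reached through the bpf_for_each_map_elem()
 * helper, with a callback following the convention noted above
 * (0 - continue, 1 - stop), here assuming u64-sized values:
 *
 *	static long count_cb(struct bpf_map *map, __u32 *key, __u64 *val,
 *			     void *ctx)
 *	{
 *		(*(__u64 *)ctx)++;
 *		return 0;
 *	}
 *
 *	__u64 count = 0;
 *
 *	bpf_for_each_map_elem(&my_array, count_cb, &count, 0);
 */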
711 
712 BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
713 const struct bpf_map_ops array_map_ops = {
714 	.map_meta_equal = array_map_meta_equal,
715 	.map_alloc_check = array_map_alloc_check,
716 	.map_alloc = array_map_alloc,
717 	.map_free = array_map_free,
718 	.map_get_next_key = array_map_get_next_key,
719 	.map_release_uref = array_map_free_timers,
720 	.map_lookup_elem = array_map_lookup_elem,
721 	.map_update_elem = array_map_update_elem,
722 	.map_delete_elem = array_map_delete_elem,
723 	.map_gen_lookup = array_map_gen_lookup,
724 	.map_direct_value_addr = array_map_direct_value_addr,
725 	.map_direct_value_meta = array_map_direct_value_meta,
726 	.map_mmap = array_map_mmap,
727 	.map_seq_show_elem = array_map_seq_show_elem,
728 	.map_check_btf = array_map_check_btf,
729 	.map_lookup_batch = generic_map_lookup_batch,
730 	.map_update_batch = generic_map_update_batch,
731 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
732 	.map_for_each_callback = bpf_for_each_array_elem,
733 	.map_btf_id = &array_map_btf_ids[0],
734 	.iter_seq_info = &iter_seq_info,
735 };
736 
737 const struct bpf_map_ops percpu_array_map_ops = {
738 	.map_meta_equal = bpf_map_meta_equal,
739 	.map_alloc_check = array_map_alloc_check,
740 	.map_alloc = array_map_alloc,
741 	.map_free = array_map_free,
742 	.map_get_next_key = array_map_get_next_key,
743 	.map_lookup_elem = percpu_array_map_lookup_elem,
744 	.map_update_elem = array_map_update_elem,
745 	.map_delete_elem = array_map_delete_elem,
746 	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
747 	.map_seq_show_elem = percpu_array_map_seq_show_elem,
748 	.map_check_btf = array_map_check_btf,
749 	.map_lookup_batch = generic_map_lookup_batch,
750 	.map_update_batch = generic_map_update_batch,
751 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
752 	.map_for_each_callback = bpf_for_each_array_elem,
753 	.map_btf_id = &array_map_btf_ids[0],
754 	.iter_seq_info = &iter_seq_info,
755 };
756 
757 static int fd_array_map_alloc_check(union bpf_attr *attr)
758 {
759 	/* only file descriptors can be stored in this type of map */
760 	if (attr->value_size != sizeof(u32))
761 		return -EINVAL;
762 	/* Program read-only/write-only not supported for special maps yet. */
763 	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
764 		return -EINVAL;
765 	return array_map_alloc_check(attr);
766 }
767 
768 static void fd_array_map_free(struct bpf_map *map)
769 {
770 	struct bpf_array *array = container_of(map, struct bpf_array, map);
771 	int i;
772 
773 	/* make sure it's empty */
774 	for (i = 0; i < array->map.max_entries; i++)
775 		BUG_ON(array->ptrs[i] != NULL);
776 
777 	bpf_map_area_free(array);
778 }
779 
780 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
781 {
782 	return ERR_PTR(-EOPNOTSUPP);
783 }
784 
785 /* only called from syscall */
786 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
787 {
788 	void **elem, *ptr;
789 	int ret = 0;
790 
791 	if (!map->ops->map_fd_sys_lookup_elem)
792 		return -ENOTSUPP;
793 
794 	rcu_read_lock();
795 	elem = array_map_lookup_elem(map, key);
796 	if (elem && (ptr = READ_ONCE(*elem)))
797 		*value = map->ops->map_fd_sys_lookup_elem(ptr);
798 	else
799 		ret = -ENOENT;
800 	rcu_read_unlock();
801 
802 	return ret;
803 }
804 
805 /* only called from syscall */
806 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
807 				 void *key, void *value, u64 map_flags)
808 {
809 	struct bpf_array *array = container_of(map, struct bpf_array, map);
810 	void *new_ptr, *old_ptr;
811 	u32 index = *(u32 *)key, ufd;
812 
813 	if (map_flags != BPF_ANY)
814 		return -EINVAL;
815 
816 	if (index >= array->map.max_entries)
817 		return -E2BIG;
818 
819 	ufd = *(u32 *)value;
820 	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
821 	if (IS_ERR(new_ptr))
822 		return PTR_ERR(new_ptr);
823 
824 	if (map->ops->map_poke_run) {
825 		mutex_lock(&array->aux->poke_mutex);
826 		old_ptr = xchg(array->ptrs + index, new_ptr);
827 		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
828 		mutex_unlock(&array->aux->poke_mutex);
829 	} else {
830 		old_ptr = xchg(array->ptrs + index, new_ptr);
831 	}
832 
833 	if (old_ptr)
834 		map->ops->map_fd_put_ptr(old_ptr);
835 	return 0;
836 }
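
/* Illustrative sketch, not part of the original file: from user space an fd
 * array slot is written by storing a file descriptor as the value, and only
 * BPF_ANY is accepted; e.g. installing a loaded program into a prog array
 * with libbpf:
 *
 *	__u32 slot = 0, prog_fd = ...;	// fd of a loaded BPF program
 *
 *	bpf_map_update_elem(map_fd, &slot, &prog_fd, BPF_ANY);
 */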
837 
838 static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
839 {
840 	struct bpf_array *array = container_of(map, struct bpf_array, map);
841 	void *old_ptr;
842 	u32 index = *(u32 *)key;
843 
844 	if (index >= array->map.max_entries)
845 		return -E2BIG;
846 
847 	if (map->ops->map_poke_run) {
848 		mutex_lock(&array->aux->poke_mutex);
849 		old_ptr = xchg(array->ptrs + index, NULL);
850 		map->ops->map_poke_run(map, index, old_ptr, NULL);
851 		mutex_unlock(&array->aux->poke_mutex);
852 	} else {
853 		old_ptr = xchg(array->ptrs + index, NULL);
854 	}
855 
856 	if (old_ptr) {
857 		map->ops->map_fd_put_ptr(old_ptr);
858 		return 0;
859 	} else {
860 		return -ENOENT;
861 	}
862 }
863 
864 static void *prog_fd_array_get_ptr(struct bpf_map *map,
865 				   struct file *map_file, int fd)
866 {
867 	struct bpf_prog *prog = bpf_prog_get(fd);
868 
869 	if (IS_ERR(prog))
870 		return prog;
871 
872 	if (!bpf_prog_map_compatible(map, prog)) {
873 		bpf_prog_put(prog);
874 		return ERR_PTR(-EINVAL);
875 	}
876 
877 	return prog;
878 }
879 
880 static void prog_fd_array_put_ptr(void *ptr)
881 {
882 	bpf_prog_put(ptr);
883 }
884 
885 static u32 prog_fd_array_sys_lookup_elem(void *ptr)
886 {
887 	return ((struct bpf_prog *)ptr)->aux->id;
888 }
889 
890 /* decrement refcnt of all bpf_progs that are stored in this map */
891 static void bpf_fd_array_map_clear(struct bpf_map *map)
892 {
893 	struct bpf_array *array = container_of(map, struct bpf_array, map);
894 	int i;
895 
896 	for (i = 0; i < array->map.max_entries; i++)
897 		fd_array_map_delete_elem(map, &i);
898 }
899 
900 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
901 					 struct seq_file *m)
902 {
903 	void **elem, *ptr;
904 	u32 prog_id;
905 
906 	rcu_read_lock();
907 
908 	elem = array_map_lookup_elem(map, key);
909 	if (elem) {
910 		ptr = READ_ONCE(*elem);
911 		if (ptr) {
912 			seq_printf(m, "%u: ", *(u32 *)key);
913 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
914 			btf_type_seq_show(map->btf, map->btf_value_type_id,
915 					  &prog_id, m);
916 			seq_puts(m, "\n");
917 		}
918 	}
919 
920 	rcu_read_unlock();
921 }
922 
923 struct prog_poke_elem {
924 	struct list_head list;
925 	struct bpf_prog_aux *aux;
926 };
927 
928 static int prog_array_map_poke_track(struct bpf_map *map,
929 				     struct bpf_prog_aux *prog_aux)
930 {
931 	struct prog_poke_elem *elem;
932 	struct bpf_array_aux *aux;
933 	int ret = 0;
934 
935 	aux = container_of(map, struct bpf_array, map)->aux;
936 	mutex_lock(&aux->poke_mutex);
937 	list_for_each_entry(elem, &aux->poke_progs, list) {
938 		if (elem->aux == prog_aux)
939 			goto out;
940 	}
941 
942 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
943 	if (!elem) {
944 		ret = -ENOMEM;
945 		goto out;
946 	}
947 
948 	INIT_LIST_HEAD(&elem->list);
949 	/* We must track the program's aux info at this point in time
950 	 * since the program pointer itself may not be stable yet, see
951 	 * also comment in prog_array_map_poke_run().
952 	 */
953 	elem->aux = prog_aux;
954 
955 	list_add_tail(&elem->list, &aux->poke_progs);
956 out:
957 	mutex_unlock(&aux->poke_mutex);
958 	return ret;
959 }
960 
961 static void prog_array_map_poke_untrack(struct bpf_map *map,
962 					struct bpf_prog_aux *prog_aux)
963 {
964 	struct prog_poke_elem *elem, *tmp;
965 	struct bpf_array_aux *aux;
966 
967 	aux = container_of(map, struct bpf_array, map)->aux;
968 	mutex_lock(&aux->poke_mutex);
969 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
970 		if (elem->aux == prog_aux) {
971 			list_del_init(&elem->list);
972 			kfree(elem);
973 			break;
974 		}
975 	}
976 	mutex_unlock(&aux->poke_mutex);
977 }
978 
979 static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
980 				    struct bpf_prog *old,
981 				    struct bpf_prog *new)
982 {
983 	u8 *old_addr, *new_addr, *old_bypass_addr;
984 	struct prog_poke_elem *elem;
985 	struct bpf_array_aux *aux;
986 
987 	aux = container_of(map, struct bpf_array, map)->aux;
988 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
989 
990 	list_for_each_entry(elem, &aux->poke_progs, list) {
991 		struct bpf_jit_poke_descriptor *poke;
992 		int i, ret;
993 
994 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
995 			poke = &elem->aux->poke_tab[i];
996 
997 			/* A few things to be aware of:
998 			 *
999 			 * 1) We can only ever access aux in this context, but
1000 			 *    not aux->prog since it might not be stable yet and
1001 			 *    there could be danger of use after free otherwise.
1002 			 * 2) Initially when we start tracking aux, the program
1003 			 *    is not JITed yet and also does not have a kallsyms
1004 			 *    entry. We skip these as poke->tailcall_target_stable
1005 			 *    is not active yet. The JIT will do the final fixup
1006 			 *    before setting it stable. The various
1007 			 *    poke->tailcall_target_stable are successively
1008 			 *    activated, so tail call updates can arrive from here
1009 			 *    while JIT is still finishing its final fixup for
1010 			 *    non-activated poke entries.
1011 			 * 3) On program teardown, the program's kallsym entry gets
1012 			 *    removed from an RCU callback, but we can only untrack
1013 			 *    from sleepable context, therefore bpf_arch_text_poke()
1014 			 *    might not see that this is in BPF text section and
1015 			 *    bails out with -EINVAL. As these are unreachable since
1016 			 *    RCU grace period already passed, we simply skip them.
1017 			 * 4) Also, programs reaching a refcount of zero while patching
1018 			 *    is in progress are okay since we're protected under
1019 			 *    poke_mutex and untrack the programs before the JIT
1020 			 *    buffer is freed. When we're still in the middle of
1021 			 *    patching and suddenly kallsyms entry of the program
1022 			 *    gets evicted, we just skip the rest which is fine due
1023 			 *    to point 3).
1024 			 * 5) Any other error happening below from bpf_arch_text_poke()
1025 			 *    is an unexpected bug.
1026 			 */
1027 			if (!READ_ONCE(poke->tailcall_target_stable))
1028 				continue;
1029 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1030 				continue;
1031 			if (poke->tail_call.map != map ||
1032 			    poke->tail_call.key != key)
1033 				continue;
1034 
1035 			old_bypass_addr = old ? NULL : poke->bypass_addr;
1036 			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
1037 			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
1038 
1039 			if (new) {
1040 				ret = bpf_arch_text_poke(poke->tailcall_target,
1041 							 BPF_MOD_JUMP,
1042 							 old_addr, new_addr);
1043 				BUG_ON(ret < 0 && ret != -EINVAL);
1044 				if (!old) {
1045 					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1046 								 BPF_MOD_JUMP,
1047 								 poke->bypass_addr,
1048 								 NULL);
1049 					BUG_ON(ret < 0 && ret != -EINVAL);
1050 				}
1051 			} else {
1052 				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1053 							 BPF_MOD_JUMP,
1054 							 old_bypass_addr,
1055 							 poke->bypass_addr);
1056 				BUG_ON(ret < 0 && ret != -EINVAL);
1057 				/* let other CPUs finish executing the program
1058 				 * so that it will not be possible to expose them
1059 				 * to an invalid nop, stack unwind or nop state
1060 				 */
1061 				if (!ret)
1062 					synchronize_rcu();
1063 				ret = bpf_arch_text_poke(poke->tailcall_target,
1064 							 BPF_MOD_JUMP,
1065 							 old_addr, NULL);
1066 				BUG_ON(ret < 0 && ret != -EINVAL);
1067 			}
1068 		}
1069 	}
1070 }
1071 
1072 static void prog_array_map_clear_deferred(struct work_struct *work)
1073 {
1074 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1075 					   work)->map;
1076 	bpf_fd_array_map_clear(map);
1077 	bpf_map_put(map);
1078 }
1079 
1080 static void prog_array_map_clear(struct bpf_map *map)
1081 {
1082 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1083 						 map)->aux;
1084 	bpf_map_inc(map);
1085 	schedule_work(&aux->work);
1086 }
1087 
1088 static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
1089 {
1090 	struct bpf_array_aux *aux;
1091 	struct bpf_map *map;
1092 
1093 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
1094 	if (!aux)
1095 		return ERR_PTR(-ENOMEM);
1096 
1097 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1098 	INIT_LIST_HEAD(&aux->poke_progs);
1099 	mutex_init(&aux->poke_mutex);
1100 
1101 	map = array_map_alloc(attr);
1102 	if (IS_ERR(map)) {
1103 		kfree(aux);
1104 		return map;
1105 	}
1106 
1107 	container_of(map, struct bpf_array, map)->aux = aux;
1108 	aux->map = map;
1109 
1110 	return map;
1111 }
1112 
1113 static void prog_array_map_free(struct bpf_map *map)
1114 {
1115 	struct prog_poke_elem *elem, *tmp;
1116 	struct bpf_array_aux *aux;
1117 
1118 	aux = container_of(map, struct bpf_array, map)->aux;
1119 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1120 		list_del_init(&elem->list);
1121 		kfree(elem);
1122 	}
1123 	kfree(aux);
1124 	fd_array_map_free(map);
1125 }
1126 
1127 /* prog_array->aux->{type,jited} is a runtime binding.
1128  * A static check alone in the verifier is not enough.
1129  * Thus, prog_array_map cannot be used as an inner_map
1130  * and map_meta_equal is not implemented.
1131  */
1132 const struct bpf_map_ops prog_array_map_ops = {
1133 	.map_alloc_check = fd_array_map_alloc_check,
1134 	.map_alloc = prog_array_map_alloc,
1135 	.map_free = prog_array_map_free,
1136 	.map_poke_track = prog_array_map_poke_track,
1137 	.map_poke_untrack = prog_array_map_poke_untrack,
1138 	.map_poke_run = prog_array_map_poke_run,
1139 	.map_get_next_key = array_map_get_next_key,
1140 	.map_lookup_elem = fd_array_map_lookup_elem,
1141 	.map_delete_elem = fd_array_map_delete_elem,
1142 	.map_fd_get_ptr = prog_fd_array_get_ptr,
1143 	.map_fd_put_ptr = prog_fd_array_put_ptr,
1144 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1145 	.map_release_uref = prog_array_map_clear,
1146 	.map_seq_show_elem = prog_array_map_seq_show_elem,
1147 	.map_btf_id = &array_map_btf_ids[0],
1148 };
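
/* Illustrative sketch, not part of the original file ('jmp_table' and 'slot'
 * are made-up names): the datapath consumer of a prog array is the
 * bpf_tail_call() helper, which jumps to the program stored in the given
 * slot and does not return on success:
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *	// only reached if the slot is empty or the tail call failed
 */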
1149 
1150 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
1151 						   struct file *map_file)
1152 {
1153 	struct bpf_event_entry *ee;
1154 
1155 	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
1156 	if (ee) {
1157 		ee->event = perf_file->private_data;
1158 		ee->perf_file = perf_file;
1159 		ee->map_file = map_file;
1160 	}
1161 
1162 	return ee;
1163 }
1164 
1165 static void __bpf_event_entry_free(struct rcu_head *rcu)
1166 {
1167 	struct bpf_event_entry *ee;
1168 
1169 	ee = container_of(rcu, struct bpf_event_entry, rcu);
1170 	fput(ee->perf_file);
1171 	kfree(ee);
1172 }
1173 
1174 static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
1175 {
1176 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1177 }
1178 
1179 static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1180 					 struct file *map_file, int fd)
1181 {
1182 	struct bpf_event_entry *ee;
1183 	struct perf_event *event;
1184 	struct file *perf_file;
1185 	u64 value;
1186 
1187 	perf_file = perf_event_get(fd);
1188 	if (IS_ERR(perf_file))
1189 		return perf_file;
1190 
1191 	ee = ERR_PTR(-EOPNOTSUPP);
1192 	event = perf_file->private_data;
1193 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
1194 		goto err_out;
1195 
1196 	ee = bpf_event_entry_gen(perf_file, map_file);
1197 	if (ee)
1198 		return ee;
1199 	ee = ERR_PTR(-ENOMEM);
1200 err_out:
1201 	fput(perf_file);
1202 	return ee;
1203 }
1204 
1205 static void perf_event_fd_array_put_ptr(void *ptr)
1206 {
1207 	bpf_event_entry_free_rcu(ptr);
1208 }
1209 
1210 static void perf_event_fd_array_release(struct bpf_map *map,
1211 					struct file *map_file)
1212 {
1213 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1214 	struct bpf_event_entry *ee;
1215 	int i;
1216 
1217 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1218 		return;
1219 
1220 	rcu_read_lock();
1221 	for (i = 0; i < array->map.max_entries; i++) {
1222 		ee = READ_ONCE(array->ptrs[i]);
1223 		if (ee && ee->map_file == map_file)
1224 			fd_array_map_delete_elem(map, &i);
1225 	}
1226 	rcu_read_unlock();
1227 }
1228 
1229 static void perf_event_fd_array_map_free(struct bpf_map *map)
1230 {
1231 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1232 		bpf_fd_array_map_clear(map);
1233 	fd_array_map_free(map);
1234 }
1235 
1236 const struct bpf_map_ops perf_event_array_map_ops = {
1237 	.map_meta_equal = bpf_map_meta_equal,
1238 	.map_alloc_check = fd_array_map_alloc_check,
1239 	.map_alloc = array_map_alloc,
1240 	.map_free = perf_event_fd_array_map_free,
1241 	.map_get_next_key = array_map_get_next_key,
1242 	.map_lookup_elem = fd_array_map_lookup_elem,
1243 	.map_delete_elem = fd_array_map_delete_elem,
1244 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1245 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
1246 	.map_release = perf_event_fd_array_release,
1247 	.map_check_btf = map_check_no_btf,
1248 	.map_btf_id = &array_map_btf_ids[0],
1249 };
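
/* Illustrative sketch, not part of the original file ('events' and 'sample'
 * are made-up names): perf event arrays are typically consumed via the
 * bpf_perf_event_output() helper, which emits a sample through the perf
 * event stored in the current CPU's slot:
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 */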
1250 
1251 #ifdef CONFIG_CGROUPS
1252 static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
1253 				     struct file *map_file /* not used */,
1254 				     int fd)
1255 {
1256 	return cgroup_get_from_fd(fd);
1257 }
1258 
1259 static void cgroup_fd_array_put_ptr(void *ptr)
1260 {
1261 	/* cgroup_put() frees the cgrp after an RCU grace period */
1262 	cgroup_put(ptr);
1263 }
1264 
1265 static void cgroup_fd_array_free(struct bpf_map *map)
1266 {
1267 	bpf_fd_array_map_clear(map);
1268 	fd_array_map_free(map);
1269 }
1270 
1271 const struct bpf_map_ops cgroup_array_map_ops = {
1272 	.map_meta_equal = bpf_map_meta_equal,
1273 	.map_alloc_check = fd_array_map_alloc_check,
1274 	.map_alloc = array_map_alloc,
1275 	.map_free = cgroup_fd_array_free,
1276 	.map_get_next_key = array_map_get_next_key,
1277 	.map_lookup_elem = fd_array_map_lookup_elem,
1278 	.map_delete_elem = fd_array_map_delete_elem,
1279 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
1280 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1281 	.map_check_btf = map_check_no_btf,
1282 	.map_btf_id = &array_map_btf_ids[0],
1283 };
1284 #endif
1285 
1286 static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
1287 {
1288 	struct bpf_map *map, *inner_map_meta;
1289 
1290 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
1291 	if (IS_ERR(inner_map_meta))
1292 		return inner_map_meta;
1293 
1294 	map = array_map_alloc(attr);
1295 	if (IS_ERR(map)) {
1296 		bpf_map_meta_free(inner_map_meta);
1297 		return map;
1298 	}
1299 
1300 	map->inner_map_meta = inner_map_meta;
1301 
1302 	return map;
1303 }
1304 
1305 static void array_of_map_free(struct bpf_map *map)
1306 {
1307 	/* map->inner_map_meta is only accessed from syscall context, which
1308 	 * is protected by fdget/fdput.
1309 	 */
1310 	bpf_map_meta_free(map->inner_map_meta);
1311 	bpf_fd_array_map_clear(map);
1312 	fd_array_map_free(map);
1313 }
1314 
1315 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
1316 {
1317 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
1318 
1319 	if (!inner_map)
1320 		return NULL;
1321 
1322 	return READ_ONCE(*inner_map);
1323 }
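
/* Illustrative sketch, not part of the original file ('outer_array', 'idx'
 * and 'inner_key' are made-up names): from a BPF program an array-of-maps
 * lookup returns the inner map, which can then be queried in turn:
 *
 *	void *inner = bpf_map_lookup_elem(&outer_array, &idx);
 *
 *	if (inner)
 *		val = bpf_map_lookup_elem(inner, &inner_key);
 */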
1324 
1325 static int array_of_map_gen_lookup(struct bpf_map *map,
1326 				   struct bpf_insn *insn_buf)
1327 {
1328 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1329 	u32 elem_size = array->elem_size;
1330 	struct bpf_insn *insn = insn_buf;
1331 	const int ret = BPF_REG_0;
1332 	const int map_ptr = BPF_REG_1;
1333 	const int index = BPF_REG_2;
1334 
1335 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
1336 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
1337 	if (!map->bypass_spec_v1) {
1338 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1339 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1340 	} else {
1341 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1342 	}
1343 	if (is_power_of_2(elem_size))
1344 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
1345 	else
1346 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
1347 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
1348 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
1349 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
1350 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
1351 	*insn++ = BPF_MOV64_IMM(ret, 0);
1352 
1353 	return insn - insn_buf;
1354 }
1355 
1356 const struct bpf_map_ops array_of_maps_map_ops = {
1357 	.map_alloc_check = fd_array_map_alloc_check,
1358 	.map_alloc = array_of_map_alloc,
1359 	.map_free = array_of_map_free,
1360 	.map_get_next_key = array_map_get_next_key,
1361 	.map_lookup_elem = array_of_map_lookup_elem,
1362 	.map_delete_elem = fd_array_map_delete_elem,
1363 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
1364 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
1365 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
1366 	.map_gen_lookup = array_of_map_gen_lookup,
1367 	.map_lookup_batch = generic_map_lookup_batch,
1368 	.map_update_batch = generic_map_update_batch,
1369 	.map_check_btf = map_check_no_btf,
1370 	.map_btf_id = &array_map_btf_ids[0],
1371 };
1372