// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

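/* Free the per-CPU value storage of every element, rescheduling between
 * iterations so that freeing a large map does not hog the CPU.
 */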
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

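/* Allocate per-CPU value storage for every element; on failure, free
 * whatever was already allocated and return -ENOMEM.
 */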
static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

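/* Called from syscall to create the map: size and allocate the bpf_array
 * (plus per-CPU element storage for BPF_MAP_TYPE_PERCPU_ARRAY) based on
 * the attributes already validated by array_map_alloc_check().
 */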
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

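/* Return a pointer to the element at @index. No bounds check is done here;
 * callers must validate the index first.
 */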
static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

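/* Used by the verifier for direct value access on single-element arrays:
 * report the address of the only element at offset @off.
 */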
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
					u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

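/* Inverse of array_map_direct_value_addr(): map a value address back to the
 * offset within the single element.
 */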
static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
					u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

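/* Called from eBPF program: look up the value of @key on a specific CPU. */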
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

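/* For BPF_F_MMAPABLE arrays the struct bpf_array header is offset into the
 * vmalloc'ed area so that array->value is page-aligned; recover the start of
 * the allocation for freeing and remapping.
 */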
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer on uref dropping to zero. */
	if (!btf_record_has_field(map->record, BPF_TIMER))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

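/* mmap() the value area of a BPF_F_MMAPABLE array into user space. */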
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

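/* Inner map compatibility check: with BPF_F_INNER_MAP the max_entries of
 * inner maps may differ, otherwise it must match the meta map exactly.
 */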
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

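/* map_for_each_callback implementation backing the bpf_for_each_map_elem()
 * helper: invoke @callback_fn on each element until it returns non-zero.
 */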
static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

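/* Report the memory usage of this map, mirroring the sizing logic of
 * array_map_alloc().
 */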
static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

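/* Translate a program fd supplied by user space into a bpf_prog reference,
 * rejecting programs that are not compatible with this prog array.
 */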
static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

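/* Register @prog_aux so that future updates to this prog array can patch the
 * program's tail call poke descriptors, see prog_array_map_poke_run().
 */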
static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

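/* Drop the tracking entry added by prog_array_map_poke_track(). */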
prog_array_map_poke_untrack(struct bpf_map * map,struct bpf_prog_aux * prog_aux)1006da765a2fSDaniel Borkmann static void prog_array_map_poke_untrack(struct bpf_map *map,
1007da765a2fSDaniel Borkmann struct bpf_prog_aux *prog_aux)
1008da765a2fSDaniel Borkmann {
1009da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp;
1010da765a2fSDaniel Borkmann struct bpf_array_aux *aux;
1011da765a2fSDaniel Borkmann
1012da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1013da765a2fSDaniel Borkmann mutex_lock(&aux->poke_mutex);
1014da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1015da765a2fSDaniel Borkmann if (elem->aux == prog_aux) {
1016da765a2fSDaniel Borkmann list_del_init(&elem->list);
1017da765a2fSDaniel Borkmann kfree(elem);
1018da765a2fSDaniel Borkmann break;
1019da765a2fSDaniel Borkmann }
1020da765a2fSDaniel Borkmann }
1021da765a2fSDaniel Borkmann mutex_unlock(&aux->poke_mutex);
1022da765a2fSDaniel Borkmann }
1023da765a2fSDaniel Borkmann
1024f64b2dc8SJiri Olsa void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
1025f64b2dc8SJiri Olsa struct bpf_prog *new, struct bpf_prog *old)
1026f64b2dc8SJiri Olsa {
1027f64b2dc8SJiri Olsa WARN_ON_ONCE(1);
1028f64b2dc8SJiri Olsa }
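/* Architectures with direct tail call support override this weak stub
 * (the x86-64 JIT provides one, for example). The expected semantics,
 * roughly: retarget the emitted jump that currently points at @old's
 * tail call destination to @new, or route it to the bypass path when
 * @new is NULL. All actual text patching is arch-specific; the generic
 * code below only selects which poke descriptors need updating.
 */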
1029f64b2dc8SJiri Olsa
1030da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1031da765a2fSDaniel Borkmann struct bpf_prog *old,
1032da765a2fSDaniel Borkmann struct bpf_prog *new)
1033da765a2fSDaniel Borkmann {
1034da765a2fSDaniel Borkmann struct prog_poke_elem *elem;
1035da765a2fSDaniel Borkmann struct bpf_array_aux *aux;
1036da765a2fSDaniel Borkmann
1037da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1038da765a2fSDaniel Borkmann WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1039da765a2fSDaniel Borkmann
1040da765a2fSDaniel Borkmann list_for_each_entry(elem, &aux->poke_progs, list) {
1041da765a2fSDaniel Borkmann struct bpf_jit_poke_descriptor *poke;
1042f64b2dc8SJiri Olsa int i;
1043da765a2fSDaniel Borkmann
1044da765a2fSDaniel Borkmann for (i = 0; i < elem->aux->size_poke_tab; i++) {
1045da765a2fSDaniel Borkmann poke = &elem->aux->poke_tab[i];
1046da765a2fSDaniel Borkmann
1047da765a2fSDaniel Borkmann 			/* A few things to be aware of:
1048da765a2fSDaniel Borkmann *
1049da765a2fSDaniel Borkmann * 1) We can only ever access aux in this context, but
1050da765a2fSDaniel Borkmann * not aux->prog since it might not be stable yet and
1051da765a2fSDaniel Borkmann * there could be danger of use after free otherwise.
1052da765a2fSDaniel Borkmann * 2) Initially when we start tracking aux, the program
1053da765a2fSDaniel Borkmann * is not JITed yet and also does not have a kallsyms
1054cf71b174SMaciej Fijalkowski * entry. We skip these as poke->tailcall_target_stable
1055cf71b174SMaciej Fijalkowski * is not active yet. The JIT will do the final fixup
1056cf71b174SMaciej Fijalkowski * before setting it stable. The various
1057cf71b174SMaciej Fijalkowski * poke->tailcall_target_stable are successively
1058cf71b174SMaciej Fijalkowski * activated, so tail call updates can arrive from here
1059cf71b174SMaciej Fijalkowski * while JIT is still finishing its final fixup for
1060cf71b174SMaciej Fijalkowski * non-activated poke entries.
1061f64b2dc8SJiri Olsa 			 * 3) Programs reaching a refcount of zero while patching
1062da765a2fSDaniel Borkmann 			 *    is in progress are also fine, since we're protected
1063da765a2fSDaniel Borkmann 			 *    under poke_mutex and untrack the programs before the
1064f64b2dc8SJiri Olsa 			 *    JIT buffer is freed.
1065da765a2fSDaniel Borkmann */
1066cf71b174SMaciej Fijalkowski if (!READ_ONCE(poke->tailcall_target_stable))
1067da765a2fSDaniel Borkmann continue;
1068da765a2fSDaniel Borkmann if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1069da765a2fSDaniel Borkmann continue;
1070da765a2fSDaniel Borkmann if (poke->tail_call.map != map ||
1071da765a2fSDaniel Borkmann poke->tail_call.key != key)
1072da765a2fSDaniel Borkmann continue;
1073da765a2fSDaniel Borkmann
1074f64b2dc8SJiri Olsa bpf_arch_poke_desc_update(poke, new, old);
1075da765a2fSDaniel Borkmann }
1076da765a2fSDaniel Borkmann }
1077da765a2fSDaniel Borkmann }
1078da765a2fSDaniel Borkmann
1079da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
1080da765a2fSDaniel Borkmann {
1081da765a2fSDaniel Borkmann struct bpf_map *map = container_of(work, struct bpf_array_aux,
1082da765a2fSDaniel Borkmann work)->map;
1083a9bf3a49SHou Tao bpf_fd_array_map_clear(map, true);
1084da765a2fSDaniel Borkmann bpf_map_put(map);
1085da765a2fSDaniel Borkmann }
1086da765a2fSDaniel Borkmann
1087da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1088da765a2fSDaniel Borkmann {
1089da765a2fSDaniel Borkmann struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1090da765a2fSDaniel Borkmann map)->aux;
1091da765a2fSDaniel Borkmann bpf_map_inc(map);
1092da765a2fSDaniel Borkmann schedule_work(&aux->work);
1093da765a2fSDaniel Borkmann }
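/* Refcount choreography: the bpf_map_inc() above pairs with the
 * bpf_map_put() in prog_array_map_clear_deferred(), keeping the map
 * alive until the deferred clear has run. The per-element cleanup
 * therefore always executes from workqueue context.
 */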
1094da765a2fSDaniel Borkmann
10952beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
10962beee5f5SDaniel Borkmann {
10972beee5f5SDaniel Borkmann struct bpf_array_aux *aux;
10982beee5f5SDaniel Borkmann struct bpf_map *map;
10992beee5f5SDaniel Borkmann
11006d192c79SRoman Gushchin aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
11012beee5f5SDaniel Borkmann if (!aux)
11022beee5f5SDaniel Borkmann return ERR_PTR(-ENOMEM);
11032beee5f5SDaniel Borkmann
1104da765a2fSDaniel Borkmann INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1105da765a2fSDaniel Borkmann INIT_LIST_HEAD(&aux->poke_progs);
1106da765a2fSDaniel Borkmann mutex_init(&aux->poke_mutex);
1107da765a2fSDaniel Borkmann
11082beee5f5SDaniel Borkmann map = array_map_alloc(attr);
11092beee5f5SDaniel Borkmann if (IS_ERR(map)) {
11102beee5f5SDaniel Borkmann kfree(aux);
11112beee5f5SDaniel Borkmann return map;
11122beee5f5SDaniel Borkmann }
11132beee5f5SDaniel Borkmann
11142beee5f5SDaniel Borkmann container_of(map, struct bpf_array, map)->aux = aux;
1115da765a2fSDaniel Borkmann aux->map = map;
1116da765a2fSDaniel Borkmann
11172beee5f5SDaniel Borkmann return map;
11182beee5f5SDaniel Borkmann }
11192beee5f5SDaniel Borkmann
11202beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
11212beee5f5SDaniel Borkmann {
1122da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp;
11232beee5f5SDaniel Borkmann struct bpf_array_aux *aux;
11242beee5f5SDaniel Borkmann
11252beee5f5SDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1126da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1127da765a2fSDaniel Borkmann list_del_init(&elem->list);
1128da765a2fSDaniel Borkmann kfree(elem);
1129da765a2fSDaniel Borkmann }
11302beee5f5SDaniel Borkmann kfree(aux);
11312beee5f5SDaniel Borkmann fd_array_map_free(map);
11322beee5f5SDaniel Borkmann }
11332beee5f5SDaniel Borkmann
1134f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1135f4d05259SMartin KaFai Lau  * A static check in the verifier alone is not enough.
1136f4d05259SMartin KaFai Lau * Thus, prog_array_map cannot be used as an inner_map
1137f4d05259SMartin KaFai Lau * and map_meta_equal is not implemented.
1138f4d05259SMartin KaFai Lau */
113940077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1140ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
11412beee5f5SDaniel Borkmann .map_alloc = prog_array_map_alloc,
11422beee5f5SDaniel Borkmann .map_free = prog_array_map_free,
1143da765a2fSDaniel Borkmann .map_poke_track = prog_array_map_poke_track,
1144da765a2fSDaniel Borkmann .map_poke_untrack = prog_array_map_poke_untrack,
1145da765a2fSDaniel Borkmann .map_poke_run = prog_array_map_poke_run,
114604fd61abSAlexei Starovoitov .map_get_next_key = array_map_get_next_key,
11472a36f0b9SWang Nan .map_lookup_elem = fd_array_map_lookup_elem,
11482a36f0b9SWang Nan .map_delete_elem = fd_array_map_delete_elem,
11492a36f0b9SWang Nan .map_fd_get_ptr = prog_fd_array_get_ptr,
11502a36f0b9SWang Nan .map_fd_put_ptr = prog_fd_array_put_ptr,
115114dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1152da765a2fSDaniel Borkmann .map_release_uref = prog_array_map_clear,
1153a7c19db3SYonghong Song .map_seq_show_elem = prog_array_map_seq_show_elem,
11541746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1155c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
115604fd61abSAlexei Starovoitov };
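/* A minimal BPF-side user of a prog array (sketch; libbpf BTF-style map
 * declaration, all names illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatcher(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call(ctx, &jmp_table, 0);
 *		return XDP_PASS;	// reached only if slot 0 is empty or the call fails
 *	}
 *
 * User space fills the slots with program fds via bpf_map_update_elem();
 * prog_fd_array_get_ptr() converts those fds into bpf_prog pointers on
 * update.
 */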
115704fd61abSAlexei Starovoitov
11583b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
11593b1efb19SDaniel Borkmann struct file *map_file)
1160ea317b26SKaixu Xia {
11613b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
11623b1efb19SDaniel Borkmann
1163858d68f1SDaniel Borkmann ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
11643b1efb19SDaniel Borkmann if (ee) {
11653b1efb19SDaniel Borkmann ee->event = perf_file->private_data;
11663b1efb19SDaniel Borkmann ee->perf_file = perf_file;
11673b1efb19SDaniel Borkmann ee->map_file = map_file;
11683b1efb19SDaniel Borkmann }
11693b1efb19SDaniel Borkmann
11703b1efb19SDaniel Borkmann return ee;
11713b1efb19SDaniel Borkmann }
11723b1efb19SDaniel Borkmann
11733b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
11743b1efb19SDaniel Borkmann {
11753b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
11763b1efb19SDaniel Borkmann
11773b1efb19SDaniel Borkmann ee = container_of(rcu, struct bpf_event_entry, rcu);
11783b1efb19SDaniel Borkmann fput(ee->perf_file);
11793b1efb19SDaniel Borkmann kfree(ee);
11803b1efb19SDaniel Borkmann }
11813b1efb19SDaniel Borkmann
11823b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
11833b1efb19SDaniel Borkmann {
11843b1efb19SDaniel Borkmann call_rcu(&ee->rcu, __bpf_event_entry_free);
1185ea317b26SKaixu Xia }
1186ea317b26SKaixu Xia
1187d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1188d056a788SDaniel Borkmann struct file *map_file, int fd)
1189ea317b26SKaixu Xia {
11903b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
11913b1efb19SDaniel Borkmann struct perf_event *event;
11923b1efb19SDaniel Borkmann struct file *perf_file;
1193f91840a3SAlexei Starovoitov u64 value;
1194ea317b26SKaixu Xia
11953b1efb19SDaniel Borkmann perf_file = perf_event_get(fd);
11963b1efb19SDaniel Borkmann if (IS_ERR(perf_file))
11973b1efb19SDaniel Borkmann return perf_file;
1198e03e7ee3SAlexei Starovoitov
1199f91840a3SAlexei Starovoitov ee = ERR_PTR(-EOPNOTSUPP);
12003b1efb19SDaniel Borkmann event = perf_file->private_data;
120197562633SYonghong Song if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
12023b1efb19SDaniel Borkmann goto err_out;
1203ea317b26SKaixu Xia
12043b1efb19SDaniel Borkmann ee = bpf_event_entry_gen(perf_file, map_file);
12053b1efb19SDaniel Borkmann if (ee)
12063b1efb19SDaniel Borkmann return ee;
12073b1efb19SDaniel Borkmann ee = ERR_PTR(-ENOMEM);
12083b1efb19SDaniel Borkmann err_out:
12093b1efb19SDaniel Borkmann fput(perf_file);
12103b1efb19SDaniel Borkmann return ee;
1211ea317b26SKaixu Xia }
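/* The perf_event_read_local() call above is only a capability probe:
 * events whose counter cannot be read locally (and thus could not be
 * consumed from BPF program context later on) are rejected with
 * -EOPNOTSUPP already at map update time; the read value itself is
 * discarded.
 */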
1212ea317b26SKaixu Xia
12131c40ec6bSHou Tao static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1214ea317b26SKaixu Xia {
12151c40ec6bSHou Tao /* bpf_perf_event is freed after one RCU grace period */
12163b1efb19SDaniel Borkmann bpf_event_entry_free_rcu(ptr);
12173b1efb19SDaniel Borkmann }
12183b1efb19SDaniel Borkmann
12193b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
12203b1efb19SDaniel Borkmann struct file *map_file)
12213b1efb19SDaniel Borkmann {
12223b1efb19SDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map);
12233b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
12243b1efb19SDaniel Borkmann int i;
12253b1efb19SDaniel Borkmann
1226792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1227792cacccSSong Liu return;
1228792cacccSSong Liu
12293b1efb19SDaniel Borkmann rcu_read_lock();
12303b1efb19SDaniel Borkmann for (i = 0; i < array->map.max_entries; i++) {
12313b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[i]);
12323b1efb19SDaniel Borkmann if (ee && ee->map_file == map_file)
1233a9bf3a49SHou Tao __fd_array_map_delete_elem(map, &i, true);
12343b1efb19SDaniel Borkmann }
12353b1efb19SDaniel Borkmann rcu_read_unlock();
1236ea317b26SKaixu Xia }
1237ea317b26SKaixu Xia
1238792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1239792cacccSSong Liu {
1240792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1241a9bf3a49SHou Tao bpf_fd_array_map_clear(map, false);
1242792cacccSSong Liu fd_array_map_free(map);
1243792cacccSSong Liu }
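/* BPF_F_PRESERVE_ELEMS inverts the default lifetime rule seen in
 * perf_event_fd_array_release(): normally the entries installed through
 * a given map file are purged when that file goes away; with the flag
 * they stay until the map itself is freed. A user-space creation sketch
 * (libbpf, names illustrative):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_PRESERVE_ELEMS);
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, "events",
 *				    sizeof(__u32), sizeof(__u32),
 *				    libbpf_num_possible_cpus(), &opts);
 */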
1244792cacccSSong Liu
124540077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1246f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal,
1247ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
1248ad46061fSJakub Kicinski .map_alloc = array_map_alloc,
1249792cacccSSong Liu .map_free = perf_event_fd_array_map_free,
1250ea317b26SKaixu Xia .map_get_next_key = array_map_get_next_key,
1251ea317b26SKaixu Xia .map_lookup_elem = fd_array_map_lookup_elem,
1252ea317b26SKaixu Xia .map_delete_elem = fd_array_map_delete_elem,
1253ea317b26SKaixu Xia .map_fd_get_ptr = perf_event_fd_array_get_ptr,
1254ea317b26SKaixu Xia .map_fd_put_ptr = perf_event_fd_array_put_ptr,
12553b1efb19SDaniel Borkmann .map_release = perf_event_fd_array_release,
1256e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
12571746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1258c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
1259ea317b26SKaixu Xia };
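/* Typical BPF-side use (sketch, names illustrative): each slot holds a
 * perf event opened by user space, usually one per CPU, and the program
 * streams records to it with bpf_perf_event_output():
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} events SEC(".maps");
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		__u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &pid, sizeof(pid));
 *		return 0;
 *	}
 *
 * perf_event_fd_array_get_ptr() is what turned the perf event fd stored
 * by user space into the bpf_event_entry that the helper consumes.
 */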
1260ea317b26SKaixu Xia
126160d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
12624ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
12634ed8ec52SMartin KaFai Lau struct file *map_file /* not used */,
12644ed8ec52SMartin KaFai Lau int fd)
12654ed8ec52SMartin KaFai Lau {
12664ed8ec52SMartin KaFai Lau return cgroup_get_from_fd(fd);
12674ed8ec52SMartin KaFai Lau }
12684ed8ec52SMartin KaFai Lau
12691c40ec6bSHou Tao static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
12704ed8ec52SMartin KaFai Lau {
12714ed8ec52SMartin KaFai Lau 	/* cgroup_put() frees the cgrp after an RCU grace period */
12724ed8ec52SMartin KaFai Lau cgroup_put(ptr);
12734ed8ec52SMartin KaFai Lau }
12744ed8ec52SMartin KaFai Lau
12754ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
12764ed8ec52SMartin KaFai Lau {
1277a9bf3a49SHou Tao bpf_fd_array_map_clear(map, false);
12784ed8ec52SMartin KaFai Lau fd_array_map_free(map);
12794ed8ec52SMartin KaFai Lau }
12804ed8ec52SMartin KaFai Lau
128140077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1282f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal,
1283ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
1284ad46061fSJakub Kicinski .map_alloc = array_map_alloc,
12854ed8ec52SMartin KaFai Lau .map_free = cgroup_fd_array_free,
12864ed8ec52SMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
12874ed8ec52SMartin KaFai Lau .map_lookup_elem = fd_array_map_lookup_elem,
12884ed8ec52SMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
12894ed8ec52SMartin KaFai Lau .map_fd_get_ptr = cgroup_fd_array_get_ptr,
12904ed8ec52SMartin KaFai Lau .map_fd_put_ptr = cgroup_fd_array_put_ptr,
1291e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
12921746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1293c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
12944ed8ec52SMartin KaFai Lau };
12954ed8ec52SMartin KaFai Lau #endif
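/* BPF-side sketch for the cgroup array (names illustrative): user space
 * stores the fd of an opened cgroup directory into a slot, and the
 * program tests membership against it:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
 *		__uint(max_entries, 1);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} cgrp_map SEC(".maps");
 *
 *	SEC("socket")
 *	int cgroup_filter(struct __sk_buff *skb)
 *	{
 *		if (bpf_skb_under_cgroup(skb, &cgrp_map, 0) != 1)
 *			return 0;	// not under the cgroup in slot 0
 *		return skb->len;
 *	}
 */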
129656f668dfSMartin KaFai Lau
129756f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
129856f668dfSMartin KaFai Lau {
129956f668dfSMartin KaFai Lau struct bpf_map *map, *inner_map_meta;
130056f668dfSMartin KaFai Lau
130156f668dfSMartin KaFai Lau inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
130256f668dfSMartin KaFai Lau if (IS_ERR(inner_map_meta))
130356f668dfSMartin KaFai Lau return inner_map_meta;
130456f668dfSMartin KaFai Lau
1305ad46061fSJakub Kicinski map = array_map_alloc(attr);
130656f668dfSMartin KaFai Lau if (IS_ERR(map)) {
130756f668dfSMartin KaFai Lau bpf_map_meta_free(inner_map_meta);
130856f668dfSMartin KaFai Lau return map;
130956f668dfSMartin KaFai Lau }
131056f668dfSMartin KaFai Lau
131156f668dfSMartin KaFai Lau map->inner_map_meta = inner_map_meta;
131256f668dfSMartin KaFai Lau
131356f668dfSMartin KaFai Lau return map;
131456f668dfSMartin KaFai Lau }
131556f668dfSMartin KaFai Lau
131656f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
131756f668dfSMartin KaFai Lau {
131856f668dfSMartin KaFai Lau 	/* map->inner_map_meta is only accessed from the syscall path,
131956f668dfSMartin KaFai Lau 	 * which is protected by fdget/fdput.
132056f668dfSMartin KaFai Lau */
132156f668dfSMartin KaFai Lau bpf_map_meta_free(map->inner_map_meta);
1322a9bf3a49SHou Tao bpf_fd_array_map_clear(map, false);
132356f668dfSMartin KaFai Lau fd_array_map_free(map);
132456f668dfSMartin KaFai Lau }
132556f668dfSMartin KaFai Lau
132656f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
132756f668dfSMartin KaFai Lau {
132856f668dfSMartin KaFai Lau struct bpf_map **inner_map = array_map_lookup_elem(map, key);
132956f668dfSMartin KaFai Lau
133056f668dfSMartin KaFai Lau if (!inner_map)
133156f668dfSMartin KaFai Lau return NULL;
133256f668dfSMartin KaFai Lau
133356f668dfSMartin KaFai Lau return READ_ONCE(*inner_map);
133456f668dfSMartin KaFai Lau }
133556f668dfSMartin KaFai Lau
13364a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map,
13377b0c2a05SDaniel Borkmann struct bpf_insn *insn_buf)
13387b0c2a05SDaniel Borkmann {
1339b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map);
1340d937bc34SAndrii Nakryiko u32 elem_size = array->elem_size;
13417b0c2a05SDaniel Borkmann struct bpf_insn *insn = insn_buf;
13427b0c2a05SDaniel Borkmann const int ret = BPF_REG_0;
13437b0c2a05SDaniel Borkmann const int map_ptr = BPF_REG_1;
13447b0c2a05SDaniel Borkmann const int index = BPF_REG_2;
13457b0c2a05SDaniel Borkmann
13467b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
13477b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
13482c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) {
1349b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1350b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1351b2157399SAlexei Starovoitov } else {
13527b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1353b2157399SAlexei Starovoitov }
13547b0c2a05SDaniel Borkmann if (is_power_of_2(elem_size))
13557b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
13567b0c2a05SDaniel Borkmann else
13577b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
13587b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
13597b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
13607b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
13617b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
13627b0c2a05SDaniel Borkmann *insn++ = BPF_MOV64_IMM(ret, 0);
13637b0c2a05SDaniel Borkmann
13647b0c2a05SDaniel Borkmann return insn - insn_buf;
13657b0c2a05SDaniel Borkmann }
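/* The sequence emitted above is the inline equivalent of
 * array_of_map_lookup_elem(): bounds-check the index (and mask it when
 * Spectre v1 mitigation is active), compute the element address as
 * array->value + index * elem_size, load the inner map pointer stored
 * there, and return NULL for an out-of-range index or an empty slot.
 */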
13667b0c2a05SDaniel Borkmann
136740077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1368ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
136956f668dfSMartin KaFai Lau .map_alloc = array_of_map_alloc,
137056f668dfSMartin KaFai Lau .map_free = array_of_map_free,
137156f668dfSMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
137256f668dfSMartin KaFai Lau .map_lookup_elem = array_of_map_lookup_elem,
137356f668dfSMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
137456f668dfSMartin KaFai Lau .map_fd_get_ptr = bpf_map_fd_get_ptr,
137556f668dfSMartin KaFai Lau .map_fd_put_ptr = bpf_map_fd_put_ptr,
137614dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
13777b0c2a05SDaniel Borkmann .map_gen_lookup = array_of_map_gen_lookup,
13789263dddcSTakshak Chahande .map_lookup_batch = generic_map_lookup_batch,
13799263dddcSTakshak Chahande .map_update_batch = generic_map_update_batch,
1380e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
13811746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1382c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
138356f668dfSMartin KaFai Lau };
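/* Declarative map-in-map use from the BPF side (sketch; libbpf syntax,
 * names illustrative). The program performs the same double lookup that
 * array_of_map_lookup_elem() implements for the outer step:
 *
 *	struct inner_map {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} inner SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__uint(key_size, sizeof(__u32));
 *		__array(values, struct inner_map);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner },
 *	};
 *
 * and then, inside a program:
 *
 *	__u32 slot = 0, key = 0;
 *	void *m = bpf_map_lookup_elem(&outer, &slot);
 *	if (m) {
 *		__u64 *val = bpf_map_lookup_elem(m, &key);
 *		...
 *	}
 */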
1384