xref: /openbmc/linux/kernel/bpf/syscall.c (revision 7ae5c03a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf-cgroup.h>
6 #include <linux/bpf_trace.h>
7 #include <linux/bpf_lirc.h>
8 #include <linux/bpf_verifier.h>
9 #include <linux/bsearch.h>
10 #include <linux/btf.h>
11 #include <linux/syscalls.h>
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmzone.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/fdtable.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/license.h>
21 #include <linux/filter.h>
22 #include <linux/kernel.h>
23 #include <linux/idr.h>
24 #include <linux/cred.h>
25 #include <linux/timekeeping.h>
26 #include <linux/ctype.h>
27 #include <linux/nospec.h>
28 #include <linux/audit.h>
29 #include <uapi/linux/btf.h>
30 #include <linux/pgtable.h>
31 #include <linux/bpf_lsm.h>
32 #include <linux/poll.h>
33 #include <linux/sort.h>
34 #include <linux/bpf-netns.h>
35 #include <linux/rcupdate_trace.h>
36 #include <linux/memcontrol.h>
37 #include <linux/trace_events.h>
38 
39 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
40 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
41 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
42 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
43 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
44 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
45 			IS_FD_HASH(map))
46 
47 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
48 
49 DEFINE_PER_CPU(int, bpf_prog_active);
50 static DEFINE_IDR(prog_idr);
51 static DEFINE_SPINLOCK(prog_idr_lock);
52 static DEFINE_IDR(map_idr);
53 static DEFINE_SPINLOCK(map_idr_lock);
54 static DEFINE_IDR(link_idr);
55 static DEFINE_SPINLOCK(link_idr_lock);
56 
57 int sysctl_unprivileged_bpf_disabled __read_mostly =
58 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
59 
60 static const struct bpf_map_ops * const bpf_map_types[] = {
61 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
62 #define BPF_MAP_TYPE(_id, _ops) \
63 	[_id] = &_ops,
64 #define BPF_LINK_TYPE(_id, _name)
65 #include <linux/bpf_types.h>
66 #undef BPF_PROG_TYPE
67 #undef BPF_MAP_TYPE
68 #undef BPF_LINK_TYPE
69 };
70 
71 /*
72  * If we're handed a bigger struct than we know of, ensure all the unknown bits
73  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
74  * we don't know about yet.
75  *
76  * There is a ToCToU between this function call and the following
77  * copy_from_user() call. However, this is not a concern since this function
78  * is only meant to future-proof unused bits.
79  */
80 int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
81 			     size_t expected_size,
82 			     size_t actual_size)
83 {
84 	int res;
85 
86 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
87 		return -E2BIG;
88 
89 	if (actual_size <= expected_size)
90 		return 0;
91 
92 	if (uaddr.is_kernel)
93 		res = memchr_inv(uaddr.kernel + expected_size, 0,
94 				 actual_size - expected_size) == NULL;
95 	else
96 		res = check_zeroed_user(uaddr.user + expected_size,
97 					actual_size - expected_size);
98 	if (res < 0)
99 		return res;
100 	return res ? 0 : -E2BIG;
101 }
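
/*
 * Illustrative sketch (not part of the original file): the tail-zero check
 * above is what lets a newer userspace hand a larger 'union bpf_attr' to an
 * older kernel, as long as every byte beyond what this kernel understands is
 * zero.  A hypothetical caller would therefore do something like:
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));	// zero all fields, including ones
 *					// this kernel may not know about yet
 *	// ... fill in the fields relevant to the command ...
 *	syscall(__NR_bpf, cmd, &attr, sizeof(attr));
 *
 * If any byte past 'expected_size' were non-zero, the kernel would return
 * -E2BIG instead of silently ignoring an option it cannot honour.
 */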
102 
103 const struct bpf_map_ops bpf_map_offload_ops = {
104 	.map_meta_equal = bpf_map_meta_equal,
105 	.map_alloc = bpf_map_offload_map_alloc,
106 	.map_free = bpf_map_offload_map_free,
107 	.map_check_btf = map_check_no_btf,
108 };
109 
110 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
111 {
112 	const struct bpf_map_ops *ops;
113 	u32 type = attr->map_type;
114 	struct bpf_map *map;
115 	int err;
116 
117 	if (type >= ARRAY_SIZE(bpf_map_types))
118 		return ERR_PTR(-EINVAL);
119 	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
120 	ops = bpf_map_types[type];
121 	if (!ops)
122 		return ERR_PTR(-EINVAL);
123 
124 	if (ops->map_alloc_check) {
125 		err = ops->map_alloc_check(attr);
126 		if (err)
127 			return ERR_PTR(err);
128 	}
129 	if (attr->map_ifindex)
130 		ops = &bpf_map_offload_ops;
131 	map = ops->map_alloc(attr);
132 	if (IS_ERR(map))
133 		return map;
134 	map->ops = ops;
135 	map->map_type = type;
136 	return map;
137 }
138 
139 static void bpf_map_write_active_inc(struct bpf_map *map)
140 {
141 	atomic64_inc(&map->writecnt);
142 }
143 
144 static void bpf_map_write_active_dec(struct bpf_map *map)
145 {
146 	atomic64_dec(&map->writecnt);
147 }
148 
149 bool bpf_map_write_active(const struct bpf_map *map)
150 {
151 	return atomic64_read(&map->writecnt) != 0;
152 }
153 
154 static u32 bpf_map_value_size(const struct bpf_map *map)
155 {
156 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
157 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
158 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
159 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
160 		return round_up(map->value_size, 8) * num_possible_cpus();
161 	else if (IS_FD_MAP(map))
162 		return sizeof(u32);
163 	else
164 		return  map->value_size;
165 }
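
/*
 * Worked example (illustrative): for a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size == 12 on a machine with 4 possible CPUs, the syscall-visible
 * value size is round_up(12, 8) * 4 == 64 bytes, i.e. userspace must supply
 * one 8-byte-aligned value slot per possible CPU.
 */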
166 
167 static void maybe_wait_bpf_programs(struct bpf_map *map)
168 {
169 	/* Wait for any running BPF programs to complete so that
170 	 * userspace, when we return to it, knows that all programs
171 	 * that could be running use the new map value.
172 	 */
173 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
174 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
175 		synchronize_rcu();
176 }
177 
178 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
179 				void *value, __u64 flags)
180 {
181 	int err;
182 
183 	/* Need to create a kthread, thus must support schedule */
184 	if (bpf_map_is_dev_bound(map)) {
185 		return bpf_map_offload_update_elem(map, key, value, flags);
186 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
187 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
188 		return map->ops->map_update_elem(map, key, value, flags);
189 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
190 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
191 		return sock_map_update_elem_sys(map, key, value, flags);
192 	} else if (IS_FD_PROG_ARRAY(map)) {
193 		return bpf_fd_array_map_update_elem(map, f.file, key, value,
194 						    flags);
195 	}
196 
197 	bpf_disable_instrumentation();
198 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
199 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
200 		err = bpf_percpu_hash_update(map, key, value, flags);
201 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
202 		err = bpf_percpu_array_update(map, key, value, flags);
203 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
204 		err = bpf_percpu_cgroup_storage_update(map, key, value,
205 						       flags);
206 	} else if (IS_FD_ARRAY(map)) {
207 		rcu_read_lock();
208 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
209 						   flags);
210 		rcu_read_unlock();
211 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
212 		rcu_read_lock();
213 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
214 						  flags);
215 		rcu_read_unlock();
216 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
217 		/* rcu_read_lock() is not needed */
218 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
219 							 flags);
220 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
221 		   map->map_type == BPF_MAP_TYPE_STACK ||
222 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
223 		err = map->ops->map_push_elem(map, value, flags);
224 	} else {
225 		rcu_read_lock();
226 		err = map->ops->map_update_elem(map, key, value, flags);
227 		rcu_read_unlock();
228 	}
229 	bpf_enable_instrumentation();
230 	maybe_wait_bpf_programs(map);
231 
232 	return err;
233 }
234 
235 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
236 			      __u64 flags)
237 {
238 	void *ptr;
239 	int err;
240 
241 	if (bpf_map_is_dev_bound(map))
242 		return bpf_map_offload_lookup_elem(map, key, value);
243 
244 	bpf_disable_instrumentation();
245 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
246 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
247 		err = bpf_percpu_hash_copy(map, key, value);
248 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
249 		err = bpf_percpu_array_copy(map, key, value);
250 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
251 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
252 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
253 		err = bpf_stackmap_copy(map, key, value);
254 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
255 		err = bpf_fd_array_map_lookup_elem(map, key, value);
256 	} else if (IS_FD_HASH(map)) {
257 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
258 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
259 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
260 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
261 		   map->map_type == BPF_MAP_TYPE_STACK ||
262 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
263 		err = map->ops->map_peek_elem(map, value);
264 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
265 		/* struct_ops map requires directly updating "value" */
266 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
267 	} else {
268 		rcu_read_lock();
269 		if (map->ops->map_lookup_elem_sys_only)
270 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
271 		else
272 			ptr = map->ops->map_lookup_elem(map, key);
273 		if (IS_ERR(ptr)) {
274 			err = PTR_ERR(ptr);
275 		} else if (!ptr) {
276 			err = -ENOENT;
277 		} else {
278 			err = 0;
279 			if (flags & BPF_F_LOCK)
280 				/* lock 'ptr' and copy everything but lock */
281 				copy_map_value_locked(map, value, ptr, true);
282 			else
283 				copy_map_value(map, value, ptr);
284 			/* mask lock and timer, since value wasn't zero inited */
285 			check_and_init_map_value(map, value);
286 		}
287 		rcu_read_unlock();
288 	}
289 
290 	bpf_enable_instrumentation();
291 	maybe_wait_bpf_programs(map);
292 
293 	return err;
294 }
295 
296 /* Please do not use this function outside of the map creation path
297  * (e.g. in map update path) without taking care of setting the active
298  * memory cgroup (see at bpf_map_kmalloc_node() for example).
299  */
300 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
301 {
302 	/* We really just want to fail instead of triggering OOM killer
303 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
304 	 * which is used for lower order allocation requests.
305 	 *
306 	 * It has been observed that higher order allocation requests done by
307 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
308 	 * to reclaim memory from the page cache, thus we set
309 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
310 	 */
311 
312 	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
313 	unsigned int flags = 0;
314 	unsigned long align = 1;
315 	void *area;
316 
317 	if (size >= SIZE_MAX)
318 		return NULL;
319 
320 	/* kmalloc()'ed memory can't be mmap()'ed */
321 	if (mmapable) {
322 		BUG_ON(!PAGE_ALIGNED(size));
323 		align = SHMLBA;
324 		flags = VM_USERMAP;
325 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
326 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
327 				    numa_node);
328 		if (area != NULL)
329 			return area;
330 	}
331 
332 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
333 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
334 			flags, numa_node, __builtin_return_address(0));
335 }
336 
337 void *bpf_map_area_alloc(u64 size, int numa_node)
338 {
339 	return __bpf_map_area_alloc(size, numa_node, false);
340 }
341 
342 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
343 {
344 	return __bpf_map_area_alloc(size, numa_node, true);
345 }
346 
347 void bpf_map_area_free(void *area)
348 {
349 	kvfree(area);
350 }
351 
352 static u32 bpf_map_flags_retain_permanent(u32 flags)
353 {
354 	/* Some map creation flags are not tied to the map object but
355 	 * rather to the map fd instead, so they have no meaning upon
356 	 * map object inspection since multiple file descriptors with
357 	 * different (access) properties can exist here. Thus, given
358 	 * this has zero meaning for the map itself, let's clear these
359 	 * from here.
360 	 */
361 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
362 }
363 
364 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
365 {
366 	map->map_type = attr->map_type;
367 	map->key_size = attr->key_size;
368 	map->value_size = attr->value_size;
369 	map->max_entries = attr->max_entries;
370 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
371 	map->numa_node = bpf_map_attr_numa_node(attr);
372 	map->map_extra = attr->map_extra;
373 }
374 
375 static int bpf_map_alloc_id(struct bpf_map *map)
376 {
377 	int id;
378 
379 	idr_preload(GFP_KERNEL);
380 	spin_lock_bh(&map_idr_lock);
381 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
382 	if (id > 0)
383 		map->id = id;
384 	spin_unlock_bh(&map_idr_lock);
385 	idr_preload_end();
386 
387 	if (WARN_ON_ONCE(!id))
388 		return -ENOSPC;
389 
390 	return id > 0 ? 0 : id;
391 }
392 
393 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
394 {
395 	unsigned long flags;
396 
397 	/* Offloaded maps are removed from the IDR store when their device
398 	 * disappears - even if someone holds an fd to them they are unusable,
399 	 * the memory is gone, all ops will fail; they are simply waiting for
400 	 * refcnt to drop to be freed.
401 	 */
402 	if (!map->id)
403 		return;
404 
405 	if (do_idr_lock)
406 		spin_lock_irqsave(&map_idr_lock, flags);
407 	else
408 		__acquire(&map_idr_lock);
409 
410 	idr_remove(&map_idr, map->id);
411 	map->id = 0;
412 
413 	if (do_idr_lock)
414 		spin_unlock_irqrestore(&map_idr_lock, flags);
415 	else
416 		__release(&map_idr_lock);
417 }
418 
419 #ifdef CONFIG_MEMCG_KMEM
420 static void bpf_map_save_memcg(struct bpf_map *map)
421 {
422 	/* Currently if a map is created by a process belonging to the root
423 	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
424 	 * So we have to check map->objcg for being NULL each time it's
425 	 * being used.
426 	 */
427 	map->objcg = get_obj_cgroup_from_current();
428 }
429 
430 static void bpf_map_release_memcg(struct bpf_map *map)
431 {
432 	if (map->objcg)
433 		obj_cgroup_put(map->objcg);
434 }
435 
436 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
437 {
438 	if (map->objcg)
439 		return get_mem_cgroup_from_objcg(map->objcg);
440 
441 	return root_mem_cgroup;
442 }
443 
444 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
445 			   int node)
446 {
447 	struct mem_cgroup *memcg, *old_memcg;
448 	void *ptr;
449 
450 	memcg = bpf_map_get_memcg(map);
451 	old_memcg = set_active_memcg(memcg);
452 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
453 	set_active_memcg(old_memcg);
454 	mem_cgroup_put(memcg);
455 
456 	return ptr;
457 }
458 
459 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
460 {
461 	struct mem_cgroup *memcg, *old_memcg;
462 	void *ptr;
463 
464 	memcg = bpf_map_get_memcg(map);
465 	old_memcg = set_active_memcg(memcg);
466 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
467 	set_active_memcg(old_memcg);
468 	mem_cgroup_put(memcg);
469 
470 	return ptr;
471 }
472 
473 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
474 				    size_t align, gfp_t flags)
475 {
476 	struct mem_cgroup *memcg, *old_memcg;
477 	void __percpu *ptr;
478 
479 	memcg = bpf_map_get_memcg(map);
480 	old_memcg = set_active_memcg(memcg);
481 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
482 	set_active_memcg(old_memcg);
483 	mem_cgroup_put(memcg);
484 
485 	return ptr;
486 }
487 
488 #else
489 static void bpf_map_save_memcg(struct bpf_map *map)
490 {
491 }
492 
493 static void bpf_map_release_memcg(struct bpf_map *map)
494 {
495 }
496 #endif
497 
498 static int bpf_map_kptr_off_cmp(const void *a, const void *b)
499 {
500 	const struct bpf_map_value_off_desc *off_desc1 = a, *off_desc2 = b;
501 
502 	if (off_desc1->offset < off_desc2->offset)
503 		return -1;
504 	else if (off_desc1->offset > off_desc2->offset)
505 		return 1;
506 	return 0;
507 }
508 
509 struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset)
510 {
511 	/* Since members are iterated in btf_find_field in increasing order,
512 	 * offsets appended to kptr_off_tab are in increasing order, so we can
513 	 * do bsearch to find exact match.
514 	 */
515 	struct bpf_map_value_off *tab;
516 
517 	if (!map_value_has_kptrs(map))
518 		return NULL;
519 	tab = map->kptr_off_tab;
520 	return bsearch(&offset, tab->off, tab->nr_off, sizeof(tab->off[0]), bpf_map_kptr_off_cmp);
521 }
522 
523 void bpf_map_free_kptr_off_tab(struct bpf_map *map)
524 {
525 	struct bpf_map_value_off *tab = map->kptr_off_tab;
526 	int i;
527 
528 	if (!map_value_has_kptrs(map))
529 		return;
530 	for (i = 0; i < tab->nr_off; i++) {
531 		if (tab->off[i].kptr.module)
532 			module_put(tab->off[i].kptr.module);
533 		btf_put(tab->off[i].kptr.btf);
534 	}
535 	kfree(tab);
536 	map->kptr_off_tab = NULL;
537 }
538 
539 struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map)
540 {
541 	struct bpf_map_value_off *tab = map->kptr_off_tab, *new_tab;
542 	int size, i;
543 
544 	if (!map_value_has_kptrs(map))
545 		return ERR_PTR(-ENOENT);
546 	size = offsetof(struct bpf_map_value_off, off[tab->nr_off]);
547 	new_tab = kmemdup(tab, size, GFP_KERNEL | __GFP_NOWARN);
548 	if (!new_tab)
549 		return ERR_PTR(-ENOMEM);
550 	/* Do a deep copy of the kptr_off_tab */
551 	for (i = 0; i < tab->nr_off; i++) {
552 		btf_get(tab->off[i].kptr.btf);
553 		if (tab->off[i].kptr.module && !try_module_get(tab->off[i].kptr.module)) {
554 			while (i--) {
555 				if (tab->off[i].kptr.module)
556 					module_put(tab->off[i].kptr.module);
557 				btf_put(tab->off[i].kptr.btf);
558 			}
559 			kfree(new_tab);
560 			return ERR_PTR(-ENXIO);
561 		}
562 	}
563 	return new_tab;
564 }
565 
566 bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b)
567 {
568 	struct bpf_map_value_off *tab_a = map_a->kptr_off_tab, *tab_b = map_b->kptr_off_tab;
569 	bool a_has_kptr = map_value_has_kptrs(map_a), b_has_kptr = map_value_has_kptrs(map_b);
570 	int size;
571 
572 	if (!a_has_kptr && !b_has_kptr)
573 		return true;
574 	if (a_has_kptr != b_has_kptr)
575 		return false;
576 	if (tab_a->nr_off != tab_b->nr_off)
577 		return false;
578 	size = offsetof(struct bpf_map_value_off, off[tab_a->nr_off]);
579 	return !memcmp(tab_a, tab_b, size);
580 }
581 
582 /* Caller must ensure map_value_has_kptrs is true. Note that this function can
583  * be called on a map value while the map_value is visible to BPF programs, as
584  * it ensures the correct synchronization, and we already enforce the same using
585  * the bpf_kptr_xchg helper on the BPF program side for referenced kptrs.
586  */
587 void bpf_map_free_kptrs(struct bpf_map *map, void *map_value)
588 {
589 	struct bpf_map_value_off *tab = map->kptr_off_tab;
590 	unsigned long *btf_id_ptr;
591 	int i;
592 
593 	for (i = 0; i < tab->nr_off; i++) {
594 		struct bpf_map_value_off_desc *off_desc = &tab->off[i];
595 		unsigned long old_ptr;
596 
597 		btf_id_ptr = map_value + off_desc->offset;
598 		if (off_desc->type == BPF_KPTR_UNREF) {
599 			u64 *p = (u64 *)btf_id_ptr;
600 
601 			WRITE_ONCE(p, 0);
602 			continue;
603 		}
604 		old_ptr = xchg(btf_id_ptr, 0);
605 		off_desc->kptr.dtor((void *)old_ptr);
606 	}
607 }
608 
609 /* called from workqueue */
610 static void bpf_map_free_deferred(struct work_struct *work)
611 {
612 	struct bpf_map *map = container_of(work, struct bpf_map, work);
613 
614 	security_bpf_map_free(map);
615 	kfree(map->off_arr);
616 	bpf_map_release_memcg(map);
617 	/* implementation dependent freeing, map_free callback also does
618 	 * bpf_map_free_kptr_off_tab, if needed.
619 	 */
620 	map->ops->map_free(map);
621 }
622 
623 static void bpf_map_put_uref(struct bpf_map *map)
624 {
625 	if (atomic64_dec_and_test(&map->usercnt)) {
626 		if (map->ops->map_release_uref)
627 			map->ops->map_release_uref(map);
628 	}
629 }
630 
631 /* decrement map refcnt and schedule it for freeing via workqueue
632  * (underlying map implementation ops->map_free() might sleep)
633  */
634 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
635 {
636 	if (atomic64_dec_and_test(&map->refcnt)) {
637 		/* bpf_map_free_id() must be called first */
638 		bpf_map_free_id(map, do_idr_lock);
639 		btf_put(map->btf);
640 		INIT_WORK(&map->work, bpf_map_free_deferred);
641 		schedule_work(&map->work);
642 	}
643 }
644 
645 void bpf_map_put(struct bpf_map *map)
646 {
647 	__bpf_map_put(map, true);
648 }
649 EXPORT_SYMBOL_GPL(bpf_map_put);
650 
651 void bpf_map_put_with_uref(struct bpf_map *map)
652 {
653 	bpf_map_put_uref(map);
654 	bpf_map_put(map);
655 }
656 
657 static int bpf_map_release(struct inode *inode, struct file *filp)
658 {
659 	struct bpf_map *map = filp->private_data;
660 
661 	if (map->ops->map_release)
662 		map->ops->map_release(map, filp);
663 
664 	bpf_map_put_with_uref(map);
665 	return 0;
666 }
667 
668 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
669 {
670 	fmode_t mode = f.file->f_mode;
671 
672 	/* Our file permissions may have been overridden by global
673 	 * map permissions facing syscall side.
674 	 */
675 	if (READ_ONCE(map->frozen))
676 		mode &= ~FMODE_CAN_WRITE;
677 	return mode;
678 }
679 
680 #ifdef CONFIG_PROC_FS
681 /* Provides an approximation of the map's memory footprint.
682  * Used only to provide backward compatibility and display
683  * reasonable "memlock" info.
684  */
685 static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
686 {
687 	unsigned long size;
688 
689 	size = round_up(map->key_size + bpf_map_value_size(map), 8);
690 
691 	return round_up(map->max_entries * size, PAGE_SIZE);
692 }
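
/*
 * Worked example (illustrative): key_size == 4 and value_size == 8 round up
 * to a 16-byte element; with max_entries == 1000 the reported "memlock"
 * value is round_up(16000, PAGE_SIZE) == 16384 on a 4 KiB page system.
 * This is only an approximation - real per-map overhead is implementation
 * specific.
 */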
693 
694 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
695 {
696 	struct bpf_map *map = filp->private_data;
697 	u32 type = 0, jited = 0;
698 
699 	if (map_type_contains_progs(map)) {
700 		spin_lock(&map->owner.lock);
701 		type  = map->owner.type;
702 		jited = map->owner.jited;
703 		spin_unlock(&map->owner.lock);
704 	}
705 
706 	seq_printf(m,
707 		   "map_type:\t%u\n"
708 		   "key_size:\t%u\n"
709 		   "value_size:\t%u\n"
710 		   "max_entries:\t%u\n"
711 		   "map_flags:\t%#x\n"
712 		   "map_extra:\t%#llx\n"
713 		   "memlock:\t%lu\n"
714 		   "map_id:\t%u\n"
715 		   "frozen:\t%u\n",
716 		   map->map_type,
717 		   map->key_size,
718 		   map->value_size,
719 		   map->max_entries,
720 		   map->map_flags,
721 		   (unsigned long long)map->map_extra,
722 		   bpf_map_memory_footprint(map),
723 		   map->id,
724 		   READ_ONCE(map->frozen));
725 	if (type) {
726 		seq_printf(m, "owner_prog_type:\t%u\n", type);
727 		seq_printf(m, "owner_jited:\t%u\n", jited);
728 	}
729 }
730 #endif
731 
732 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
733 			      loff_t *ppos)
734 {
735 	/* We need this handler such that alloc_file() enables
736 	 * f_mode with FMODE_CAN_READ.
737 	 */
738 	return -EINVAL;
739 }
740 
741 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
742 			       size_t siz, loff_t *ppos)
743 {
744 	/* We need this handler such that alloc_file() enables
745 	 * f_mode with FMODE_CAN_WRITE.
746 	 */
747 	return -EINVAL;
748 }
749 
750 /* called for any extra memory-mapped regions (except initial) */
751 static void bpf_map_mmap_open(struct vm_area_struct *vma)
752 {
753 	struct bpf_map *map = vma->vm_file->private_data;
754 
755 	if (vma->vm_flags & VM_MAYWRITE)
756 		bpf_map_write_active_inc(map);
757 }
758 
759 /* called for all unmapped memory regions (including the initial one) */
760 static void bpf_map_mmap_close(struct vm_area_struct *vma)
761 {
762 	struct bpf_map *map = vma->vm_file->private_data;
763 
764 	if (vma->vm_flags & VM_MAYWRITE)
765 		bpf_map_write_active_dec(map);
766 }
767 
768 static const struct vm_operations_struct bpf_map_default_vmops = {
769 	.open		= bpf_map_mmap_open,
770 	.close		= bpf_map_mmap_close,
771 };
772 
773 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
774 {
775 	struct bpf_map *map = filp->private_data;
776 	int err;
777 
778 	if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
779 	    map_value_has_timer(map) || map_value_has_kptrs(map))
780 		return -ENOTSUPP;
781 
782 	if (!(vma->vm_flags & VM_SHARED))
783 		return -EINVAL;
784 
785 	mutex_lock(&map->freeze_mutex);
786 
787 	if (vma->vm_flags & VM_WRITE) {
788 		if (map->frozen) {
789 			err = -EPERM;
790 			goto out;
791 		}
792 		/* map is meant to be read-only, so do not allow mapping as
793 		 * writable, because it's possible to leak a writable page
794 		 * reference and allows user-space to still modify it after
795 		 * freezing, while verifier will assume contents do not change
796 		 */
797 		if (map->map_flags & BPF_F_RDONLY_PROG) {
798 			err = -EACCES;
799 			goto out;
800 		}
801 	}
802 
803 	/* set default open/close callbacks */
804 	vma->vm_ops = &bpf_map_default_vmops;
805 	vma->vm_private_data = map;
806 	vma->vm_flags &= ~VM_MAYEXEC;
807 	if (!(vma->vm_flags & VM_WRITE))
808 		/* disallow re-mapping with PROT_WRITE */
809 		vma->vm_flags &= ~VM_MAYWRITE;
810 
811 	err = map->ops->map_mmap(map, vma);
812 	if (err)
813 		goto out;
814 
815 	if (vma->vm_flags & VM_MAYWRITE)
816 		bpf_map_write_active_inc(map);
817 out:
818 	mutex_unlock(&map->freeze_mutex);
819 	return err;
820 }
821 
822 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
823 {
824 	struct bpf_map *map = filp->private_data;
825 
826 	if (map->ops->map_poll)
827 		return map->ops->map_poll(map, filp, pts);
828 
829 	return EPOLLERR;
830 }
831 
832 const struct file_operations bpf_map_fops = {
833 #ifdef CONFIG_PROC_FS
834 	.show_fdinfo	= bpf_map_show_fdinfo,
835 #endif
836 	.release	= bpf_map_release,
837 	.read		= bpf_dummy_read,
838 	.write		= bpf_dummy_write,
839 	.mmap		= bpf_map_mmap,
840 	.poll		= bpf_map_poll,
841 };
842 
843 int bpf_map_new_fd(struct bpf_map *map, int flags)
844 {
845 	int ret;
846 
847 	ret = security_bpf_map(map, OPEN_FMODE(flags));
848 	if (ret < 0)
849 		return ret;
850 
851 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
852 				flags | O_CLOEXEC);
853 }
854 
855 int bpf_get_file_flag(int flags)
856 {
857 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
858 		return -EINVAL;
859 	if (flags & BPF_F_RDONLY)
860 		return O_RDONLY;
861 	if (flags & BPF_F_WRONLY)
862 		return O_WRONLY;
863 	return O_RDWR;
864 }
865 
866 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
867 #define CHECK_ATTR(CMD) \
868 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
869 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
870 		   sizeof(*attr) - \
871 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
872 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
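
/*
 * Illustration (not part of the original file): with
 * BPF_MAP_CREATE_LAST_FIELD defined later in this file as map_extra,
 * CHECK_ATTR(BPF_MAP_CREATE) expands roughly to:
 *
 *	memchr_inv((void *)&attr->map_extra + sizeof(attr->map_extra), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_extra) -
 *		   sizeof(attr->map_extra)) != NULL
 *
 * i.e. it evaluates to true (and the command returns -EINVAL) whenever any
 * byte of 'union bpf_attr' past the command's last used field is non-zero.
 */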
873 
874 /* dst and src must have at least "size" bytes.
875  * Returns the string length on success and < 0 on error.
876  */
877 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
878 {
879 	const char *end = src + size;
880 	const char *orig_src = src;
881 
882 	memset(dst, 0, size);
883 	/* Copy all isalnum(), '_' and '.' chars. */
884 	while (src < end && *src) {
885 		if (!isalnum(*src) &&
886 		    *src != '_' && *src != '.')
887 			return -EINVAL;
888 		*dst++ = *src++;
889 	}
890 
891 	/* No '\0' found in "size" number of bytes */
892 	if (src == end)
893 		return -EINVAL;
894 
895 	return src - orig_src;
896 }
897 
898 int map_check_no_btf(const struct bpf_map *map,
899 		     const struct btf *btf,
900 		     const struct btf_type *key_type,
901 		     const struct btf_type *value_type)
902 {
903 	return -ENOTSUPP;
904 }
905 
906 static int map_off_arr_cmp(const void *_a, const void *_b, const void *priv)
907 {
908 	const u32 a = *(const u32 *)_a;
909 	const u32 b = *(const u32 *)_b;
910 
911 	if (a < b)
912 		return -1;
913 	else if (a > b)
914 		return 1;
915 	return 0;
916 }
917 
918 static void map_off_arr_swap(void *_a, void *_b, int size, const void *priv)
919 {
920 	struct bpf_map *map = (struct bpf_map *)priv;
921 	u32 *off_base = map->off_arr->field_off;
922 	u32 *a = _a, *b = _b;
923 	u8 *sz_a, *sz_b;
924 
925 	sz_a = map->off_arr->field_sz + (a - off_base);
926 	sz_b = map->off_arr->field_sz + (b - off_base);
927 
928 	swap(*a, *b);
929 	swap(*sz_a, *sz_b);
930 }
931 
932 static int bpf_map_alloc_off_arr(struct bpf_map *map)
933 {
934 	bool has_spin_lock = map_value_has_spin_lock(map);
935 	bool has_timer = map_value_has_timer(map);
936 	bool has_kptrs = map_value_has_kptrs(map);
937 	struct bpf_map_off_arr *off_arr;
938 	u32 i;
939 
940 	if (!has_spin_lock && !has_timer && !has_kptrs) {
941 		map->off_arr = NULL;
942 		return 0;
943 	}
944 
945 	off_arr = kmalloc(sizeof(*map->off_arr), GFP_KERNEL | __GFP_NOWARN);
946 	if (!off_arr)
947 		return -ENOMEM;
948 	map->off_arr = off_arr;
949 
950 	off_arr->cnt = 0;
951 	if (has_spin_lock) {
952 		i = off_arr->cnt;
953 
954 		off_arr->field_off[i] = map->spin_lock_off;
955 		off_arr->field_sz[i] = sizeof(struct bpf_spin_lock);
956 		off_arr->cnt++;
957 	}
958 	if (has_timer) {
959 		i = off_arr->cnt;
960 
961 		off_arr->field_off[i] = map->timer_off;
962 		off_arr->field_sz[i] = sizeof(struct bpf_timer);
963 		off_arr->cnt++;
964 	}
965 	if (has_kptrs) {
966 		struct bpf_map_value_off *tab = map->kptr_off_tab;
967 		u32 *off = &off_arr->field_off[off_arr->cnt];
968 		u8 *sz = &off_arr->field_sz[off_arr->cnt];
969 
970 		for (i = 0; i < tab->nr_off; i++) {
971 			*off++ = tab->off[i].offset;
972 			*sz++ = sizeof(u64);
973 		}
974 		off_arr->cnt += tab->nr_off;
975 	}
976 
977 	if (off_arr->cnt == 1)
978 		return 0;
979 	sort_r(off_arr->field_off, off_arr->cnt, sizeof(off_arr->field_off[0]),
980 	       map_off_arr_cmp, map_off_arr_swap, map);
981 	return 0;
982 }
983 
984 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
985 			 u32 btf_key_id, u32 btf_value_id)
986 {
987 	const struct btf_type *key_type, *value_type;
988 	u32 key_size, value_size;
989 	int ret = 0;
990 
991 	/* Some maps allow key to be unspecified. */
992 	if (btf_key_id) {
993 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
994 		if (!key_type || key_size != map->key_size)
995 			return -EINVAL;
996 	} else {
997 		key_type = btf_type_by_id(btf, 0);
998 		if (!map->ops->map_check_btf)
999 			return -EINVAL;
1000 	}
1001 
1002 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1003 	if (!value_type || value_size != map->value_size)
1004 		return -EINVAL;
1005 
1006 	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
1007 
1008 	if (map_value_has_spin_lock(map)) {
1009 		if (map->map_flags & BPF_F_RDONLY_PROG)
1010 			return -EACCES;
1011 		if (map->map_type != BPF_MAP_TYPE_HASH &&
1012 		    map->map_type != BPF_MAP_TYPE_ARRAY &&
1013 		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1014 		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1015 		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1016 		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
1017 			return -ENOTSUPP;
1018 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
1019 		    map->value_size) {
1020 			WARN_ONCE(1,
1021 				  "verifier bug spin_lock_off %d value_size %d\n",
1022 				  map->spin_lock_off, map->value_size);
1023 			return -EFAULT;
1024 		}
1025 	}
1026 
1027 	map->timer_off = btf_find_timer(btf, value_type);
1028 	if (map_value_has_timer(map)) {
1029 		if (map->map_flags & BPF_F_RDONLY_PROG)
1030 			return -EACCES;
1031 		if (map->map_type != BPF_MAP_TYPE_HASH &&
1032 		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1033 		    map->map_type != BPF_MAP_TYPE_ARRAY)
1034 			return -EOPNOTSUPP;
1035 	}
1036 
1037 	map->kptr_off_tab = btf_parse_kptrs(btf, value_type);
1038 	if (map_value_has_kptrs(map)) {
1039 		if (!bpf_capable()) {
1040 			ret = -EPERM;
1041 			goto free_map_tab;
1042 		}
1043 		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1044 			ret = -EACCES;
1045 			goto free_map_tab;
1046 		}
1047 		if (map->map_type != BPF_MAP_TYPE_HASH &&
1048 		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1049 		    map->map_type != BPF_MAP_TYPE_ARRAY) {
1050 			ret = -EOPNOTSUPP;
1051 			goto free_map_tab;
1052 		}
1053 	}
1054 
1055 	if (map->ops->map_check_btf) {
1056 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1057 		if (ret < 0)
1058 			goto free_map_tab;
1059 	}
1060 
1061 	return ret;
1062 free_map_tab:
1063 	bpf_map_free_kptr_off_tab(map);
1064 	return ret;
1065 }
1066 
1067 #define BPF_MAP_CREATE_LAST_FIELD map_extra
1068 /* called via syscall */
1069 static int map_create(union bpf_attr *attr)
1070 {
1071 	int numa_node = bpf_map_attr_numa_node(attr);
1072 	struct bpf_map *map;
1073 	int f_flags;
1074 	int err;
1075 
1076 	err = CHECK_ATTR(BPF_MAP_CREATE);
1077 	if (err)
1078 		return -EINVAL;
1079 
1080 	if (attr->btf_vmlinux_value_type_id) {
1081 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1082 		    attr->btf_key_type_id || attr->btf_value_type_id)
1083 			return -EINVAL;
1084 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1085 		return -EINVAL;
1086 	}
1087 
1088 	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1089 	    attr->map_extra != 0)
1090 		return -EINVAL;
1091 
1092 	f_flags = bpf_get_file_flag(attr->map_flags);
1093 	if (f_flags < 0)
1094 		return f_flags;
1095 
1096 	if (numa_node != NUMA_NO_NODE &&
1097 	    ((unsigned int)numa_node >= nr_node_ids ||
1098 	     !node_online(numa_node)))
1099 		return -EINVAL;
1100 
1101 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1102 	map = find_and_alloc_map(attr);
1103 	if (IS_ERR(map))
1104 		return PTR_ERR(map);
1105 
1106 	err = bpf_obj_name_cpy(map->name, attr->map_name,
1107 			       sizeof(attr->map_name));
1108 	if (err < 0)
1109 		goto free_map;
1110 
1111 	atomic64_set(&map->refcnt, 1);
1112 	atomic64_set(&map->usercnt, 1);
1113 	mutex_init(&map->freeze_mutex);
1114 	spin_lock_init(&map->owner.lock);
1115 
1116 	map->spin_lock_off = -EINVAL;
1117 	map->timer_off = -EINVAL;
1118 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
1119 	    /* Even if the map's value is a kernel struct,
1120 	     * the bpf_prog.o must have BTF to begin with
1121 	     * in order to figure out the corresponding
1122 	     * kernel counterpart.  Thus, attr->btf_fd also
1123 	     * has to be valid.
1124 	     */
1125 	    attr->btf_vmlinux_value_type_id) {
1126 		struct btf *btf;
1127 
1128 		btf = btf_get_by_fd(attr->btf_fd);
1129 		if (IS_ERR(btf)) {
1130 			err = PTR_ERR(btf);
1131 			goto free_map;
1132 		}
1133 		if (btf_is_kernel(btf)) {
1134 			btf_put(btf);
1135 			err = -EACCES;
1136 			goto free_map;
1137 		}
1138 		map->btf = btf;
1139 
1140 		if (attr->btf_value_type_id) {
1141 			err = map_check_btf(map, btf, attr->btf_key_type_id,
1142 					    attr->btf_value_type_id);
1143 			if (err)
1144 				goto free_map;
1145 		}
1146 
1147 		map->btf_key_type_id = attr->btf_key_type_id;
1148 		map->btf_value_type_id = attr->btf_value_type_id;
1149 		map->btf_vmlinux_value_type_id =
1150 			attr->btf_vmlinux_value_type_id;
1151 	}
1152 
1153 	err = bpf_map_alloc_off_arr(map);
1154 	if (err)
1155 		goto free_map;
1156 
1157 	err = security_bpf_map_alloc(map);
1158 	if (err)
1159 		goto free_map_off_arr;
1160 
1161 	err = bpf_map_alloc_id(map);
1162 	if (err)
1163 		goto free_map_sec;
1164 
1165 	bpf_map_save_memcg(map);
1166 
1167 	err = bpf_map_new_fd(map, f_flags);
1168 	if (err < 0) {
1169 		/* failed to allocate fd.
1170 		 * bpf_map_put_with_uref() is needed because the above
1171 		 * bpf_map_alloc_id() has published the map
1172 		 * to the userspace and the userspace may
1173 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1174 		 */
1175 		bpf_map_put_with_uref(map);
1176 		return err;
1177 	}
1178 
1179 	return err;
1180 
1181 free_map_sec:
1182 	security_bpf_map_free(map);
1183 free_map_off_arr:
1184 	kfree(map->off_arr);
1185 free_map:
1186 	btf_put(map->btf);
1187 	map->ops->map_free(map);
1188 	return err;
1189 }
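
/*
 * Hypothetical userspace sketch (not part of the original file) showing how
 * BPF_MAP_CREATE above is typically driven; the field values are examples
 * only:
 *
 *	union bpf_attr attr;
 *	int map_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_type    = BPF_MAP_TYPE_HASH;
 *	attr.key_size    = sizeof(__u32);
 *	attr.value_size  = sizeof(__u64);
 *	attr.max_entries = 1024;
 *	strncpy(attr.map_name, "example_map", sizeof(attr.map_name) - 1);
 *
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// map_fd < 0: errno carries the error from map_create(), e.g. EINVAL
 *
 * On success the new fd holds one refcnt/usercnt pair on the map; the map is
 * freed once all fds and program references are gone.
 */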
1190 
1191 /* if error is returned, fd is released.
1192  * On success caller should complete fd access with matching fdput()
1193  */
1194 struct bpf_map *__bpf_map_get(struct fd f)
1195 {
1196 	if (!f.file)
1197 		return ERR_PTR(-EBADF);
1198 	if (f.file->f_op != &bpf_map_fops) {
1199 		fdput(f);
1200 		return ERR_PTR(-EINVAL);
1201 	}
1202 
1203 	return f.file->private_data;
1204 }
1205 
1206 void bpf_map_inc(struct bpf_map *map)
1207 {
1208 	atomic64_inc(&map->refcnt);
1209 }
1210 EXPORT_SYMBOL_GPL(bpf_map_inc);
1211 
1212 void bpf_map_inc_with_uref(struct bpf_map *map)
1213 {
1214 	atomic64_inc(&map->refcnt);
1215 	atomic64_inc(&map->usercnt);
1216 }
1217 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1218 
1219 struct bpf_map *bpf_map_get(u32 ufd)
1220 {
1221 	struct fd f = fdget(ufd);
1222 	struct bpf_map *map;
1223 
1224 	map = __bpf_map_get(f);
1225 	if (IS_ERR(map))
1226 		return map;
1227 
1228 	bpf_map_inc(map);
1229 	fdput(f);
1230 
1231 	return map;
1232 }
1233 EXPORT_SYMBOL(bpf_map_get);
1234 
1235 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1236 {
1237 	struct fd f = fdget(ufd);
1238 	struct bpf_map *map;
1239 
1240 	map = __bpf_map_get(f);
1241 	if (IS_ERR(map))
1242 		return map;
1243 
1244 	bpf_map_inc_with_uref(map);
1245 	fdput(f);
1246 
1247 	return map;
1248 }
1249 
1250 /* map_idr_lock should have been held */
1251 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1252 {
1253 	int refold;
1254 
1255 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1256 	if (!refold)
1257 		return ERR_PTR(-ENOENT);
1258 	if (uref)
1259 		atomic64_inc(&map->usercnt);
1260 
1261 	return map;
1262 }
1263 
1264 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1265 {
1266 	spin_lock_bh(&map_idr_lock);
1267 	map = __bpf_map_inc_not_zero(map, false);
1268 	spin_unlock_bh(&map_idr_lock);
1269 
1270 	return map;
1271 }
1272 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1273 
1274 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1275 {
1276 	return -ENOTSUPP;
1277 }
1278 
1279 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1280 {
1281 	if (key_size)
1282 		return vmemdup_user(ukey, key_size);
1283 
1284 	if (ukey)
1285 		return ERR_PTR(-EINVAL);
1286 
1287 	return NULL;
1288 }
1289 
1290 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1291 {
1292 	if (key_size)
1293 		return kvmemdup_bpfptr(ukey, key_size);
1294 
1295 	if (!bpfptr_is_null(ukey))
1296 		return ERR_PTR(-EINVAL);
1297 
1298 	return NULL;
1299 }
1300 
1301 /* last field in 'union bpf_attr' used by this command */
1302 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1303 
1304 static int map_lookup_elem(union bpf_attr *attr)
1305 {
1306 	void __user *ukey = u64_to_user_ptr(attr->key);
1307 	void __user *uvalue = u64_to_user_ptr(attr->value);
1308 	int ufd = attr->map_fd;
1309 	struct bpf_map *map;
1310 	void *key, *value;
1311 	u32 value_size;
1312 	struct fd f;
1313 	int err;
1314 
1315 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1316 		return -EINVAL;
1317 
1318 	if (attr->flags & ~BPF_F_LOCK)
1319 		return -EINVAL;
1320 
1321 	f = fdget(ufd);
1322 	map = __bpf_map_get(f);
1323 	if (IS_ERR(map))
1324 		return PTR_ERR(map);
1325 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1326 		err = -EPERM;
1327 		goto err_put;
1328 	}
1329 
1330 	if ((attr->flags & BPF_F_LOCK) &&
1331 	    !map_value_has_spin_lock(map)) {
1332 		err = -EINVAL;
1333 		goto err_put;
1334 	}
1335 
1336 	key = __bpf_copy_key(ukey, map->key_size);
1337 	if (IS_ERR(key)) {
1338 		err = PTR_ERR(key);
1339 		goto err_put;
1340 	}
1341 
1342 	value_size = bpf_map_value_size(map);
1343 
1344 	err = -ENOMEM;
1345 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1346 	if (!value)
1347 		goto free_key;
1348 
1349 	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1350 		if (copy_from_user(value, uvalue, value_size))
1351 			err = -EFAULT;
1352 		else
1353 			err = bpf_map_copy_value(map, key, value, attr->flags);
1354 		goto free_value;
1355 	}
1356 
1357 	err = bpf_map_copy_value(map, key, value, attr->flags);
1358 	if (err)
1359 		goto free_value;
1360 
1361 	err = -EFAULT;
1362 	if (copy_to_user(uvalue, value, value_size) != 0)
1363 		goto free_value;
1364 
1365 	err = 0;
1366 
1367 free_value:
1368 	kvfree(value);
1369 free_key:
1370 	kvfree(key);
1371 err_put:
1372 	fdput(f);
1373 	return err;
1374 }
1375 
1376 
1377 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1378 
1379 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1380 {
1381 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1382 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1383 	int ufd = attr->map_fd;
1384 	struct bpf_map *map;
1385 	void *key, *value;
1386 	u32 value_size;
1387 	struct fd f;
1388 	int err;
1389 
1390 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1391 		return -EINVAL;
1392 
1393 	f = fdget(ufd);
1394 	map = __bpf_map_get(f);
1395 	if (IS_ERR(map))
1396 		return PTR_ERR(map);
1397 	bpf_map_write_active_inc(map);
1398 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1399 		err = -EPERM;
1400 		goto err_put;
1401 	}
1402 
1403 	if ((attr->flags & BPF_F_LOCK) &&
1404 	    !map_value_has_spin_lock(map)) {
1405 		err = -EINVAL;
1406 		goto err_put;
1407 	}
1408 
1409 	key = ___bpf_copy_key(ukey, map->key_size);
1410 	if (IS_ERR(key)) {
1411 		err = PTR_ERR(key);
1412 		goto err_put;
1413 	}
1414 
1415 	value_size = bpf_map_value_size(map);
1416 
1417 	err = -ENOMEM;
1418 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1419 	if (!value)
1420 		goto free_key;
1421 
1422 	err = -EFAULT;
1423 	if (copy_from_bpfptr(value, uvalue, value_size) != 0)
1424 		goto free_value;
1425 
1426 	err = bpf_map_update_value(map, f, key, value, attr->flags);
1427 
1428 free_value:
1429 	kvfree(value);
1430 free_key:
1431 	kvfree(key);
1432 err_put:
1433 	bpf_map_write_active_dec(map);
1434 	fdput(f);
1435 	return err;
1436 }
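
/*
 * Hypothetical userspace sketch (not part of the original file) exercising
 * the element update and lookup commands handled above:
 *
 *	__u32 key = 1;
 *	__u64 value = 42, out;
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = map_fd;			// fd from BPF_MAP_CREATE
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_ANY;
 *	syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *
 *	attr.value  = (__u64)(unsigned long)&out;
 *	attr.flags  = 0;			// or BPF_F_LOCK for spin_lock maps
 *	syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * For per-cpu map types the value buffer must instead hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, matching
 * bpf_map_value_size() earlier in this file.
 */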
1437 
1438 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1439 
1440 static int map_delete_elem(union bpf_attr *attr)
1441 {
1442 	void __user *ukey = u64_to_user_ptr(attr->key);
1443 	int ufd = attr->map_fd;
1444 	struct bpf_map *map;
1445 	struct fd f;
1446 	void *key;
1447 	int err;
1448 
1449 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1450 		return -EINVAL;
1451 
1452 	f = fdget(ufd);
1453 	map = __bpf_map_get(f);
1454 	if (IS_ERR(map))
1455 		return PTR_ERR(map);
1456 	bpf_map_write_active_inc(map);
1457 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1458 		err = -EPERM;
1459 		goto err_put;
1460 	}
1461 
1462 	key = __bpf_copy_key(ukey, map->key_size);
1463 	if (IS_ERR(key)) {
1464 		err = PTR_ERR(key);
1465 		goto err_put;
1466 	}
1467 
1468 	if (bpf_map_is_dev_bound(map)) {
1469 		err = bpf_map_offload_delete_elem(map, key);
1470 		goto out;
1471 	} else if (IS_FD_PROG_ARRAY(map) ||
1472 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1473 		/* These maps require sleepable context */
1474 		err = map->ops->map_delete_elem(map, key);
1475 		goto out;
1476 	}
1477 
1478 	bpf_disable_instrumentation();
1479 	rcu_read_lock();
1480 	err = map->ops->map_delete_elem(map, key);
1481 	rcu_read_unlock();
1482 	bpf_enable_instrumentation();
1483 	maybe_wait_bpf_programs(map);
1484 out:
1485 	kvfree(key);
1486 err_put:
1487 	bpf_map_write_active_dec(map);
1488 	fdput(f);
1489 	return err;
1490 }
1491 
1492 /* last field in 'union bpf_attr' used by this command */
1493 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1494 
1495 static int map_get_next_key(union bpf_attr *attr)
1496 {
1497 	void __user *ukey = u64_to_user_ptr(attr->key);
1498 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1499 	int ufd = attr->map_fd;
1500 	struct bpf_map *map;
1501 	void *key, *next_key;
1502 	struct fd f;
1503 	int err;
1504 
1505 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1506 		return -EINVAL;
1507 
1508 	f = fdget(ufd);
1509 	map = __bpf_map_get(f);
1510 	if (IS_ERR(map))
1511 		return PTR_ERR(map);
1512 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1513 		err = -EPERM;
1514 		goto err_put;
1515 	}
1516 
1517 	if (ukey) {
1518 		key = __bpf_copy_key(ukey, map->key_size);
1519 		if (IS_ERR(key)) {
1520 			err = PTR_ERR(key);
1521 			goto err_put;
1522 		}
1523 	} else {
1524 		key = NULL;
1525 	}
1526 
1527 	err = -ENOMEM;
1528 	next_key = kvmalloc(map->key_size, GFP_USER);
1529 	if (!next_key)
1530 		goto free_key;
1531 
1532 	if (bpf_map_is_dev_bound(map)) {
1533 		err = bpf_map_offload_get_next_key(map, key, next_key);
1534 		goto out;
1535 	}
1536 
1537 	rcu_read_lock();
1538 	err = map->ops->map_get_next_key(map, key, next_key);
1539 	rcu_read_unlock();
1540 out:
1541 	if (err)
1542 		goto free_next_key;
1543 
1544 	err = -EFAULT;
1545 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1546 		goto free_next_key;
1547 
1548 	err = 0;
1549 
1550 free_next_key:
1551 	kvfree(next_key);
1552 free_key:
1553 	kvfree(key);
1554 err_put:
1555 	fdput(f);
1556 	return err;
1557 }
1558 
1559 int generic_map_delete_batch(struct bpf_map *map,
1560 			     const union bpf_attr *attr,
1561 			     union bpf_attr __user *uattr)
1562 {
1563 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1564 	u32 cp, max_count;
1565 	int err = 0;
1566 	void *key;
1567 
1568 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1569 		return -EINVAL;
1570 
1571 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1572 	    !map_value_has_spin_lock(map)) {
1573 		return -EINVAL;
1574 	}
1575 
1576 	max_count = attr->batch.count;
1577 	if (!max_count)
1578 		return 0;
1579 
1580 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1581 	if (!key)
1582 		return -ENOMEM;
1583 
1584 	for (cp = 0; cp < max_count; cp++) {
1585 		err = -EFAULT;
1586 		if (copy_from_user(key, keys + cp * map->key_size,
1587 				   map->key_size))
1588 			break;
1589 
1590 		if (bpf_map_is_dev_bound(map)) {
1591 			err = bpf_map_offload_delete_elem(map, key);
1592 			break;
1593 		}
1594 
1595 		bpf_disable_instrumentation();
1596 		rcu_read_lock();
1597 		err = map->ops->map_delete_elem(map, key);
1598 		rcu_read_unlock();
1599 		bpf_enable_instrumentation();
1600 		if (err)
1601 			break;
1602 		cond_resched();
1603 	}
1604 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1605 		err = -EFAULT;
1606 
1607 	kvfree(key);
1608 
1609 	maybe_wait_bpf_programs(map);
1610 	return err;
1611 }
1612 
1613 int generic_map_update_batch(struct bpf_map *map,
1614 			     const union bpf_attr *attr,
1615 			     union bpf_attr __user *uattr)
1616 {
1617 	void __user *values = u64_to_user_ptr(attr->batch.values);
1618 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1619 	u32 value_size, cp, max_count;
1620 	int ufd = attr->batch.map_fd;
1621 	void *key, *value;
1622 	struct fd f;
1623 	int err = 0;
1624 
1625 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1626 		return -EINVAL;
1627 
1628 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1629 	    !map_value_has_spin_lock(map)) {
1630 		return -EINVAL;
1631 	}
1632 
1633 	value_size = bpf_map_value_size(map);
1634 
1635 	max_count = attr->batch.count;
1636 	if (!max_count)
1637 		return 0;
1638 
1639 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1640 	if (!key)
1641 		return -ENOMEM;
1642 
1643 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1644 	if (!value) {
1645 		kvfree(key);
1646 		return -ENOMEM;
1647 	}
1648 
1649 	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
1650 	for (cp = 0; cp < max_count; cp++) {
1651 		err = -EFAULT;
1652 		if (copy_from_user(key, keys + cp * map->key_size,
1653 		    map->key_size) ||
1654 		    copy_from_user(value, values + cp * value_size, value_size))
1655 			break;
1656 
1657 		err = bpf_map_update_value(map, f, key, value,
1658 					   attr->batch.elem_flags);
1659 
1660 		if (err)
1661 			break;
1662 		cond_resched();
1663 	}
1664 
1665 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1666 		err = -EFAULT;
1667 
1668 	kvfree(value);
1669 	kvfree(key);
1670 	fdput(f);
1671 	return err;
1672 }
1673 
1674 #define MAP_LOOKUP_RETRIES 3
1675 
1676 int generic_map_lookup_batch(struct bpf_map *map,
1677 				    const union bpf_attr *attr,
1678 				    union bpf_attr __user *uattr)
1679 {
1680 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1681 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1682 	void __user *values = u64_to_user_ptr(attr->batch.values);
1683 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1684 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1685 	int err, retry = MAP_LOOKUP_RETRIES;
1686 	u32 value_size, cp, max_count;
1687 
1688 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1689 		return -EINVAL;
1690 
1691 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1692 	    !map_value_has_spin_lock(map))
1693 		return -EINVAL;
1694 
1695 	value_size = bpf_map_value_size(map);
1696 
1697 	max_count = attr->batch.count;
1698 	if (!max_count)
1699 		return 0;
1700 
1701 	if (put_user(0, &uattr->batch.count))
1702 		return -EFAULT;
1703 
1704 	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1705 	if (!buf_prevkey)
1706 		return -ENOMEM;
1707 
1708 	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1709 	if (!buf) {
1710 		kvfree(buf_prevkey);
1711 		return -ENOMEM;
1712 	}
1713 
1714 	err = -EFAULT;
1715 	prev_key = NULL;
1716 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1717 		goto free_buf;
1718 	key = buf;
1719 	value = key + map->key_size;
1720 	if (ubatch)
1721 		prev_key = buf_prevkey;
1722 
1723 	for (cp = 0; cp < max_count;) {
1724 		rcu_read_lock();
1725 		err = map->ops->map_get_next_key(map, prev_key, key);
1726 		rcu_read_unlock();
1727 		if (err)
1728 			break;
1729 		err = bpf_map_copy_value(map, key, value,
1730 					 attr->batch.elem_flags);
1731 
1732 		if (err == -ENOENT) {
1733 			if (retry) {
1734 				retry--;
1735 				continue;
1736 			}
1737 			err = -EINTR;
1738 			break;
1739 		}
1740 
1741 		if (err)
1742 			goto free_buf;
1743 
1744 		if (copy_to_user(keys + cp * map->key_size, key,
1745 				 map->key_size)) {
1746 			err = -EFAULT;
1747 			goto free_buf;
1748 		}
1749 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1750 			err = -EFAULT;
1751 			goto free_buf;
1752 		}
1753 
1754 		if (!prev_key)
1755 			prev_key = buf_prevkey;
1756 
1757 		swap(prev_key, key);
1758 		retry = MAP_LOOKUP_RETRIES;
1759 		cp++;
1760 		cond_resched();
1761 	}
1762 
1763 	if (err == -EFAULT)
1764 		goto free_buf;
1765 
1766 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1767 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1768 		err = -EFAULT;
1769 
1770 free_buf:
1771 	kvfree(buf_prevkey);
1772 	kvfree(buf);
1773 	return err;
1774 }
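
/*
 * Hypothetical userspace sketch (not part of the original file): the batch
 * lookup above is driven through the attr.batch fields.  Leaving in_batch
 * zeroed starts iteration from the beginning, out_batch receives the
 * position to resume from, and count is updated to the number of elements
 * actually copied.  'keys', 'values' and 'next_pos' are caller-provided
 * buffers:
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.keys      = (__u64)(unsigned long)keys;   // count * key_size
 *	attr.batch.values    = (__u64)(unsigned long)values; // count * value_size
 *	attr.batch.out_batch = (__u64)(unsigned long)next_pos;
 *	attr.batch.count     = 64;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *	// errno == ENOENT once the whole map has been traversed
 */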
1775 
1776 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1777 
1778 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1779 {
1780 	void __user *ukey = u64_to_user_ptr(attr->key);
1781 	void __user *uvalue = u64_to_user_ptr(attr->value);
1782 	int ufd = attr->map_fd;
1783 	struct bpf_map *map;
1784 	void *key, *value;
1785 	u32 value_size;
1786 	struct fd f;
1787 	int err;
1788 
1789 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1790 		return -EINVAL;
1791 
1792 	if (attr->flags & ~BPF_F_LOCK)
1793 		return -EINVAL;
1794 
1795 	f = fdget(ufd);
1796 	map = __bpf_map_get(f);
1797 	if (IS_ERR(map))
1798 		return PTR_ERR(map);
1799 	bpf_map_write_active_inc(map);
1800 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1801 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1802 		err = -EPERM;
1803 		goto err_put;
1804 	}
1805 
1806 	if (attr->flags &&
1807 	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1808 	     map->map_type == BPF_MAP_TYPE_STACK)) {
1809 		err = -EINVAL;
1810 		goto err_put;
1811 	}
1812 
1813 	if ((attr->flags & BPF_F_LOCK) &&
1814 	    !map_value_has_spin_lock(map)) {
1815 		err = -EINVAL;
1816 		goto err_put;
1817 	}
1818 
1819 	key = __bpf_copy_key(ukey, map->key_size);
1820 	if (IS_ERR(key)) {
1821 		err = PTR_ERR(key);
1822 		goto err_put;
1823 	}
1824 
1825 	value_size = bpf_map_value_size(map);
1826 
1827 	err = -ENOMEM;
1828 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1829 	if (!value)
1830 		goto free_key;
1831 
1832 	err = -ENOTSUPP;
1833 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1834 	    map->map_type == BPF_MAP_TYPE_STACK) {
1835 		err = map->ops->map_pop_elem(map, value);
1836 	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
1837 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1838 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1839 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1840 		if (!bpf_map_is_dev_bound(map)) {
1841 			bpf_disable_instrumentation();
1842 			rcu_read_lock();
1843 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
1844 			rcu_read_unlock();
1845 			bpf_enable_instrumentation();
1846 		}
1847 	}
1848 
1849 	if (err)
1850 		goto free_value;
1851 
1852 	if (copy_to_user(uvalue, value, value_size) != 0) {
1853 		err = -EFAULT;
1854 		goto free_value;
1855 	}
1856 
1857 	err = 0;
1858 
1859 free_value:
1860 	kvfree(value);
1861 free_key:
1862 	kvfree(key);
1863 err_put:
1864 	bpf_map_write_active_dec(map);
1865 	fdput(f);
1866 	return err;
1867 }
1868 
1869 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1870 
1871 static int map_freeze(const union bpf_attr *attr)
1872 {
1873 	int err = 0, ufd = attr->map_fd;
1874 	struct bpf_map *map;
1875 	struct fd f;
1876 
1877 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1878 		return -EINVAL;
1879 
1880 	f = fdget(ufd);
1881 	map = __bpf_map_get(f);
1882 	if (IS_ERR(map))
1883 		return PTR_ERR(map);
1884 
1885 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
1886 	    map_value_has_timer(map) || map_value_has_kptrs(map)) {
1887 		fdput(f);
1888 		return -ENOTSUPP;
1889 	}
1890 
1891 	mutex_lock(&map->freeze_mutex);
1892 	if (bpf_map_write_active(map)) {
1893 		err = -EBUSY;
1894 		goto err_put;
1895 	}
1896 	if (READ_ONCE(map->frozen)) {
1897 		err = -EBUSY;
1898 		goto err_put;
1899 	}
1900 	if (!bpf_capable()) {
1901 		err = -EPERM;
1902 		goto err_put;
1903 	}
1904 
1905 	WRITE_ONCE(map->frozen, true);
1906 err_put:
1907 	mutex_unlock(&map->freeze_mutex);
1908 	fdput(f);
1909 	return err;
1910 }
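
/*
 * Note (illustrative, not part of the original file): once BPF_MAP_FREEZE
 * succeeds, map->frozen is observed by map_get_sys_perms(), so write-side
 * syscall commands (update, delete, batch update, ...) on any fd of this map
 * fail with -EPERM, while writes from BPF programs remain governed solely by
 * BPF_F_RDONLY_PROG.
 */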
1911 
1912 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1913 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1914 	[_id] = & _name ## _prog_ops,
1915 #define BPF_MAP_TYPE(_id, _ops)
1916 #define BPF_LINK_TYPE(_id, _name)
1917 #include <linux/bpf_types.h>
1918 #undef BPF_PROG_TYPE
1919 #undef BPF_MAP_TYPE
1920 #undef BPF_LINK_TYPE
1921 };
1922 
1923 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1924 {
1925 	const struct bpf_prog_ops *ops;
1926 
1927 	if (type >= ARRAY_SIZE(bpf_prog_types))
1928 		return -EINVAL;
1929 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1930 	ops = bpf_prog_types[type];
1931 	if (!ops)
1932 		return -EINVAL;
1933 
1934 	if (!bpf_prog_is_dev_bound(prog->aux))
1935 		prog->aux->ops = ops;
1936 	else
1937 		prog->aux->ops = &bpf_offload_prog_ops;
1938 	prog->type = type;
1939 	return 0;
1940 }
1941 
1942 enum bpf_audit {
1943 	BPF_AUDIT_LOAD,
1944 	BPF_AUDIT_UNLOAD,
1945 	BPF_AUDIT_MAX,
1946 };
1947 
1948 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1949 	[BPF_AUDIT_LOAD]   = "LOAD",
1950 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1951 };
1952 
1953 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1954 {
1955 	struct audit_context *ctx = NULL;
1956 	struct audit_buffer *ab;
1957 
1958 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1959 		return;
1960 	if (audit_enabled == AUDIT_OFF)
1961 		return;
1962 	if (op == BPF_AUDIT_LOAD)
1963 		ctx = audit_context();
1964 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1965 	if (unlikely(!ab))
1966 		return;
1967 	audit_log_format(ab, "prog-id=%u op=%s",
1968 			 prog->aux->id, bpf_audit_str[op]);
1969 	audit_log_end(ab);
1970 }
1971 
1972 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1973 {
1974 	int id;
1975 
1976 	idr_preload(GFP_KERNEL);
1977 	spin_lock_bh(&prog_idr_lock);
1978 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1979 	if (id > 0)
1980 		prog->aux->id = id;
1981 	spin_unlock_bh(&prog_idr_lock);
1982 	idr_preload_end();
1983 
1984 	/* id is in [1, INT_MAX) */
1985 	if (WARN_ON_ONCE(!id))
1986 		return -ENOSPC;
1987 
1988 	return id > 0 ? 0 : id;
1989 }
1990 
1991 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1992 {
1993 	unsigned long flags;
1994 
1995 	/* cBPF to eBPF migrations are currently not in the idr store.
1996 	 * Offloaded programs are removed from the store when their device
1997 	 * disappears - even if someone grabs an fd to them they are unusable,
1998 	 * simply waiting for refcnt to drop to be freed.
1999 	 */
2000 	if (!prog->aux->id)
2001 		return;
2002 
2003 	if (do_idr_lock)
2004 		spin_lock_irqsave(&prog_idr_lock, flags);
2005 	else
2006 		__acquire(&prog_idr_lock);
2007 
2008 	idr_remove(&prog_idr, prog->aux->id);
2009 	prog->aux->id = 0;
2010 
2011 	if (do_idr_lock)
2012 		spin_unlock_irqrestore(&prog_idr_lock, flags);
2013 	else
2014 		__release(&prog_idr_lock);
2015 }
2016 
2017 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2018 {
2019 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2020 
2021 	kvfree(aux->func_info);
2022 	kfree(aux->func_info_aux);
2023 	free_uid(aux->user);
2024 	security_bpf_prog_free(aux);
2025 	bpf_prog_free(aux->prog);
2026 }
2027 
2028 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2029 {
2030 	bpf_prog_kallsyms_del_all(prog);
2031 	btf_put(prog->aux->btf);
2032 	kvfree(prog->aux->jited_linfo);
2033 	kvfree(prog->aux->linfo);
2034 	kfree(prog->aux->kfunc_tab);
2035 	if (prog->aux->attach_btf)
2036 		btf_put(prog->aux->attach_btf);
2037 
2038 	if (deferred) {
2039 		if (prog->aux->sleepable)
2040 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2041 		else
2042 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2043 	} else {
2044 		__bpf_prog_put_rcu(&prog->aux->rcu);
2045 	}
2046 }
2047 
2048 static void bpf_prog_put_deferred(struct work_struct *work)
2049 {
2050 	struct bpf_prog_aux *aux;
2051 	struct bpf_prog *prog;
2052 
2053 	aux = container_of(work, struct bpf_prog_aux, work);
2054 	prog = aux->prog;
2055 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2056 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2057 	__bpf_prog_put_noref(prog, true);
2058 }
2059 
2060 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
2061 {
2062 	struct bpf_prog_aux *aux = prog->aux;
2063 
2064 	if (atomic64_dec_and_test(&aux->refcnt)) {
2065 		/* bpf_prog_free_id() must be called first */
2066 		bpf_prog_free_id(prog, do_idr_lock);
2067 
2068 		if (in_irq() || irqs_disabled()) {
2069 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2070 			schedule_work(&aux->work);
2071 		} else {
2072 			bpf_prog_put_deferred(&aux->work);
2073 		}
2074 	}
2075 }
2076 
2077 void bpf_prog_put(struct bpf_prog *prog)
2078 {
2079 	__bpf_prog_put(prog, true);
2080 }
2081 EXPORT_SYMBOL_GPL(bpf_prog_put);
2082 
2083 static int bpf_prog_release(struct inode *inode, struct file *filp)
2084 {
2085 	struct bpf_prog *prog = filp->private_data;
2086 
2087 	bpf_prog_put(prog);
2088 	return 0;
2089 }
2090 
2091 struct bpf_prog_kstats {
2092 	u64 nsecs;
2093 	u64 cnt;
2094 	u64 misses;
2095 };
2096 
2097 static void bpf_prog_get_stats(const struct bpf_prog *prog,
2098 			       struct bpf_prog_kstats *stats)
2099 {
2100 	u64 nsecs = 0, cnt = 0, misses = 0;
2101 	int cpu;
2102 
2103 	for_each_possible_cpu(cpu) {
2104 		const struct bpf_prog_stats *st;
2105 		unsigned int start;
2106 		u64 tnsecs, tcnt, tmisses;
2107 
2108 		st = per_cpu_ptr(prog->stats, cpu);
2109 		do {
2110 			start = u64_stats_fetch_begin_irq(&st->syncp);
2111 			tnsecs = u64_stats_read(&st->nsecs);
2112 			tcnt = u64_stats_read(&st->cnt);
2113 			tmisses = u64_stats_read(&st->misses);
2114 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
2115 		nsecs += tnsecs;
2116 		cnt += tcnt;
2117 		misses += tmisses;
2118 	}
2119 	stats->nsecs = nsecs;
2120 	stats->cnt = cnt;
2121 	stats->misses = misses;
2122 }
2123 
2124 #ifdef CONFIG_PROC_FS
2125 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2126 {
2127 	const struct bpf_prog *prog = filp->private_data;
2128 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2129 	struct bpf_prog_kstats stats;
2130 
2131 	bpf_prog_get_stats(prog, &stats);
2132 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2133 	seq_printf(m,
2134 		   "prog_type:\t%u\n"
2135 		   "prog_jited:\t%u\n"
2136 		   "prog_tag:\t%s\n"
2137 		   "memlock:\t%llu\n"
2138 		   "prog_id:\t%u\n"
2139 		   "run_time_ns:\t%llu\n"
2140 		   "run_cnt:\t%llu\n"
2141 		   "recursion_misses:\t%llu\n"
2142 		   "verified_insns:\t%u\n",
2143 		   prog->type,
2144 		   prog->jited,
2145 		   prog_tag,
2146 		   prog->pages * 1ULL << PAGE_SHIFT,
2147 		   prog->aux->id,
2148 		   stats.nsecs,
2149 		   stats.cnt,
2150 		   stats.misses,
2151 		   prog->aux->verified_insns);
2152 }
2153 #endif
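
/* The fdinfo hook above is what shows up when reading
 * /proc/<pid>/fdinfo/<prog-fd>. A rough example of the output (values purely
 * illustrative; run_time_ns/run_cnt stay zero unless BPF stats collection is
 * enabled, e.g. via the kernel.bpf_stats_enabled sysctl):
 *
 *	prog_type:	2
 *	prog_jited:	1
 *	prog_tag:	bcf7977d3b93787c
 *	memlock:	4096
 *	prog_id:	17
 *	run_time_ns:	132833
 *	run_cnt:	12
 *	recursion_misses:	0
 *	verified_insns:	42
 */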
2154 
2155 const struct file_operations bpf_prog_fops = {
2156 #ifdef CONFIG_PROC_FS
2157 	.show_fdinfo	= bpf_prog_show_fdinfo,
2158 #endif
2159 	.release	= bpf_prog_release,
2160 	.read		= bpf_dummy_read,
2161 	.write		= bpf_dummy_write,
2162 };
2163 
2164 int bpf_prog_new_fd(struct bpf_prog *prog)
2165 {
2166 	int ret;
2167 
2168 	ret = security_bpf_prog(prog);
2169 	if (ret < 0)
2170 		return ret;
2171 
2172 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2173 				O_RDWR | O_CLOEXEC);
2174 }
2175 
2176 static struct bpf_prog *____bpf_prog_get(struct fd f)
2177 {
2178 	if (!f.file)
2179 		return ERR_PTR(-EBADF);
2180 	if (f.file->f_op != &bpf_prog_fops) {
2181 		fdput(f);
2182 		return ERR_PTR(-EINVAL);
2183 	}
2184 
2185 	return f.file->private_data;
2186 }
2187 
2188 void bpf_prog_add(struct bpf_prog *prog, int i)
2189 {
2190 	atomic64_add(i, &prog->aux->refcnt);
2191 }
2192 EXPORT_SYMBOL_GPL(bpf_prog_add);
2193 
2194 void bpf_prog_sub(struct bpf_prog *prog, int i)
2195 {
2196 	/* Only to be used for undoing previous bpf_prog_add() in some
2197 	 * error path. We still know that another entity in our call
2198 	 * path holds a reference to the program, thus atomic_sub() can
2199 	 * be safely used in such cases!
2200 	 */
2201 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2202 }
2203 EXPORT_SYMBOL_GPL(bpf_prog_sub);
2204 
2205 void bpf_prog_inc(struct bpf_prog *prog)
2206 {
2207 	atomic64_inc(&prog->aux->refcnt);
2208 }
2209 EXPORT_SYMBOL_GPL(bpf_prog_inc);
2210 
2211 /* prog_idr_lock should have been held */
2212 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2213 {
2214 	int refold;
2215 
2216 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2217 
2218 	if (!refold)
2219 		return ERR_PTR(-ENOENT);
2220 
2221 	return prog;
2222 }
2223 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2224 
2225 bool bpf_prog_get_ok(struct bpf_prog *prog,
2226 			    enum bpf_prog_type *attach_type, bool attach_drv)
2227 {
2228 	/* not an attachment, just a refcount inc, always allow */
2229 	if (!attach_type)
2230 		return true;
2231 
2232 	if (prog->type != *attach_type)
2233 		return false;
2234 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
2235 		return false;
2236 
2237 	return true;
2238 }
2239 
2240 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2241 				       bool attach_drv)
2242 {
2243 	struct fd f = fdget(ufd);
2244 	struct bpf_prog *prog;
2245 
2246 	prog = ____bpf_prog_get(f);
2247 	if (IS_ERR(prog))
2248 		return prog;
2249 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2250 		prog = ERR_PTR(-EINVAL);
2251 		goto out;
2252 	}
2253 
2254 	bpf_prog_inc(prog);
2255 out:
2256 	fdput(f);
2257 	return prog;
2258 }
2259 
2260 struct bpf_prog *bpf_prog_get(u32 ufd)
2261 {
2262 	return __bpf_prog_get(ufd, NULL, false);
2263 }
2264 
2265 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2266 				       bool attach_drv)
2267 {
2268 	return __bpf_prog_get(ufd, &type, attach_drv);
2269 }
2270 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
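
/* Kernel-side sketch: a subsystem handed a program FD from user space
 * typically takes its own reference via bpf_prog_get_type_dev() (or the
 * bpf_prog_get_type() convenience wrapper) and drops it with bpf_prog_put()
 * once the program is no longer needed. The XDP type below is illustrative:
 *
 *	prog = bpf_prog_get_type_dev(ufd, BPF_PROG_TYPE_XDP, false);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	// ... stash prog in the subsystem's state and use it ...
 *	bpf_prog_put(prog);
 */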
2271 
2272 /* Initially all BPF programs could be loaded w/o specifying
2273  * expected_attach_type. Later for some of them specifying expected_attach_type
2274  * at load time became required so that program could be validated properly.
2275  * Programs of types that are allowed to be loaded both w/ and w/o (for
2276  * backward compatibility) expected_attach_type, should have the default attach
2277  * type assigned to expected_attach_type for the latter case, so that it can be
2278  * validated later at attach time.
2279  *
2280  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2281  * prog type requires it but has some attach types that have to be backward
2282  * compatible.
2283  */
2284 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2285 {
2286 	switch (attr->prog_type) {
2287 	case BPF_PROG_TYPE_CGROUP_SOCK:
2288 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2289 		 * exist so checking for non-zero is the way to go here.
2290 		 */
2291 		if (!attr->expected_attach_type)
2292 			attr->expected_attach_type =
2293 				BPF_CGROUP_INET_SOCK_CREATE;
2294 		break;
2295 	case BPF_PROG_TYPE_SK_REUSEPORT:
2296 		if (!attr->expected_attach_type)
2297 			attr->expected_attach_type =
2298 				BPF_SK_REUSEPORT_SELECT;
2299 		break;
2300 	}
2301 }
2302 
2303 static int
2304 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2305 			   enum bpf_attach_type expected_attach_type,
2306 			   struct btf *attach_btf, u32 btf_id,
2307 			   struct bpf_prog *dst_prog)
2308 {
2309 	if (btf_id) {
2310 		if (btf_id > BTF_MAX_TYPE)
2311 			return -EINVAL;
2312 
2313 		if (!attach_btf && !dst_prog)
2314 			return -EINVAL;
2315 
2316 		switch (prog_type) {
2317 		case BPF_PROG_TYPE_TRACING:
2318 		case BPF_PROG_TYPE_LSM:
2319 		case BPF_PROG_TYPE_STRUCT_OPS:
2320 		case BPF_PROG_TYPE_EXT:
2321 			break;
2322 		default:
2323 			return -EINVAL;
2324 		}
2325 	}
2326 
2327 	if (attach_btf && (!btf_id || dst_prog))
2328 		return -EINVAL;
2329 
2330 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2331 	    prog_type != BPF_PROG_TYPE_EXT)
2332 		return -EINVAL;
2333 
2334 	switch (prog_type) {
2335 	case BPF_PROG_TYPE_CGROUP_SOCK:
2336 		switch (expected_attach_type) {
2337 		case BPF_CGROUP_INET_SOCK_CREATE:
2338 		case BPF_CGROUP_INET_SOCK_RELEASE:
2339 		case BPF_CGROUP_INET4_POST_BIND:
2340 		case BPF_CGROUP_INET6_POST_BIND:
2341 			return 0;
2342 		default:
2343 			return -EINVAL;
2344 		}
2345 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2346 		switch (expected_attach_type) {
2347 		case BPF_CGROUP_INET4_BIND:
2348 		case BPF_CGROUP_INET6_BIND:
2349 		case BPF_CGROUP_INET4_CONNECT:
2350 		case BPF_CGROUP_INET6_CONNECT:
2351 		case BPF_CGROUP_INET4_GETPEERNAME:
2352 		case BPF_CGROUP_INET6_GETPEERNAME:
2353 		case BPF_CGROUP_INET4_GETSOCKNAME:
2354 		case BPF_CGROUP_INET6_GETSOCKNAME:
2355 		case BPF_CGROUP_UDP4_SENDMSG:
2356 		case BPF_CGROUP_UDP6_SENDMSG:
2357 		case BPF_CGROUP_UDP4_RECVMSG:
2358 		case BPF_CGROUP_UDP6_RECVMSG:
2359 			return 0;
2360 		default:
2361 			return -EINVAL;
2362 		}
2363 	case BPF_PROG_TYPE_CGROUP_SKB:
2364 		switch (expected_attach_type) {
2365 		case BPF_CGROUP_INET_INGRESS:
2366 		case BPF_CGROUP_INET_EGRESS:
2367 			return 0;
2368 		default:
2369 			return -EINVAL;
2370 		}
2371 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2372 		switch (expected_attach_type) {
2373 		case BPF_CGROUP_SETSOCKOPT:
2374 		case BPF_CGROUP_GETSOCKOPT:
2375 			return 0;
2376 		default:
2377 			return -EINVAL;
2378 		}
2379 	case BPF_PROG_TYPE_SK_LOOKUP:
2380 		if (expected_attach_type == BPF_SK_LOOKUP)
2381 			return 0;
2382 		return -EINVAL;
2383 	case BPF_PROG_TYPE_SK_REUSEPORT:
2384 		switch (expected_attach_type) {
2385 		case BPF_SK_REUSEPORT_SELECT:
2386 		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2387 			return 0;
2388 		default:
2389 			return -EINVAL;
2390 		}
2391 	case BPF_PROG_TYPE_SYSCALL:
2392 	case BPF_PROG_TYPE_EXT:
2393 		if (expected_attach_type)
2394 			return -EINVAL;
2395 		fallthrough;
2396 	default:
2397 		return 0;
2398 	}
2399 }
2400 
2401 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2402 {
2403 	switch (prog_type) {
2404 	case BPF_PROG_TYPE_SCHED_CLS:
2405 	case BPF_PROG_TYPE_SCHED_ACT:
2406 	case BPF_PROG_TYPE_XDP:
2407 	case BPF_PROG_TYPE_LWT_IN:
2408 	case BPF_PROG_TYPE_LWT_OUT:
2409 	case BPF_PROG_TYPE_LWT_XMIT:
2410 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2411 	case BPF_PROG_TYPE_SK_SKB:
2412 	case BPF_PROG_TYPE_SK_MSG:
2413 	case BPF_PROG_TYPE_LIRC_MODE2:
2414 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2415 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2416 	case BPF_PROG_TYPE_CGROUP_SOCK:
2417 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2418 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2419 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2420 	case BPF_PROG_TYPE_SOCK_OPS:
2421 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2422 		return true;
2423 	case BPF_PROG_TYPE_CGROUP_SKB:
2424 		/* always unpriv */
2425 	case BPF_PROG_TYPE_SK_REUSEPORT:
2426 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2427 	default:
2428 		return false;
2429 	}
2430 }
2431 
2432 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2433 {
2434 	switch (prog_type) {
2435 	case BPF_PROG_TYPE_KPROBE:
2436 	case BPF_PROG_TYPE_TRACEPOINT:
2437 	case BPF_PROG_TYPE_PERF_EVENT:
2438 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2439 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2440 	case BPF_PROG_TYPE_TRACING:
2441 	case BPF_PROG_TYPE_LSM:
2442 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2443 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2444 		return true;
2445 	default:
2446 		return false;
2447 	}
2448 }
2449 
2450 /* last field in 'union bpf_attr' used by this command */
2451 #define	BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size
2452 
2453 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
2454 {
2455 	enum bpf_prog_type type = attr->prog_type;
2456 	struct bpf_prog *prog, *dst_prog = NULL;
2457 	struct btf *attach_btf = NULL;
2458 	int err;
2459 	char license[128];
2460 	bool is_gpl;
2461 
2462 	if (CHECK_ATTR(BPF_PROG_LOAD))
2463 		return -EINVAL;
2464 
2465 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2466 				 BPF_F_ANY_ALIGNMENT |
2467 				 BPF_F_TEST_STATE_FREQ |
2468 				 BPF_F_SLEEPABLE |
2469 				 BPF_F_TEST_RND_HI32 |
2470 				 BPF_F_XDP_HAS_FRAGS))
2471 		return -EINVAL;
2472 
2473 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2474 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2475 	    !bpf_capable())
2476 		return -EPERM;
2477 
2478 	/* copy eBPF program license from user space */
2479 	if (strncpy_from_bpfptr(license,
2480 				make_bpfptr(attr->license, uattr.is_kernel),
2481 				sizeof(license) - 1) < 0)
2482 		return -EFAULT;
2483 	license[sizeof(license) - 1] = 0;
2484 
2485 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2486 	is_gpl = license_is_gpl_compatible(license);
2487 
2488 	if (attr->insn_cnt == 0 ||
2489 	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2490 		return -E2BIG;
2491 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2492 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2493 	    !bpf_capable())
2494 		return -EPERM;
2495 
2496 	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2497 		return -EPERM;
2498 	if (is_perfmon_prog_type(type) && !perfmon_capable())
2499 		return -EPERM;
2500 
2501 	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2502 	 * or btf; we need to check which one it is
2503 	 */
2504 	if (attr->attach_prog_fd) {
2505 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2506 		if (IS_ERR(dst_prog)) {
2507 			dst_prog = NULL;
2508 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2509 			if (IS_ERR(attach_btf))
2510 				return -EINVAL;
2511 			if (!btf_is_kernel(attach_btf)) {
2512 				/* attaching through specifying bpf_prog's BTF
2513 				 * objects directly might be supported eventually
2514 				 */
2515 				btf_put(attach_btf);
2516 				return -ENOTSUPP;
2517 			}
2518 		}
2519 	} else if (attr->attach_btf_id) {
2520 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2521 		attach_btf = bpf_get_btf_vmlinux();
2522 		if (IS_ERR(attach_btf))
2523 			return PTR_ERR(attach_btf);
2524 		if (!attach_btf)
2525 			return -EINVAL;
2526 		btf_get(attach_btf);
2527 	}
2528 
2529 	bpf_prog_load_fixup_attach_type(attr);
2530 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2531 				       attach_btf, attr->attach_btf_id,
2532 				       dst_prog)) {
2533 		if (dst_prog)
2534 			bpf_prog_put(dst_prog);
2535 		if (attach_btf)
2536 			btf_put(attach_btf);
2537 		return -EINVAL;
2538 	}
2539 
2540 	/* plain bpf_prog allocation */
2541 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2542 	if (!prog) {
2543 		if (dst_prog)
2544 			bpf_prog_put(dst_prog);
2545 		if (attach_btf)
2546 			btf_put(attach_btf);
2547 		return -ENOMEM;
2548 	}
2549 
2550 	prog->expected_attach_type = attr->expected_attach_type;
2551 	prog->aux->attach_btf = attach_btf;
2552 	prog->aux->attach_btf_id = attr->attach_btf_id;
2553 	prog->aux->dst_prog = dst_prog;
2554 	prog->aux->offload_requested = !!attr->prog_ifindex;
2555 	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2556 	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2557 
2558 	err = security_bpf_prog_alloc(prog->aux);
2559 	if (err)
2560 		goto free_prog;
2561 
2562 	prog->aux->user = get_current_user();
2563 	prog->len = attr->insn_cnt;
2564 
2565 	err = -EFAULT;
2566 	if (copy_from_bpfptr(prog->insns,
2567 			     make_bpfptr(attr->insns, uattr.is_kernel),
2568 			     bpf_prog_insn_size(prog)) != 0)
2569 		goto free_prog_sec;
2570 
2571 	prog->orig_prog = NULL;
2572 	prog->jited = 0;
2573 
2574 	atomic64_set(&prog->aux->refcnt, 1);
2575 	prog->gpl_compatible = is_gpl ? 1 : 0;
2576 
2577 	if (bpf_prog_is_dev_bound(prog->aux)) {
2578 		err = bpf_prog_offload_init(prog, attr);
2579 		if (err)
2580 			goto free_prog_sec;
2581 	}
2582 
2583 	/* find program type: socket_filter vs tracing_filter */
2584 	err = find_prog_type(type, prog);
2585 	if (err < 0)
2586 		goto free_prog_sec;
2587 
2588 	prog->aux->load_time = ktime_get_boottime_ns();
2589 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2590 			       sizeof(attr->prog_name));
2591 	if (err < 0)
2592 		goto free_prog_sec;
2593 
2594 	/* run eBPF verifier */
2595 	err = bpf_check(&prog, attr, uattr);
2596 	if (err < 0)
2597 		goto free_used_maps;
2598 
2599 	prog = bpf_prog_select_runtime(prog, &err);
2600 	if (err < 0)
2601 		goto free_used_maps;
2602 
2603 	err = bpf_prog_alloc_id(prog);
2604 	if (err)
2605 		goto free_used_maps;
2606 
2607 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2608 	 * effectively publicly exposed. However, retrieving via
2609 	 * bpf_prog_get_fd_by_id() will take another reference,
2610 	 * therefore it cannot be gone underneath us.
2611 	 *
2612 	 * Only for the time /after/ successful bpf_prog_new_fd()
2613 	 * and before returning to userspace, we might just hold
2614 	 * one reference and any parallel close on that fd could
2615 	 * rip everything out. Hence, below notifications must
2616 	 * happen before bpf_prog_new_fd().
2617 	 *
2618 	 * Also, any failure handling from this point onwards must
2619 	 * be using bpf_prog_put() given the program is exposed.
2620 	 */
2621 	bpf_prog_kallsyms_add(prog);
2622 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2623 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2624 
2625 	err = bpf_prog_new_fd(prog);
2626 	if (err < 0)
2627 		bpf_prog_put(prog);
2628 	return err;
2629 
2630 free_used_maps:
2631 	/* In case we have subprogs, we need to wait for a grace
2632 	 * period before we can tear down JIT memory since symbols
2633 	 * are already exposed under kallsyms.
2634 	 */
2635 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2636 	return err;
2637 free_prog_sec:
2638 	free_uid(prog->aux->user);
2639 	security_bpf_prog_free(prog->aux);
2640 free_prog:
2641 	if (prog->aux->attach_btf)
2642 		btf_put(prog->aux->attach_btf);
2643 	bpf_prog_free(prog);
2644 	return err;
2645 }
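
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper): loading the
 * smallest valid program, "return 0", exercises the path above end to end.
 * Verifier error details can be requested through log_buf/log_size/log_level.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },	// r0 = 0
 *		{ .code = BPF_JMP | BPF_EXIT },					// exit
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */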
2646 
2647 #define BPF_OBJ_LAST_FIELD file_flags
2648 
2649 static int bpf_obj_pin(const union bpf_attr *attr)
2650 {
2651 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2652 		return -EINVAL;
2653 
2654 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2655 }
2656 
2657 static int bpf_obj_get(const union bpf_attr *attr)
2658 {
2659 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2660 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2661 		return -EINVAL;
2662 
2663 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2664 				attr->file_flags);
2665 }
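
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper): pinning an object
 * into bpffs and re-opening it later by path, possibly from another process.
 * The path below is illustrative.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd   = prog_fd;
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */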
2666 
2667 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2668 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2669 {
2670 	atomic64_set(&link->refcnt, 1);
2671 	link->type = type;
2672 	link->id = 0;
2673 	link->ops = ops;
2674 	link->prog = prog;
2675 }
2676 
2677 static void bpf_link_free_id(int id)
2678 {
2679 	if (!id)
2680 		return;
2681 
2682 	spin_lock_bh(&link_idr_lock);
2683 	idr_remove(&link_idr, id);
2684 	spin_unlock_bh(&link_idr_lock);
2685 }
2686 
2687 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2688  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2689  * anon_inode's release() call. This helper marks bpf_link as
2690  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2691  * refcnt is not decremented; it is the responsibility of the calling code
2692  * that failed to complete bpf_link initialization.
2693  */
2694 void bpf_link_cleanup(struct bpf_link_primer *primer)
2695 {
2696 	primer->link->prog = NULL;
2697 	bpf_link_free_id(primer->id);
2698 	fput(primer->file);
2699 	put_unused_fd(primer->fd);
2700 }
2701 
2702 void bpf_link_inc(struct bpf_link *link)
2703 {
2704 	atomic64_inc(&link->refcnt);
2705 }
2706 
2707 /* bpf_link_free is guaranteed to be called from process context */
2708 static void bpf_link_free(struct bpf_link *link)
2709 {
2710 	bpf_link_free_id(link->id);
2711 	if (link->prog) {
2712 		/* detach BPF program, clean up used resources */
2713 		link->ops->release(link);
2714 		bpf_prog_put(link->prog);
2715 	}
2716 	/* free bpf_link and its containing memory */
2717 	link->ops->dealloc(link);
2718 }
2719 
2720 static void bpf_link_put_deferred(struct work_struct *work)
2721 {
2722 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2723 
2724 	bpf_link_free(link);
2725 }
2726 
2727 /* bpf_link_put can be called from atomic context, but ensures that resources
2728  * are freed from process context
2729  */
2730 void bpf_link_put(struct bpf_link *link)
2731 {
2732 	if (!atomic64_dec_and_test(&link->refcnt))
2733 		return;
2734 
2735 	if (in_atomic()) {
2736 		INIT_WORK(&link->work, bpf_link_put_deferred);
2737 		schedule_work(&link->work);
2738 	} else {
2739 		bpf_link_free(link);
2740 	}
2741 }
2742 EXPORT_SYMBOL(bpf_link_put);
2743 
2744 static int bpf_link_release(struct inode *inode, struct file *filp)
2745 {
2746 	struct bpf_link *link = filp->private_data;
2747 
2748 	bpf_link_put(link);
2749 	return 0;
2750 }
2751 
2752 #ifdef CONFIG_PROC_FS
2753 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2754 #define BPF_MAP_TYPE(_id, _ops)
2755 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2756 static const char *bpf_link_type_strs[] = {
2757 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2758 #include <linux/bpf_types.h>
2759 };
2760 #undef BPF_PROG_TYPE
2761 #undef BPF_MAP_TYPE
2762 #undef BPF_LINK_TYPE
2763 
2764 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2765 {
2766 	const struct bpf_link *link = filp->private_data;
2767 	const struct bpf_prog *prog = link->prog;
2768 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2769 
2770 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2771 	seq_printf(m,
2772 		   "link_type:\t%s\n"
2773 		   "link_id:\t%u\n"
2774 		   "prog_tag:\t%s\n"
2775 		   "prog_id:\t%u\n",
2776 		   bpf_link_type_strs[link->type],
2777 		   link->id,
2778 		   prog_tag,
2779 		   prog->aux->id);
2780 	if (link->ops->show_fdinfo)
2781 		link->ops->show_fdinfo(link, m);
2782 }
2783 #endif
2784 
2785 static const struct file_operations bpf_link_fops = {
2786 #ifdef CONFIG_PROC_FS
2787 	.show_fdinfo	= bpf_link_show_fdinfo,
2788 #endif
2789 	.release	= bpf_link_release,
2790 	.read		= bpf_dummy_read,
2791 	.write		= bpf_dummy_write,
2792 };
2793 
2794 static int bpf_link_alloc_id(struct bpf_link *link)
2795 {
2796 	int id;
2797 
2798 	idr_preload(GFP_KERNEL);
2799 	spin_lock_bh(&link_idr_lock);
2800 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2801 	spin_unlock_bh(&link_idr_lock);
2802 	idr_preload_end();
2803 
2804 	return id;
2805 }
2806 
2807 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2808  * reserving unused FD and allocating ID from link_idr. This is to be paired
2809  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2810  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2811  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All
2812  * the transient state is passed around in struct bpf_link_primer.
2813  * This is the preferred way to create and initialize bpf_link, especially when
2814  * there are complicated and expensive operations in between creating bpf_link
2815  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2816  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2817  * expensive (and potentially failing) roll-back operations in the rare case
2818  * that the file, FD, or ID can't be allocated.
2819  */
2820 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2821 {
2822 	struct file *file;
2823 	int fd, id;
2824 
2825 	fd = get_unused_fd_flags(O_CLOEXEC);
2826 	if (fd < 0)
2827 		return fd;
2828 
2829 
2830 	id = bpf_link_alloc_id(link);
2831 	if (id < 0) {
2832 		put_unused_fd(fd);
2833 		return id;
2834 	}
2835 
2836 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2837 	if (IS_ERR(file)) {
2838 		bpf_link_free_id(id);
2839 		put_unused_fd(fd);
2840 		return PTR_ERR(file);
2841 	}
2842 
2843 	primer->link = link;
2844 	primer->file = file;
2845 	primer->fd = fd;
2846 	primer->id = id;
2847 	return 0;
2848 }
2849 
2850 int bpf_link_settle(struct bpf_link_primer *primer)
2851 {
2852 	/* make bpf_link fetchable by ID */
2853 	spin_lock_bh(&link_idr_lock);
2854 	primer->link->id = primer->id;
2855 	spin_unlock_bh(&link_idr_lock);
2856 	/* make bpf_link fetchable by FD */
2857 	fd_install(primer->fd, primer->file);
2858 	/* pass through installed FD */
2859 	return primer->fd;
2860 }
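
/* Condensed sketch of the prime/attach/settle pattern for a hypothetical link
 * type (the foo_* names and BPF_LINK_TYPE_FOO are placeholders); the attach
 * paths further down in this file follow the same shape:
 *
 *	struct bpf_link_primer primer;
 *	struct foo_link *link;
 *	int err;
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *
 *	err = bpf_link_prime(&link->link, &primer);	// reserve file, FD and ID
 *	if (err) {
 *		kfree(link);				// nothing exposed yet
 *		return err;
 *	}
 *	err = foo_attach(link);				// the expensive, failing part
 *	if (err) {
 *		bpf_link_cleanup(&primer);		// undo file/FD/ID; prog ref stays with caller
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);		// install FD, publish ID
 */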
2861 
2862 int bpf_link_new_fd(struct bpf_link *link)
2863 {
2864 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2865 }
2866 
2867 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2868 {
2869 	struct fd f = fdget(ufd);
2870 	struct bpf_link *link;
2871 
2872 	if (!f.file)
2873 		return ERR_PTR(-EBADF);
2874 	if (f.file->f_op != &bpf_link_fops) {
2875 		fdput(f);
2876 		return ERR_PTR(-EINVAL);
2877 	}
2878 
2879 	link = f.file->private_data;
2880 	bpf_link_inc(link);
2881 	fdput(f);
2882 
2883 	return link;
2884 }
2885 EXPORT_SYMBOL(bpf_link_get_from_fd);
2886 
2887 static void bpf_tracing_link_release(struct bpf_link *link)
2888 {
2889 	struct bpf_tracing_link *tr_link =
2890 		container_of(link, struct bpf_tracing_link, link.link);
2891 
2892 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
2893 						tr_link->trampoline));
2894 
2895 	bpf_trampoline_put(tr_link->trampoline);
2896 
2897 	/* tgt_prog is NULL if target is a kernel function */
2898 	if (tr_link->tgt_prog)
2899 		bpf_prog_put(tr_link->tgt_prog);
2900 }
2901 
2902 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2903 {
2904 	struct bpf_tracing_link *tr_link =
2905 		container_of(link, struct bpf_tracing_link, link.link);
2906 
2907 	kfree(tr_link);
2908 }
2909 
2910 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2911 					 struct seq_file *seq)
2912 {
2913 	struct bpf_tracing_link *tr_link =
2914 		container_of(link, struct bpf_tracing_link, link.link);
2915 
2916 	seq_printf(seq,
2917 		   "attach_type:\t%d\n",
2918 		   tr_link->attach_type);
2919 }
2920 
2921 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2922 					   struct bpf_link_info *info)
2923 {
2924 	struct bpf_tracing_link *tr_link =
2925 		container_of(link, struct bpf_tracing_link, link.link);
2926 
2927 	info->tracing.attach_type = tr_link->attach_type;
2928 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
2929 				  &info->tracing.target_obj_id,
2930 				  &info->tracing.target_btf_id);
2931 
2932 	return 0;
2933 }
2934 
2935 static const struct bpf_link_ops bpf_tracing_link_lops = {
2936 	.release = bpf_tracing_link_release,
2937 	.dealloc = bpf_tracing_link_dealloc,
2938 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2939 	.fill_link_info = bpf_tracing_link_fill_link_info,
2940 };
2941 
2942 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2943 				   int tgt_prog_fd,
2944 				   u32 btf_id,
2945 				   u64 bpf_cookie)
2946 {
2947 	struct bpf_link_primer link_primer;
2948 	struct bpf_prog *tgt_prog = NULL;
2949 	struct bpf_trampoline *tr = NULL;
2950 	struct bpf_tracing_link *link;
2951 	u64 key = 0;
2952 	int err;
2953 
2954 	switch (prog->type) {
2955 	case BPF_PROG_TYPE_TRACING:
2956 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2957 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2958 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2959 			err = -EINVAL;
2960 			goto out_put_prog;
2961 		}
2962 		break;
2963 	case BPF_PROG_TYPE_EXT:
2964 		if (prog->expected_attach_type != 0) {
2965 			err = -EINVAL;
2966 			goto out_put_prog;
2967 		}
2968 		break;
2969 	case BPF_PROG_TYPE_LSM:
2970 		if (prog->expected_attach_type != BPF_LSM_MAC) {
2971 			err = -EINVAL;
2972 			goto out_put_prog;
2973 		}
2974 		break;
2975 	default:
2976 		err = -EINVAL;
2977 		goto out_put_prog;
2978 	}
2979 
2980 	if (!!tgt_prog_fd != !!btf_id) {
2981 		err = -EINVAL;
2982 		goto out_put_prog;
2983 	}
2984 
2985 	if (tgt_prog_fd) {
2986 		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2987 		if (prog->type != BPF_PROG_TYPE_EXT) {
2988 			err = -EINVAL;
2989 			goto out_put_prog;
2990 		}
2991 
2992 		tgt_prog = bpf_prog_get(tgt_prog_fd);
2993 		if (IS_ERR(tgt_prog)) {
2994 			err = PTR_ERR(tgt_prog);
2995 			tgt_prog = NULL;
2996 			goto out_put_prog;
2997 		}
2998 
2999 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3000 	}
3001 
3002 	link = kzalloc(sizeof(*link), GFP_USER);
3003 	if (!link) {
3004 		err = -ENOMEM;
3005 		goto out_put_prog;
3006 	}
3007 	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3008 		      &bpf_tracing_link_lops, prog);
3009 	link->attach_type = prog->expected_attach_type;
3010 	link->link.cookie = bpf_cookie;
3011 
3012 	mutex_lock(&prog->aux->dst_mutex);
3013 
3014 	/* There are a few possible cases here:
3015 	 *
3016 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3017 	 *   and not yet attached to anything, so we can use the values stored
3018 	 *   in prog->aux
3019 	 *
3020 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3021 	 *   attached to a target and its initial target was cleared (below)
3022 	 *
3023 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3024 	 *   target_btf_id using the link_create API.
3025 	 *
3026 	 * - if tgt_prog == NULL, this function was called using the old
3027 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3028 	 *
3029 	 * - if prog->aux->dst_trampoline and tgt_prog are both NULL, the program
3030 	 *   was detached and is going for re-attachment.
3031 	 */
3032 	if (!prog->aux->dst_trampoline && !tgt_prog) {
3033 		/*
3034 		 * Allow re-attach for TRACING and LSM programs. If it's
3035 		 * currently linked, bpf_trampoline_link_prog will fail.
3036 		 * EXT programs need to specify tgt_prog_fd, so they
3037 		 * EXT programs need to specify tgt_prog_fd, so they
3038 		 * re-attach in a separate code path.
3039 		if (prog->type != BPF_PROG_TYPE_TRACING &&
3040 		    prog->type != BPF_PROG_TYPE_LSM) {
3041 			err = -EINVAL;
3042 			goto out_unlock;
3043 		}
3044 		btf_id = prog->aux->attach_btf_id;
3045 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3046 	}
3047 
3048 	if (!prog->aux->dst_trampoline ||
3049 	    (key && key != prog->aux->dst_trampoline->key)) {
3050 		/* If there is no saved target, or the specified target is
3051 		 * different from the destination specified at load time, we
3052 		 * need a new trampoline and a check for compatibility
3053 		 */
3054 		struct bpf_attach_target_info tgt_info = {};
3055 
3056 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3057 					      &tgt_info);
3058 		if (err)
3059 			goto out_unlock;
3060 
3061 		tr = bpf_trampoline_get(key, &tgt_info);
3062 		if (!tr) {
3063 			err = -ENOMEM;
3064 			goto out_unlock;
3065 		}
3066 	} else {
3067 		/* The caller didn't specify a target, or the target was the
3068 		 * same as the destination supplied during program load. This
3069 		 * means we can reuse the trampoline and reference from program
3070 		 * load time, and there is no need to allocate a new one. This
3071 		 * can only happen once for any program, as the saved values in
3072 		 * prog->aux are cleared below.
3073 		 */
3074 		tr = prog->aux->dst_trampoline;
3075 		tgt_prog = prog->aux->dst_prog;
3076 	}
3077 
3078 	err = bpf_link_prime(&link->link.link, &link_primer);
3079 	if (err)
3080 		goto out_unlock;
3081 
3082 	err = bpf_trampoline_link_prog(&link->link, tr);
3083 	if (err) {
3084 		bpf_link_cleanup(&link_primer);
3085 		link = NULL;
3086 		goto out_unlock;
3087 	}
3088 
3089 	link->tgt_prog = tgt_prog;
3090 	link->trampoline = tr;
3091 
3092 	/* Always clear the trampoline and target prog from prog->aux to make
3093 	 * sure the original attach destination is not kept alive after a
3094 	 * program is (re-)attached to another target.
3095 	 */
3096 	if (prog->aux->dst_prog &&
3097 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3098 		/* got extra prog ref from syscall, or attaching to different prog */
3099 		bpf_prog_put(prog->aux->dst_prog);
3100 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3101 		/* we allocated a new trampoline, so free the old one */
3102 		bpf_trampoline_put(prog->aux->dst_trampoline);
3103 
3104 	prog->aux->dst_prog = NULL;
3105 	prog->aux->dst_trampoline = NULL;
3106 	mutex_unlock(&prog->aux->dst_mutex);
3107 
3108 	return bpf_link_settle(&link_primer);
3109 out_unlock:
3110 	if (tr && tr != prog->aux->dst_trampoline)
3111 		bpf_trampoline_put(tr);
3112 	mutex_unlock(&prog->aux->dst_mutex);
3113 	kfree(link);
3114 out_put_prog:
3115 	if (tgt_prog_fd && tgt_prog)
3116 		bpf_prog_put(tgt_prog);
3117 	return err;
3118 }
3119 
3120 struct bpf_raw_tp_link {
3121 	struct bpf_link link;
3122 	struct bpf_raw_event_map *btp;
3123 };
3124 
3125 static void bpf_raw_tp_link_release(struct bpf_link *link)
3126 {
3127 	struct bpf_raw_tp_link *raw_tp =
3128 		container_of(link, struct bpf_raw_tp_link, link);
3129 
3130 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3131 	bpf_put_raw_tracepoint(raw_tp->btp);
3132 }
3133 
3134 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3135 {
3136 	struct bpf_raw_tp_link *raw_tp =
3137 		container_of(link, struct bpf_raw_tp_link, link);
3138 
3139 	kfree(raw_tp);
3140 }
3141 
3142 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3143 					struct seq_file *seq)
3144 {
3145 	struct bpf_raw_tp_link *raw_tp_link =
3146 		container_of(link, struct bpf_raw_tp_link, link);
3147 
3148 	seq_printf(seq,
3149 		   "tp_name:\t%s\n",
3150 		   raw_tp_link->btp->tp->name);
3151 }
3152 
3153 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3154 					  struct bpf_link_info *info)
3155 {
3156 	struct bpf_raw_tp_link *raw_tp_link =
3157 		container_of(link, struct bpf_raw_tp_link, link);
3158 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3159 	const char *tp_name = raw_tp_link->btp->tp->name;
3160 	u32 ulen = info->raw_tracepoint.tp_name_len;
3161 	size_t tp_len = strlen(tp_name);
3162 
3163 	if (!ulen ^ !ubuf)
3164 		return -EINVAL;
3165 
3166 	info->raw_tracepoint.tp_name_len = tp_len + 1;
3167 
3168 	if (!ubuf)
3169 		return 0;
3170 
3171 	if (ulen >= tp_len + 1) {
3172 		if (copy_to_user(ubuf, tp_name, tp_len + 1))
3173 			return -EFAULT;
3174 	} else {
3175 		char zero = '\0';
3176 
3177 		if (copy_to_user(ubuf, tp_name, ulen - 1))
3178 			return -EFAULT;
3179 		if (put_user(zero, ubuf + ulen - 1))
3180 			return -EFAULT;
3181 		return -ENOSPC;
3182 	}
3183 
3184 	return 0;
3185 }
3186 
3187 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3188 	.release = bpf_raw_tp_link_release,
3189 	.dealloc = bpf_raw_tp_link_dealloc,
3190 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3191 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3192 };
3193 
3194 #ifdef CONFIG_PERF_EVENTS
3195 struct bpf_perf_link {
3196 	struct bpf_link link;
3197 	struct file *perf_file;
3198 };
3199 
3200 static void bpf_perf_link_release(struct bpf_link *link)
3201 {
3202 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3203 	struct perf_event *event = perf_link->perf_file->private_data;
3204 
3205 	perf_event_free_bpf_prog(event);
3206 	fput(perf_link->perf_file);
3207 }
3208 
3209 static void bpf_perf_link_dealloc(struct bpf_link *link)
3210 {
3211 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3212 
3213 	kfree(perf_link);
3214 }
3215 
3216 static const struct bpf_link_ops bpf_perf_link_lops = {
3217 	.release = bpf_perf_link_release,
3218 	.dealloc = bpf_perf_link_dealloc,
3219 };
3220 
3221 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3222 {
3223 	struct bpf_link_primer link_primer;
3224 	struct bpf_perf_link *link;
3225 	struct perf_event *event;
3226 	struct file *perf_file;
3227 	int err;
3228 
3229 	if (attr->link_create.flags)
3230 		return -EINVAL;
3231 
3232 	perf_file = perf_event_get(attr->link_create.target_fd);
3233 	if (IS_ERR(perf_file))
3234 		return PTR_ERR(perf_file);
3235 
3236 	link = kzalloc(sizeof(*link), GFP_USER);
3237 	if (!link) {
3238 		err = -ENOMEM;
3239 		goto out_put_file;
3240 	}
3241 	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3242 	link->perf_file = perf_file;
3243 
3244 	err = bpf_link_prime(&link->link, &link_primer);
3245 	if (err) {
3246 		kfree(link);
3247 		goto out_put_file;
3248 	}
3249 
3250 	event = perf_file->private_data;
3251 	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3252 	if (err) {
3253 		bpf_link_cleanup(&link_primer);
3254 		goto out_put_file;
3255 	}
3256 	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3257 	bpf_prog_inc(prog);
3258 
3259 	return bpf_link_settle(&link_primer);
3260 
3261 out_put_file:
3262 	fput(perf_file);
3263 	return err;
3264 }
3265 #else
3266 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3267 {
3268 	return -EOPNOTSUPP;
3269 }
3270 #endif /* CONFIG_PERF_EVENTS */
3271 
3272 static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3273 				  const char __user *user_tp_name)
3274 {
3275 	struct bpf_link_primer link_primer;
3276 	struct bpf_raw_tp_link *link;
3277 	struct bpf_raw_event_map *btp;
3278 	const char *tp_name;
3279 	char buf[128];
3280 	int err;
3281 
3282 	switch (prog->type) {
3283 	case BPF_PROG_TYPE_TRACING:
3284 	case BPF_PROG_TYPE_EXT:
3285 	case BPF_PROG_TYPE_LSM:
3286 		if (user_tp_name)
3287 			/* The attach point for this category of programs
3288 			 * should be specified via btf_id during program load.
3289 			 */
3290 			return -EINVAL;
3291 		if (prog->type == BPF_PROG_TYPE_TRACING &&
3292 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3293 			tp_name = prog->aux->attach_func_name;
3294 			break;
3295 		}
3296 		return bpf_tracing_prog_attach(prog, 0, 0, 0);
3297 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3298 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3299 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3300 			return -EFAULT;
3301 		buf[sizeof(buf) - 1] = 0;
3302 		tp_name = buf;
3303 		break;
3304 	default:
3305 		return -EINVAL;
3306 	}
3307 
3308 	btp = bpf_get_raw_tracepoint(tp_name);
3309 	if (!btp)
3310 		return -ENOENT;
3311 
3312 	link = kzalloc(sizeof(*link), GFP_USER);
3313 	if (!link) {
3314 		err = -ENOMEM;
3315 		goto out_put_btp;
3316 	}
3317 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3318 		      &bpf_raw_tp_link_lops, prog);
3319 	link->btp = btp;
3320 
3321 	err = bpf_link_prime(&link->link, &link_primer);
3322 	if (err) {
3323 		kfree(link);
3324 		goto out_put_btp;
3325 	}
3326 
3327 	err = bpf_probe_register(link->btp, prog);
3328 	if (err) {
3329 		bpf_link_cleanup(&link_primer);
3330 		goto out_put_btp;
3331 	}
3332 
3333 	return bpf_link_settle(&link_primer);
3334 
3335 out_put_btp:
3336 	bpf_put_raw_tracepoint(btp);
3337 	return err;
3338 }
3339 
3340 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3341 
3342 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3343 {
3344 	struct bpf_prog *prog;
3345 	int fd;
3346 
3347 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3348 		return -EINVAL;
3349 
3350 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3351 	if (IS_ERR(prog))
3352 		return PTR_ERR(prog);
3353 
3354 	fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3355 	if (fd < 0)
3356 		bpf_prog_put(prog);
3357 	return fd;
3358 }
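
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper): attaching a
 * BPF_PROG_TYPE_RAW_TRACEPOINT program to a tracepoint by name. The returned
 * FD refers to a bpf_link; closing it detaches the program.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.raw_tracepoint.name    = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
 */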
3359 
3360 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3361 					     enum bpf_attach_type attach_type)
3362 {
3363 	switch (prog->type) {
3364 	case BPF_PROG_TYPE_CGROUP_SOCK:
3365 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3366 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3367 	case BPF_PROG_TYPE_SK_LOOKUP:
3368 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3369 	case BPF_PROG_TYPE_CGROUP_SKB:
3370 		if (!capable(CAP_NET_ADMIN))
3371 			/* cg-skb progs can be loaded by unpriv user.
3372 			 * check permissions at attach time.
3373 			 */
3374 			return -EPERM;
3375 		return prog->enforce_expected_attach_type &&
3376 			prog->expected_attach_type != attach_type ?
3377 			-EINVAL : 0;
3378 	default:
3379 		return 0;
3380 	}
3381 }
3382 
3383 static enum bpf_prog_type
3384 attach_type_to_prog_type(enum bpf_attach_type attach_type)
3385 {
3386 	switch (attach_type) {
3387 	case BPF_CGROUP_INET_INGRESS:
3388 	case BPF_CGROUP_INET_EGRESS:
3389 		return BPF_PROG_TYPE_CGROUP_SKB;
3390 	case BPF_CGROUP_INET_SOCK_CREATE:
3391 	case BPF_CGROUP_INET_SOCK_RELEASE:
3392 	case BPF_CGROUP_INET4_POST_BIND:
3393 	case BPF_CGROUP_INET6_POST_BIND:
3394 		return BPF_PROG_TYPE_CGROUP_SOCK;
3395 	case BPF_CGROUP_INET4_BIND:
3396 	case BPF_CGROUP_INET6_BIND:
3397 	case BPF_CGROUP_INET4_CONNECT:
3398 	case BPF_CGROUP_INET6_CONNECT:
3399 	case BPF_CGROUP_INET4_GETPEERNAME:
3400 	case BPF_CGROUP_INET6_GETPEERNAME:
3401 	case BPF_CGROUP_INET4_GETSOCKNAME:
3402 	case BPF_CGROUP_INET6_GETSOCKNAME:
3403 	case BPF_CGROUP_UDP4_SENDMSG:
3404 	case BPF_CGROUP_UDP6_SENDMSG:
3405 	case BPF_CGROUP_UDP4_RECVMSG:
3406 	case BPF_CGROUP_UDP6_RECVMSG:
3407 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3408 	case BPF_CGROUP_SOCK_OPS:
3409 		return BPF_PROG_TYPE_SOCK_OPS;
3410 	case BPF_CGROUP_DEVICE:
3411 		return BPF_PROG_TYPE_CGROUP_DEVICE;
3412 	case BPF_SK_MSG_VERDICT:
3413 		return BPF_PROG_TYPE_SK_MSG;
3414 	case BPF_SK_SKB_STREAM_PARSER:
3415 	case BPF_SK_SKB_STREAM_VERDICT:
3416 	case BPF_SK_SKB_VERDICT:
3417 		return BPF_PROG_TYPE_SK_SKB;
3418 	case BPF_LIRC_MODE2:
3419 		return BPF_PROG_TYPE_LIRC_MODE2;
3420 	case BPF_FLOW_DISSECTOR:
3421 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3422 	case BPF_CGROUP_SYSCTL:
3423 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3424 	case BPF_CGROUP_GETSOCKOPT:
3425 	case BPF_CGROUP_SETSOCKOPT:
3426 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3427 	case BPF_TRACE_ITER:
3428 	case BPF_TRACE_RAW_TP:
3429 	case BPF_TRACE_FENTRY:
3430 	case BPF_TRACE_FEXIT:
3431 	case BPF_MODIFY_RETURN:
3432 		return BPF_PROG_TYPE_TRACING;
3433 	case BPF_LSM_MAC:
3434 		return BPF_PROG_TYPE_LSM;
3435 	case BPF_SK_LOOKUP:
3436 		return BPF_PROG_TYPE_SK_LOOKUP;
3437 	case BPF_XDP:
3438 		return BPF_PROG_TYPE_XDP;
3439 	case BPF_LSM_CGROUP:
3440 		return BPF_PROG_TYPE_LSM;
3441 	default:
3442 		return BPF_PROG_TYPE_UNSPEC;
3443 	}
3444 }
3445 
3446 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
3447 
3448 #define BPF_F_ATTACH_MASK \
3449 	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
3450 
3451 static int bpf_prog_attach(const union bpf_attr *attr)
3452 {
3453 	enum bpf_prog_type ptype;
3454 	struct bpf_prog *prog;
3455 	int ret;
3456 
3457 	if (CHECK_ATTR(BPF_PROG_ATTACH))
3458 		return -EINVAL;
3459 
3460 	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3461 		return -EINVAL;
3462 
3463 	ptype = attach_type_to_prog_type(attr->attach_type);
3464 	if (ptype == BPF_PROG_TYPE_UNSPEC)
3465 		return -EINVAL;
3466 
3467 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3468 	if (IS_ERR(prog))
3469 		return PTR_ERR(prog);
3470 
3471 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3472 		bpf_prog_put(prog);
3473 		return -EINVAL;
3474 	}
3475 
3476 	switch (ptype) {
3477 	case BPF_PROG_TYPE_SK_SKB:
3478 	case BPF_PROG_TYPE_SK_MSG:
3479 		ret = sock_map_get_from_fd(attr, prog);
3480 		break;
3481 	case BPF_PROG_TYPE_LIRC_MODE2:
3482 		ret = lirc_prog_attach(attr, prog);
3483 		break;
3484 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3485 		ret = netns_bpf_prog_attach(attr, prog);
3486 		break;
3487 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3488 	case BPF_PROG_TYPE_CGROUP_SKB:
3489 	case BPF_PROG_TYPE_CGROUP_SOCK:
3490 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3491 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3492 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3493 	case BPF_PROG_TYPE_SOCK_OPS:
3494 	case BPF_PROG_TYPE_LSM:
3495 		if (ptype == BPF_PROG_TYPE_LSM &&
3496 		    prog->expected_attach_type != BPF_LSM_CGROUP)
3497 			return -EINVAL;
3498 
3499 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3500 		break;
3501 	default:
3502 		ret = -EINVAL;
3503 	}
3504 
3505 	if (ret)
3506 		bpf_prog_put(prog);
3507 	return ret;
3508 }
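
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper): attaching a cgroup
 * skb program to a cgroup so that several programs may coexist on the hook.
 * The cgroup path is illustrative.
 *
 *	union bpf_attr attr = {};
 *	int cgroup_fd = open("/sys/fs/cgroup/my.slice", O_RDONLY);
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;		// BPF_PROG_TYPE_CGROUP_SKB program
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */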
3509 
3510 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3511 
3512 static int bpf_prog_detach(const union bpf_attr *attr)
3513 {
3514 	enum bpf_prog_type ptype;
3515 
3516 	if (CHECK_ATTR(BPF_PROG_DETACH))
3517 		return -EINVAL;
3518 
3519 	ptype = attach_type_to_prog_type(attr->attach_type);
3520 
3521 	switch (ptype) {
3522 	case BPF_PROG_TYPE_SK_MSG:
3523 	case BPF_PROG_TYPE_SK_SKB:
3524 		return sock_map_prog_detach(attr, ptype);
3525 	case BPF_PROG_TYPE_LIRC_MODE2:
3526 		return lirc_prog_detach(attr);
3527 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3528 		return netns_bpf_prog_detach(attr, ptype);
3529 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3530 	case BPF_PROG_TYPE_CGROUP_SKB:
3531 	case BPF_PROG_TYPE_CGROUP_SOCK:
3532 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3533 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3534 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3535 	case BPF_PROG_TYPE_SOCK_OPS:
3536 	case BPF_PROG_TYPE_LSM:
3537 		return cgroup_bpf_prog_detach(attr, ptype);
3538 	default:
3539 		return -EINVAL;
3540 	}
3541 }
3542 
3543 #define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags
3544 
3545 static int bpf_prog_query(const union bpf_attr *attr,
3546 			  union bpf_attr __user *uattr)
3547 {
3548 	if (!capable(CAP_NET_ADMIN))
3549 		return -EPERM;
3550 	if (CHECK_ATTR(BPF_PROG_QUERY))
3551 		return -EINVAL;
3552 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3553 		return -EINVAL;
3554 
3555 	switch (attr->query.attach_type) {
3556 	case BPF_CGROUP_INET_INGRESS:
3557 	case BPF_CGROUP_INET_EGRESS:
3558 	case BPF_CGROUP_INET_SOCK_CREATE:
3559 	case BPF_CGROUP_INET_SOCK_RELEASE:
3560 	case BPF_CGROUP_INET4_BIND:
3561 	case BPF_CGROUP_INET6_BIND:
3562 	case BPF_CGROUP_INET4_POST_BIND:
3563 	case BPF_CGROUP_INET6_POST_BIND:
3564 	case BPF_CGROUP_INET4_CONNECT:
3565 	case BPF_CGROUP_INET6_CONNECT:
3566 	case BPF_CGROUP_INET4_GETPEERNAME:
3567 	case BPF_CGROUP_INET6_GETPEERNAME:
3568 	case BPF_CGROUP_INET4_GETSOCKNAME:
3569 	case BPF_CGROUP_INET6_GETSOCKNAME:
3570 	case BPF_CGROUP_UDP4_SENDMSG:
3571 	case BPF_CGROUP_UDP6_SENDMSG:
3572 	case BPF_CGROUP_UDP4_RECVMSG:
3573 	case BPF_CGROUP_UDP6_RECVMSG:
3574 	case BPF_CGROUP_SOCK_OPS:
3575 	case BPF_CGROUP_DEVICE:
3576 	case BPF_CGROUP_SYSCTL:
3577 	case BPF_CGROUP_GETSOCKOPT:
3578 	case BPF_CGROUP_SETSOCKOPT:
3579 	case BPF_LSM_CGROUP:
3580 		return cgroup_bpf_prog_query(attr, uattr);
3581 	case BPF_LIRC_MODE2:
3582 		return lirc_prog_query(attr, uattr);
3583 	case BPF_FLOW_DISSECTOR:
3584 	case BPF_SK_LOOKUP:
3585 		return netns_bpf_prog_query(attr, uattr);
3586 	case BPF_SK_SKB_STREAM_PARSER:
3587 	case BPF_SK_SKB_STREAM_VERDICT:
3588 	case BPF_SK_MSG_VERDICT:
3589 	case BPF_SK_SKB_VERDICT:
3590 		return sock_map_bpf_prog_query(attr, uattr);
3591 	default:
3592 		return -EINVAL;
3593 	}
3594 }
3595 
3596 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
3597 
3598 static int bpf_prog_test_run(const union bpf_attr *attr,
3599 			     union bpf_attr __user *uattr)
3600 {
3601 	struct bpf_prog *prog;
3602 	int ret = -ENOTSUPP;
3603 
3604 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3605 		return -EINVAL;
3606 
3607 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3608 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3609 		return -EINVAL;
3610 
3611 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3612 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3613 		return -EINVAL;
3614 
3615 	prog = bpf_prog_get(attr->test.prog_fd);
3616 	if (IS_ERR(prog))
3617 		return PTR_ERR(prog);
3618 
3619 	if (prog->aux->ops->test_run)
3620 		ret = prog->aux->ops->test_run(prog, attr, uattr);
3621 
3622 	bpf_prog_put(prog);
3623 	return ret;
3624 }
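
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper): running a loaded
 * program once against a crafted packet buffer and reading back its return
 * value. On success the kernel fills in attr.test.retval and
 * attr.test.duration.
 *
 *	union bpf_attr attr = {};
 *	__u8 pkt[64] = {};			// input packet, contents elided
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat       = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 */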
3625 
3626 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3627 
3628 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3629 			       union bpf_attr __user *uattr,
3630 			       struct idr *idr,
3631 			       spinlock_t *lock)
3632 {
3633 	u32 next_id = attr->start_id;
3634 	int err = 0;
3635 
3636 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3637 		return -EINVAL;
3638 
3639 	if (!capable(CAP_SYS_ADMIN))
3640 		return -EPERM;
3641 
3642 	next_id++;
3643 	spin_lock_bh(lock);
3644 	if (!idr_get_next(idr, &next_id))
3645 		err = -ENOENT;
3646 	spin_unlock_bh(lock);
3647 
3648 	if (!err)
3649 		err = put_user(next_id, &uattr->next_id);
3650 
3651 	return err;
3652 }
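
/* Usage sketch (userspace, assuming a raw bpf(2) wrapper and CAP_SYS_ADMIN):
 * walking all loaded programs by ID. Each ID can then be turned into an FD
 * with BPF_PROG_GET_FD_BY_ID below.
 *
 *	union bpf_attr attr = {};
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		// attr.next_id is the ID of the next loaded program
 *		attr.start_id = attr.next_id;
 *	}
 *	// the loop ends with -1/ENOENT once the highest ID has been visited
 */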
3653 
3654 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3655 {
3656 	struct bpf_map *map;
3657 
3658 	spin_lock_bh(&map_idr_lock);
3659 again:
3660 	map = idr_get_next(&map_idr, id);
3661 	if (map) {
3662 		map = __bpf_map_inc_not_zero(map, false);
3663 		if (IS_ERR(map)) {
3664 			(*id)++;
3665 			goto again;
3666 		}
3667 	}
3668 	spin_unlock_bh(&map_idr_lock);
3669 
3670 	return map;
3671 }
3672 
3673 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3674 {
3675 	struct bpf_prog *prog;
3676 
3677 	spin_lock_bh(&prog_idr_lock);
3678 again:
3679 	prog = idr_get_next(&prog_idr, id);
3680 	if (prog) {
3681 		prog = bpf_prog_inc_not_zero(prog);
3682 		if (IS_ERR(prog)) {
3683 			(*id)++;
3684 			goto again;
3685 		}
3686 	}
3687 	spin_unlock_bh(&prog_idr_lock);
3688 
3689 	return prog;
3690 }
3691 
3692 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3693 
3694 struct bpf_prog *bpf_prog_by_id(u32 id)
3695 {
3696 	struct bpf_prog *prog;
3697 
3698 	if (!id)
3699 		return ERR_PTR(-ENOENT);
3700 
3701 	spin_lock_bh(&prog_idr_lock);
3702 	prog = idr_find(&prog_idr, id);
3703 	if (prog)
3704 		prog = bpf_prog_inc_not_zero(prog);
3705 	else
3706 		prog = ERR_PTR(-ENOENT);
3707 	spin_unlock_bh(&prog_idr_lock);
3708 	return prog;
3709 }
3710 
3711 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3712 {
3713 	struct bpf_prog *prog;
3714 	u32 id = attr->prog_id;
3715 	int fd;
3716 
3717 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3718 		return -EINVAL;
3719 
3720 	if (!capable(CAP_SYS_ADMIN))
3721 		return -EPERM;
3722 
3723 	prog = bpf_prog_by_id(id);
3724 	if (IS_ERR(prog))
3725 		return PTR_ERR(prog);
3726 
3727 	fd = bpf_prog_new_fd(prog);
3728 	if (fd < 0)
3729 		bpf_prog_put(prog);
3730 
3731 	return fd;
3732 }
3733 
3734 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3735 
3736 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3737 {
3738 	struct bpf_map *map;
3739 	u32 id = attr->map_id;
3740 	int f_flags;
3741 	int fd;
3742 
3743 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3744 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3745 		return -EINVAL;
3746 
3747 	if (!capable(CAP_SYS_ADMIN))
3748 		return -EPERM;
3749 
3750 	f_flags = bpf_get_file_flag(attr->open_flags);
3751 	if (f_flags < 0)
3752 		return f_flags;
3753 
3754 	spin_lock_bh(&map_idr_lock);
3755 	map = idr_find(&map_idr, id);
3756 	if (map)
3757 		map = __bpf_map_inc_not_zero(map, true);
3758 	else
3759 		map = ERR_PTR(-ENOENT);
3760 	spin_unlock_bh(&map_idr_lock);
3761 
3762 	if (IS_ERR(map))
3763 		return PTR_ERR(map);
3764 
3765 	fd = bpf_map_new_fd(map, f_flags);
3766 	if (fd < 0)
3767 		bpf_map_put_with_uref(map);
3768 
3769 	return fd;
3770 }
3771 
3772 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3773 					      unsigned long addr, u32 *off,
3774 					      u32 *type)
3775 {
3776 	const struct bpf_map *map;
3777 	int i;
3778 
3779 	mutex_lock(&prog->aux->used_maps_mutex);
3780 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3781 		map = prog->aux->used_maps[i];
3782 		if (map == (void *)addr) {
3783 			*type = BPF_PSEUDO_MAP_FD;
3784 			goto out;
3785 		}
3786 		if (!map->ops->map_direct_value_meta)
3787 			continue;
3788 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3789 			*type = BPF_PSEUDO_MAP_VALUE;
3790 			goto out;
3791 		}
3792 	}
3793 	map = NULL;
3794 
3795 out:
3796 	mutex_unlock(&prog->aux->used_maps_mutex);
3797 	return map;
3798 }
3799 
3800 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3801 					      const struct cred *f_cred)
3802 {
3803 	const struct bpf_map *map;
3804 	struct bpf_insn *insns;
3805 	u32 off, type;
3806 	u64 imm;
3807 	u8 code;
3808 	int i;
3809 
3810 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3811 			GFP_USER);
3812 	if (!insns)
3813 		return insns;
3814 
3815 	for (i = 0; i < prog->len; i++) {
3816 		code = insns[i].code;
3817 
3818 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3819 			insns[i].code = BPF_JMP | BPF_CALL;
3820 			insns[i].imm = BPF_FUNC_tail_call;
3821 			/* fall-through */
3822 		}
3823 		if (code == (BPF_JMP | BPF_CALL) ||
3824 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3825 			if (code == (BPF_JMP | BPF_CALL_ARGS))
3826 				insns[i].code = BPF_JMP | BPF_CALL;
3827 			if (!bpf_dump_raw_ok(f_cred))
3828 				insns[i].imm = 0;
3829 			continue;
3830 		}
3831 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3832 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3833 			continue;
3834 		}
3835 
3836 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3837 			continue;
3838 
3839 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3840 		map = bpf_map_from_imm(prog, imm, &off, &type);
3841 		if (map) {
3842 			insns[i].src_reg = type;
3843 			insns[i].imm = map->id;
3844 			insns[i + 1].imm = off;
3845 			continue;
3846 		}
3847 	}
3848 
3849 	return insns;
3850 }
3851 
3852 static int set_info_rec_size(struct bpf_prog_info *info)
3853 {
3854 	/*
3855 	 * Ensure info.*_rec_size is the same as kernel expected size
3856 	 *
3857 	 * or
3858 	 *
3859 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3860 	 * zero.  In this case, the kernel will set the expected
3861 	 * _rec_size back to the info.
3862 	 */
3863 
3864 	if ((info->nr_func_info || info->func_info_rec_size) &&
3865 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3866 		return -EINVAL;
3867 
3868 	if ((info->nr_line_info || info->line_info_rec_size) &&
3869 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3870 		return -EINVAL;
3871 
3872 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3873 	    info->jited_line_info_rec_size != sizeof(__u64))
3874 		return -EINVAL;
3875 
3876 	info->func_info_rec_size = sizeof(struct bpf_func_info);
3877 	info->line_info_rec_size = sizeof(struct bpf_line_info);
3878 	info->jited_line_info_rec_size = sizeof(__u64);
3879 
3880 	return 0;
3881 }
3882 
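/* BPF_OBJ_GET_INFO_BY_FD handler for program fds.  Fixed-size fields of
 * struct bpf_prog_info are always filled in; the variable-sized arrays
 * (xlated/jited insns, ksyms, func/line info, tags) are copied only when
 * the caller supplied a buffer for them, and several of them additionally
 * require bpf_capable() and/or bpf_dump_raw_ok().
 */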
3883 static int bpf_prog_get_info_by_fd(struct file *file,
3884 				   struct bpf_prog *prog,
3885 				   const union bpf_attr *attr,
3886 				   union bpf_attr __user *uattr)
3887 {
3888 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3889 	struct bpf_prog_info info;
3890 	u32 info_len = attr->info.info_len;
3891 	struct bpf_prog_kstats stats;
3892 	char __user *uinsns;
3893 	u32 ulen;
3894 	int err;
3895 
3896 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
3897 	if (err)
3898 		return err;
3899 	info_len = min_t(u32, sizeof(info), info_len);
3900 
3901 	memset(&info, 0, sizeof(info));
3902 	if (copy_from_user(&info, uinfo, info_len))
3903 		return -EFAULT;
3904 
3905 	info.type = prog->type;
3906 	info.id = prog->aux->id;
3907 	info.load_time = prog->aux->load_time;
3908 	info.created_by_uid = from_kuid_munged(current_user_ns(),
3909 					       prog->aux->user->uid);
3910 	info.gpl_compatible = prog->gpl_compatible;
3911 
3912 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3913 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3914 
3915 	mutex_lock(&prog->aux->used_maps_mutex);
3916 	ulen = info.nr_map_ids;
3917 	info.nr_map_ids = prog->aux->used_map_cnt;
3918 	ulen = min_t(u32, info.nr_map_ids, ulen);
3919 	if (ulen) {
3920 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3921 		u32 i;
3922 
3923 		for (i = 0; i < ulen; i++)
3924 			if (put_user(prog->aux->used_maps[i]->id,
3925 				     &user_map_ids[i])) {
3926 				mutex_unlock(&prog->aux->used_maps_mutex);
3927 				return -EFAULT;
3928 			}
3929 	}
3930 	mutex_unlock(&prog->aux->used_maps_mutex);
3931 
3932 	err = set_info_rec_size(&info);
3933 	if (err)
3934 		return err;
3935 
3936 	bpf_prog_get_stats(prog, &stats);
3937 	info.run_time_ns = stats.nsecs;
3938 	info.run_cnt = stats.cnt;
3939 	info.recursion_misses = stats.misses;
3940 
3941 	info.verified_insns = prog->aux->verified_insns;
3942 
3943 	if (!bpf_capable()) {
3944 		info.jited_prog_len = 0;
3945 		info.xlated_prog_len = 0;
3946 		info.nr_jited_ksyms = 0;
3947 		info.nr_jited_func_lens = 0;
3948 		info.nr_func_info = 0;
3949 		info.nr_line_info = 0;
3950 		info.nr_jited_line_info = 0;
3951 		goto done;
3952 	}
3953 
3954 	ulen = info.xlated_prog_len;
3955 	info.xlated_prog_len = bpf_prog_insn_size(prog);
3956 	if (info.xlated_prog_len && ulen) {
3957 		struct bpf_insn *insns_sanitized;
3958 		bool fault;
3959 
3960 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3961 			info.xlated_prog_insns = 0;
3962 			goto done;
3963 		}
3964 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3965 		if (!insns_sanitized)
3966 			return -ENOMEM;
3967 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3968 		ulen = min_t(u32, info.xlated_prog_len, ulen);
3969 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3970 		kfree(insns_sanitized);
3971 		if (fault)
3972 			return -EFAULT;
3973 	}
3974 
3975 	if (bpf_prog_is_dev_bound(prog->aux)) {
3976 		err = bpf_prog_offload_info_fill(&info, prog);
3977 		if (err)
3978 			return err;
3979 		goto done;
3980 	}
3981 
3982 	/* NOTE: the following code is supposed to be skipped for offload.
3983 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3984 	 * for offload.
3985 	 */
3986 	ulen = info.jited_prog_len;
3987 	if (prog->aux->func_cnt) {
3988 		u32 i;
3989 
3990 		info.jited_prog_len = 0;
3991 		for (i = 0; i < prog->aux->func_cnt; i++)
3992 			info.jited_prog_len += prog->aux->func[i]->jited_len;
3993 	} else {
3994 		info.jited_prog_len = prog->jited_len;
3995 	}
3996 
3997 	if (info.jited_prog_len && ulen) {
3998 		if (bpf_dump_raw_ok(file->f_cred)) {
3999 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4000 			ulen = min_t(u32, info.jited_prog_len, ulen);
4001 
4002 			/* for multi-function programs, copy the JITed
4003 			 * instructions for all the functions
4004 			 */
4005 			if (prog->aux->func_cnt) {
4006 				u32 len, free, i;
4007 				u8 *img;
4008 
4009 				free = ulen;
4010 				for (i = 0; i < prog->aux->func_cnt; i++) {
4011 					len = prog->aux->func[i]->jited_len;
4012 					len = min_t(u32, len, free);
4013 					img = (u8 *) prog->aux->func[i]->bpf_func;
4014 					if (copy_to_user(uinsns, img, len))
4015 						return -EFAULT;
4016 					uinsns += len;
4017 					free -= len;
4018 					if (!free)
4019 						break;
4020 				}
4021 			} else {
4022 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4023 					return -EFAULT;
4024 			}
4025 		} else {
4026 			info.jited_prog_insns = 0;
4027 		}
4028 	}
4029 
4030 	ulen = info.nr_jited_ksyms;
4031 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4032 	if (ulen) {
4033 		if (bpf_dump_raw_ok(file->f_cred)) {
4034 			unsigned long ksym_addr;
4035 			u64 __user *user_ksyms;
4036 			u32 i;
4037 
4038 			/* copy the address of the kernel symbol
4039 			 * corresponding to each function
4040 			 */
4041 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4042 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4043 			if (prog->aux->func_cnt) {
4044 				for (i = 0; i < ulen; i++) {
4045 					ksym_addr = (unsigned long)
4046 						prog->aux->func[i]->bpf_func;
4047 					if (put_user((u64) ksym_addr,
4048 						     &user_ksyms[i]))
4049 						return -EFAULT;
4050 				}
4051 			} else {
4052 				ksym_addr = (unsigned long) prog->bpf_func;
4053 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
4054 					return -EFAULT;
4055 			}
4056 		} else {
4057 			info.jited_ksyms = 0;
4058 		}
4059 	}
4060 
4061 	ulen = info.nr_jited_func_lens;
4062 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4063 	if (ulen) {
4064 		if (bpf_dump_raw_ok(file->f_cred)) {
4065 			u32 __user *user_lens;
4066 			u32 func_len, i;
4067 
4068 			/* copy the JITed image lengths for each function */
4069 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4070 			user_lens = u64_to_user_ptr(info.jited_func_lens);
4071 			if (prog->aux->func_cnt) {
4072 				for (i = 0; i < ulen; i++) {
4073 					func_len =
4074 						prog->aux->func[i]->jited_len;
4075 					if (put_user(func_len, &user_lens[i]))
4076 						return -EFAULT;
4077 				}
4078 			} else {
4079 				func_len = prog->jited_len;
4080 				if (put_user(func_len, &user_lens[0]))
4081 					return -EFAULT;
4082 			}
4083 		} else {
4084 			info.jited_func_lens = 0;
4085 		}
4086 	}
4087 
4088 	if (prog->aux->btf)
4089 		info.btf_id = btf_obj_id(prog->aux->btf);
4090 	info.attach_btf_id = prog->aux->attach_btf_id;
4091 	if (prog->aux->attach_btf)
4092 		info.attach_btf_obj_id = btf_obj_id(prog->aux->attach_btf);
4093 	else if (prog->aux->dst_prog)
4094 		info.attach_btf_obj_id = btf_obj_id(prog->aux->dst_prog->aux->attach_btf);
4095 
4096 	ulen = info.nr_func_info;
4097 	info.nr_func_info = prog->aux->func_info_cnt;
4098 	if (info.nr_func_info && ulen) {
4099 		char __user *user_finfo;
4100 
4101 		user_finfo = u64_to_user_ptr(info.func_info);
4102 		ulen = min_t(u32, info.nr_func_info, ulen);
4103 		if (copy_to_user(user_finfo, prog->aux->func_info,
4104 				 info.func_info_rec_size * ulen))
4105 			return -EFAULT;
4106 	}
4107 
4108 	ulen = info.nr_line_info;
4109 	info.nr_line_info = prog->aux->nr_linfo;
4110 	if (info.nr_line_info && ulen) {
4111 		__u8 __user *user_linfo;
4112 
4113 		user_linfo = u64_to_user_ptr(info.line_info);
4114 		ulen = min_t(u32, info.nr_line_info, ulen);
4115 		if (copy_to_user(user_linfo, prog->aux->linfo,
4116 				 info.line_info_rec_size * ulen))
4117 			return -EFAULT;
4118 	}
4119 
4120 	ulen = info.nr_jited_line_info;
4121 	if (prog->aux->jited_linfo)
4122 		info.nr_jited_line_info = prog->aux->nr_linfo;
4123 	else
4124 		info.nr_jited_line_info = 0;
4125 	if (info.nr_jited_line_info && ulen) {
4126 		if (bpf_dump_raw_ok(file->f_cred)) {
4127 			unsigned long line_addr;
4128 			__u64 __user *user_linfo;
4129 			u32 i;
4130 
4131 			user_linfo = u64_to_user_ptr(info.jited_line_info);
4132 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
4133 			for (i = 0; i < ulen; i++) {
4134 				line_addr = (unsigned long)prog->aux->jited_linfo[i];
4135 				if (put_user((__u64)line_addr, &user_linfo[i]))
4136 					return -EFAULT;
4137 			}
4138 		} else {
4139 			info.jited_line_info = 0;
4140 		}
4141 	}
4142 
4143 	ulen = info.nr_prog_tags;
4144 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4145 	if (ulen) {
4146 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4147 		u32 i;
4148 
4149 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
4150 		ulen = min_t(u32, info.nr_prog_tags, ulen);
4151 		if (prog->aux->func_cnt) {
4152 			for (i = 0; i < ulen; i++) {
4153 				if (copy_to_user(user_prog_tags[i],
4154 						 prog->aux->func[i]->tag,
4155 						 BPF_TAG_SIZE))
4156 					return -EFAULT;
4157 			}
4158 		} else {
4159 			if (copy_to_user(user_prog_tags[0],
4160 					 prog->tag, BPF_TAG_SIZE))
4161 				return -EFAULT;
4162 		}
4163 	}
4164 
4165 done:
4166 	if (copy_to_user(uinfo, &info, info_len) ||
4167 	    put_user(info_len, &uattr->info.info_len))
4168 		return -EFAULT;
4169 
4170 	return 0;
4171 }
4172 
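/* BPF_OBJ_GET_INFO_BY_FD handler for map fds: report the map's basic
 * attributes, its BTF ids and, for device-bound maps, offload details.
 */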
4173 static int bpf_map_get_info_by_fd(struct file *file,
4174 				  struct bpf_map *map,
4175 				  const union bpf_attr *attr,
4176 				  union bpf_attr __user *uattr)
4177 {
4178 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4179 	struct bpf_map_info info;
4180 	u32 info_len = attr->info.info_len;
4181 	int err;
4182 
4183 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4184 	if (err)
4185 		return err;
4186 	info_len = min_t(u32, sizeof(info), info_len);
4187 
4188 	memset(&info, 0, sizeof(info));
4189 	info.type = map->map_type;
4190 	info.id = map->id;
4191 	info.key_size = map->key_size;
4192 	info.value_size = map->value_size;
4193 	info.max_entries = map->max_entries;
4194 	info.map_flags = map->map_flags;
4195 	info.map_extra = map->map_extra;
4196 	memcpy(info.name, map->name, sizeof(map->name));
4197 
4198 	if (map->btf) {
4199 		info.btf_id = btf_obj_id(map->btf);
4200 		info.btf_key_type_id = map->btf_key_type_id;
4201 		info.btf_value_type_id = map->btf_value_type_id;
4202 	}
4203 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4204 
4205 	if (bpf_map_is_dev_bound(map)) {
4206 		err = bpf_map_offload_info_fill(&info, map);
4207 		if (err)
4208 			return err;
4209 	}
4210 
4211 	if (copy_to_user(uinfo, &info, info_len) ||
4212 	    put_user(info_len, &uattr->info.info_len))
4213 		return -EFAULT;
4214 
4215 	return 0;
4216 }
4217 
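/* BPF_OBJ_GET_INFO_BY_FD handler for BTF fds; after validating the
 * trailing bytes the actual filling is done by btf_get_info_by_fd().
 */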
4218 static int bpf_btf_get_info_by_fd(struct file *file,
4219 				  struct btf *btf,
4220 				  const union bpf_attr *attr,
4221 				  union bpf_attr __user *uattr)
4222 {
4223 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4224 	u32 info_len = attr->info.info_len;
4225 	int err;
4226 
4227 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4228 	if (err)
4229 		return err;
4230 
4231 	return btf_get_info_by_fd(btf, attr, uattr);
4232 }
4233 
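/* BPF_OBJ_GET_INFO_BY_FD handler for link fds: common fields are filled
 * here, link-type specific ones via the link's fill_link_info callback.
 */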
4234 static int bpf_link_get_info_by_fd(struct file *file,
4235 				  struct bpf_link *link,
4236 				  const union bpf_attr *attr,
4237 				  union bpf_attr __user *uattr)
4238 {
4239 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4240 	struct bpf_link_info info;
4241 	u32 info_len = attr->info.info_len;
4242 	int err;
4243 
4244 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4245 	if (err)
4246 		return err;
4247 	info_len = min_t(u32, sizeof(info), info_len);
4248 
4249 	memset(&info, 0, sizeof(info));
4250 	if (copy_from_user(&info, uinfo, info_len))
4251 		return -EFAULT;
4252 
4253 	info.type = link->type;
4254 	info.id = link->id;
4255 	info.prog_id = link->prog->aux->id;
4256 
4257 	if (link->ops->fill_link_info) {
4258 		err = link->ops->fill_link_info(link, &info);
4259 		if (err)
4260 			return err;
4261 	}
4262 
4263 	if (copy_to_user(uinfo, &info, info_len) ||
4264 	    put_user(info_len, &uattr->info.info_len))
4265 		return -EFAULT;
4266 
4267 	return 0;
4268 }
4269 
4270 
4271 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4272 
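/* Dispatch BPF_OBJ_GET_INFO_BY_FD based on the fd's file operations to
 * the prog, map, BTF or link specific info filler.
 */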
4273 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4274 				  union bpf_attr __user *uattr)
4275 {
4276 	int ufd = attr->info.bpf_fd;
4277 	struct fd f;
4278 	int err;
4279 
4280 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4281 		return -EINVAL;
4282 
4283 	f = fdget(ufd);
4284 	if (!f.file)
4285 		return -EBADFD;
4286 
4287 	if (f.file->f_op == &bpf_prog_fops)
4288 		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4289 					      uattr);
4290 	else if (f.file->f_op == &bpf_map_fops)
4291 		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4292 					     uattr);
4293 	else if (f.file->f_op == &btf_fops)
4294 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4295 	else if (f.file->f_op == &bpf_link_fops)
4296 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4297 					      attr, uattr);
4298 	else
4299 		err = -EINVAL;
4300 
4301 	fdput(f);
4302 	return err;
4303 }
4304 
4305 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
4306 
4307 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
4308 {
4309 	if (CHECK_ATTR(BPF_BTF_LOAD))
4310 		return -EINVAL;
4311 
4312 	if (!bpf_capable())
4313 		return -EPERM;
4314 
4315 	return btf_new_fd(attr, uattr);
4316 }
4317 
4318 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4319 
4320 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4321 {
4322 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4323 		return -EINVAL;
4324 
4325 	if (!capable(CAP_SYS_ADMIN))
4326 		return -EPERM;
4327 
4328 	return btf_get_fd_by_id(attr->btf_id);
4329 }
4330 
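/* Copy the BPF_TASK_FD_QUERY result back to user space.  The name in
 * @buf is copied into the user buffer and truncated with -ENOSPC if the
 * buffer is too small; whenever a non-empty buffer is supplied it is
 * always NUL terminated.
 */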
4331 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4332 				    union bpf_attr __user *uattr,
4333 				    u32 prog_id, u32 fd_type,
4334 				    const char *buf, u64 probe_offset,
4335 				    u64 probe_addr)
4336 {
4337 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4338 	u32 len = buf ? strlen(buf) : 0, input_len;
4339 	int err = 0;
4340 
4341 	if (put_user(len, &uattr->task_fd_query.buf_len))
4342 		return -EFAULT;
4343 	input_len = attr->task_fd_query.buf_len;
4344 	if (input_len && ubuf) {
4345 		if (!len) {
4346 			/* nothing to copy, just make ubuf NULL terminated */
4347 			char zero = '\0';
4348 
4349 			if (put_user(zero, ubuf))
4350 				return -EFAULT;
4351 		} else if (input_len >= len + 1) {
4352 			/* ubuf can hold the string with NULL terminator */
4353 			if (copy_to_user(ubuf, buf, len + 1))
4354 				return -EFAULT;
4355 		} else {
4356 			/* ubuf cannot hold the string with NULL terminator,
4357 			 * do a partial copy with NULL terminator.
4358 			 */
4359 			char zero = '\0';
4360 
4361 			err = -ENOSPC;
4362 			if (copy_to_user(ubuf, buf, input_len - 1))
4363 				return -EFAULT;
4364 			if (put_user(zero, ubuf + input_len - 1))
4365 				return -EFAULT;
4366 		}
4367 	}
4368 
4369 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4370 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4371 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4372 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4373 		return -EFAULT;
4374 
4375 	return err;
4376 }
4377 
4378 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4379 
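/* BPF_TASK_FD_QUERY: given a pid and an fd in that task, report which
 * BPF program sits behind it: either a raw tracepoint link or a perf
 * event backed by a [k,u]probe or tracepoint program.
 */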
4380 static int bpf_task_fd_query(const union bpf_attr *attr,
4381 			     union bpf_attr __user *uattr)
4382 {
4383 	pid_t pid = attr->task_fd_query.pid;
4384 	u32 fd = attr->task_fd_query.fd;
4385 	const struct perf_event *event;
4386 	struct task_struct *task;
4387 	struct file *file;
4388 	int err;
4389 
4390 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4391 		return -EINVAL;
4392 
4393 	if (!capable(CAP_SYS_ADMIN))
4394 		return -EPERM;
4395 
4396 	if (attr->task_fd_query.flags != 0)
4397 		return -EINVAL;
4398 
4399 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4400 	if (!task)
4401 		return -ENOENT;
4402 
4403 	err = 0;
4404 	file = fget_task(task, fd);
4405 	put_task_struct(task);
4406 	if (!file)
4407 		return -EBADF;
4408 
4409 	if (file->f_op == &bpf_link_fops) {
4410 		struct bpf_link *link = file->private_data;
4411 
4412 		if (link->ops == &bpf_raw_tp_link_lops) {
4413 			struct bpf_raw_tp_link *raw_tp =
4414 				container_of(link, struct bpf_raw_tp_link, link);
4415 			struct bpf_raw_event_map *btp = raw_tp->btp;
4416 
4417 			err = bpf_task_fd_query_copy(attr, uattr,
4418 						     raw_tp->link.prog->aux->id,
4419 						     BPF_FD_TYPE_RAW_TRACEPOINT,
4420 						     btp->tp->name, 0, 0);
4421 			goto put_file;
4422 		}
4423 		goto out_not_supp;
4424 	}
4425 
4426 	event = perf_get_event(file);
4427 	if (!IS_ERR(event)) {
4428 		u64 probe_offset, probe_addr;
4429 		u32 prog_id, fd_type;
4430 		const char *buf;
4431 
4432 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4433 					      &buf, &probe_offset,
4434 					      &probe_addr);
4435 		if (!err)
4436 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4437 						     fd_type, buf,
4438 						     probe_offset,
4439 						     probe_addr);
4440 		goto put_file;
4441 	}
4442 
4443 out_not_supp:
4444 	err = -ENOTSUPP;
4445 put_file:
4446 	fput(file);
4447 	return err;
4448 }
4449 
4450 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
4451 
4452 #define BPF_DO_BATCH(fn)			\
4453 	do {					\
4454 		if (!fn) {			\
4455 			err = -ENOTSUPP;	\
4456 			goto err_put;		\
4457 		}				\
4458 		err = fn(map, attr, uattr);	\
4459 	} while (0)
4460 
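/* Common entry point for the BPF_MAP_*_BATCH commands: take a reference
 * via the map fd, check read/write permission for the requested
 * operation and dispatch to the map's batch callback, if implemented.
 */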
4461 static int bpf_map_do_batch(const union bpf_attr *attr,
4462 			    union bpf_attr __user *uattr,
4463 			    int cmd)
4464 {
4465 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
4466 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4467 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4468 	struct bpf_map *map;
4469 	int err, ufd;
4470 	struct fd f;
4471 
4472 	if (CHECK_ATTR(BPF_MAP_BATCH))
4473 		return -EINVAL;
4474 
4475 	ufd = attr->batch.map_fd;
4476 	f = fdget(ufd);
4477 	map = __bpf_map_get(f);
4478 	if (IS_ERR(map))
4479 		return PTR_ERR(map);
4480 	if (has_write)
4481 		bpf_map_write_active_inc(map);
4482 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4483 		err = -EPERM;
4484 		goto err_put;
4485 	}
4486 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4487 		err = -EPERM;
4488 		goto err_put;
4489 	}
4490 
4491 	if (cmd == BPF_MAP_LOOKUP_BATCH)
4492 		BPF_DO_BATCH(map->ops->map_lookup_batch);
4493 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4494 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4495 	else if (cmd == BPF_MAP_UPDATE_BATCH)
4496 		BPF_DO_BATCH(map->ops->map_update_batch);
4497 	else
4498 		BPF_DO_BATCH(map->ops->map_delete_batch);
4499 err_put:
4500 	if (has_write)
4501 		bpf_map_write_active_dec(map);
4502 	fdput(f);
4503 	return err;
4504 }
4505 
4506 #define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
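/* BPF_LINK_CREATE: check that the requested attach type is compatible
 * with the program type, then hand off to the subsystem-specific link
 * attach helper (cgroup, tracing, netns, XDP, perf event, kprobe_multi).
 */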
4507 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4508 {
4509 	enum bpf_prog_type ptype;
4510 	struct bpf_prog *prog;
4511 	int ret;
4512 
4513 	if (CHECK_ATTR(BPF_LINK_CREATE))
4514 		return -EINVAL;
4515 
4516 	prog = bpf_prog_get(attr->link_create.prog_fd);
4517 	if (IS_ERR(prog))
4518 		return PTR_ERR(prog);
4519 
4520 	ret = bpf_prog_attach_check_attach_type(prog,
4521 						attr->link_create.attach_type);
4522 	if (ret)
4523 		goto out;
4524 
4525 	switch (prog->type) {
4526 	case BPF_PROG_TYPE_EXT:
4527 		break;
4528 	case BPF_PROG_TYPE_PERF_EVENT:
4529 	case BPF_PROG_TYPE_TRACEPOINT:
4530 		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
4531 			ret = -EINVAL;
4532 			goto out;
4533 		}
4534 		break;
4535 	case BPF_PROG_TYPE_KPROBE:
4536 		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
4537 		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
4538 			ret = -EINVAL;
4539 			goto out;
4540 		}
4541 		break;
4542 	default:
4543 		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4544 		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4545 			ret = -EINVAL;
4546 			goto out;
4547 		}
4548 		break;
4549 	}
4550 
4551 	switch (prog->type) {
4552 	case BPF_PROG_TYPE_CGROUP_SKB:
4553 	case BPF_PROG_TYPE_CGROUP_SOCK:
4554 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4555 	case BPF_PROG_TYPE_SOCK_OPS:
4556 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4557 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4558 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4559 		ret = cgroup_bpf_link_attach(attr, prog);
4560 		break;
4561 	case BPF_PROG_TYPE_EXT:
4562 		ret = bpf_tracing_prog_attach(prog,
4563 					      attr->link_create.target_fd,
4564 					      attr->link_create.target_btf_id,
4565 					      attr->link_create.tracing.cookie);
4566 		break;
4567 	case BPF_PROG_TYPE_LSM:
4568 	case BPF_PROG_TYPE_TRACING:
4569 		if (attr->link_create.attach_type != prog->expected_attach_type) {
4570 			ret = -EINVAL;
4571 			goto out;
4572 		}
4573 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
4574 			ret = bpf_raw_tp_link_attach(prog, NULL);
4575 		else if (prog->expected_attach_type == BPF_TRACE_ITER)
4576 			ret = bpf_iter_link_attach(attr, uattr, prog);
4577 		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
4578 			ret = cgroup_bpf_link_attach(attr, prog);
4579 		else
4580 			ret = bpf_tracing_prog_attach(prog,
4581 						      attr->link_create.target_fd,
4582 						      attr->link_create.target_btf_id,
4583 						      attr->link_create.tracing.cookie);
4584 		break;
4585 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4586 	case BPF_PROG_TYPE_SK_LOOKUP:
4587 		ret = netns_bpf_link_create(attr, prog);
4588 		break;
4589 #ifdef CONFIG_NET
4590 	case BPF_PROG_TYPE_XDP:
4591 		ret = bpf_xdp_link_attach(attr, prog);
4592 		break;
4593 #endif
4594 	case BPF_PROG_TYPE_PERF_EVENT:
4595 	case BPF_PROG_TYPE_TRACEPOINT:
4596 		ret = bpf_perf_link_attach(attr, prog);
4597 		break;
4598 	case BPF_PROG_TYPE_KPROBE:
4599 		if (attr->link_create.attach_type == BPF_PERF_EVENT)
4600 			ret = bpf_perf_link_attach(attr, prog);
4601 		else
4602 			ret = bpf_kprobe_multi_link_attach(attr, prog);
4603 		break;
4604 	default:
4605 		ret = -EINVAL;
4606 	}
4607 
4608 out:
4609 	if (ret < 0)
4610 		bpf_prog_put(prog);
4611 	return ret;
4612 }
4613 
4614 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4615 
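/* BPF_LINK_UPDATE: swap the BPF program behind an existing link while
 * keeping the attachment itself.  With BPF_F_REPLACE the caller also
 * passes old_prog_fd and the link's update_prog callback is expected to
 * fail unless that program is the one currently attached.
 */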
4616 static int link_update(union bpf_attr *attr)
4617 {
4618 	struct bpf_prog *old_prog = NULL, *new_prog;
4619 	struct bpf_link *link;
4620 	u32 flags;
4621 	int ret;
4622 
4623 	if (CHECK_ATTR(BPF_LINK_UPDATE))
4624 		return -EINVAL;
4625 
4626 	flags = attr->link_update.flags;
4627 	if (flags & ~BPF_F_REPLACE)
4628 		return -EINVAL;
4629 
4630 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
4631 	if (IS_ERR(link))
4632 		return PTR_ERR(link);
4633 
4634 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4635 	if (IS_ERR(new_prog)) {
4636 		ret = PTR_ERR(new_prog);
4637 		goto out_put_link;
4638 	}
4639 
4640 	if (flags & BPF_F_REPLACE) {
4641 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4642 		if (IS_ERR(old_prog)) {
4643 			ret = PTR_ERR(old_prog);
4644 			old_prog = NULL;
4645 			goto out_put_progs;
4646 		}
4647 	} else if (attr->link_update.old_prog_fd) {
4648 		ret = -EINVAL;
4649 		goto out_put_progs;
4650 	}
4651 
4652 	if (link->ops->update_prog)
4653 		ret = link->ops->update_prog(link, new_prog, old_prog);
4654 	else
4655 		ret = -EINVAL;
4656 
4657 out_put_progs:
4658 	if (old_prog)
4659 		bpf_prog_put(old_prog);
4660 	if (ret)
4661 		bpf_prog_put(new_prog);
4662 out_put_link:
4663 	bpf_link_put(link);
4664 	return ret;
4665 }
4666 
4667 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4668 
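/* BPF_LINK_DETACH: force-detach a link from its hook while leaving the
 * link fd itself valid; the link simply becomes defunct.
 */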
4669 static int link_detach(union bpf_attr *attr)
4670 {
4671 	struct bpf_link *link;
4672 	int ret;
4673 
4674 	if (CHECK_ATTR(BPF_LINK_DETACH))
4675 		return -EINVAL;
4676 
4677 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4678 	if (IS_ERR(link))
4679 		return PTR_ERR(link);
4680 
4681 	if (link->ops->detach)
4682 		ret = link->ops->detach(link);
4683 	else
4684 		ret = -EOPNOTSUPP;
4685 
4686 	bpf_link_put(link);
4687 	return ret;
4688 }
4689 
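/* Take a reference only if the link is still alive (refcnt != 0), so
 * that lookups under link_idr_lock cannot resurrect a dying link.
 */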
4690 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4691 {
4692 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4693 }
4694 
4695 struct bpf_link *bpf_link_by_id(u32 id)
4696 {
4697 	struct bpf_link *link;
4698 
4699 	if (!id)
4700 		return ERR_PTR(-ENOENT);
4701 
4702 	spin_lock_bh(&link_idr_lock);
4703 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4704 	link = idr_find(&link_idr, id);
4705 	if (link) {
4706 		if (link->id)
4707 			link = bpf_link_inc_not_zero(link);
4708 		else
4709 			link = ERR_PTR(-EAGAIN);
4710 	} else {
4711 		link = ERR_PTR(-ENOENT);
4712 	}
4713 	spin_unlock_bh(&link_idr_lock);
4714 	return link;
4715 }
4716 
4717 struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
4718 {
4719 	struct bpf_link *link;
4720 
4721 	spin_lock_bh(&link_idr_lock);
4722 again:
4723 	link = idr_get_next(&link_idr, id);
4724 	if (link) {
4725 		link = bpf_link_inc_not_zero(link);
4726 		if (IS_ERR(link)) {
4727 			(*id)++;
4728 			goto again;
4729 		}
4730 	}
4731 	spin_unlock_bh(&link_idr_lock);
4732 
4733 	return link;
4734 }
4735 
4736 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4737 
4738 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4739 {
4740 	struct bpf_link *link;
4741 	u32 id = attr->link_id;
4742 	int fd;
4743 
4744 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4745 		return -EINVAL;
4746 
4747 	if (!capable(CAP_SYS_ADMIN))
4748 		return -EPERM;
4749 
4750 	link = bpf_link_by_id(id);
4751 	if (IS_ERR(link))
4752 		return PTR_ERR(link);
4753 
4754 	fd = bpf_link_new_fd(link);
4755 	if (fd < 0)
4756 		bpf_link_put(link);
4757 
4758 	return fd;
4759 }
4760 
4761 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4762 
4763 static int bpf_stats_release(struct inode *inode, struct file *file)
4764 {
4765 	mutex_lock(&bpf_stats_enabled_mutex);
4766 	static_key_slow_dec(&bpf_stats_enabled_key.key);
4767 	mutex_unlock(&bpf_stats_enabled_mutex);
4768 	return 0;
4769 }
4770 
4771 static const struct file_operations bpf_stats_fops = {
4772 	.release = bpf_stats_release,
4773 };
4774 
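/* BPF_ENABLE_STATS(BPF_STATS_RUN_TIME) backend: hand out an anonymous
 * "bpf-stats" fd; run-time statistics collection stays enabled, via the
 * bpf_stats_enabled static key, for as long as such an fd is open.
 */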
4775 static int bpf_enable_runtime_stats(void)
4776 {
4777 	int fd;
4778 
4779 	mutex_lock(&bpf_stats_enabled_mutex);
4780 
4781 	/* Set a very high limit to avoid overflow */
4782 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4783 		mutex_unlock(&bpf_stats_enabled_mutex);
4784 		return -EBUSY;
4785 	}
4786 
4787 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4788 	if (fd >= 0)
4789 		static_key_slow_inc(&bpf_stats_enabled_key.key);
4790 
4791 	mutex_unlock(&bpf_stats_enabled_mutex);
4792 	return fd;
4793 }
4794 
4795 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4796 
4797 static int bpf_enable_stats(union bpf_attr *attr)
4798 {
4799 
4800 	if (CHECK_ATTR(BPF_ENABLE_STATS))
4801 		return -EINVAL;
4802 
4803 	if (!capable(CAP_SYS_ADMIN))
4804 		return -EPERM;
4805 
4806 	switch (attr->enable_stats.type) {
4807 	case BPF_STATS_RUN_TIME:
4808 		return bpf_enable_runtime_stats();
4809 	default:
4810 		break;
4811 	}
4812 	return -EINVAL;
4813 }
4814 
4815 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4816 
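/* BPF_ITER_CREATE: turn an attached bpf_iter link into a readable fd;
 * reading that fd runs the iterator program over the target objects.
 */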
4817 static int bpf_iter_create(union bpf_attr *attr)
4818 {
4819 	struct bpf_link *link;
4820 	int err;
4821 
4822 	if (CHECK_ATTR(BPF_ITER_CREATE))
4823 		return -EINVAL;
4824 
4825 	if (attr->iter_create.flags)
4826 		return -EINVAL;
4827 
4828 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4829 	if (IS_ERR(link))
4830 		return PTR_ERR(link);
4831 
4832 	err = bpf_iter_new_fd(link);
4833 	bpf_link_put(link);
4834 
4835 	return err;
4836 }
4837 
4838 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4839 
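/* BPF_PROG_BIND_MAP: add a map to a program's used_maps array so the map
 * stays alive for the program's lifetime even if the program text never
 * references it directly (e.g. maps holding only metadata).
 */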
4840 static int bpf_prog_bind_map(union bpf_attr *attr)
4841 {
4842 	struct bpf_prog *prog;
4843 	struct bpf_map *map;
4844 	struct bpf_map **used_maps_old, **used_maps_new;
4845 	int i, ret = 0;
4846 
4847 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4848 		return -EINVAL;
4849 
4850 	if (attr->prog_bind_map.flags)
4851 		return -EINVAL;
4852 
4853 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4854 	if (IS_ERR(prog))
4855 		return PTR_ERR(prog);
4856 
4857 	map = bpf_map_get(attr->prog_bind_map.map_fd);
4858 	if (IS_ERR(map)) {
4859 		ret = PTR_ERR(map);
4860 		goto out_prog_put;
4861 	}
4862 
4863 	mutex_lock(&prog->aux->used_maps_mutex);
4864 
4865 	used_maps_old = prog->aux->used_maps;
4866 
4867 	for (i = 0; i < prog->aux->used_map_cnt; i++)
4868 		if (used_maps_old[i] == map) {
4869 			bpf_map_put(map);
4870 			goto out_unlock;
4871 		}
4872 
4873 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4874 				      sizeof(used_maps_new[0]),
4875 				      GFP_KERNEL);
4876 	if (!used_maps_new) {
4877 		ret = -ENOMEM;
4878 		goto out_unlock;
4879 	}
4880 
4881 	memcpy(used_maps_new, used_maps_old,
4882 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4883 	used_maps_new[prog->aux->used_map_cnt] = map;
4884 
4885 	prog->aux->used_map_cnt++;
4886 	prog->aux->used_maps = used_maps_new;
4887 
4888 	kfree(used_maps_old);
4889 
4890 out_unlock:
4891 	mutex_unlock(&prog->aux->used_maps_mutex);
4892 
4893 	if (ret)
4894 		bpf_map_put(map);
4895 out_prog_put:
4896 	bpf_prog_put(prog);
4897 	return ret;
4898 }
4899 
4900 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
4901 {
4902 	union bpf_attr attr;
4903 	bool capable;
4904 	int err;
4905 
4906 	capable = bpf_capable() || !sysctl_unprivileged_bpf_disabled;
4907 
4908 	/* Intent here is for unprivileged_bpf_disabled to block key object
4909 	 * creation commands for unprivileged users; other actions depend
4910 	 * on fd availability and access to bpffs, so are dependent on
4911 	 * object creation success.  Capabilities are later verified for
4912 	 * operations such as load and map create, so even with unprivileged
4913 	 * BPF disabled, capability checks are still carried out for these
4914 	 * and other operations.
4915 	 */
4916 	if (!capable &&
4917 	    (cmd == BPF_MAP_CREATE || cmd == BPF_PROG_LOAD))
4918 		return -EPERM;
4919 
4920 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4921 	if (err)
4922 		return err;
4923 	size = min_t(u32, size, sizeof(attr));
4924 
4925 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4926 	memset(&attr, 0, sizeof(attr));
4927 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
4928 		return -EFAULT;
4929 
4930 	err = security_bpf(cmd, &attr, size);
4931 	if (err < 0)
4932 		return err;
4933 
4934 	switch (cmd) {
4935 	case BPF_MAP_CREATE:
4936 		err = map_create(&attr);
4937 		break;
4938 	case BPF_MAP_LOOKUP_ELEM:
4939 		err = map_lookup_elem(&attr);
4940 		break;
4941 	case BPF_MAP_UPDATE_ELEM:
4942 		err = map_update_elem(&attr, uattr);
4943 		break;
4944 	case BPF_MAP_DELETE_ELEM:
4945 		err = map_delete_elem(&attr);
4946 		break;
4947 	case BPF_MAP_GET_NEXT_KEY:
4948 		err = map_get_next_key(&attr);
4949 		break;
4950 	case BPF_MAP_FREEZE:
4951 		err = map_freeze(&attr);
4952 		break;
4953 	case BPF_PROG_LOAD:
4954 		err = bpf_prog_load(&attr, uattr);
4955 		break;
4956 	case BPF_OBJ_PIN:
4957 		err = bpf_obj_pin(&attr);
4958 		break;
4959 	case BPF_OBJ_GET:
4960 		err = bpf_obj_get(&attr);
4961 		break;
4962 	case BPF_PROG_ATTACH:
4963 		err = bpf_prog_attach(&attr);
4964 		break;
4965 	case BPF_PROG_DETACH:
4966 		err = bpf_prog_detach(&attr);
4967 		break;
4968 	case BPF_PROG_QUERY:
4969 		err = bpf_prog_query(&attr, uattr.user);
4970 		break;
4971 	case BPF_PROG_TEST_RUN:
4972 		err = bpf_prog_test_run(&attr, uattr.user);
4973 		break;
4974 	case BPF_PROG_GET_NEXT_ID:
4975 		err = bpf_obj_get_next_id(&attr, uattr.user,
4976 					  &prog_idr, &prog_idr_lock);
4977 		break;
4978 	case BPF_MAP_GET_NEXT_ID:
4979 		err = bpf_obj_get_next_id(&attr, uattr.user,
4980 					  &map_idr, &map_idr_lock);
4981 		break;
4982 	case BPF_BTF_GET_NEXT_ID:
4983 		err = bpf_obj_get_next_id(&attr, uattr.user,
4984 					  &btf_idr, &btf_idr_lock);
4985 		break;
4986 	case BPF_PROG_GET_FD_BY_ID:
4987 		err = bpf_prog_get_fd_by_id(&attr);
4988 		break;
4989 	case BPF_MAP_GET_FD_BY_ID:
4990 		err = bpf_map_get_fd_by_id(&attr);
4991 		break;
4992 	case BPF_OBJ_GET_INFO_BY_FD:
4993 		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
4994 		break;
4995 	case BPF_RAW_TRACEPOINT_OPEN:
4996 		err = bpf_raw_tracepoint_open(&attr);
4997 		break;
4998 	case BPF_BTF_LOAD:
4999 		err = bpf_btf_load(&attr, uattr);
5000 		break;
5001 	case BPF_BTF_GET_FD_BY_ID:
5002 		err = bpf_btf_get_fd_by_id(&attr);
5003 		break;
5004 	case BPF_TASK_FD_QUERY:
5005 		err = bpf_task_fd_query(&attr, uattr.user);
5006 		break;
5007 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5008 		err = map_lookup_and_delete_elem(&attr);
5009 		break;
5010 	case BPF_MAP_LOOKUP_BATCH:
5011 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5012 		break;
5013 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5014 		err = bpf_map_do_batch(&attr, uattr.user,
5015 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5016 		break;
5017 	case BPF_MAP_UPDATE_BATCH:
5018 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5019 		break;
5020 	case BPF_MAP_DELETE_BATCH:
5021 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5022 		break;
5023 	case BPF_LINK_CREATE:
5024 		err = link_create(&attr, uattr);
5025 		break;
5026 	case BPF_LINK_UPDATE:
5027 		err = link_update(&attr);
5028 		break;
5029 	case BPF_LINK_GET_FD_BY_ID:
5030 		err = bpf_link_get_fd_by_id(&attr);
5031 		break;
5032 	case BPF_LINK_GET_NEXT_ID:
5033 		err = bpf_obj_get_next_id(&attr, uattr.user,
5034 					  &link_idr, &link_idr_lock);
5035 		break;
5036 	case BPF_ENABLE_STATS:
5037 		err = bpf_enable_stats(&attr);
5038 		break;
5039 	case BPF_ITER_CREATE:
5040 		err = bpf_iter_create(&attr);
5041 		break;
5042 	case BPF_LINK_DETACH:
5043 		err = link_detach(&attr);
5044 		break;
5045 	case BPF_PROG_BIND_MAP:
5046 		err = bpf_prog_bind_map(&attr);
5047 		break;
5048 	default:
5049 		err = -EINVAL;
5050 		break;
5051 	}
5052 
5053 	return err;
5054 }
5055 
5056 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5057 {
5058 	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5059 }
5060 
5061 static bool syscall_prog_is_valid_access(int off, int size,
5062 					 enum bpf_access_type type,
5063 					 const struct bpf_prog *prog,
5064 					 struct bpf_insn_access_aux *info)
5065 {
5066 	if (off < 0 || off >= U16_MAX)
5067 		return false;
5068 	if (off % size != 0)
5069 		return false;
5070 	return true;
5071 }
5072 
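/* bpf_sys_bpf() helper for BPF_PROG_TYPE_SYSCALL programs: re-enter
 * __sys_bpf() with a kernel-space attr for a restricted set of commands.
 * This is what allows "loader" programs, e.g. light skeletons, to create
 * maps and load other programs from inside the kernel.
 */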
5073 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5074 {
5075 	struct bpf_prog * __maybe_unused prog;
5076 	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5077 
5078 	switch (cmd) {
5079 	case BPF_MAP_CREATE:
5080 	case BPF_MAP_UPDATE_ELEM:
5081 	case BPF_MAP_FREEZE:
5082 	case BPF_PROG_LOAD:
5083 	case BPF_BTF_LOAD:
5084 	case BPF_LINK_CREATE:
5085 	case BPF_RAW_TRACEPOINT_OPEN:
5086 		break;
5087 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5088 	case BPF_PROG_TEST_RUN:
5089 		if (attr->test.data_in || attr->test.data_out ||
5090 		    attr->test.ctx_out || attr->test.duration ||
5091 		    attr->test.repeat || attr->test.flags)
5092 			return -EINVAL;
5093 
5094 		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5095 		if (IS_ERR(prog))
5096 			return PTR_ERR(prog);
5097 
5098 		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5099 		    attr->test.ctx_size_in > U16_MAX) {
5100 			bpf_prog_put(prog);
5101 			return -EINVAL;
5102 		}
5103 
5104 		run_ctx.bpf_cookie = 0;
5105 		run_ctx.saved_run_ctx = NULL;
5106 		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
5107 			/* recursion detected */
5108 			bpf_prog_put(prog);
5109 			return -EBUSY;
5110 		}
5111 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5112 		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
5113 		bpf_prog_put(prog);
5114 		return 0;
5115 #endif
5116 	default:
5117 		return -EINVAL;
5118 	}
5119 	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5120 }
5121 EXPORT_SYMBOL(bpf_sys_bpf);
5122 
5123 static const struct bpf_func_proto bpf_sys_bpf_proto = {
5124 	.func		= bpf_sys_bpf,
5125 	.gpl_only	= false,
5126 	.ret_type	= RET_INTEGER,
5127 	.arg1_type	= ARG_ANYTHING,
5128 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
5129 	.arg3_type	= ARG_CONST_SIZE,
5130 };
5131 
5132 const struct bpf_func_proto * __weak
5133 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5134 {
5135 	return bpf_base_func_proto(func_id);
5136 }
5137 
5138 BPF_CALL_1(bpf_sys_close, u32, fd)
5139 {
5140 	/* When a bpf program calls this helper there must not be
5141 	 * an outstanding fdget() without a matching, completed fdput().
5142 	 * This helper is only allowed in the following callchain:
5143 	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5144 	 */
5145 	return close_fd(fd);
5146 }
5147 
5148 static const struct bpf_func_proto bpf_sys_close_proto = {
5149 	.func		= bpf_sys_close,
5150 	.gpl_only	= false,
5151 	.ret_type	= RET_INTEGER,
5152 	.arg1_type	= ARG_ANYTHING,
5153 };
5154 
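/* bpf_kallsyms_lookup_name() helper: resolve a NUL-terminated symbol
 * name to its kernel address, gated on bpf_dump_raw_ok() so that callers
 * who may not see kernel pointers cannot use it to leak addresses.
 */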
5155 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5156 {
5157 	if (flags)
5158 		return -EINVAL;
5159 
5160 	if (name_sz <= 1 || name[name_sz - 1])
5161 		return -EINVAL;
5162 
5163 	if (!bpf_dump_raw_ok(current_cred()))
5164 		return -EPERM;
5165 
5166 	*res = kallsyms_lookup_name(name);
5167 	return *res ? 0 : -ENOENT;
5168 }
5169 
5170 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5171 	.func		= bpf_kallsyms_lookup_name,
5172 	.gpl_only	= false,
5173 	.ret_type	= RET_INTEGER,
5174 	.arg1_type	= ARG_PTR_TO_MEM,
5175 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
5176 	.arg3_type	= ARG_ANYTHING,
5177 	.arg4_type	= ARG_PTR_TO_LONG,
5178 };
5179 
5180 static const struct bpf_func_proto *
5181 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5182 {
5183 	switch (func_id) {
5184 	case BPF_FUNC_sys_bpf:
5185 		return &bpf_sys_bpf_proto;
5186 	case BPF_FUNC_btf_find_by_name_kind:
5187 		return &bpf_btf_find_by_name_kind_proto;
5188 	case BPF_FUNC_sys_close:
5189 		return &bpf_sys_close_proto;
5190 	case BPF_FUNC_kallsyms_lookup_name:
5191 		return &bpf_kallsyms_lookup_name_proto;
5192 	default:
5193 		return tracing_prog_func_proto(func_id, prog);
5194 	}
5195 }
5196 
5197 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5198 	.get_func_proto  = syscall_prog_func_proto,
5199 	.is_valid_access = syscall_prog_is_valid_access,
5200 };
5201 
5202 const struct bpf_prog_ops bpf_syscall_prog_ops = {
5203 	.test_run = bpf_prog_test_run_syscall,
5204 };
5205 
5206 #ifdef CONFIG_SYSCTL
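/* sysctl handler for kernel.bpf_stats_enabled: flip the static key when
 * the written value changes, serialized against the BPF_ENABLE_STATS fd
 * path by bpf_stats_enabled_mutex.
 */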
5207 static int bpf_stats_handler(struct ctl_table *table, int write,
5208 			     void *buffer, size_t *lenp, loff_t *ppos)
5209 {
5210 	struct static_key *key = (struct static_key *)table->data;
5211 	static int saved_val;
5212 	int val, ret;
5213 	struct ctl_table tmp = {
5214 		.data   = &val,
5215 		.maxlen = sizeof(val),
5216 		.mode   = table->mode,
5217 		.extra1 = SYSCTL_ZERO,
5218 		.extra2 = SYSCTL_ONE,
5219 	};
5220 
5221 	if (write && !capable(CAP_SYS_ADMIN))
5222 		return -EPERM;
5223 
5224 	mutex_lock(&bpf_stats_enabled_mutex);
5225 	val = saved_val;
5226 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5227 	if (write && !ret && val != saved_val) {
5228 		if (val)
5229 			static_key_slow_inc(key);
5230 		else
5231 			static_key_slow_dec(key);
5232 		saved_val = val;
5233 	}
5234 	mutex_unlock(&bpf_stats_enabled_mutex);
5235 	return ret;
5236 }
5237 
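/* Weak hook so that architecture code can react when the unprivileged
 * BPF sysctl changes, e.g. to warn about Spectre mitigation interactions.
 */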
5238 void __weak unpriv_ebpf_notify(int new_state)
5239 {
5240 }
5241 
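/* sysctl handler for kernel.unprivileged_bpf_disabled: 0 allows
 * unprivileged BPF, 1 disables it permanently (no other value can be
 * written afterwards), 2 disables it but still allows an admin to flip
 * the setting later.
 */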
5242 static int bpf_unpriv_handler(struct ctl_table *table, int write,
5243 			      void *buffer, size_t *lenp, loff_t *ppos)
5244 {
5245 	int ret, unpriv_enable = *(int *)table->data;
5246 	bool locked_state = unpriv_enable == 1;
5247 	struct ctl_table tmp = *table;
5248 
5249 	if (write && !capable(CAP_SYS_ADMIN))
5250 		return -EPERM;
5251 
5252 	tmp.data = &unpriv_enable;
5253 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5254 	if (write && !ret) {
5255 		if (locked_state && unpriv_enable != 1)
5256 			return -EPERM;
5257 		*(int *)table->data = unpriv_enable;
5258 	}
5259 
5260 	unpriv_ebpf_notify(unpriv_enable);
5261 
5262 	return ret;
5263 }
5264 
5265 static struct ctl_table bpf_syscall_table[] = {
5266 	{
5267 		.procname	= "unprivileged_bpf_disabled",
5268 		.data		= &sysctl_unprivileged_bpf_disabled,
5269 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
5270 		.mode		= 0644,
5271 		.proc_handler	= bpf_unpriv_handler,
5272 		.extra1		= SYSCTL_ZERO,
5273 		.extra2		= SYSCTL_TWO,
5274 	},
5275 	{
5276 		.procname	= "bpf_stats_enabled",
5277 		.data		= &bpf_stats_enabled_key.key,
5278 		.maxlen		= sizeof(bpf_stats_enabled_key),
5279 		.mode		= 0644,
5280 		.proc_handler	= bpf_stats_handler,
5281 	},
5282 	{ }
5283 };
5284 
5285 static int __init bpf_syscall_sysctl_init(void)
5286 {
5287 	register_sysctl_init("kernel", bpf_syscall_table);
5288 	return 0;
5289 }
5290 late_initcall(bpf_syscall_sysctl_init);
5291 #endif /* CONFIG_SYSCTL */
5292