xref: /openbmc/linux/kernel/bpf/syscall.c (revision 801b27e8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf-cgroup.h>
6 #include <linux/bpf_trace.h>
7 #include <linux/bpf_lirc.h>
8 #include <linux/bpf_verifier.h>
9 #include <linux/bsearch.h>
10 #include <linux/btf.h>
11 #include <linux/syscalls.h>
12 #include <linux/slab.h>
13 #include <linux/sched/signal.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmzone.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/fdtable.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/license.h>
21 #include <linux/filter.h>
22 #include <linux/kernel.h>
23 #include <linux/idr.h>
24 #include <linux/cred.h>
25 #include <linux/timekeeping.h>
26 #include <linux/ctype.h>
27 #include <linux/nospec.h>
28 #include <linux/audit.h>
29 #include <uapi/linux/btf.h>
30 #include <linux/pgtable.h>
31 #include <linux/bpf_lsm.h>
32 #include <linux/poll.h>
33 #include <linux/sort.h>
34 #include <linux/bpf-netns.h>
35 #include <linux/rcupdate_trace.h>
36 #include <linux/memcontrol.h>
37 #include <linux/trace_events.h>
38 #include <net/netfilter/nf_bpf_link.h>
39 
40 #include <net/tcx.h>
41 
42 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
43 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
44 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
45 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
46 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
47 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
48 			IS_FD_HASH(map))
49 
50 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
51 
52 DEFINE_PER_CPU(int, bpf_prog_active);
53 static DEFINE_IDR(prog_idr);
54 static DEFINE_SPINLOCK(prog_idr_lock);
55 static DEFINE_IDR(map_idr);
56 static DEFINE_SPINLOCK(map_idr_lock);
57 static DEFINE_IDR(link_idr);
58 static DEFINE_SPINLOCK(link_idr_lock);
59 
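/* When non-zero, unprivileged use of BPF is disabled: map_create() below
 * then requires bpf_capable(). Kernels built with
 * CONFIG_BPF_UNPRIV_DEFAULT_OFF default this to 2, all others to 0.
 */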
60 int sysctl_unprivileged_bpf_disabled __read_mostly =
61 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
62 
63 static const struct bpf_map_ops * const bpf_map_types[] = {
64 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
65 #define BPF_MAP_TYPE(_id, _ops) \
66 	[_id] = &_ops,
67 #define BPF_LINK_TYPE(_id, _name)
68 #include <linux/bpf_types.h>
69 #undef BPF_PROG_TYPE
70 #undef BPF_MAP_TYPE
71 #undef BPF_LINK_TYPE
72 };
73 
74 /*
75  * If we're handed a bigger struct than we know of, ensure all the unknown bits
76  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
77  * we don't know about yet.
78  *
79  * There is a ToCToU between this function call and the following
80  * copy_from_user() call. However, this is not a concern since this function
81  * only exists to future-proof against unknown trailing bits.
82  */
83 int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
84 			     size_t expected_size,
85 			     size_t actual_size)
86 {
87 	int res;
88 
89 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
90 		return -E2BIG;
91 
92 	if (actual_size <= expected_size)
93 		return 0;
94 
95 	if (uaddr.is_kernel)
96 		res = memchr_inv(uaddr.kernel + expected_size, 0,
97 				 actual_size - expected_size) == NULL;
98 	else
99 		res = check_zeroed_user(uaddr.user + expected_size,
100 					actual_size - expected_size);
101 	if (res < 0)
102 		return res;
103 	return res ? 0 : -E2BIG;
104 }
105 
106 const struct bpf_map_ops bpf_map_offload_ops = {
107 	.map_meta_equal = bpf_map_meta_equal,
108 	.map_alloc = bpf_map_offload_map_alloc,
109 	.map_free = bpf_map_offload_map_free,
110 	.map_check_btf = map_check_no_btf,
111 	.map_mem_usage = bpf_map_offload_map_mem_usage,
112 };
113 
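/* map->writecnt counts syscall-side writers: element update/delete commands
 * and writable mmap()s bump it for their duration. map_freeze() below
 * refuses to freeze a map while it is non-zero.
 */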
114 static void bpf_map_write_active_inc(struct bpf_map *map)
115 {
116 	atomic64_inc(&map->writecnt);
117 }
118 
119 static void bpf_map_write_active_dec(struct bpf_map *map)
120 {
121 	atomic64_dec(&map->writecnt);
122 }
123 
124 bool bpf_map_write_active(const struct bpf_map *map)
125 {
126 	return atomic64_read(&map->writecnt) != 0;
127 }
128 
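/* Size of the value buffer exchanged with user space: per-CPU maps expose
 * one 8-byte-aligned slot per possible CPU, fd-based maps exchange a u32
 * (user space passes an fd; lookups, where supported, return an id), and
 * everything else uses value_size as-is.
 */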
129 static u32 bpf_map_value_size(const struct bpf_map *map)
130 {
131 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
132 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
133 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
134 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
135 		return round_up(map->value_size, 8) * num_possible_cpus();
136 	else if (IS_FD_MAP(map))
137 		return sizeof(u32);
138 	else
139 		return  map->value_size;
140 }
141 
142 static void maybe_wait_bpf_programs(struct bpf_map *map)
143 {
144 	/* Wait for any running BPF programs to complete so that
145 	 * userspace, when we return to it, knows that all programs
146 	 * that could be running use the new map value.
147 	 */
148 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
149 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
150 		synchronize_rcu();
151 }
152 
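/* Syscall-side update path: some map types take the update entirely into
 * their own hands (offloaded, sockmap, struct_ops, ...); the rest run with
 * instrumentation disabled (bpf_prog_active) so that BPF programs attached
 * to kprobes/tracepoints cannot run on this CPU and recurse into the map
 * while it is being modified from the syscall.
 */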
153 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
154 				void *key, void *value, __u64 flags)
155 {
156 	int err;
157 
158 	/* Need to create a kthread, thus must support schedule */
159 	if (bpf_map_is_offloaded(map)) {
160 		return bpf_map_offload_update_elem(map, key, value, flags);
161 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
162 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
163 		return map->ops->map_update_elem(map, key, value, flags);
164 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
165 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
166 		return sock_map_update_elem_sys(map, key, value, flags);
167 	} else if (IS_FD_PROG_ARRAY(map)) {
168 		return bpf_fd_array_map_update_elem(map, map_file, key, value,
169 						    flags);
170 	}
171 
172 	bpf_disable_instrumentation();
173 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
174 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
175 		err = bpf_percpu_hash_update(map, key, value, flags);
176 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
177 		err = bpf_percpu_array_update(map, key, value, flags);
178 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
179 		err = bpf_percpu_cgroup_storage_update(map, key, value,
180 						       flags);
181 	} else if (IS_FD_ARRAY(map)) {
182 		rcu_read_lock();
183 		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
184 						   flags);
185 		rcu_read_unlock();
186 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
187 		rcu_read_lock();
188 		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
189 						  flags);
190 		rcu_read_unlock();
191 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
192 		/* rcu_read_lock() is not needed */
193 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
194 							 flags);
195 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
196 		   map->map_type == BPF_MAP_TYPE_STACK ||
197 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
198 		err = map->ops->map_push_elem(map, value, flags);
199 	} else {
200 		rcu_read_lock();
201 		err = map->ops->map_update_elem(map, key, value, flags);
202 		rcu_read_unlock();
203 	}
204 	bpf_enable_instrumentation();
205 	maybe_wait_bpf_programs(map);
206 
207 	return err;
208 }
209 
210 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
211 			      __u64 flags)
212 {
213 	void *ptr;
214 	int err;
215 
216 	if (bpf_map_is_offloaded(map))
217 		return bpf_map_offload_lookup_elem(map, key, value);
218 
219 	bpf_disable_instrumentation();
220 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
221 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
222 		err = bpf_percpu_hash_copy(map, key, value);
223 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
224 		err = bpf_percpu_array_copy(map, key, value);
225 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
226 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
227 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
228 		err = bpf_stackmap_copy(map, key, value);
229 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
230 		err = bpf_fd_array_map_lookup_elem(map, key, value);
231 	} else if (IS_FD_HASH(map)) {
232 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
233 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
234 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
235 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
236 		   map->map_type == BPF_MAP_TYPE_STACK ||
237 		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
238 		err = map->ops->map_peek_elem(map, value);
239 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
240 		/* struct_ops map requires directly updating "value" */
241 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
242 	} else {
243 		rcu_read_lock();
244 		if (map->ops->map_lookup_elem_sys_only)
245 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
246 		else
247 			ptr = map->ops->map_lookup_elem(map, key);
248 		if (IS_ERR(ptr)) {
249 			err = PTR_ERR(ptr);
250 		} else if (!ptr) {
251 			err = -ENOENT;
252 		} else {
253 			err = 0;
254 			if (flags & BPF_F_LOCK)
255 				/* lock 'ptr' and copy everything but lock */
256 				copy_map_value_locked(map, value, ptr, true);
257 			else
258 				copy_map_value(map, value, ptr);
259 			/* mask lock and timer, since value wasn't zero inited */
260 			check_and_init_map_value(map, value);
261 		}
262 		rcu_read_unlock();
263 	}
264 
265 	bpf_enable_instrumentation();
266 	maybe_wait_bpf_programs(map);
267 
268 	return err;
269 }
270 
271 /* Please do not use this function outside of the map creation path
272  * (e.g. in the map update path) without taking care to set the active
273  * memory cgroup (see bpf_map_kmalloc_node() for an example).
274  */
275 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
276 {
277 	/* We really just want to fail instead of triggering OOM killer
278 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
279 	 * which is used for lower order allocation requests.
280 	 *
281 	 * It has been observed that higher order allocation requests done by
282 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
283 	 * to reclaim memory from the page cache, thus we set
284 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
285 	 */
286 
287 	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
288 	unsigned int flags = 0;
289 	unsigned long align = 1;
290 	void *area;
291 
292 	if (size >= SIZE_MAX)
293 		return NULL;
294 
295 	/* kmalloc()'ed memory can't be mmap()'ed */
296 	if (mmapable) {
297 		BUG_ON(!PAGE_ALIGNED(size));
298 		align = SHMLBA;
299 		flags = VM_USERMAP;
300 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
301 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
302 				    numa_node);
303 		if (area != NULL)
304 			return area;
305 	}
306 
307 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
308 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
309 			flags, numa_node, __builtin_return_address(0));
310 }
311 
312 void *bpf_map_area_alloc(u64 size, int numa_node)
313 {
314 	return __bpf_map_area_alloc(size, numa_node, false);
315 }
316 
317 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
318 {
319 	return __bpf_map_area_alloc(size, numa_node, true);
320 }
321 
322 void bpf_map_area_free(void *area)
323 {
324 	kvfree(area);
325 }
326 
327 static u32 bpf_map_flags_retain_permanent(u32 flags)
328 {
329 	/* Some map creation flags are not tied to the map object but
330 	 * rather to the map fd instead, so they have no meaning upon
331 	 * map object inspection since multiple file descriptors with
332 	 * different (access) properties can exist here. Thus, given
333 	 * this has zero meaning for the map itself, let's clear these
334 	 * from here.
335 	 */
336 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
337 }
338 
339 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
340 {
341 	map->map_type = attr->map_type;
342 	map->key_size = attr->key_size;
343 	map->value_size = attr->value_size;
344 	map->max_entries = attr->max_entries;
345 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
346 	map->numa_node = bpf_map_attr_numa_node(attr);
347 	map->map_extra = attr->map_extra;
348 }
349 
350 static int bpf_map_alloc_id(struct bpf_map *map)
351 {
352 	int id;
353 
354 	idr_preload(GFP_KERNEL);
355 	spin_lock_bh(&map_idr_lock);
356 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
357 	if (id > 0)
358 		map->id = id;
359 	spin_unlock_bh(&map_idr_lock);
360 	idr_preload_end();
361 
362 	if (WARN_ON_ONCE(!id))
363 		return -ENOSPC;
364 
365 	return id > 0 ? 0 : id;
366 }
367 
368 void bpf_map_free_id(struct bpf_map *map)
369 {
370 	unsigned long flags;
371 
372 	/* Offloaded maps are removed from the IDR store when their device
373 	 * disappears - even if someone holds an fd to them they are unusable,
374 	 * the memory is gone, all ops will fail; they are simply waiting for
375 	 * refcnt to drop to be freed.
376 	 */
377 	if (!map->id)
378 		return;
379 
380 	spin_lock_irqsave(&map_idr_lock, flags);
381 
382 	idr_remove(&map_idr, map->id);
383 	map->id = 0;
384 
385 	spin_unlock_irqrestore(&map_idr_lock, flags);
386 }
387 
388 #ifdef CONFIG_MEMCG_KMEM
389 static void bpf_map_save_memcg(struct bpf_map *map)
390 {
391 	/* Currently if a map is created by a process belonging to the root
392 	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
393 	 * So we have to check map->objcg for being NULL each time it's
394 	 * being used.
395 	 */
396 	if (memcg_bpf_enabled())
397 		map->objcg = get_obj_cgroup_from_current();
398 }
399 
400 static void bpf_map_release_memcg(struct bpf_map *map)
401 {
402 	if (map->objcg)
403 		obj_cgroup_put(map->objcg);
404 }
405 
406 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
407 {
408 	if (map->objcg)
409 		return get_mem_cgroup_from_objcg(map->objcg);
410 
411 	return root_mem_cgroup;
412 }
413 
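/* The allocation helpers below temporarily switch the active memory cgroup
 * to the one saved at map creation time (bpf_map_save_memcg()), so that the
 * __GFP_ACCOUNT allocations are charged to the map's creator rather than to
 * whichever task happens to trigger them.
 */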
414 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
415 			   int node)
416 {
417 	struct mem_cgroup *memcg, *old_memcg;
418 	void *ptr;
419 
420 	memcg = bpf_map_get_memcg(map);
421 	old_memcg = set_active_memcg(memcg);
422 	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
423 	set_active_memcg(old_memcg);
424 	mem_cgroup_put(memcg);
425 
426 	return ptr;
427 }
428 
429 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
430 {
431 	struct mem_cgroup *memcg, *old_memcg;
432 	void *ptr;
433 
434 	memcg = bpf_map_get_memcg(map);
435 	old_memcg = set_active_memcg(memcg);
436 	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
437 	set_active_memcg(old_memcg);
438 	mem_cgroup_put(memcg);
439 
440 	return ptr;
441 }
442 
443 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
444 		       gfp_t flags)
445 {
446 	struct mem_cgroup *memcg, *old_memcg;
447 	void *ptr;
448 
449 	memcg = bpf_map_get_memcg(map);
450 	old_memcg = set_active_memcg(memcg);
451 	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
452 	set_active_memcg(old_memcg);
453 	mem_cgroup_put(memcg);
454 
455 	return ptr;
456 }
457 
458 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
459 				    size_t align, gfp_t flags)
460 {
461 	struct mem_cgroup *memcg, *old_memcg;
462 	void __percpu *ptr;
463 
464 	memcg = bpf_map_get_memcg(map);
465 	old_memcg = set_active_memcg(memcg);
466 	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
467 	set_active_memcg(old_memcg);
468 	mem_cgroup_put(memcg);
469 
470 	return ptr;
471 }
472 
473 #else
474 static void bpf_map_save_memcg(struct bpf_map *map)
475 {
476 }
477 
478 static void bpf_map_release_memcg(struct bpf_map *map)
479 {
480 }
481 #endif
482 
483 static int btf_field_cmp(const void *a, const void *b)
484 {
485 	const struct btf_field *f1 = a, *f2 = b;
486 
487 	if (f1->offset < f2->offset)
488 		return -1;
489 	else if (f1->offset > f2->offset)
490 		return 1;
491 	return 0;
492 }
493 
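/* rec->fields is kept sorted by offset, so a binary search with
 * btf_field_cmp() locates the field at @offset; @field_mask then filters
 * out field types the caller is not interested in.
 */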
494 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
495 				  u32 field_mask)
496 {
497 	struct btf_field *field;
498 
499 	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
500 		return NULL;
501 	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
502 	if (!field || !(field->type & field_mask))
503 		return NULL;
504 	return field;
505 }
506 
507 void btf_record_free(struct btf_record *rec)
508 {
509 	int i;
510 
511 	if (IS_ERR_OR_NULL(rec))
512 		return;
513 	for (i = 0; i < rec->cnt; i++) {
514 		switch (rec->fields[i].type) {
515 		case BPF_KPTR_UNREF:
516 		case BPF_KPTR_REF:
517 			if (rec->fields[i].kptr.module)
518 				module_put(rec->fields[i].kptr.module);
519 			btf_put(rec->fields[i].kptr.btf);
520 			break;
521 		case BPF_LIST_HEAD:
522 		case BPF_LIST_NODE:
523 		case BPF_RB_ROOT:
524 		case BPF_RB_NODE:
525 		case BPF_SPIN_LOCK:
526 		case BPF_TIMER:
527 		case BPF_REFCOUNT:
528 			/* Nothing to release */
529 			break;
530 		default:
531 			WARN_ON_ONCE(1);
532 			continue;
533 		}
534 	}
535 	kfree(rec);
536 }
537 
538 void bpf_map_free_record(struct bpf_map *map)
539 {
540 	btf_record_free(map->record);
541 	map->record = NULL;
542 }
543 
544 struct btf_record *btf_record_dup(const struct btf_record *rec)
545 {
546 	const struct btf_field *fields;
547 	struct btf_record *new_rec;
548 	int ret, size, i;
549 
550 	if (IS_ERR_OR_NULL(rec))
551 		return NULL;
552 	size = offsetof(struct btf_record, fields[rec->cnt]);
553 	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
554 	if (!new_rec)
555 		return ERR_PTR(-ENOMEM);
556 	/* Do a deep copy of the btf_record */
557 	fields = rec->fields;
558 	new_rec->cnt = 0;
559 	for (i = 0; i < rec->cnt; i++) {
560 		switch (fields[i].type) {
561 		case BPF_KPTR_UNREF:
562 		case BPF_KPTR_REF:
563 			btf_get(fields[i].kptr.btf);
564 			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
565 				ret = -ENXIO;
566 				goto free;
567 			}
568 			break;
569 		case BPF_LIST_HEAD:
570 		case BPF_LIST_NODE:
571 		case BPF_RB_ROOT:
572 		case BPF_RB_NODE:
573 		case BPF_SPIN_LOCK:
574 		case BPF_TIMER:
575 		case BPF_REFCOUNT:
576 			/* Nothing to acquire */
577 			break;
578 		default:
579 			ret = -EFAULT;
580 			WARN_ON_ONCE(1);
581 			goto free;
582 		}
583 		new_rec->cnt++;
584 	}
585 	return new_rec;
586 free:
587 	btf_record_free(new_rec);
588 	return ERR_PTR(ret);
589 }
590 
591 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
592 {
593 	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
594 	int size;
595 
596 	if (!a_has_fields && !b_has_fields)
597 		return true;
598 	if (a_has_fields != b_has_fields)
599 		return false;
600 	if (rec_a->cnt != rec_b->cnt)
601 		return false;
602 	size = offsetof(struct btf_record, fields[rec_a->cnt]);
603 	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
604 	 * members are zeroed out. So memcmp is safe to do without worrying
605 	 * about padding/unused fields.
606 	 *
607 	 * While spin_lock, timer, and kptr have no relation to map BTF,
608 	 * list_head metadata is specific to map BTF, the btf and value_rec
609 	 * members in particular. btf is the map BTF, while value_rec points to
610 	 * btf_record in that map BTF.
611 	 *
612 	 * So while by default, we don't rely on the map BTF (which the records
613 	 * were parsed from) matching for both records, which is not backwards
614 	 * compatible, in case list_head is part of it, we implicitly rely on
615 	 * that by way of depending on memcmp succeeding for it.
616 	 */
617 	return !memcmp(rec_a, rec_b, size);
618 }
619 
620 void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
621 {
622 	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
623 		return;
624 	bpf_timer_cancel_and_free(obj + rec->timer_off);
625 }
626 
627 extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);
628 
629 void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
630 {
631 	const struct btf_field *fields;
632 	int i;
633 
634 	if (IS_ERR_OR_NULL(rec))
635 		return;
636 	fields = rec->fields;
637 	for (i = 0; i < rec->cnt; i++) {
638 		struct btf_struct_meta *pointee_struct_meta;
639 		const struct btf_field *field = &fields[i];
640 		void *field_ptr = obj + field->offset;
641 		void *xchgd_field;
642 
643 		switch (fields[i].type) {
644 		case BPF_SPIN_LOCK:
645 			break;
646 		case BPF_TIMER:
647 			bpf_timer_cancel_and_free(field_ptr);
648 			break;
649 		case BPF_KPTR_UNREF:
650 			WRITE_ONCE(*(u64 *)field_ptr, 0);
651 			break;
652 		case BPF_KPTR_REF:
653 			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
654 			if (!xchgd_field)
655 				break;
656 
657 			if (!btf_is_kernel(field->kptr.btf)) {
658 				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
659 									   field->kptr.btf_id);
660 				WARN_ON_ONCE(!pointee_struct_meta);
661 				migrate_disable();
662 				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
663 								 pointee_struct_meta->record :
664 								 NULL);
665 				migrate_enable();
666 			} else {
667 				field->kptr.dtor(xchgd_field);
668 			}
669 			break;
670 		case BPF_LIST_HEAD:
671 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
672 				continue;
673 			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
674 			break;
675 		case BPF_RB_ROOT:
676 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
677 				continue;
678 			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
679 			break;
680 		case BPF_LIST_NODE:
681 		case BPF_RB_NODE:
682 		case BPF_REFCOUNT:
683 			break;
684 		default:
685 			WARN_ON_ONCE(1);
686 			continue;
687 		}
688 	}
689 }
690 
691 /* called from workqueue */
692 static void bpf_map_free_deferred(struct work_struct *work)
693 {
694 	struct bpf_map *map = container_of(work, struct bpf_map, work);
695 	struct btf_record *rec = map->record;
696 
697 	security_bpf_map_free(map);
698 	bpf_map_release_memcg(map);
699 	/* implementation dependent freeing */
700 	map->ops->map_free(map);
701 	/* Delay freeing of the btf_record for maps, as the map_free
702 	 * callback usually needs access to it. It is better to do it here
703 	 * than require each callback to do the free itself manually.
704 	 *
705 	 * Note that the btf_record stashed in map->inner_map_meta->record was
706 	 * already freed using the map_free callback in the map-in-map case, which
707 	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
708 	 * template bpf_map struct used during verification.
709 	 */
710 	btf_record_free(rec);
711 }
712 
713 static void bpf_map_put_uref(struct bpf_map *map)
714 {
715 	if (atomic64_dec_and_test(&map->usercnt)) {
716 		if (map->ops->map_release_uref)
717 			map->ops->map_release_uref(map);
718 	}
719 }
720 
721 /* decrement map refcnt and schedule it for freeing via workqueue
722  * (underlying map implementation ops->map_free() might sleep)
723  */
724 void bpf_map_put(struct bpf_map *map)
725 {
726 	if (atomic64_dec_and_test(&map->refcnt)) {
727 		/* bpf_map_free_id() must be called first */
728 		bpf_map_free_id(map);
729 		btf_put(map->btf);
730 		INIT_WORK(&map->work, bpf_map_free_deferred);
731 		/* Avoid spawning kworkers, since they all might contend
732 		 * for the same mutex like slab_mutex.
733 		 */
734 		queue_work(system_unbound_wq, &map->work);
735 	}
736 }
737 EXPORT_SYMBOL_GPL(bpf_map_put);
738 
739 void bpf_map_put_with_uref(struct bpf_map *map)
740 {
741 	bpf_map_put_uref(map);
742 	bpf_map_put(map);
743 }
744 
745 static int bpf_map_release(struct inode *inode, struct file *filp)
746 {
747 	struct bpf_map *map = filp->private_data;
748 
749 	if (map->ops->map_release)
750 		map->ops->map_release(map, filp);
751 
752 	bpf_map_put_with_uref(map);
753 	return 0;
754 }
755 
756 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
757 {
758 	fmode_t mode = f.file->f_mode;
759 
760 	/* Our file permissions may have been overridden by global
761 	 * map permissions on the syscall side.
762 	 */
763 	if (READ_ONCE(map->frozen))
764 		mode &= ~FMODE_CAN_WRITE;
765 	return mode;
766 }
767 
768 #ifdef CONFIG_PROC_FS
769 /* Show the memory usage of a bpf map */
770 static u64 bpf_map_memory_usage(const struct bpf_map *map)
771 {
772 	return map->ops->map_mem_usage(map);
773 }
774 
775 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
776 {
777 	struct bpf_map *map = filp->private_data;
778 	u32 type = 0, jited = 0;
779 
780 	if (map_type_contains_progs(map)) {
781 		spin_lock(&map->owner.lock);
782 		type  = map->owner.type;
783 		jited = map->owner.jited;
784 		spin_unlock(&map->owner.lock);
785 	}
786 
787 	seq_printf(m,
788 		   "map_type:\t%u\n"
789 		   "key_size:\t%u\n"
790 		   "value_size:\t%u\n"
791 		   "max_entries:\t%u\n"
792 		   "map_flags:\t%#x\n"
793 		   "map_extra:\t%#llx\n"
794 		   "memlock:\t%llu\n"
795 		   "map_id:\t%u\n"
796 		   "frozen:\t%u\n",
797 		   map->map_type,
798 		   map->key_size,
799 		   map->value_size,
800 		   map->max_entries,
801 		   map->map_flags,
802 		   (unsigned long long)map->map_extra,
803 		   bpf_map_memory_usage(map),
804 		   map->id,
805 		   READ_ONCE(map->frozen));
806 	if (type) {
807 		seq_printf(m, "owner_prog_type:\t%u\n", type);
808 		seq_printf(m, "owner_jited:\t%u\n", jited);
809 	}
810 }
811 #endif
812 
813 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
814 			      loff_t *ppos)
815 {
816 	/* We need this handler such that alloc_file() enables
817 	 * f_mode with FMODE_CAN_READ.
818 	 */
819 	return -EINVAL;
820 }
821 
822 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
823 			       size_t siz, loff_t *ppos)
824 {
825 	/* We need this handler such that alloc_file() enables
826 	 * f_mode with FMODE_CAN_WRITE.
827 	 */
828 	return -EINVAL;
829 }
830 
831 /* called for any extra memory-mapped regions (except the initial one) */
832 static void bpf_map_mmap_open(struct vm_area_struct *vma)
833 {
834 	struct bpf_map *map = vma->vm_file->private_data;
835 
836 	if (vma->vm_flags & VM_MAYWRITE)
837 		bpf_map_write_active_inc(map);
838 }
839 
840 /* called for all unmapped memory regions (including the initial one) */
841 static void bpf_map_mmap_close(struct vm_area_struct *vma)
842 {
843 	struct bpf_map *map = vma->vm_file->private_data;
844 
845 	if (vma->vm_flags & VM_MAYWRITE)
846 		bpf_map_write_active_dec(map);
847 }
848 
849 static const struct vm_operations_struct bpf_map_default_vmops = {
850 	.open		= bpf_map_mmap_open,
851 	.close		= bpf_map_mmap_close,
852 };
853 
854 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
855 {
856 	struct bpf_map *map = filp->private_data;
857 	int err;
858 
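	/* Maps whose values carry BTF-managed special fields (spin_lock,
	 * timer, kptr, list/rbtree nodes) must not be exposed via mmap().
	 */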
859 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
860 		return -ENOTSUPP;
861 
862 	if (!(vma->vm_flags & VM_SHARED))
863 		return -EINVAL;
864 
865 	mutex_lock(&map->freeze_mutex);
866 
867 	if (vma->vm_flags & VM_WRITE) {
868 		if (map->frozen) {
869 			err = -EPERM;
870 			goto out;
871 		}
872 		/* map is meant to be read-only, so do not allow mapping as
873 		 * writable, because it's possible to leak a writable page
874 		 * reference and allow user-space to still modify it after
875 		 * freezing, while the verifier assumes the contents do not change
876 		 */
877 		if (map->map_flags & BPF_F_RDONLY_PROG) {
878 			err = -EACCES;
879 			goto out;
880 		}
881 	}
882 
883 	/* set default open/close callbacks */
884 	vma->vm_ops = &bpf_map_default_vmops;
885 	vma->vm_private_data = map;
886 	vm_flags_clear(vma, VM_MAYEXEC);
887 	if (!(vma->vm_flags & VM_WRITE))
888 		/* disallow re-mapping with PROT_WRITE */
889 		vm_flags_clear(vma, VM_MAYWRITE);
890 
891 	err = map->ops->map_mmap(map, vma);
892 	if (err)
893 		goto out;
894 
895 	if (vma->vm_flags & VM_MAYWRITE)
896 		bpf_map_write_active_inc(map);
897 out:
898 	mutex_unlock(&map->freeze_mutex);
899 	return err;
900 }
901 
902 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
903 {
904 	struct bpf_map *map = filp->private_data;
905 
906 	if (map->ops->map_poll)
907 		return map->ops->map_poll(map, filp, pts);
908 
909 	return EPOLLERR;
910 }
911 
912 const struct file_operations bpf_map_fops = {
913 #ifdef CONFIG_PROC_FS
914 	.show_fdinfo	= bpf_map_show_fdinfo,
915 #endif
916 	.release	= bpf_map_release,
917 	.read		= bpf_dummy_read,
918 	.write		= bpf_dummy_write,
919 	.mmap		= bpf_map_mmap,
920 	.poll		= bpf_map_poll,
921 };
922 
923 int bpf_map_new_fd(struct bpf_map *map, int flags)
924 {
925 	int ret;
926 
927 	ret = security_bpf_map(map, OPEN_FMODE(flags));
928 	if (ret < 0)
929 		return ret;
930 
931 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
932 				flags | O_CLOEXEC);
933 }
934 
935 int bpf_get_file_flag(int flags)
936 {
937 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
938 		return -EINVAL;
939 	if (flags & BPF_F_RDONLY)
940 		return O_RDONLY;
941 	if (flags & BPF_F_WRONLY)
942 		return O_WRONLY;
943 	return O_RDWR;
944 }
945 
946 /* helper macro to check that unused fields 'union bpf_attr' are zero */
947 #define CHECK_ATTR(CMD) \
948 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
949 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
950 		   sizeof(*attr) - \
951 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
952 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
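/* For example, CHECK_ATTR(BPF_MAP_CREATE), with BPF_MAP_CREATE_LAST_FIELD
 * defined as map_extra below, scans every byte of *attr that follows
 * attr->map_extra and evaluates to true if any of them is non-zero, i.e.
 * user space set attribute bytes this command does not use.
 */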
953 
954 /* dst and src must have at least "size" number of bytes.
955  * Return strlen on success and < 0 on error.
956  */
957 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
958 {
959 	const char *end = src + size;
960 	const char *orig_src = src;
961 
962 	memset(dst, 0, size);
963 	/* Copy all isalnum(), '_' and '.' chars. */
964 	while (src < end && *src) {
965 		if (!isalnum(*src) &&
966 		    *src != '_' && *src != '.')
967 			return -EINVAL;
968 		*dst++ = *src++;
969 	}
970 
971 	/* No '\0' found in "size" number of bytes */
972 	if (src == end)
973 		return -EINVAL;
974 
975 	return src - orig_src;
976 }
977 
978 int map_check_no_btf(const struct bpf_map *map,
979 		     const struct btf *btf,
980 		     const struct btf_type *key_type,
981 		     const struct btf_type *value_type)
982 {
983 	return -ENOTSUPP;
984 }
985 
986 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
987 			 u32 btf_key_id, u32 btf_value_id)
988 {
989 	const struct btf_type *key_type, *value_type;
990 	u32 key_size, value_size;
991 	int ret = 0;
992 
993 	/* Some maps allow key to be unspecified. */
994 	if (btf_key_id) {
995 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
996 		if (!key_type || key_size != map->key_size)
997 			return -EINVAL;
998 	} else {
999 		key_type = btf_type_by_id(btf, 0);
1000 		if (!map->ops->map_check_btf)
1001 			return -EINVAL;
1002 	}
1003 
1004 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
1005 	if (!value_type || value_size != map->value_size)
1006 		return -EINVAL;
1007 
1008 	map->record = btf_parse_fields(btf, value_type,
1009 				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
1010 				       BPF_RB_ROOT | BPF_REFCOUNT,
1011 				       map->value_size);
1012 	if (!IS_ERR_OR_NULL(map->record)) {
1013 		int i;
1014 
1015 		if (!bpf_capable()) {
1016 			ret = -EPERM;
1017 			goto free_map_tab;
1018 		}
1019 		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
1020 			ret = -EACCES;
1021 			goto free_map_tab;
1022 		}
1023 		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
1024 			switch (map->record->field_mask & (1 << i)) {
1025 			case 0:
1026 				continue;
1027 			case BPF_SPIN_LOCK:
1028 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1029 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1030 				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
1031 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1032 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1033 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1034 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1035 					ret = -EOPNOTSUPP;
1036 					goto free_map_tab;
1037 				}
1038 				break;
1039 			case BPF_TIMER:
1040 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1041 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1042 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1043 					ret = -EOPNOTSUPP;
1044 					goto free_map_tab;
1045 				}
1046 				break;
1047 			case BPF_KPTR_UNREF:
1048 			case BPF_KPTR_REF:
1049 			case BPF_REFCOUNT:
1050 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1051 				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
1052 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1053 				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
1054 				    map->map_type != BPF_MAP_TYPE_ARRAY &&
1055 				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
1056 				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
1057 				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
1058 				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
1059 				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
1060 					ret = -EOPNOTSUPP;
1061 					goto free_map_tab;
1062 				}
1063 				break;
1064 			case BPF_LIST_HEAD:
1065 			case BPF_RB_ROOT:
1066 				if (map->map_type != BPF_MAP_TYPE_HASH &&
1067 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
1068 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
1069 					ret = -EOPNOTSUPP;
1070 					goto free_map_tab;
1071 				}
1072 				break;
1073 			default:
1074 				/* Fail if map_type checks are missing for a field type */
1075 				ret = -EOPNOTSUPP;
1076 				goto free_map_tab;
1077 			}
1078 		}
1079 	}
1080 
1081 	ret = btf_check_and_fixup_fields(btf, map->record);
1082 	if (ret < 0)
1083 		goto free_map_tab;
1084 
1085 	if (map->ops->map_check_btf) {
1086 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
1087 		if (ret < 0)
1088 			goto free_map_tab;
1089 	}
1090 
1091 	return ret;
1092 free_map_tab:
1093 	bpf_map_free_record(map);
1094 	return ret;
1095 }
1096 
1097 #define BPF_MAP_CREATE_LAST_FIELD map_extra
1098 /* called via syscall */
1099 static int map_create(union bpf_attr *attr)
1100 {
1101 	const struct bpf_map_ops *ops;
1102 	int numa_node = bpf_map_attr_numa_node(attr);
1103 	u32 map_type = attr->map_type;
1104 	struct bpf_map *map;
1105 	int f_flags;
1106 	int err;
1107 
1108 	err = CHECK_ATTR(BPF_MAP_CREATE);
1109 	if (err)
1110 		return -EINVAL;
1111 
1112 	if (attr->btf_vmlinux_value_type_id) {
1113 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
1114 		    attr->btf_key_type_id || attr->btf_value_type_id)
1115 			return -EINVAL;
1116 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
1117 		return -EINVAL;
1118 	}
1119 
1120 	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
1121 	    attr->map_extra != 0)
1122 		return -EINVAL;
1123 
1124 	f_flags = bpf_get_file_flag(attr->map_flags);
1125 	if (f_flags < 0)
1126 		return f_flags;
1127 
1128 	if (numa_node != NUMA_NO_NODE &&
1129 	    ((unsigned int)numa_node >= nr_node_ids ||
1130 	     !node_online(numa_node)))
1131 		return -EINVAL;
1132 
1133 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
1134 	map_type = attr->map_type;
1135 	if (map_type >= ARRAY_SIZE(bpf_map_types))
1136 		return -EINVAL;
1137 	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
1138 	ops = bpf_map_types[map_type];
1139 	if (!ops)
1140 		return -EINVAL;
1141 
1142 	if (ops->map_alloc_check) {
1143 		err = ops->map_alloc_check(attr);
1144 		if (err)
1145 			return err;
1146 	}
1147 	if (attr->map_ifindex)
1148 		ops = &bpf_map_offload_ops;
1149 	if (!ops->map_mem_usage)
1150 		return -EINVAL;
1151 
1152 	/* Intent here is for unprivileged_bpf_disabled to block BPF map
1153 	 * creation for unprivileged users; other actions depend
1154 	 * on fd availability and access to bpffs, so are dependent on
1155 	 * object creation success. Even with unprivileged BPF disabled,
1156 	 * capability checks are still carried out.
1157 	 */
1158 	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
1159 		return -EPERM;
1160 
1161 	/* check privileged map type permissions */
1162 	switch (map_type) {
1163 	case BPF_MAP_TYPE_ARRAY:
1164 	case BPF_MAP_TYPE_PERCPU_ARRAY:
1165 	case BPF_MAP_TYPE_PROG_ARRAY:
1166 	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1167 	case BPF_MAP_TYPE_CGROUP_ARRAY:
1168 	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
1169 	case BPF_MAP_TYPE_HASH:
1170 	case BPF_MAP_TYPE_PERCPU_HASH:
1171 	case BPF_MAP_TYPE_HASH_OF_MAPS:
1172 	case BPF_MAP_TYPE_RINGBUF:
1173 	case BPF_MAP_TYPE_USER_RINGBUF:
1174 	case BPF_MAP_TYPE_CGROUP_STORAGE:
1175 	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
1176 		/* unprivileged */
1177 		break;
1178 	case BPF_MAP_TYPE_SK_STORAGE:
1179 	case BPF_MAP_TYPE_INODE_STORAGE:
1180 	case BPF_MAP_TYPE_TASK_STORAGE:
1181 	case BPF_MAP_TYPE_CGRP_STORAGE:
1182 	case BPF_MAP_TYPE_BLOOM_FILTER:
1183 	case BPF_MAP_TYPE_LPM_TRIE:
1184 	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
1185 	case BPF_MAP_TYPE_STACK_TRACE:
1186 	case BPF_MAP_TYPE_QUEUE:
1187 	case BPF_MAP_TYPE_STACK:
1188 	case BPF_MAP_TYPE_LRU_HASH:
1189 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
1190 	case BPF_MAP_TYPE_STRUCT_OPS:
1191 	case BPF_MAP_TYPE_CPUMAP:
1192 		if (!bpf_capable())
1193 			return -EPERM;
1194 		break;
1195 	case BPF_MAP_TYPE_SOCKMAP:
1196 	case BPF_MAP_TYPE_SOCKHASH:
1197 	case BPF_MAP_TYPE_DEVMAP:
1198 	case BPF_MAP_TYPE_DEVMAP_HASH:
1199 	case BPF_MAP_TYPE_XSKMAP:
1200 		if (!capable(CAP_NET_ADMIN))
1201 			return -EPERM;
1202 		break;
1203 	default:
1204 		WARN(1, "unsupported map type %d", map_type);
1205 		return -EPERM;
1206 	}
1207 
1208 	map = ops->map_alloc(attr);
1209 	if (IS_ERR(map))
1210 		return PTR_ERR(map);
1211 	map->ops = ops;
1212 	map->map_type = map_type;
1213 
1214 	err = bpf_obj_name_cpy(map->name, attr->map_name,
1215 			       sizeof(attr->map_name));
1216 	if (err < 0)
1217 		goto free_map;
1218 
1219 	atomic64_set(&map->refcnt, 1);
1220 	atomic64_set(&map->usercnt, 1);
1221 	mutex_init(&map->freeze_mutex);
1222 	spin_lock_init(&map->owner.lock);
1223 
1224 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
1225 	    /* Even if the map's value is a kernel struct,
1226 	     * the bpf_prog.o must have BTF to begin with
1227 	     * to figure out the corresponding kernel
1228 	     * counterpart.  Thus, attr->btf_fd has
1229 	     * to be valid as well.
1230 	     */
1231 	    attr->btf_vmlinux_value_type_id) {
1232 		struct btf *btf;
1233 
1234 		btf = btf_get_by_fd(attr->btf_fd);
1235 		if (IS_ERR(btf)) {
1236 			err = PTR_ERR(btf);
1237 			goto free_map;
1238 		}
1239 		if (btf_is_kernel(btf)) {
1240 			btf_put(btf);
1241 			err = -EACCES;
1242 			goto free_map;
1243 		}
1244 		map->btf = btf;
1245 
1246 		if (attr->btf_value_type_id) {
1247 			err = map_check_btf(map, btf, attr->btf_key_type_id,
1248 					    attr->btf_value_type_id);
1249 			if (err)
1250 				goto free_map;
1251 		}
1252 
1253 		map->btf_key_type_id = attr->btf_key_type_id;
1254 		map->btf_value_type_id = attr->btf_value_type_id;
1255 		map->btf_vmlinux_value_type_id =
1256 			attr->btf_vmlinux_value_type_id;
1257 	}
1258 
1259 	err = security_bpf_map_alloc(map);
1260 	if (err)
1261 		goto free_map;
1262 
1263 	err = bpf_map_alloc_id(map);
1264 	if (err)
1265 		goto free_map_sec;
1266 
1267 	bpf_map_save_memcg(map);
1268 
1269 	err = bpf_map_new_fd(map, f_flags);
1270 	if (err < 0) {
1271 		/* failed to allocate fd.
1272 		 * bpf_map_put_with_uref() is needed because the above
1273 		 * bpf_map_alloc_id() has published the map
1274 		 * to userspace, and userspace may
1275 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
1276 		 */
1277 		bpf_map_put_with_uref(map);
1278 		return err;
1279 	}
1280 
1281 	return err;
1282 
1283 free_map_sec:
1284 	security_bpf_map_free(map);
1285 free_map:
1286 	btf_put(map->btf);
1287 	map->ops->map_free(map);
1288 	return err;
1289 }
1290 
1291 /* if error is returned, fd is released.
1292  * On success caller should complete fd access with matching fdput()
1293  */
1294 struct bpf_map *__bpf_map_get(struct fd f)
1295 {
1296 	if (!f.file)
1297 		return ERR_PTR(-EBADF);
1298 	if (f.file->f_op != &bpf_map_fops) {
1299 		fdput(f);
1300 		return ERR_PTR(-EINVAL);
1301 	}
1302 
1303 	return f.file->private_data;
1304 }
1305 
1306 void bpf_map_inc(struct bpf_map *map)
1307 {
1308 	atomic64_inc(&map->refcnt);
1309 }
1310 EXPORT_SYMBOL_GPL(bpf_map_inc);
1311 
1312 void bpf_map_inc_with_uref(struct bpf_map *map)
1313 {
1314 	atomic64_inc(&map->refcnt);
1315 	atomic64_inc(&map->usercnt);
1316 }
1317 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
1318 
1319 struct bpf_map *bpf_map_get(u32 ufd)
1320 {
1321 	struct fd f = fdget(ufd);
1322 	struct bpf_map *map;
1323 
1324 	map = __bpf_map_get(f);
1325 	if (IS_ERR(map))
1326 		return map;
1327 
1328 	bpf_map_inc(map);
1329 	fdput(f);
1330 
1331 	return map;
1332 }
1333 EXPORT_SYMBOL(bpf_map_get);
1334 
1335 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
1336 {
1337 	struct fd f = fdget(ufd);
1338 	struct bpf_map *map;
1339 
1340 	map = __bpf_map_get(f);
1341 	if (IS_ERR(map))
1342 		return map;
1343 
1344 	bpf_map_inc_with_uref(map);
1345 	fdput(f);
1346 
1347 	return map;
1348 }
1349 
1350 /* map_idr_lock should have been held or the map should have been
1351  * protected by rcu read lock.
1352  */
1353 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
1354 {
1355 	int refold;
1356 
1357 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
1358 	if (!refold)
1359 		return ERR_PTR(-ENOENT);
1360 	if (uref)
1361 		atomic64_inc(&map->usercnt);
1362 
1363 	return map;
1364 }
1365 
1366 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
1367 {
1368 	spin_lock_bh(&map_idr_lock);
1369 	map = __bpf_map_inc_not_zero(map, false);
1370 	spin_unlock_bh(&map_idr_lock);
1371 
1372 	return map;
1373 }
1374 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
1375 
1376 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
1377 {
1378 	return -ENOTSUPP;
1379 }
1380 
1381 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1382 {
1383 	if (key_size)
1384 		return vmemdup_user(ukey, key_size);
1385 
1386 	if (ukey)
1387 		return ERR_PTR(-EINVAL);
1388 
1389 	return NULL;
1390 }
1391 
1392 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
1393 {
1394 	if (key_size)
1395 		return kvmemdup_bpfptr(ukey, key_size);
1396 
1397 	if (!bpfptr_is_null(ukey))
1398 		return ERR_PTR(-EINVAL);
1399 
1400 	return NULL;
1401 }
1402 
1403 /* last field in 'union bpf_attr' used by this command */
1404 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1405 
1406 static int map_lookup_elem(union bpf_attr *attr)
1407 {
1408 	void __user *ukey = u64_to_user_ptr(attr->key);
1409 	void __user *uvalue = u64_to_user_ptr(attr->value);
1410 	int ufd = attr->map_fd;
1411 	struct bpf_map *map;
1412 	void *key, *value;
1413 	u32 value_size;
1414 	struct fd f;
1415 	int err;
1416 
1417 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1418 		return -EINVAL;
1419 
1420 	if (attr->flags & ~BPF_F_LOCK)
1421 		return -EINVAL;
1422 
1423 	f = fdget(ufd);
1424 	map = __bpf_map_get(f);
1425 	if (IS_ERR(map))
1426 		return PTR_ERR(map);
1427 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1428 		err = -EPERM;
1429 		goto err_put;
1430 	}
1431 
1432 	if ((attr->flags & BPF_F_LOCK) &&
1433 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1434 		err = -EINVAL;
1435 		goto err_put;
1436 	}
1437 
1438 	key = __bpf_copy_key(ukey, map->key_size);
1439 	if (IS_ERR(key)) {
1440 		err = PTR_ERR(key);
1441 		goto err_put;
1442 	}
1443 
1444 	value_size = bpf_map_value_size(map);
1445 
1446 	err = -ENOMEM;
1447 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1448 	if (!value)
1449 		goto free_key;
1450 
1451 	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
1452 		if (copy_from_user(value, uvalue, value_size))
1453 			err = -EFAULT;
1454 		else
1455 			err = bpf_map_copy_value(map, key, value, attr->flags);
1456 		goto free_value;
1457 	}
1458 
1459 	err = bpf_map_copy_value(map, key, value, attr->flags);
1460 	if (err)
1461 		goto free_value;
1462 
1463 	err = -EFAULT;
1464 	if (copy_to_user(uvalue, value, value_size) != 0)
1465 		goto free_value;
1466 
1467 	err = 0;
1468 
1469 free_value:
1470 	kvfree(value);
1471 free_key:
1472 	kvfree(key);
1473 err_put:
1474 	fdput(f);
1475 	return err;
1476 }
1477 
1478 
1479 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1480 
1481 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
1482 {
1483 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1484 	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
1485 	int ufd = attr->map_fd;
1486 	struct bpf_map *map;
1487 	void *key, *value;
1488 	u32 value_size;
1489 	struct fd f;
1490 	int err;
1491 
1492 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1493 		return -EINVAL;
1494 
1495 	f = fdget(ufd);
1496 	map = __bpf_map_get(f);
1497 	if (IS_ERR(map))
1498 		return PTR_ERR(map);
1499 	bpf_map_write_active_inc(map);
1500 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1501 		err = -EPERM;
1502 		goto err_put;
1503 	}
1504 
1505 	if ((attr->flags & BPF_F_LOCK) &&
1506 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1507 		err = -EINVAL;
1508 		goto err_put;
1509 	}
1510 
1511 	key = ___bpf_copy_key(ukey, map->key_size);
1512 	if (IS_ERR(key)) {
1513 		err = PTR_ERR(key);
1514 		goto err_put;
1515 	}
1516 
1517 	value_size = bpf_map_value_size(map);
1518 	value = kvmemdup_bpfptr(uvalue, value_size);
1519 	if (IS_ERR(value)) {
1520 		err = PTR_ERR(value);
1521 		goto free_key;
1522 	}
1523 
1524 	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
1525 
1526 	kvfree(value);
1527 free_key:
1528 	kvfree(key);
1529 err_put:
1530 	bpf_map_write_active_dec(map);
1531 	fdput(f);
1532 	return err;
1533 }
1534 
1535 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1536 
1537 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
1538 {
1539 	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
1540 	int ufd = attr->map_fd;
1541 	struct bpf_map *map;
1542 	struct fd f;
1543 	void *key;
1544 	int err;
1545 
1546 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1547 		return -EINVAL;
1548 
1549 	f = fdget(ufd);
1550 	map = __bpf_map_get(f);
1551 	if (IS_ERR(map))
1552 		return PTR_ERR(map);
1553 	bpf_map_write_active_inc(map);
1554 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1555 		err = -EPERM;
1556 		goto err_put;
1557 	}
1558 
1559 	key = ___bpf_copy_key(ukey, map->key_size);
1560 	if (IS_ERR(key)) {
1561 		err = PTR_ERR(key);
1562 		goto err_put;
1563 	}
1564 
1565 	if (bpf_map_is_offloaded(map)) {
1566 		err = bpf_map_offload_delete_elem(map, key);
1567 		goto out;
1568 	} else if (IS_FD_PROG_ARRAY(map) ||
1569 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1570 		/* These maps require sleepable context */
1571 		err = map->ops->map_delete_elem(map, key);
1572 		goto out;
1573 	}
1574 
1575 	bpf_disable_instrumentation();
1576 	rcu_read_lock();
1577 	err = map->ops->map_delete_elem(map, key);
1578 	rcu_read_unlock();
1579 	bpf_enable_instrumentation();
1580 	maybe_wait_bpf_programs(map);
1581 out:
1582 	kvfree(key);
1583 err_put:
1584 	bpf_map_write_active_dec(map);
1585 	fdput(f);
1586 	return err;
1587 }
1588 
1589 /* last field in 'union bpf_attr' used by this command */
1590 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1591 
1592 static int map_get_next_key(union bpf_attr *attr)
1593 {
1594 	void __user *ukey = u64_to_user_ptr(attr->key);
1595 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1596 	int ufd = attr->map_fd;
1597 	struct bpf_map *map;
1598 	void *key, *next_key;
1599 	struct fd f;
1600 	int err;
1601 
1602 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1603 		return -EINVAL;
1604 
1605 	f = fdget(ufd);
1606 	map = __bpf_map_get(f);
1607 	if (IS_ERR(map))
1608 		return PTR_ERR(map);
1609 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1610 		err = -EPERM;
1611 		goto err_put;
1612 	}
1613 
1614 	if (ukey) {
1615 		key = __bpf_copy_key(ukey, map->key_size);
1616 		if (IS_ERR(key)) {
1617 			err = PTR_ERR(key);
1618 			goto err_put;
1619 		}
1620 	} else {
1621 		key = NULL;
1622 	}
1623 
1624 	err = -ENOMEM;
1625 	next_key = kvmalloc(map->key_size, GFP_USER);
1626 	if (!next_key)
1627 		goto free_key;
1628 
1629 	if (bpf_map_is_offloaded(map)) {
1630 		err = bpf_map_offload_get_next_key(map, key, next_key);
1631 		goto out;
1632 	}
1633 
1634 	rcu_read_lock();
1635 	err = map->ops->map_get_next_key(map, key, next_key);
1636 	rcu_read_unlock();
1637 out:
1638 	if (err)
1639 		goto free_next_key;
1640 
1641 	err = -EFAULT;
1642 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1643 		goto free_next_key;
1644 
1645 	err = 0;
1646 
1647 free_next_key:
1648 	kvfree(next_key);
1649 free_key:
1650 	kvfree(key);
1651 err_put:
1652 	fdput(f);
1653 	return err;
1654 }
1655 
1656 int generic_map_delete_batch(struct bpf_map *map,
1657 			     const union bpf_attr *attr,
1658 			     union bpf_attr __user *uattr)
1659 {
1660 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1661 	u32 cp, max_count;
1662 	int err = 0;
1663 	void *key;
1664 
1665 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1666 		return -EINVAL;
1667 
1668 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1669 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1670 		return -EINVAL;
1671 	}
1672 
1673 	max_count = attr->batch.count;
1674 	if (!max_count)
1675 		return 0;
1676 
1677 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1678 	if (!key)
1679 		return -ENOMEM;
1680 
1681 	for (cp = 0; cp < max_count; cp++) {
1682 		err = -EFAULT;
1683 		if (copy_from_user(key, keys + cp * map->key_size,
1684 				   map->key_size))
1685 			break;
1686 
1687 		if (bpf_map_is_offloaded(map)) {
1688 			err = bpf_map_offload_delete_elem(map, key);
1689 			break;
1690 		}
1691 
1692 		bpf_disable_instrumentation();
1693 		rcu_read_lock();
1694 		err = map->ops->map_delete_elem(map, key);
1695 		rcu_read_unlock();
1696 		bpf_enable_instrumentation();
1697 		if (err)
1698 			break;
1699 		cond_resched();
1700 	}
1701 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1702 		err = -EFAULT;
1703 
1704 	kvfree(key);
1705 
1706 	maybe_wait_bpf_programs(map);
1707 	return err;
1708 }
1709 
1710 int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
1711 			     const union bpf_attr *attr,
1712 			     union bpf_attr __user *uattr)
1713 {
1714 	void __user *values = u64_to_user_ptr(attr->batch.values);
1715 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1716 	u32 value_size, cp, max_count;
1717 	void *key, *value;
1718 	int err = 0;
1719 
1720 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1721 		return -EINVAL;
1722 
1723 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1724 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1725 		return -EINVAL;
1726 	}
1727 
1728 	value_size = bpf_map_value_size(map);
1729 
1730 	max_count = attr->batch.count;
1731 	if (!max_count)
1732 		return 0;
1733 
1734 	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1735 	if (!key)
1736 		return -ENOMEM;
1737 
1738 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1739 	if (!value) {
1740 		kvfree(key);
1741 		return -ENOMEM;
1742 	}
1743 
1744 	for (cp = 0; cp < max_count; cp++) {
1745 		err = -EFAULT;
1746 		if (copy_from_user(key, keys + cp * map->key_size,
1747 		    map->key_size) ||
1748 		    copy_from_user(value, values + cp * value_size, value_size))
1749 			break;
1750 
1751 		err = bpf_map_update_value(map, map_file, key, value,
1752 					   attr->batch.elem_flags);
1753 
1754 		if (err)
1755 			break;
1756 		cond_resched();
1757 	}
1758 
1759 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1760 		err = -EFAULT;
1761 
1762 	kvfree(value);
1763 	kvfree(key);
1764 	return err;
1765 }
1766 
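/* A key returned by map_get_next_key() can be deleted concurrently before
 * the subsequent bpf_map_copy_value() runs, in which case the lookup sees
 * -ENOENT. generic_map_lookup_batch() retries from the same cursor a few
 * times before giving up with -EINTR.
 */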
1767 #define MAP_LOOKUP_RETRIES 3
1768 
1769 int generic_map_lookup_batch(struct bpf_map *map,
1770 				    const union bpf_attr *attr,
1771 				    union bpf_attr __user *uattr)
1772 {
1773 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1774 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1775 	void __user *values = u64_to_user_ptr(attr->batch.values);
1776 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1777 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1778 	int err, retry = MAP_LOOKUP_RETRIES;
1779 	u32 value_size, cp, max_count;
1780 
1781 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1782 		return -EINVAL;
1783 
1784 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1785 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
1786 		return -EINVAL;
1787 
1788 	value_size = bpf_map_value_size(map);
1789 
1790 	max_count = attr->batch.count;
1791 	if (!max_count)
1792 		return 0;
1793 
1794 	if (put_user(0, &uattr->batch.count))
1795 		return -EFAULT;
1796 
1797 	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1798 	if (!buf_prevkey)
1799 		return -ENOMEM;
1800 
1801 	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1802 	if (!buf) {
1803 		kvfree(buf_prevkey);
1804 		return -ENOMEM;
1805 	}
1806 
1807 	err = -EFAULT;
1808 	prev_key = NULL;
1809 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1810 		goto free_buf;
1811 	key = buf;
1812 	value = key + map->key_size;
1813 	if (ubatch)
1814 		prev_key = buf_prevkey;
1815 
1816 	for (cp = 0; cp < max_count;) {
1817 		rcu_read_lock();
1818 		err = map->ops->map_get_next_key(map, prev_key, key);
1819 		rcu_read_unlock();
1820 		if (err)
1821 			break;
1822 		err = bpf_map_copy_value(map, key, value,
1823 					 attr->batch.elem_flags);
1824 
1825 		if (err == -ENOENT) {
1826 			if (retry) {
1827 				retry--;
1828 				continue;
1829 			}
1830 			err = -EINTR;
1831 			break;
1832 		}
1833 
1834 		if (err)
1835 			goto free_buf;
1836 
1837 		if (copy_to_user(keys + cp * map->key_size, key,
1838 				 map->key_size)) {
1839 			err = -EFAULT;
1840 			goto free_buf;
1841 		}
1842 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1843 			err = -EFAULT;
1844 			goto free_buf;
1845 		}
1846 
1847 		if (!prev_key)
1848 			prev_key = buf_prevkey;
1849 
1850 		swap(prev_key, key);
1851 		retry = MAP_LOOKUP_RETRIES;
1852 		cp++;
1853 		cond_resched();
1854 	}
1855 
1856 	if (err == -EFAULT)
1857 		goto free_buf;
1858 
1859 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1860 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1861 		err = -EFAULT;
1862 
1863 free_buf:
1864 	kvfree(buf_prevkey);
1865 	kvfree(buf);
1866 	return err;
1867 }
1868 
1869 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags
1870 
1871 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1872 {
1873 	void __user *ukey = u64_to_user_ptr(attr->key);
1874 	void __user *uvalue = u64_to_user_ptr(attr->value);
1875 	int ufd = attr->map_fd;
1876 	struct bpf_map *map;
1877 	void *key, *value;
1878 	u32 value_size;
1879 	struct fd f;
1880 	int err;
1881 
1882 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1883 		return -EINVAL;
1884 
1885 	if (attr->flags & ~BPF_F_LOCK)
1886 		return -EINVAL;
1887 
1888 	f = fdget(ufd);
1889 	map = __bpf_map_get(f);
1890 	if (IS_ERR(map))
1891 		return PTR_ERR(map);
1892 	bpf_map_write_active_inc(map);
1893 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1894 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1895 		err = -EPERM;
1896 		goto err_put;
1897 	}
1898 
1899 	if (attr->flags &&
1900 	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
1901 	     map->map_type == BPF_MAP_TYPE_STACK)) {
1902 		err = -EINVAL;
1903 		goto err_put;
1904 	}
1905 
1906 	if ((attr->flags & BPF_F_LOCK) &&
1907 	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
1908 		err = -EINVAL;
1909 		goto err_put;
1910 	}
1911 
1912 	key = __bpf_copy_key(ukey, map->key_size);
1913 	if (IS_ERR(key)) {
1914 		err = PTR_ERR(key);
1915 		goto err_put;
1916 	}
1917 
1918 	value_size = bpf_map_value_size(map);
1919 
1920 	err = -ENOMEM;
1921 	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
1922 	if (!value)
1923 		goto free_key;
1924 
1925 	err = -ENOTSUPP;
1926 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1927 	    map->map_type == BPF_MAP_TYPE_STACK) {
1928 		err = map->ops->map_pop_elem(map, value);
1929 	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
1930 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1931 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
1932 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
1933 		if (!bpf_map_is_offloaded(map)) {
1934 			bpf_disable_instrumentation();
1935 			rcu_read_lock();
1936 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
1937 			rcu_read_unlock();
1938 			bpf_enable_instrumentation();
1939 		}
1940 	}
1941 
1942 	if (err)
1943 		goto free_value;
1944 
1945 	if (copy_to_user(uvalue, value, value_size) != 0) {
1946 		err = -EFAULT;
1947 		goto free_value;
1948 	}
1949 
1950 	err = 0;
1951 
1952 free_value:
1953 	kvfree(value);
1954 free_key:
1955 	kvfree(key);
1956 err_put:
1957 	bpf_map_write_active_dec(map);
1958 	fdput(f);
1959 	return err;
1960 }
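
/*
 * Illustrative user-space sketch (not part of the kernel source): the command
 * handled above is what libbpf wraps as bpf_map_lookup_and_delete_elem().
 * map_fd and the key/value types are assumptions made for the example.
 *
 *	__u32 key = 42;
 *	__u64 value;
 *
 *	// For the hash map types handled above this reads the element into
 *	// 'value' and removes it from the map in one operation.
 *	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
 *	// err < 0: element absent, or the map type is not supported
 */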
1961 
1962 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1963 
1964 static int map_freeze(const union bpf_attr *attr)
1965 {
1966 	int err = 0, ufd = attr->map_fd;
1967 	struct bpf_map *map;
1968 	struct fd f;
1969 
1970 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1971 		return -EINVAL;
1972 
1973 	f = fdget(ufd);
1974 	map = __bpf_map_get(f);
1975 	if (IS_ERR(map))
1976 		return PTR_ERR(map);
1977 
1978 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
1979 		fdput(f);
1980 		return -ENOTSUPP;
1981 	}
1982 
1983 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1984 		fdput(f);
1985 		return -EPERM;
1986 	}
1987 
1988 	mutex_lock(&map->freeze_mutex);
1989 	if (bpf_map_write_active(map)) {
1990 		err = -EBUSY;
1991 		goto err_put;
1992 	}
1993 	if (READ_ONCE(map->frozen)) {
1994 		err = -EBUSY;
1995 		goto err_put;
1996 	}
1997 
1998 	WRITE_ONCE(map->frozen, true);
1999 err_put:
2000 	mutex_unlock(&map->freeze_mutex);
2001 	fdput(f);
2002 	return err;
2003 }
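
/*
 * Illustrative user-space sketch (not part of the kernel source): freezing a
 * map so that only lookups remain possible from the syscall side, as wrapped
 * by libbpf's bpf_map_freeze().  map_fd is an assumption for the example.
 *
 *	err = bpf_map_freeze(map_fd);			// BPF_MAP_FREEZE
 *	// From now on syscall-side writes such as bpf_map_update_elem()
 *	// fail with -EPERM, while syscall-side lookups keep working.
 */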
2004 
2005 static const struct bpf_prog_ops * const bpf_prog_types[] = {
2006 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2007 	[_id] = & _name ## _prog_ops,
2008 #define BPF_MAP_TYPE(_id, _ops)
2009 #define BPF_LINK_TYPE(_id, _name)
2010 #include <linux/bpf_types.h>
2011 #undef BPF_PROG_TYPE
2012 #undef BPF_MAP_TYPE
2013 #undef BPF_LINK_TYPE
2014 };
2015 
2016 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
2017 {
2018 	const struct bpf_prog_ops *ops;
2019 
2020 	if (type >= ARRAY_SIZE(bpf_prog_types))
2021 		return -EINVAL;
2022 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
2023 	ops = bpf_prog_types[type];
2024 	if (!ops)
2025 		return -EINVAL;
2026 
2027 	if (!bpf_prog_is_offloaded(prog->aux))
2028 		prog->aux->ops = ops;
2029 	else
2030 		prog->aux->ops = &bpf_offload_prog_ops;
2031 	prog->type = type;
2032 	return 0;
2033 }
2034 
2035 enum bpf_audit {
2036 	BPF_AUDIT_LOAD,
2037 	BPF_AUDIT_UNLOAD,
2038 	BPF_AUDIT_MAX,
2039 };
2040 
2041 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
2042 	[BPF_AUDIT_LOAD]   = "LOAD",
2043 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
2044 };
2045 
2046 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
2047 {
2048 	struct audit_context *ctx = NULL;
2049 	struct audit_buffer *ab;
2050 
2051 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
2052 		return;
2053 	if (audit_enabled == AUDIT_OFF)
2054 		return;
2055 	if (!in_irq() && !irqs_disabled())
2056 		ctx = audit_context();
2057 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
2058 	if (unlikely(!ab))
2059 		return;
2060 	audit_log_format(ab, "prog-id=%u op=%s",
2061 			 prog->aux->id, bpf_audit_str[op]);
2062 	audit_log_end(ab);
2063 }
2064 
2065 static int bpf_prog_alloc_id(struct bpf_prog *prog)
2066 {
2067 	int id;
2068 
2069 	idr_preload(GFP_KERNEL);
2070 	spin_lock_bh(&prog_idr_lock);
2071 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
2072 	if (id > 0)
2073 		prog->aux->id = id;
2074 	spin_unlock_bh(&prog_idr_lock);
2075 	idr_preload_end();
2076 
2077 	/* id is in [1, INT_MAX) */
2078 	if (WARN_ON_ONCE(!id))
2079 		return -ENOSPC;
2080 
2081 	return id > 0 ? 0 : id;
2082 }
2083 
2084 void bpf_prog_free_id(struct bpf_prog *prog)
2085 {
2086 	unsigned long flags;
2087 
2088 	/* cBPF to eBPF migrations are currently not in the idr store.
2089 	 * Offloaded programs are removed from the store when their device
2090 	 * disappears - even if someone grabs an fd to them they are unusable,
2091 	 * simply waiting for refcnt to drop to be freed.
2092 	 */
2093 	if (!prog->aux->id)
2094 		return;
2095 
2096 	spin_lock_irqsave(&prog_idr_lock, flags);
2097 	idr_remove(&prog_idr, prog->aux->id);
2098 	prog->aux->id = 0;
2099 	spin_unlock_irqrestore(&prog_idr_lock, flags);
2100 }
2101 
2102 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
2103 {
2104 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
2105 
2106 	kvfree(aux->func_info);
2107 	kfree(aux->func_info_aux);
2108 	free_uid(aux->user);
2109 	security_bpf_prog_free(aux);
2110 	bpf_prog_free(aux->prog);
2111 }
2112 
2113 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
2114 {
2115 	bpf_prog_kallsyms_del_all(prog);
2116 	btf_put(prog->aux->btf);
2117 	module_put(prog->aux->mod);
2118 	kvfree(prog->aux->jited_linfo);
2119 	kvfree(prog->aux->linfo);
2120 	kfree(prog->aux->kfunc_tab);
2121 	if (prog->aux->attach_btf)
2122 		btf_put(prog->aux->attach_btf);
2123 
2124 	if (deferred) {
2125 		if (prog->aux->sleepable)
2126 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
2127 		else
2128 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
2129 	} else {
2130 		__bpf_prog_put_rcu(&prog->aux->rcu);
2131 	}
2132 }
2133 
2134 static void bpf_prog_put_deferred(struct work_struct *work)
2135 {
2136 	struct bpf_prog_aux *aux;
2137 	struct bpf_prog *prog;
2138 
2139 	aux = container_of(work, struct bpf_prog_aux, work);
2140 	prog = aux->prog;
2141 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
2142 	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
2143 	bpf_prog_free_id(prog);
2144 	__bpf_prog_put_noref(prog, true);
2145 }
2146 
2147 static void __bpf_prog_put(struct bpf_prog *prog)
2148 {
2149 	struct bpf_prog_aux *aux = prog->aux;
2150 
2151 	if (atomic64_dec_and_test(&aux->refcnt)) {
2152 		if (in_irq() || irqs_disabled()) {
2153 			INIT_WORK(&aux->work, bpf_prog_put_deferred);
2154 			schedule_work(&aux->work);
2155 		} else {
2156 			bpf_prog_put_deferred(&aux->work);
2157 		}
2158 	}
2159 }
2160 
2161 void bpf_prog_put(struct bpf_prog *prog)
2162 {
2163 	__bpf_prog_put(prog);
2164 }
2165 EXPORT_SYMBOL_GPL(bpf_prog_put);
2166 
2167 static int bpf_prog_release(struct inode *inode, struct file *filp)
2168 {
2169 	struct bpf_prog *prog = filp->private_data;
2170 
2171 	bpf_prog_put(prog);
2172 	return 0;
2173 }
2174 
2175 struct bpf_prog_kstats {
2176 	u64 nsecs;
2177 	u64 cnt;
2178 	u64 misses;
2179 };
2180 
2181 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2182 {
2183 	struct bpf_prog_stats *stats;
2184 	unsigned int flags;
2185 
2186 	stats = this_cpu_ptr(prog->stats);
2187 	flags = u64_stats_update_begin_irqsave(&stats->syncp);
2188 	u64_stats_inc(&stats->misses);
2189 	u64_stats_update_end_irqrestore(&stats->syncp, flags);
2190 }
2191 
2192 static void bpf_prog_get_stats(const struct bpf_prog *prog,
2193 			       struct bpf_prog_kstats *stats)
2194 {
2195 	u64 nsecs = 0, cnt = 0, misses = 0;
2196 	int cpu;
2197 
2198 	for_each_possible_cpu(cpu) {
2199 		const struct bpf_prog_stats *st;
2200 		unsigned int start;
2201 		u64 tnsecs, tcnt, tmisses;
2202 
2203 		st = per_cpu_ptr(prog->stats, cpu);
2204 		do {
2205 			start = u64_stats_fetch_begin(&st->syncp);
2206 			tnsecs = u64_stats_read(&st->nsecs);
2207 			tcnt = u64_stats_read(&st->cnt);
2208 			tmisses = u64_stats_read(&st->misses);
2209 		} while (u64_stats_fetch_retry(&st->syncp, start));
2210 		nsecs += tnsecs;
2211 		cnt += tcnt;
2212 		misses += tmisses;
2213 	}
2214 	stats->nsecs = nsecs;
2215 	stats->cnt = cnt;
2216 	stats->misses = misses;
2217 }
2218 
2219 #ifdef CONFIG_PROC_FS
2220 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
2221 {
2222 	const struct bpf_prog *prog = filp->private_data;
2223 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2224 	struct bpf_prog_kstats stats;
2225 
2226 	bpf_prog_get_stats(prog, &stats);
2227 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2228 	seq_printf(m,
2229 		   "prog_type:\t%u\n"
2230 		   "prog_jited:\t%u\n"
2231 		   "prog_tag:\t%s\n"
2232 		   "memlock:\t%llu\n"
2233 		   "prog_id:\t%u\n"
2234 		   "run_time_ns:\t%llu\n"
2235 		   "run_cnt:\t%llu\n"
2236 		   "recursion_misses:\t%llu\n"
2237 		   "verified_insns:\t%u\n",
2238 		   prog->type,
2239 		   prog->jited,
2240 		   prog_tag,
2241 		   prog->pages * 1ULL << PAGE_SHIFT,
2242 		   prog->aux->id,
2243 		   stats.nsecs,
2244 		   stats.cnt,
2245 		   stats.misses,
2246 		   prog->aux->verified_insns);
2247 }
2248 #endif
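
/*
 * Illustrative sketch (not part of the kernel source): with CONFIG_PROC_FS the
 * fields printed above can be read from /proc/<pid>/fdinfo/<prog-fd>; the
 * values below are made up for the example.
 *
 *	prog_type:	5
 *	prog_jited:	1
 *	prog_tag:	a04f5eef06a7f555
 *	memlock:	4096
 *	prog_id:	17
 *	run_time_ns:	12345
 *	run_cnt:	6
 *	recursion_misses:	0
 *	verified_insns:	42
 */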
2249 
2250 const struct file_operations bpf_prog_fops = {
2251 #ifdef CONFIG_PROC_FS
2252 	.show_fdinfo	= bpf_prog_show_fdinfo,
2253 #endif
2254 	.release	= bpf_prog_release,
2255 	.read		= bpf_dummy_read,
2256 	.write		= bpf_dummy_write,
2257 };
2258 
2259 int bpf_prog_new_fd(struct bpf_prog *prog)
2260 {
2261 	int ret;
2262 
2263 	ret = security_bpf_prog(prog);
2264 	if (ret < 0)
2265 		return ret;
2266 
2267 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
2268 				O_RDWR | O_CLOEXEC);
2269 }
2270 
2271 static struct bpf_prog *____bpf_prog_get(struct fd f)
2272 {
2273 	if (!f.file)
2274 		return ERR_PTR(-EBADF);
2275 	if (f.file->f_op != &bpf_prog_fops) {
2276 		fdput(f);
2277 		return ERR_PTR(-EINVAL);
2278 	}
2279 
2280 	return f.file->private_data;
2281 }
2282 
2283 void bpf_prog_add(struct bpf_prog *prog, int i)
2284 {
2285 	atomic64_add(i, &prog->aux->refcnt);
2286 }
2287 EXPORT_SYMBOL_GPL(bpf_prog_add);
2288 
2289 void bpf_prog_sub(struct bpf_prog *prog, int i)
2290 {
2291 	/* Only to be used for undoing previous bpf_prog_add() in some
2292 	 * error path. We still know that another entity in our call
2293 	 * path holds a reference to the program, thus atomic_sub() can
2294 	 * be safely used in such cases!
2295 	 */
2296 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
2297 }
2298 EXPORT_SYMBOL_GPL(bpf_prog_sub);
2299 
2300 void bpf_prog_inc(struct bpf_prog *prog)
2301 {
2302 	atomic64_inc(&prog->aux->refcnt);
2303 }
2304 EXPORT_SYMBOL_GPL(bpf_prog_inc);
2305 
2306 /* prog_idr_lock should have been held */
2307 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
2308 {
2309 	int refold;
2310 
2311 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
2312 
2313 	if (!refold)
2314 		return ERR_PTR(-ENOENT);
2315 
2316 	return prog;
2317 }
2318 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
2319 
2320 bool bpf_prog_get_ok(struct bpf_prog *prog,
2321 			    enum bpf_prog_type *attach_type, bool attach_drv)
2322 {
2323 	/* not an attachment, just a refcount inc, always allow */
2324 	if (!attach_type)
2325 		return true;
2326 
2327 	if (prog->type != *attach_type)
2328 		return false;
2329 	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
2330 		return false;
2331 
2332 	return true;
2333 }
2334 
2335 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
2336 				       bool attach_drv)
2337 {
2338 	struct fd f = fdget(ufd);
2339 	struct bpf_prog *prog;
2340 
2341 	prog = ____bpf_prog_get(f);
2342 	if (IS_ERR(prog))
2343 		return prog;
2344 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
2345 		prog = ERR_PTR(-EINVAL);
2346 		goto out;
2347 	}
2348 
2349 	bpf_prog_inc(prog);
2350 out:
2351 	fdput(f);
2352 	return prog;
2353 }
2354 
2355 struct bpf_prog *bpf_prog_get(u32 ufd)
2356 {
2357 	return __bpf_prog_get(ufd, NULL, false);
2358 }
2359 
2360 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2361 				       bool attach_drv)
2362 {
2363 	return __bpf_prog_get(ufd, &type, attach_drv);
2364 }
2365 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
2366 
2367 /* Initially all BPF programs could be loaded w/o specifying
2368  * expected_attach_type. Later for some of them specifying expected_attach_type
2369  * at load time became required so that program could be validated properly.
2370  * Programs of types that are allowed to be loaded both w/ and w/o (for
2371  * backward compatibility) expected_attach_type, should have the default attach
2372  * type assigned to expected_attach_type for the latter case, so that it can be
2373  * validated later at attach time.
2374  *
2375  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
2376  * prog type requires it but has some attach types that have to be backward
2377  * compatible.
2378  */
2379 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
2380 {
2381 	switch (attr->prog_type) {
2382 	case BPF_PROG_TYPE_CGROUP_SOCK:
2383 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
2384 		 * exist so checking for non-zero is the way to go here.
2385 		 */
2386 		if (!attr->expected_attach_type)
2387 			attr->expected_attach_type =
2388 				BPF_CGROUP_INET_SOCK_CREATE;
2389 		break;
2390 	case BPF_PROG_TYPE_SK_REUSEPORT:
2391 		if (!attr->expected_attach_type)
2392 			attr->expected_attach_type =
2393 				BPF_SK_REUSEPORT_SELECT;
2394 		break;
2395 	}
2396 }
2397 
2398 static int
2399 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
2400 			   enum bpf_attach_type expected_attach_type,
2401 			   struct btf *attach_btf, u32 btf_id,
2402 			   struct bpf_prog *dst_prog)
2403 {
2404 	if (btf_id) {
2405 		if (btf_id > BTF_MAX_TYPE)
2406 			return -EINVAL;
2407 
2408 		if (!attach_btf && !dst_prog)
2409 			return -EINVAL;
2410 
2411 		switch (prog_type) {
2412 		case BPF_PROG_TYPE_TRACING:
2413 		case BPF_PROG_TYPE_LSM:
2414 		case BPF_PROG_TYPE_STRUCT_OPS:
2415 		case BPF_PROG_TYPE_EXT:
2416 			break;
2417 		default:
2418 			return -EINVAL;
2419 		}
2420 	}
2421 
2422 	if (attach_btf && (!btf_id || dst_prog))
2423 		return -EINVAL;
2424 
2425 	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
2426 	    prog_type != BPF_PROG_TYPE_EXT)
2427 		return -EINVAL;
2428 
2429 	switch (prog_type) {
2430 	case BPF_PROG_TYPE_CGROUP_SOCK:
2431 		switch (expected_attach_type) {
2432 		case BPF_CGROUP_INET_SOCK_CREATE:
2433 		case BPF_CGROUP_INET_SOCK_RELEASE:
2434 		case BPF_CGROUP_INET4_POST_BIND:
2435 		case BPF_CGROUP_INET6_POST_BIND:
2436 			return 0;
2437 		default:
2438 			return -EINVAL;
2439 		}
2440 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2441 		switch (expected_attach_type) {
2442 		case BPF_CGROUP_INET4_BIND:
2443 		case BPF_CGROUP_INET6_BIND:
2444 		case BPF_CGROUP_INET4_CONNECT:
2445 		case BPF_CGROUP_INET6_CONNECT:
2446 		case BPF_CGROUP_INET4_GETPEERNAME:
2447 		case BPF_CGROUP_INET6_GETPEERNAME:
2448 		case BPF_CGROUP_INET4_GETSOCKNAME:
2449 		case BPF_CGROUP_INET6_GETSOCKNAME:
2450 		case BPF_CGROUP_UDP4_SENDMSG:
2451 		case BPF_CGROUP_UDP6_SENDMSG:
2452 		case BPF_CGROUP_UDP4_RECVMSG:
2453 		case BPF_CGROUP_UDP6_RECVMSG:
2454 			return 0;
2455 		default:
2456 			return -EINVAL;
2457 		}
2458 	case BPF_PROG_TYPE_CGROUP_SKB:
2459 		switch (expected_attach_type) {
2460 		case BPF_CGROUP_INET_INGRESS:
2461 		case BPF_CGROUP_INET_EGRESS:
2462 			return 0;
2463 		default:
2464 			return -EINVAL;
2465 		}
2466 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2467 		switch (expected_attach_type) {
2468 		case BPF_CGROUP_SETSOCKOPT:
2469 		case BPF_CGROUP_GETSOCKOPT:
2470 			return 0;
2471 		default:
2472 			return -EINVAL;
2473 		}
2474 	case BPF_PROG_TYPE_SK_LOOKUP:
2475 		if (expected_attach_type == BPF_SK_LOOKUP)
2476 			return 0;
2477 		return -EINVAL;
2478 	case BPF_PROG_TYPE_SK_REUSEPORT:
2479 		switch (expected_attach_type) {
2480 		case BPF_SK_REUSEPORT_SELECT:
2481 		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
2482 			return 0;
2483 		default:
2484 			return -EINVAL;
2485 		}
2486 	case BPF_PROG_TYPE_NETFILTER:
2487 		if (expected_attach_type == BPF_NETFILTER)
2488 			return 0;
2489 		return -EINVAL;
2490 	case BPF_PROG_TYPE_SYSCALL:
2491 	case BPF_PROG_TYPE_EXT:
2492 		if (expected_attach_type)
2493 			return -EINVAL;
2494 		fallthrough;
2495 	default:
2496 		return 0;
2497 	}
2498 }
2499 
2500 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2501 {
2502 	switch (prog_type) {
2503 	case BPF_PROG_TYPE_SCHED_CLS:
2504 	case BPF_PROG_TYPE_SCHED_ACT:
2505 	case BPF_PROG_TYPE_XDP:
2506 	case BPF_PROG_TYPE_LWT_IN:
2507 	case BPF_PROG_TYPE_LWT_OUT:
2508 	case BPF_PROG_TYPE_LWT_XMIT:
2509 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2510 	case BPF_PROG_TYPE_SK_SKB:
2511 	case BPF_PROG_TYPE_SK_MSG:
2512 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2513 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2514 	case BPF_PROG_TYPE_CGROUP_SOCK:
2515 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2516 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2517 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2518 	case BPF_PROG_TYPE_SOCK_OPS:
2519 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2520 	case BPF_PROG_TYPE_NETFILTER:
2521 		return true;
2522 	case BPF_PROG_TYPE_CGROUP_SKB:
2523 		/* always unpriv */
2524 	case BPF_PROG_TYPE_SK_REUSEPORT:
2525 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2526 	default:
2527 		return false;
2528 	}
2529 }
2530 
2531 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2532 {
2533 	switch (prog_type) {
2534 	case BPF_PROG_TYPE_KPROBE:
2535 	case BPF_PROG_TYPE_TRACEPOINT:
2536 	case BPF_PROG_TYPE_PERF_EVENT:
2537 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2538 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2539 	case BPF_PROG_TYPE_TRACING:
2540 	case BPF_PROG_TYPE_LSM:
2541 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2542 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2543 		return true;
2544 	default:
2545 		return false;
2546 	}
2547 }
2548 
2549 /* last field in 'union bpf_attr' used by this command */
2550 #define	BPF_PROG_LOAD_LAST_FIELD log_true_size
2551 
2552 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
2553 {
2554 	enum bpf_prog_type type = attr->prog_type;
2555 	struct bpf_prog *prog, *dst_prog = NULL;
2556 	struct btf *attach_btf = NULL;
2557 	int err;
2558 	char license[128];
2559 
2560 	if (CHECK_ATTR(BPF_PROG_LOAD))
2561 		return -EINVAL;
2562 
2563 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2564 				 BPF_F_ANY_ALIGNMENT |
2565 				 BPF_F_TEST_STATE_FREQ |
2566 				 BPF_F_SLEEPABLE |
2567 				 BPF_F_TEST_RND_HI32 |
2568 				 BPF_F_XDP_HAS_FRAGS |
2569 				 BPF_F_XDP_DEV_BOUND_ONLY))
2570 		return -EINVAL;
2571 
2572 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2573 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2574 	    !bpf_capable())
2575 		return -EPERM;
2576 
2577 	/* Intent here is for unprivileged_bpf_disabled to block BPF program
2578 	 * creation for unprivileged users; other actions depend
2579 	 * on fd availability and access to bpffs, so they already hinge on
2580 	 * object creation success. Even with unprivileged BPF disabled,
2581 	 * capability checks are still carried out for these
2582 	 * and other operations.
2583 	 */
2584 	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
2585 		return -EPERM;
2586 
2587 	if (attr->insn_cnt == 0 ||
2588 	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2589 		return -E2BIG;
2590 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2591 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2592 	    !bpf_capable())
2593 		return -EPERM;
2594 
2595 	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2596 		return -EPERM;
2597 	if (is_perfmon_prog_type(type) && !perfmon_capable())
2598 		return -EPERM;
2599 
2600 	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
2601 	 * or btf, so we need to check which one it is
2602 	 */
2603 	if (attr->attach_prog_fd) {
2604 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2605 		if (IS_ERR(dst_prog)) {
2606 			dst_prog = NULL;
2607 			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
2608 			if (IS_ERR(attach_btf))
2609 				return -EINVAL;
2610 			if (!btf_is_kernel(attach_btf)) {
2611 				/* attaching through specifying bpf_prog's BTF
2612 				 * objects directly might be supported eventually
2613 				 */
2614 				btf_put(attach_btf);
2615 				return -ENOTSUPP;
2616 			}
2617 		}
2618 	} else if (attr->attach_btf_id) {
2619 		/* fall back to vmlinux BTF, if BTF type ID is specified */
2620 		attach_btf = bpf_get_btf_vmlinux();
2621 		if (IS_ERR(attach_btf))
2622 			return PTR_ERR(attach_btf);
2623 		if (!attach_btf)
2624 			return -EINVAL;
2625 		btf_get(attach_btf);
2626 	}
2627 
2628 	bpf_prog_load_fixup_attach_type(attr);
2629 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2630 				       attach_btf, attr->attach_btf_id,
2631 				       dst_prog)) {
2632 		if (dst_prog)
2633 			bpf_prog_put(dst_prog);
2634 		if (attach_btf)
2635 			btf_put(attach_btf);
2636 		return -EINVAL;
2637 	}
2638 
2639 	/* plain bpf_prog allocation */
2640 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2641 	if (!prog) {
2642 		if (dst_prog)
2643 			bpf_prog_put(dst_prog);
2644 		if (attach_btf)
2645 			btf_put(attach_btf);
2646 		return -ENOMEM;
2647 	}
2648 
2649 	prog->expected_attach_type = attr->expected_attach_type;
2650 	prog->aux->attach_btf = attach_btf;
2651 	prog->aux->attach_btf_id = attr->attach_btf_id;
2652 	prog->aux->dst_prog = dst_prog;
2653 	prog->aux->dev_bound = !!attr->prog_ifindex;
2654 	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2655 	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
2656 
2657 	err = security_bpf_prog_alloc(prog->aux);
2658 	if (err)
2659 		goto free_prog;
2660 
2661 	prog->aux->user = get_current_user();
2662 	prog->len = attr->insn_cnt;
2663 
2664 	err = -EFAULT;
2665 	if (copy_from_bpfptr(prog->insns,
2666 			     make_bpfptr(attr->insns, uattr.is_kernel),
2667 			     bpf_prog_insn_size(prog)) != 0)
2668 		goto free_prog_sec;
2669 	/* copy eBPF program license from user space */
2670 	if (strncpy_from_bpfptr(license,
2671 				make_bpfptr(attr->license, uattr.is_kernel),
2672 				sizeof(license) - 1) < 0)
2673 		goto free_prog_sec;
2674 	license[sizeof(license) - 1] = 0;
2675 
2676 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2677 	prog->gpl_compatible = license_is_gpl_compatible(license) ? 1 : 0;
2678 
2679 	prog->orig_prog = NULL;
2680 	prog->jited = 0;
2681 
2682 	atomic64_set(&prog->aux->refcnt, 1);
2683 
2684 	if (bpf_prog_is_dev_bound(prog->aux)) {
2685 		err = bpf_prog_dev_bound_init(prog, attr);
2686 		if (err)
2687 			goto free_prog_sec;
2688 	}
2689 
2690 	if (type == BPF_PROG_TYPE_EXT && dst_prog &&
2691 	    bpf_prog_is_dev_bound(dst_prog->aux)) {
2692 		err = bpf_prog_dev_bound_inherit(prog, dst_prog);
2693 		if (err)
2694 			goto free_prog_sec;
2695 	}
2696 
2697 	/* find program type: socket_filter vs tracing_filter */
2698 	err = find_prog_type(type, prog);
2699 	if (err < 0)
2700 		goto free_prog_sec;
2701 
2702 	prog->aux->load_time = ktime_get_boottime_ns();
2703 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2704 			       sizeof(attr->prog_name));
2705 	if (err < 0)
2706 		goto free_prog_sec;
2707 
2708 	/* run eBPF verifier */
2709 	err = bpf_check(&prog, attr, uattr, uattr_size);
2710 	if (err < 0)
2711 		goto free_used_maps;
2712 
2713 	prog = bpf_prog_select_runtime(prog, &err);
2714 	if (err < 0)
2715 		goto free_used_maps;
2716 
2717 	err = bpf_prog_alloc_id(prog);
2718 	if (err)
2719 		goto free_used_maps;
2720 
2721 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2722 	 * effectively publicly exposed. However, retrieving via
2723 	 * bpf_prog_get_fd_by_id() will take another reference,
2724 	 * therefore it cannot be gone underneath us.
2725 	 *
2726 	 * Only for the time /after/ successful bpf_prog_new_fd()
2727 	 * and before returning to userspace, we might just hold
2728 	 * one reference and any parallel close on that fd could
2729 	 * rip everything out. Hence, below notifications must
2730 	 * happen before bpf_prog_new_fd().
2731 	 *
2732 	 * Also, any failure handling from this point onwards must
2733 	 * be using bpf_prog_put() given the program is exposed.
2734 	 */
2735 	bpf_prog_kallsyms_add(prog);
2736 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2737 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2738 
2739 	err = bpf_prog_new_fd(prog);
2740 	if (err < 0)
2741 		bpf_prog_put(prog);
2742 	return err;
2743 
2744 free_used_maps:
2745 	/* In case we have subprogs, we need to wait for a grace
2746 	 * period before we can tear down JIT memory since symbols
2747 	 * are already exposed under kallsyms.
2748 	 */
2749 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2750 	return err;
2751 free_prog_sec:
2752 	free_uid(prog->aux->user);
2753 	security_bpf_prog_free(prog->aux);
2754 free_prog:
2755 	if (prog->aux->attach_btf)
2756 		btf_put(prog->aux->attach_btf);
2757 	bpf_prog_free(prog);
2758 	return err;
2759 }
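
/*
 * Illustrative user-space sketch (not part of the kernel source): a minimal
 * attr that exercises the load path above via the raw bpf(2) syscall.  The
 * two-instruction "return 0" program and the socket-filter type are
 * assumptions made for the example only.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */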
2760 
2761 #define BPF_OBJ_LAST_FIELD path_fd
2762 
2763 static int bpf_obj_pin(const union bpf_attr *attr)
2764 {
2765 	int path_fd;
2766 
2767 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD)
2768 		return -EINVAL;
2769 
2770 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2771 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2772 		return -EINVAL;
2773 
2774 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2775 	return bpf_obj_pin_user(attr->bpf_fd, path_fd,
2776 				u64_to_user_ptr(attr->pathname));
2777 }
2778 
2779 static int bpf_obj_get(const union bpf_attr *attr)
2780 {
2781 	int path_fd;
2782 
2783 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2784 	    attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD))
2785 		return -EINVAL;
2786 
2787 	/* path_fd has to be accompanied by BPF_F_PATH_FD flag */
2788 	if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd)
2789 		return -EINVAL;
2790 
2791 	path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD;
2792 	return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname),
2793 				attr->file_flags);
2794 }
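
/*
 * Illustrative user-space sketch (not part of the kernel source): pinning an
 * object into bpffs and re-opening it later, as wrapped by libbpf's
 * bpf_obj_pin()/bpf_obj_get().  The path and fds are assumptions.
 *
 *	err    = bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map");	// BPF_OBJ_PIN
 *	new_fd = bpf_obj_get("/sys/fs/bpf/my_map");		// BPF_OBJ_GET
 *	// With BPF_F_PATH_FD set in file_flags, attr->path_fd supplies the
 *	// directory fd that a relative pathname is resolved against instead
 *	// of AT_FDCWD.
 */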
2795 
2796 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2797 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2798 {
2799 	atomic64_set(&link->refcnt, 1);
2800 	link->type = type;
2801 	link->id = 0;
2802 	link->ops = ops;
2803 	link->prog = prog;
2804 }
2805 
2806 static void bpf_link_free_id(int id)
2807 {
2808 	if (!id)
2809 		return;
2810 
2811 	spin_lock_bh(&link_idr_lock);
2812 	idr_remove(&link_idr, id);
2813 	spin_unlock_bh(&link_idr_lock);
2814 }
2815 
2816 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2817  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2818  * anon_inode's release() call. This helper marks bpf_link as
2819  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2820  * is not decremented, it's the responsibility of the calling code that failed
2821  * to complete bpf_link initialization.
2822  */
2823 void bpf_link_cleanup(struct bpf_link_primer *primer)
2824 {
2825 	primer->link->prog = NULL;
2826 	bpf_link_free_id(primer->id);
2827 	fput(primer->file);
2828 	put_unused_fd(primer->fd);
2829 }
2830 
2831 void bpf_link_inc(struct bpf_link *link)
2832 {
2833 	atomic64_inc(&link->refcnt);
2834 }
2835 
2836 /* bpf_link_free is guaranteed to be called from process context */
2837 static void bpf_link_free(struct bpf_link *link)
2838 {
2839 	bpf_link_free_id(link->id);
2840 	if (link->prog) {
2841 		/* detach BPF program, clean up used resources */
2842 		link->ops->release(link);
2843 		bpf_prog_put(link->prog);
2844 	}
2845 	/* free bpf_link and its containing memory */
2846 	link->ops->dealloc(link);
2847 }
2848 
2849 static void bpf_link_put_deferred(struct work_struct *work)
2850 {
2851 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2852 
2853 	bpf_link_free(link);
2854 }
2855 
2856 /* bpf_link_put() might be called from atomic context; freeing the link may
2857  * need to acquire sleeping locks, so the final release is deferred to a
2858  * workqueue (process context). */
2859 void bpf_link_put(struct bpf_link *link)
2860 {
2861 	if (!atomic64_dec_and_test(&link->refcnt))
2862 		return;
2863 
2864 	INIT_WORK(&link->work, bpf_link_put_deferred);
2865 	schedule_work(&link->work);
2866 }
2867 EXPORT_SYMBOL(bpf_link_put);
2868 
2869 static void bpf_link_put_direct(struct bpf_link *link)
2870 {
2871 	if (!atomic64_dec_and_test(&link->refcnt))
2872 		return;
2873 	bpf_link_free(link);
2874 }
2875 
2876 static int bpf_link_release(struct inode *inode, struct file *filp)
2877 {
2878 	struct bpf_link *link = filp->private_data;
2879 
2880 	bpf_link_put_direct(link);
2881 	return 0;
2882 }
2883 
2884 #ifdef CONFIG_PROC_FS
2885 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2886 #define BPF_MAP_TYPE(_id, _ops)
2887 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2888 static const char *bpf_link_type_strs[] = {
2889 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2890 #include <linux/bpf_types.h>
2891 };
2892 #undef BPF_PROG_TYPE
2893 #undef BPF_MAP_TYPE
2894 #undef BPF_LINK_TYPE
2895 
2896 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2897 {
2898 	const struct bpf_link *link = filp->private_data;
2899 	const struct bpf_prog *prog = link->prog;
2900 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2901 
2902 	seq_printf(m,
2903 		   "link_type:\t%s\n"
2904 		   "link_id:\t%u\n",
2905 		   bpf_link_type_strs[link->type],
2906 		   link->id);
2907 	if (prog) {
2908 		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2909 		seq_printf(m,
2910 			   "prog_tag:\t%s\n"
2911 			   "prog_id:\t%u\n",
2912 			   prog_tag,
2913 			   prog->aux->id);
2914 	}
2915 	if (link->ops->show_fdinfo)
2916 		link->ops->show_fdinfo(link, m);
2917 }
2918 #endif
2919 
2920 static const struct file_operations bpf_link_fops = {
2921 #ifdef CONFIG_PROC_FS
2922 	.show_fdinfo	= bpf_link_show_fdinfo,
2923 #endif
2924 	.release	= bpf_link_release,
2925 	.read		= bpf_dummy_read,
2926 	.write		= bpf_dummy_write,
2927 };
2928 
2929 static int bpf_link_alloc_id(struct bpf_link *link)
2930 {
2931 	int id;
2932 
2933 	idr_preload(GFP_KERNEL);
2934 	spin_lock_bh(&link_idr_lock);
2935 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2936 	spin_unlock_bh(&link_idr_lock);
2937 	idr_preload_end();
2938 
2939 	return id;
2940 }
2941 
2942 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2943  * reserving unused FD and allocating ID from link_idr. This is to be paired
2944  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2945  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2946  * pre-allocated resources are to be freed with a bpf_link_cleanup() call. All the
2947  * transient state is passed around in struct bpf_link_primer.
2948  * This is the preferred way to create and initialize bpf_link, especially when
2949  * there are complicated and expensive operations in between creating bpf_link
2950  * itself and attaching it to the BPF hook. By using bpf_link_prime() and
2951  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2952  * expensive (and potentially failing) roll back operations in a rare case
2953  * that file, FD, or ID can't be allocated.
2954  */
2955 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2956 {
2957 	struct file *file;
2958 	int fd, id;
2959 
2960 	fd = get_unused_fd_flags(O_CLOEXEC);
2961 	if (fd < 0)
2962 		return fd;
2963 
2964 
2965 	id = bpf_link_alloc_id(link);
2966 	if (id < 0) {
2967 		put_unused_fd(fd);
2968 		return id;
2969 	}
2970 
2971 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2972 	if (IS_ERR(file)) {
2973 		bpf_link_free_id(id);
2974 		put_unused_fd(fd);
2975 		return PTR_ERR(file);
2976 	}
2977 
2978 	primer->link = link;
2979 	primer->file = file;
2980 	primer->fd = fd;
2981 	primer->id = id;
2982 	return 0;
2983 }
2984 
2985 int bpf_link_settle(struct bpf_link_primer *primer)
2986 {
2987 	/* make bpf_link fetchable by ID */
2988 	spin_lock_bh(&link_idr_lock);
2989 	primer->link->id = primer->id;
2990 	spin_unlock_bh(&link_idr_lock);
2991 	/* make bpf_link fetchable by FD */
2992 	fd_install(primer->fd, primer->file);
2993 	/* pass through installed FD */
2994 	return primer->fd;
2995 }
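
/*
 * Sketch of the prime/settle pattern described above, as used by the attach
 * paths later in this file (e.g. bpf_raw_tp_link_attach()).  The attach step
 * is a hypothetical placeholder; everything else mirrors the real callers.
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_FOO, &foo_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);			// link was never exposed
 *		return err;
 *	}
 *	err = attach_link_to_hook(link);	// hypothetical, may fail
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);	// drops file, fd and id
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);	// publish id and install fd
 */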
2996 
2997 int bpf_link_new_fd(struct bpf_link *link)
2998 {
2999 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
3000 }
3001 
3002 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
3003 {
3004 	struct fd f = fdget(ufd);
3005 	struct bpf_link *link;
3006 
3007 	if (!f.file)
3008 		return ERR_PTR(-EBADF);
3009 	if (f.file->f_op != &bpf_link_fops) {
3010 		fdput(f);
3011 		return ERR_PTR(-EINVAL);
3012 	}
3013 
3014 	link = f.file->private_data;
3015 	bpf_link_inc(link);
3016 	fdput(f);
3017 
3018 	return link;
3019 }
3020 EXPORT_SYMBOL(bpf_link_get_from_fd);
3021 
3022 static void bpf_tracing_link_release(struct bpf_link *link)
3023 {
3024 	struct bpf_tracing_link *tr_link =
3025 		container_of(link, struct bpf_tracing_link, link.link);
3026 
3027 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
3028 						tr_link->trampoline));
3029 
3030 	bpf_trampoline_put(tr_link->trampoline);
3031 
3032 	/* tgt_prog is NULL if target is a kernel function */
3033 	if (tr_link->tgt_prog)
3034 		bpf_prog_put(tr_link->tgt_prog);
3035 }
3036 
3037 static void bpf_tracing_link_dealloc(struct bpf_link *link)
3038 {
3039 	struct bpf_tracing_link *tr_link =
3040 		container_of(link, struct bpf_tracing_link, link.link);
3041 
3042 	kfree(tr_link);
3043 }
3044 
3045 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
3046 					 struct seq_file *seq)
3047 {
3048 	struct bpf_tracing_link *tr_link =
3049 		container_of(link, struct bpf_tracing_link, link.link);
3050 	u32 target_btf_id, target_obj_id;
3051 
3052 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3053 				  &target_obj_id, &target_btf_id);
3054 	seq_printf(seq,
3055 		   "attach_type:\t%d\n"
3056 		   "target_obj_id:\t%u\n"
3057 		   "target_btf_id:\t%u\n",
3058 		   tr_link->attach_type,
3059 		   target_obj_id,
3060 		   target_btf_id);
3061 }
3062 
3063 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
3064 					   struct bpf_link_info *info)
3065 {
3066 	struct bpf_tracing_link *tr_link =
3067 		container_of(link, struct bpf_tracing_link, link.link);
3068 
3069 	info->tracing.attach_type = tr_link->attach_type;
3070 	bpf_trampoline_unpack_key(tr_link->trampoline->key,
3071 				  &info->tracing.target_obj_id,
3072 				  &info->tracing.target_btf_id);
3073 
3074 	return 0;
3075 }
3076 
3077 static const struct bpf_link_ops bpf_tracing_link_lops = {
3078 	.release = bpf_tracing_link_release,
3079 	.dealloc = bpf_tracing_link_dealloc,
3080 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
3081 	.fill_link_info = bpf_tracing_link_fill_link_info,
3082 };
3083 
3084 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
3085 				   int tgt_prog_fd,
3086 				   u32 btf_id,
3087 				   u64 bpf_cookie)
3088 {
3089 	struct bpf_link_primer link_primer;
3090 	struct bpf_prog *tgt_prog = NULL;
3091 	struct bpf_trampoline *tr = NULL;
3092 	struct bpf_tracing_link *link;
3093 	u64 key = 0;
3094 	int err;
3095 
3096 	switch (prog->type) {
3097 	case BPF_PROG_TYPE_TRACING:
3098 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
3099 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
3100 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
3101 			err = -EINVAL;
3102 			goto out_put_prog;
3103 		}
3104 		break;
3105 	case BPF_PROG_TYPE_EXT:
3106 		if (prog->expected_attach_type != 0) {
3107 			err = -EINVAL;
3108 			goto out_put_prog;
3109 		}
3110 		break;
3111 	case BPF_PROG_TYPE_LSM:
3112 		if (prog->expected_attach_type != BPF_LSM_MAC) {
3113 			err = -EINVAL;
3114 			goto out_put_prog;
3115 		}
3116 		break;
3117 	default:
3118 		err = -EINVAL;
3119 		goto out_put_prog;
3120 	}
3121 
3122 	if (!!tgt_prog_fd != !!btf_id) {
3123 		err = -EINVAL;
3124 		goto out_put_prog;
3125 	}
3126 
3127 	if (tgt_prog_fd) {
3128 		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
3129 		if (prog->type != BPF_PROG_TYPE_EXT) {
3130 			err = -EINVAL;
3131 			goto out_put_prog;
3132 		}
3133 
3134 		tgt_prog = bpf_prog_get(tgt_prog_fd);
3135 		if (IS_ERR(tgt_prog)) {
3136 			err = PTR_ERR(tgt_prog);
3137 			tgt_prog = NULL;
3138 			goto out_put_prog;
3139 		}
3140 
3141 		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
3142 	}
3143 
3144 	link = kzalloc(sizeof(*link), GFP_USER);
3145 	if (!link) {
3146 		err = -ENOMEM;
3147 		goto out_put_prog;
3148 	}
3149 	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
3150 		      &bpf_tracing_link_lops, prog);
3151 	link->attach_type = prog->expected_attach_type;
3152 	link->link.cookie = bpf_cookie;
3153 
3154 	mutex_lock(&prog->aux->dst_mutex);
3155 
3156 	/* There are a few possible cases here:
3157 	 *
3158 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
3159 	 *   and not yet attached to anything, so we can use the values stored
3160 	 *   in prog->aux
3161 	 *
3162 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
3163 	 *   attached to a target and its initial target was cleared (below)
3164 	 *
3165 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
3166 	 *   target_btf_id using the link_create API.
3167 	 *
3168 	 * - if tgt_prog == NULL, this function was called using the old
3169 	 *   raw_tracepoint_open API, and we need a target from prog->aux
3170 	 *
3171 	 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
3172 	 *   was detached and is going for re-attachment.
3173 	 */
3174 	if (!prog->aux->dst_trampoline && !tgt_prog) {
3175 		/*
3176 		 * Allow re-attach for TRACING and LSM programs. If it's
3177 		 * currently linked, bpf_trampoline_link_prog will fail.
3178 		 * EXT programs need to specify tgt_prog_fd, so they
3179 		 * re-attach in a separate code path.
3180 		 */
3181 		if (prog->type != BPF_PROG_TYPE_TRACING &&
3182 		    prog->type != BPF_PROG_TYPE_LSM) {
3183 			err = -EINVAL;
3184 			goto out_unlock;
3185 		}
3186 		btf_id = prog->aux->attach_btf_id;
3187 		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
3188 	}
3189 
3190 	if (!prog->aux->dst_trampoline ||
3191 	    (key && key != prog->aux->dst_trampoline->key)) {
3192 		/* If there is no saved target, or the specified target is
3193 		 * different from the destination specified at load time, we
3194 		 * need a new trampoline and a check for compatibility
3195 		 */
3196 		struct bpf_attach_target_info tgt_info = {};
3197 
3198 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
3199 					      &tgt_info);
3200 		if (err)
3201 			goto out_unlock;
3202 
3203 		if (tgt_info.tgt_mod) {
3204 			module_put(prog->aux->mod);
3205 			prog->aux->mod = tgt_info.tgt_mod;
3206 		}
3207 
3208 		tr = bpf_trampoline_get(key, &tgt_info);
3209 		if (!tr) {
3210 			err = -ENOMEM;
3211 			goto out_unlock;
3212 		}
3213 	} else {
3214 		/* The caller didn't specify a target, or the target was the
3215 		 * same as the destination supplied during program load. This
3216 		 * means we can reuse the trampoline and reference from program
3217 		 * load time, and there is no need to allocate a new one. This
3218 		 * can only happen once for any program, as the saved values in
3219 		 * prog->aux are cleared below.
3220 		 */
3221 		tr = prog->aux->dst_trampoline;
3222 		tgt_prog = prog->aux->dst_prog;
3223 	}
3224 
3225 	err = bpf_link_prime(&link->link.link, &link_primer);
3226 	if (err)
3227 		goto out_unlock;
3228 
3229 	err = bpf_trampoline_link_prog(&link->link, tr);
3230 	if (err) {
3231 		bpf_link_cleanup(&link_primer);
3232 		link = NULL;
3233 		goto out_unlock;
3234 	}
3235 
3236 	link->tgt_prog = tgt_prog;
3237 	link->trampoline = tr;
3238 
3239 	/* Always clear the trampoline and target prog from prog->aux to make
3240 	 * sure the original attach destination is not kept alive after a
3241 	 * program is (re-)attached to another target.
3242 	 */
3243 	if (prog->aux->dst_prog &&
3244 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
3245 		/* got extra prog ref from syscall, or attaching to different prog */
3246 		bpf_prog_put(prog->aux->dst_prog);
3247 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
3248 		/* we allocated a new trampoline, so free the old one */
3249 		bpf_trampoline_put(prog->aux->dst_trampoline);
3250 
3251 	prog->aux->dst_prog = NULL;
3252 	prog->aux->dst_trampoline = NULL;
3253 	mutex_unlock(&prog->aux->dst_mutex);
3254 
3255 	return bpf_link_settle(&link_primer);
3256 out_unlock:
3257 	if (tr && tr != prog->aux->dst_trampoline)
3258 		bpf_trampoline_put(tr);
3259 	mutex_unlock(&prog->aux->dst_mutex);
3260 	kfree(link);
3261 out_put_prog:
3262 	if (tgt_prog_fd && tgt_prog)
3263 		bpf_prog_put(tgt_prog);
3264 	return err;
3265 }
3266 
3267 struct bpf_raw_tp_link {
3268 	struct bpf_link link;
3269 	struct bpf_raw_event_map *btp;
3270 };
3271 
3272 static void bpf_raw_tp_link_release(struct bpf_link *link)
3273 {
3274 	struct bpf_raw_tp_link *raw_tp =
3275 		container_of(link, struct bpf_raw_tp_link, link);
3276 
3277 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
3278 	bpf_put_raw_tracepoint(raw_tp->btp);
3279 }
3280 
3281 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
3282 {
3283 	struct bpf_raw_tp_link *raw_tp =
3284 		container_of(link, struct bpf_raw_tp_link, link);
3285 
3286 	kfree(raw_tp);
3287 }
3288 
3289 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
3290 					struct seq_file *seq)
3291 {
3292 	struct bpf_raw_tp_link *raw_tp_link =
3293 		container_of(link, struct bpf_raw_tp_link, link);
3294 
3295 	seq_printf(seq,
3296 		   "tp_name:\t%s\n",
3297 		   raw_tp_link->btp->tp->name);
3298 }
3299 
3300 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen,
3301 			    u32 len)
3302 {
3303 	if (ulen >= len + 1) {
3304 		if (copy_to_user(ubuf, buf, len + 1))
3305 			return -EFAULT;
3306 	} else {
3307 		char zero = '\0';
3308 
3309 		if (copy_to_user(ubuf, buf, ulen - 1))
3310 			return -EFAULT;
3311 		if (put_user(zero, ubuf + ulen - 1))
3312 			return -EFAULT;
3313 		return -ENOSPC;
3314 	}
3315 
3316 	return 0;
3317 }
3318 
3319 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
3320 					  struct bpf_link_info *info)
3321 {
3322 	struct bpf_raw_tp_link *raw_tp_link =
3323 		container_of(link, struct bpf_raw_tp_link, link);
3324 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
3325 	const char *tp_name = raw_tp_link->btp->tp->name;
3326 	u32 ulen = info->raw_tracepoint.tp_name_len;
3327 	size_t tp_len = strlen(tp_name);
3328 
3329 	if (!ulen ^ !ubuf)
3330 		return -EINVAL;
3331 
3332 	info->raw_tracepoint.tp_name_len = tp_len + 1;
3333 
3334 	if (!ubuf)
3335 		return 0;
3336 
3337 	return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len);
3338 }
3339 
3340 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
3341 	.release = bpf_raw_tp_link_release,
3342 	.dealloc = bpf_raw_tp_link_dealloc,
3343 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
3344 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
3345 };
3346 
3347 #ifdef CONFIG_PERF_EVENTS
3348 struct bpf_perf_link {
3349 	struct bpf_link link;
3350 	struct file *perf_file;
3351 };
3352 
3353 static void bpf_perf_link_release(struct bpf_link *link)
3354 {
3355 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3356 	struct perf_event *event = perf_link->perf_file->private_data;
3357 
3358 	perf_event_free_bpf_prog(event);
3359 	fput(perf_link->perf_file);
3360 }
3361 
3362 static void bpf_perf_link_dealloc(struct bpf_link *link)
3363 {
3364 	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
3365 
3366 	kfree(perf_link);
3367 }
3368 
3369 static int bpf_perf_link_fill_common(const struct perf_event *event,
3370 				     char __user *uname, u32 ulen,
3371 				     u64 *probe_offset, u64 *probe_addr,
3372 				     u32 *fd_type)
3373 {
3374 	const char *buf;
3375 	u32 prog_id;
3376 	size_t len;
3377 	int err;
3378 
3379 	if (!ulen ^ !uname)
3380 		return -EINVAL;
3381 	if (!uname)
3382 		return 0;
3383 
3384 	err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
3385 				      probe_offset, probe_addr);
3386 	if (err)
3387 		return err;
3388 
3389 	if (buf) {
3390 		len = strlen(buf);
3391 		err = bpf_copy_to_user(uname, buf, ulen, len);
3392 		if (err)
3393 			return err;
3394 	} else {
3395 		char zero = '\0';
3396 
3397 		if (put_user(zero, uname))
3398 			return -EFAULT;
3399 	}
3400 	return 0;
3401 }
3402 
3403 #ifdef CONFIG_KPROBE_EVENTS
3404 static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
3405 				     struct bpf_link_info *info)
3406 {
3407 	char __user *uname;
3408 	u64 addr, offset;
3409 	u32 ulen, type;
3410 	int err;
3411 
3412 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
3413 	ulen = info->perf_event.kprobe.name_len;
3414 	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3415 					&type);
3416 	if (err)
3417 		return err;
3418 	if (type == BPF_FD_TYPE_KRETPROBE)
3419 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
3420 	else
3421 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
3422 
3423 	info->perf_event.kprobe.offset = offset;
3424 	if (!kallsyms_show_value(current_cred()))
3425 		addr = 0;
3426 	info->perf_event.kprobe.addr = addr;
3427 	return 0;
3428 }
3429 #endif
3430 
3431 #ifdef CONFIG_UPROBE_EVENTS
3432 static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
3433 				     struct bpf_link_info *info)
3434 {
3435 	char __user *uname;
3436 	u64 addr, offset;
3437 	u32 ulen, type;
3438 	int err;
3439 
3440 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
3441 	ulen = info->perf_event.uprobe.name_len;
3442 	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
3443 					&type);
3444 	if (err)
3445 		return err;
3446 
3447 	if (type == BPF_FD_TYPE_URETPROBE)
3448 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
3449 	else
3450 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
3451 	info->perf_event.uprobe.offset = offset;
3452 	return 0;
3453 }
3454 #endif
3455 
3456 static int bpf_perf_link_fill_probe(const struct perf_event *event,
3457 				    struct bpf_link_info *info)
3458 {
3459 #ifdef CONFIG_KPROBE_EVENTS
3460 	if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE)
3461 		return bpf_perf_link_fill_kprobe(event, info);
3462 #endif
3463 #ifdef CONFIG_UPROBE_EVENTS
3464 	if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE)
3465 		return bpf_perf_link_fill_uprobe(event, info);
3466 #endif
3467 	return -EOPNOTSUPP;
3468 }
3469 
3470 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
3471 					 struct bpf_link_info *info)
3472 {
3473 	char __user *uname;
3474 	u32 ulen;
3475 
3476 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
3477 	ulen = info->perf_event.tracepoint.name_len;
3478 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
3479 	return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
3480 }
3481 
3482 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
3483 					 struct bpf_link_info *info)
3484 {
3485 	info->perf_event.event.type = event->attr.type;
3486 	info->perf_event.event.config = event->attr.config;
3487 	info->perf_event.type = BPF_PERF_EVENT_EVENT;
3488 	return 0;
3489 }
3490 
3491 static int bpf_perf_link_fill_link_info(const struct bpf_link *link,
3492 					struct bpf_link_info *info)
3493 {
3494 	struct bpf_perf_link *perf_link;
3495 	const struct perf_event *event;
3496 
3497 	perf_link = container_of(link, struct bpf_perf_link, link);
3498 	event = perf_get_event(perf_link->perf_file);
3499 	if (IS_ERR(event))
3500 		return PTR_ERR(event);
3501 
3502 	switch (event->prog->type) {
3503 	case BPF_PROG_TYPE_PERF_EVENT:
3504 		return bpf_perf_link_fill_perf_event(event, info);
3505 	case BPF_PROG_TYPE_TRACEPOINT:
3506 		return bpf_perf_link_fill_tracepoint(event, info);
3507 	case BPF_PROG_TYPE_KPROBE:
3508 		return bpf_perf_link_fill_probe(event, info);
3509 	default:
3510 		return -EOPNOTSUPP;
3511 	}
3512 }
3513 
3514 static const struct bpf_link_ops bpf_perf_link_lops = {
3515 	.release = bpf_perf_link_release,
3516 	.dealloc = bpf_perf_link_dealloc,
3517 	.fill_link_info = bpf_perf_link_fill_link_info,
3518 };
3519 
3520 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3521 {
3522 	struct bpf_link_primer link_primer;
3523 	struct bpf_perf_link *link;
3524 	struct perf_event *event;
3525 	struct file *perf_file;
3526 	int err;
3527 
3528 	if (attr->link_create.flags)
3529 		return -EINVAL;
3530 
3531 	perf_file = perf_event_get(attr->link_create.target_fd);
3532 	if (IS_ERR(perf_file))
3533 		return PTR_ERR(perf_file);
3534 
3535 	link = kzalloc(sizeof(*link), GFP_USER);
3536 	if (!link) {
3537 		err = -ENOMEM;
3538 		goto out_put_file;
3539 	}
3540 	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
3541 	link->perf_file = perf_file;
3542 
3543 	err = bpf_link_prime(&link->link, &link_primer);
3544 	if (err) {
3545 		kfree(link);
3546 		goto out_put_file;
3547 	}
3548 
3549 	event = perf_file->private_data;
3550 	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
3551 	if (err) {
3552 		bpf_link_cleanup(&link_primer);
3553 		goto out_put_file;
3554 	}
3555 	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
3556 	bpf_prog_inc(prog);
3557 
3558 	return bpf_link_settle(&link_primer);
3559 
3560 out_put_file:
3561 	fput(perf_file);
3562 	return err;
3563 }
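
/*
 * Illustrative user-space sketch (not part of the kernel source): reaching the
 * function above through BPF_LINK_CREATE, as wrapped by libbpf's
 * bpf_link_create().  perf_fd comes from perf_event_open() and is an
 * assumption made for the example.
 *
 *	LIBBPF_OPTS(bpf_link_create_opts, opts,
 *		    .perf_event.bpf_cookie = 0xcafe);
 *
 *	link_fd = bpf_link_create(prog_fd, perf_fd, BPF_PERF_EVENT, &opts);
 *	// Closing link_fd detaches the program and drops the reference on
 *	// the perf file taken in bpf_perf_link_attach().
 */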
3564 #else
3565 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3566 {
3567 	return -EOPNOTSUPP;
3568 }
3569 #endif /* CONFIG_PERF_EVENTS */
3570 
3571 static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
3572 				  const char __user *user_tp_name)
3573 {
3574 	struct bpf_link_primer link_primer;
3575 	struct bpf_raw_tp_link *link;
3576 	struct bpf_raw_event_map *btp;
3577 	const char *tp_name;
3578 	char buf[128];
3579 	int err;
3580 
3581 	switch (prog->type) {
3582 	case BPF_PROG_TYPE_TRACING:
3583 	case BPF_PROG_TYPE_EXT:
3584 	case BPF_PROG_TYPE_LSM:
3585 		if (user_tp_name)
3586 			/* The attach point for this category of programs
3587 			 * should be specified via btf_id during program load.
3588 			 */
3589 			return -EINVAL;
3590 		if (prog->type == BPF_PROG_TYPE_TRACING &&
3591 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
3592 			tp_name = prog->aux->attach_func_name;
3593 			break;
3594 		}
3595 		return bpf_tracing_prog_attach(prog, 0, 0, 0);
3596 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3597 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3598 		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
3599 			return -EFAULT;
3600 		buf[sizeof(buf) - 1] = 0;
3601 		tp_name = buf;
3602 		break;
3603 	default:
3604 		return -EINVAL;
3605 	}
3606 
3607 	btp = bpf_get_raw_tracepoint(tp_name);
3608 	if (!btp)
3609 		return -ENOENT;
3610 
3611 	link = kzalloc(sizeof(*link), GFP_USER);
3612 	if (!link) {
3613 		err = -ENOMEM;
3614 		goto out_put_btp;
3615 	}
3616 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
3617 		      &bpf_raw_tp_link_lops, prog);
3618 	link->btp = btp;
3619 
3620 	err = bpf_link_prime(&link->link, &link_primer);
3621 	if (err) {
3622 		kfree(link);
3623 		goto out_put_btp;
3624 	}
3625 
3626 	err = bpf_probe_register(link->btp, prog);
3627 	if (err) {
3628 		bpf_link_cleanup(&link_primer);
3629 		goto out_put_btp;
3630 	}
3631 
3632 	return bpf_link_settle(&link_primer);
3633 
3634 out_put_btp:
3635 	bpf_put_raw_tracepoint(btp);
3636 	return err;
3637 }
3638 
3639 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
3640 
3641 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
3642 {
3643 	struct bpf_prog *prog;
3644 	int fd;
3645 
3646 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
3647 		return -EINVAL;
3648 
3649 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
3650 	if (IS_ERR(prog))
3651 		return PTR_ERR(prog);
3652 
3653 	fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
3654 	if (fd < 0)
3655 		bpf_prog_put(prog);
3656 	return fd;
3657 }
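
/*
 * Illustrative user-space sketch (not part of the kernel source): attaching a
 * BPF_PROG_TYPE_RAW_TRACEPOINT program through the command handled above, as
 * wrapped by libbpf's bpf_raw_tracepoint_open().  The tracepoint name and
 * prog_fd are assumptions made for the example.
 *
 *	link_fd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
 *	if (link_fd < 0)
 *		return link_fd;		// attach failed, prog_fd still owned
 *	...
 *	close(link_fd);			// detaches and releases the bpf_link
 */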
3658 
3659 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
3660 					     enum bpf_attach_type attach_type)
3661 {
3662 	switch (prog->type) {
3663 	case BPF_PROG_TYPE_CGROUP_SOCK:
3664 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3665 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3666 	case BPF_PROG_TYPE_SK_LOOKUP:
3667 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
3668 	case BPF_PROG_TYPE_CGROUP_SKB:
3669 		if (!capable(CAP_NET_ADMIN))
3670 			/* cg-skb progs can be loaded by unpriv user.
3671 			 * check permissions at attach time.
3672 			 */
3673 			return -EPERM;
3674 		return prog->enforce_expected_attach_type &&
3675 			prog->expected_attach_type != attach_type ?
3676 			-EINVAL : 0;
3677 	case BPF_PROG_TYPE_KPROBE:
3678 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
3679 		    attach_type != BPF_TRACE_KPROBE_MULTI)
3680 			return -EINVAL;
3681 		return 0;
3682 	default:
3683 		return 0;
3684 	}
3685 }
3686 
3687 static enum bpf_prog_type
3688 attach_type_to_prog_type(enum bpf_attach_type attach_type)
3689 {
3690 	switch (attach_type) {
3691 	case BPF_CGROUP_INET_INGRESS:
3692 	case BPF_CGROUP_INET_EGRESS:
3693 		return BPF_PROG_TYPE_CGROUP_SKB;
3694 	case BPF_CGROUP_INET_SOCK_CREATE:
3695 	case BPF_CGROUP_INET_SOCK_RELEASE:
3696 	case BPF_CGROUP_INET4_POST_BIND:
3697 	case BPF_CGROUP_INET6_POST_BIND:
3698 		return BPF_PROG_TYPE_CGROUP_SOCK;
3699 	case BPF_CGROUP_INET4_BIND:
3700 	case BPF_CGROUP_INET6_BIND:
3701 	case BPF_CGROUP_INET4_CONNECT:
3702 	case BPF_CGROUP_INET6_CONNECT:
3703 	case BPF_CGROUP_INET4_GETPEERNAME:
3704 	case BPF_CGROUP_INET6_GETPEERNAME:
3705 	case BPF_CGROUP_INET4_GETSOCKNAME:
3706 	case BPF_CGROUP_INET6_GETSOCKNAME:
3707 	case BPF_CGROUP_UDP4_SENDMSG:
3708 	case BPF_CGROUP_UDP6_SENDMSG:
3709 	case BPF_CGROUP_UDP4_RECVMSG:
3710 	case BPF_CGROUP_UDP6_RECVMSG:
3711 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
3712 	case BPF_CGROUP_SOCK_OPS:
3713 		return BPF_PROG_TYPE_SOCK_OPS;
3714 	case BPF_CGROUP_DEVICE:
3715 		return BPF_PROG_TYPE_CGROUP_DEVICE;
3716 	case BPF_SK_MSG_VERDICT:
3717 		return BPF_PROG_TYPE_SK_MSG;
3718 	case BPF_SK_SKB_STREAM_PARSER:
3719 	case BPF_SK_SKB_STREAM_VERDICT:
3720 	case BPF_SK_SKB_VERDICT:
3721 		return BPF_PROG_TYPE_SK_SKB;
3722 	case BPF_LIRC_MODE2:
3723 		return BPF_PROG_TYPE_LIRC_MODE2;
3724 	case BPF_FLOW_DISSECTOR:
3725 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
3726 	case BPF_CGROUP_SYSCTL:
3727 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
3728 	case BPF_CGROUP_GETSOCKOPT:
3729 	case BPF_CGROUP_SETSOCKOPT:
3730 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
3731 	case BPF_TRACE_ITER:
3732 	case BPF_TRACE_RAW_TP:
3733 	case BPF_TRACE_FENTRY:
3734 	case BPF_TRACE_FEXIT:
3735 	case BPF_MODIFY_RETURN:
3736 		return BPF_PROG_TYPE_TRACING;
3737 	case BPF_LSM_MAC:
3738 		return BPF_PROG_TYPE_LSM;
3739 	case BPF_SK_LOOKUP:
3740 		return BPF_PROG_TYPE_SK_LOOKUP;
3741 	case BPF_XDP:
3742 		return BPF_PROG_TYPE_XDP;
3743 	case BPF_LSM_CGROUP:
3744 		return BPF_PROG_TYPE_LSM;
3745 	case BPF_TCX_INGRESS:
3746 	case BPF_TCX_EGRESS:
3747 		return BPF_PROG_TYPE_SCHED_CLS;
3748 	default:
3749 		return BPF_PROG_TYPE_UNSPEC;
3750 	}
3751 }
3752 
3753 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision
3754 
3755 #define BPF_F_ATTACH_MASK_BASE	\
3756 	(BPF_F_ALLOW_OVERRIDE |	\
3757 	 BPF_F_ALLOW_MULTI |	\
3758 	 BPF_F_REPLACE)
3759 
3760 #define BPF_F_ATTACH_MASK_MPROG	\
3761 	(BPF_F_REPLACE |	\
3762 	 BPF_F_BEFORE |		\
3763 	 BPF_F_AFTER |		\
3764 	 BPF_F_ID |		\
3765 	 BPF_F_LINK)
3766 
3767 static int bpf_prog_attach(const union bpf_attr *attr)
3768 {
3769 	enum bpf_prog_type ptype;
3770 	struct bpf_prog *prog;
3771 	u32 mask;
3772 	int ret;
3773 
3774 	if (CHECK_ATTR(BPF_PROG_ATTACH))
3775 		return -EINVAL;
3776 
3777 	ptype = attach_type_to_prog_type(attr->attach_type);
3778 	if (ptype == BPF_PROG_TYPE_UNSPEC)
3779 		return -EINVAL;
3780 	mask = bpf_mprog_supported(ptype) ?
3781 	       BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
3782 	if (attr->attach_flags & ~mask)
3783 		return -EINVAL;
3784 
3785 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3786 	if (IS_ERR(prog))
3787 		return PTR_ERR(prog);
3788 
3789 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3790 		bpf_prog_put(prog);
3791 		return -EINVAL;
3792 	}
3793 
3794 	switch (ptype) {
3795 	case BPF_PROG_TYPE_SK_SKB:
3796 	case BPF_PROG_TYPE_SK_MSG:
3797 		ret = sock_map_get_from_fd(attr, prog);
3798 		break;
3799 	case BPF_PROG_TYPE_LIRC_MODE2:
3800 		ret = lirc_prog_attach(attr, prog);
3801 		break;
3802 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3803 		ret = netns_bpf_prog_attach(attr, prog);
3804 		break;
3805 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3806 	case BPF_PROG_TYPE_CGROUP_SKB:
3807 	case BPF_PROG_TYPE_CGROUP_SOCK:
3808 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3809 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3810 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3811 	case BPF_PROG_TYPE_SOCK_OPS:
3812 	case BPF_PROG_TYPE_LSM:
3813 		if (ptype == BPF_PROG_TYPE_LSM &&
3814 		    prog->expected_attach_type != BPF_LSM_CGROUP)
3815 			ret = -EINVAL;
3816 		else
3817 			ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3818 		break;
3819 	case BPF_PROG_TYPE_SCHED_CLS:
3820 		ret = tcx_prog_attach(attr, prog);
3821 		break;
3822 	default:
3823 		ret = -EINVAL;
3824 	}
3825 
3826 	if (ret)
3827 		bpf_prog_put(prog);
3828 	return ret;
3829 }
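
/*
 * Illustrative user-space sketch of BPF_PROG_ATTACH for a cgroup skb
 * program.  cgroup_fd and prog_fd are placeholders assumed to come
 * from open(2) on a cgroup directory and from a prior BPF_PROG_LOAD:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
 *		perror("BPF_PROG_ATTACH");
 */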
3830 
3831 #define BPF_PROG_DETACH_LAST_FIELD expected_revision
3832 
3833 static int bpf_prog_detach(const union bpf_attr *attr)
3834 {
3835 	struct bpf_prog *prog = NULL;
3836 	enum bpf_prog_type ptype;
3837 	int ret;
3838 
3839 	if (CHECK_ATTR(BPF_PROG_DETACH))
3840 		return -EINVAL;
3841 
3842 	ptype = attach_type_to_prog_type(attr->attach_type);
3843 	if (bpf_mprog_supported(ptype)) {
3844 		if (ptype == BPF_PROG_TYPE_UNSPEC)
3845 			return -EINVAL;
3846 		if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
3847 			return -EINVAL;
3848 		if (attr->attach_bpf_fd) {
3849 			prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3850 			if (IS_ERR(prog))
3851 				return PTR_ERR(prog);
3852 		}
3853 	}
3854 
3855 	switch (ptype) {
3856 	case BPF_PROG_TYPE_SK_MSG:
3857 	case BPF_PROG_TYPE_SK_SKB:
3858 		ret = sock_map_prog_detach(attr, ptype);
3859 		break;
3860 	case BPF_PROG_TYPE_LIRC_MODE2:
3861 		ret = lirc_prog_detach(attr);
3862 		break;
3863 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3864 		ret = netns_bpf_prog_detach(attr, ptype);
3865 		break;
3866 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3867 	case BPF_PROG_TYPE_CGROUP_SKB:
3868 	case BPF_PROG_TYPE_CGROUP_SOCK:
3869 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3870 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3871 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3872 	case BPF_PROG_TYPE_SOCK_OPS:
3873 	case BPF_PROG_TYPE_LSM:
3874 		ret = cgroup_bpf_prog_detach(attr, ptype);
3875 		break;
3876 	case BPF_PROG_TYPE_SCHED_CLS:
3877 		ret = tcx_prog_detach(attr, prog);
3878 		break;
3879 	default:
3880 		ret = -EINVAL;
3881 	}
3882 
3883 	if (prog)
3884 		bpf_prog_put(prog);
3885 	return ret;
3886 }
3887 
3888 #define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
3889 
3890 static int bpf_prog_query(const union bpf_attr *attr,
3891 			  union bpf_attr __user *uattr)
3892 {
3893 	if (!capable(CAP_NET_ADMIN))
3894 		return -EPERM;
3895 	if (CHECK_ATTR(BPF_PROG_QUERY))
3896 		return -EINVAL;
3897 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3898 		return -EINVAL;
3899 
3900 	switch (attr->query.attach_type) {
3901 	case BPF_CGROUP_INET_INGRESS:
3902 	case BPF_CGROUP_INET_EGRESS:
3903 	case BPF_CGROUP_INET_SOCK_CREATE:
3904 	case BPF_CGROUP_INET_SOCK_RELEASE:
3905 	case BPF_CGROUP_INET4_BIND:
3906 	case BPF_CGROUP_INET6_BIND:
3907 	case BPF_CGROUP_INET4_POST_BIND:
3908 	case BPF_CGROUP_INET6_POST_BIND:
3909 	case BPF_CGROUP_INET4_CONNECT:
3910 	case BPF_CGROUP_INET6_CONNECT:
3911 	case BPF_CGROUP_INET4_GETPEERNAME:
3912 	case BPF_CGROUP_INET6_GETPEERNAME:
3913 	case BPF_CGROUP_INET4_GETSOCKNAME:
3914 	case BPF_CGROUP_INET6_GETSOCKNAME:
3915 	case BPF_CGROUP_UDP4_SENDMSG:
3916 	case BPF_CGROUP_UDP6_SENDMSG:
3917 	case BPF_CGROUP_UDP4_RECVMSG:
3918 	case BPF_CGROUP_UDP6_RECVMSG:
3919 	case BPF_CGROUP_SOCK_OPS:
3920 	case BPF_CGROUP_DEVICE:
3921 	case BPF_CGROUP_SYSCTL:
3922 	case BPF_CGROUP_GETSOCKOPT:
3923 	case BPF_CGROUP_SETSOCKOPT:
3924 	case BPF_LSM_CGROUP:
3925 		return cgroup_bpf_prog_query(attr, uattr);
3926 	case BPF_LIRC_MODE2:
3927 		return lirc_prog_query(attr, uattr);
3928 	case BPF_FLOW_DISSECTOR:
3929 	case BPF_SK_LOOKUP:
3930 		return netns_bpf_prog_query(attr, uattr);
3931 	case BPF_SK_SKB_STREAM_PARSER:
3932 	case BPF_SK_SKB_STREAM_VERDICT:
3933 	case BPF_SK_MSG_VERDICT:
3934 	case BPF_SK_SKB_VERDICT:
3935 		return sock_map_bpf_prog_query(attr, uattr);
3936 	case BPF_TCX_INGRESS:
3937 	case BPF_TCX_EGRESS:
3938 		return tcx_prog_query(attr, uattr);
3939 	default:
3940 		return -EINVAL;
3941 	}
3942 }
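
/*
 * Illustrative user-space sketch of BPF_PROG_QUERY, listing the
 * program IDs attached to a cgroup.  cgroup_fd is a placeholder for an
 * open cgroup directory fd:
 *
 *	__u32 prog_ids[64];
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.prog_ids    = (__u64)(unsigned long)prog_ids;
 *	attr.query.prog_cnt    = 64;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *
 * On success attr.query.prog_cnt is updated to the number of IDs
 * written into prog_ids.
 */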
3943 
3944 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size
3945 
3946 static int bpf_prog_test_run(const union bpf_attr *attr,
3947 			     union bpf_attr __user *uattr)
3948 {
3949 	struct bpf_prog *prog;
3950 	int ret = -ENOTSUPP;
3951 
3952 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3953 		return -EINVAL;
3954 
3955 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3956 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3957 		return -EINVAL;
3958 
3959 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3960 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3961 		return -EINVAL;
3962 
3963 	prog = bpf_prog_get(attr->test.prog_fd);
3964 	if (IS_ERR(prog))
3965 		return PTR_ERR(prog);
3966 
3967 	if (prog->aux->ops->test_run)
3968 		ret = prog->aux->ops->test_run(prog, attr, uattr);
3969 
3970 	bpf_prog_put(prog);
3971 	return ret;
3972 }
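
/*
 * Illustrative user-space sketch of BPF_PROG_TEST_RUN for a networking
 * program.  prog_fd, pkt and pkt_len are placeholders for a loaded
 * program and an input packet buffer:
 *
 *	char out[1500];
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	attr.test.prog_fd	= prog_fd;
 *	attr.test.data_in	= (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in	= pkt_len;
 *	attr.test.data_out	= (__u64)(unsigned long)out;
 *	attr.test.data_size_out	= sizeof(out);
 *	attr.test.repeat	= 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On success attr.test.retval holds the program's return code and
 * attr.test.duration the average run time in nanoseconds.
 */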
3973 
3974 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3975 
3976 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3977 			       union bpf_attr __user *uattr,
3978 			       struct idr *idr,
3979 			       spinlock_t *lock)
3980 {
3981 	u32 next_id = attr->start_id;
3982 	int err = 0;
3983 
3984 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3985 		return -EINVAL;
3986 
3987 	if (!capable(CAP_SYS_ADMIN))
3988 		return -EPERM;
3989 
3990 	next_id++;
3991 	spin_lock_bh(lock);
3992 	if (!idr_get_next(idr, &next_id))
3993 		err = -ENOENT;
3994 	spin_unlock_bh(lock);
3995 
3996 	if (!err)
3997 		err = put_user(next_id, &uattr->next_id);
3998 
3999 	return err;
4000 }
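
/*
 * Illustrative user-space sketch of walking all program IDs with
 * BPF_PROG_GET_NEXT_ID (requires CAP_SYS_ADMIN); the loop ends when
 * the kernel returns -ENOENT:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.start_id = 0;
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *			sizeof(attr))) {
 *		__u32 id = attr.next_id;
 *
 *		... use id, e.g. via BPF_PROG_GET_FD_BY_ID ...
 *		attr.start_id = id;
 *	}
 */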
4001 
4002 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
4003 {
4004 	struct bpf_map *map;
4005 
4006 	spin_lock_bh(&map_idr_lock);
4007 again:
4008 	map = idr_get_next(&map_idr, id);
4009 	if (map) {
4010 		map = __bpf_map_inc_not_zero(map, false);
4011 		if (IS_ERR(map)) {
4012 			(*id)++;
4013 			goto again;
4014 		}
4015 	}
4016 	spin_unlock_bh(&map_idr_lock);
4017 
4018 	return map;
4019 }
4020 
4021 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
4022 {
4023 	struct bpf_prog *prog;
4024 
4025 	spin_lock_bh(&prog_idr_lock);
4026 again:
4027 	prog = idr_get_next(&prog_idr, id);
4028 	if (prog) {
4029 		prog = bpf_prog_inc_not_zero(prog);
4030 		if (IS_ERR(prog)) {
4031 			(*id)++;
4032 			goto again;
4033 		}
4034 	}
4035 	spin_unlock_bh(&prog_idr_lock);
4036 
4037 	return prog;
4038 }
4039 
4040 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
4041 
4042 struct bpf_prog *bpf_prog_by_id(u32 id)
4043 {
4044 	struct bpf_prog *prog;
4045 
4046 	if (!id)
4047 		return ERR_PTR(-ENOENT);
4048 
4049 	spin_lock_bh(&prog_idr_lock);
4050 	prog = idr_find(&prog_idr, id);
4051 	if (prog)
4052 		prog = bpf_prog_inc_not_zero(prog);
4053 	else
4054 		prog = ERR_PTR(-ENOENT);
4055 	spin_unlock_bh(&prog_idr_lock);
4056 	return prog;
4057 }
4058 
4059 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
4060 {
4061 	struct bpf_prog *prog;
4062 	u32 id = attr->prog_id;
4063 	int fd;
4064 
4065 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
4066 		return -EINVAL;
4067 
4068 	if (!capable(CAP_SYS_ADMIN))
4069 		return -EPERM;
4070 
4071 	prog = bpf_prog_by_id(id);
4072 	if (IS_ERR(prog))
4073 		return PTR_ERR(prog);
4074 
4075 	fd = bpf_prog_new_fd(prog);
4076 	if (fd < 0)
4077 		bpf_prog_put(prog);
4078 
4079 	return fd;
4080 }
4081 
4082 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
4083 
4084 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
4085 {
4086 	struct bpf_map *map;
4087 	u32 id = attr->map_id;
4088 	int f_flags;
4089 	int fd;
4090 
4091 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
4092 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
4093 		return -EINVAL;
4094 
4095 	if (!capable(CAP_SYS_ADMIN))
4096 		return -EPERM;
4097 
4098 	f_flags = bpf_get_file_flag(attr->open_flags);
4099 	if (f_flags < 0)
4100 		return f_flags;
4101 
4102 	spin_lock_bh(&map_idr_lock);
4103 	map = idr_find(&map_idr, id);
4104 	if (map)
4105 		map = __bpf_map_inc_not_zero(map, true);
4106 	else
4107 		map = ERR_PTR(-ENOENT);
4108 	spin_unlock_bh(&map_idr_lock);
4109 
4110 	if (IS_ERR(map))
4111 		return PTR_ERR(map);
4112 
4113 	fd = bpf_map_new_fd(map, f_flags);
4114 	if (fd < 0)
4115 		bpf_map_put_with_uref(map);
4116 
4117 	return fd;
4118 }
4119 
4120 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
4121 					      unsigned long addr, u32 *off,
4122 					      u32 *type)
4123 {
4124 	const struct bpf_map *map;
4125 	int i;
4126 
4127 	mutex_lock(&prog->aux->used_maps_mutex);
4128 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
4129 		map = prog->aux->used_maps[i];
4130 		if (map == (void *)addr) {
4131 			*type = BPF_PSEUDO_MAP_FD;
4132 			goto out;
4133 		}
4134 		if (!map->ops->map_direct_value_meta)
4135 			continue;
4136 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
4137 			*type = BPF_PSEUDO_MAP_VALUE;
4138 			goto out;
4139 		}
4140 	}
4141 	map = NULL;
4142 
4143 out:
4144 	mutex_unlock(&prog->aux->used_maps_mutex);
4145 	return map;
4146 }
4147 
4148 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
4149 					      const struct cred *f_cred)
4150 {
4151 	const struct bpf_map *map;
4152 	struct bpf_insn *insns;
4153 	u32 off, type;
4154 	u64 imm;
4155 	u8 code;
4156 	int i;
4157 
4158 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
4159 			GFP_USER);
4160 	if (!insns)
4161 		return insns;
4162 
4163 	for (i = 0; i < prog->len; i++) {
4164 		code = insns[i].code;
4165 
4166 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
4167 			insns[i].code = BPF_JMP | BPF_CALL;
4168 			insns[i].imm = BPF_FUNC_tail_call;
4169 			/* fall-through */
4170 		}
4171 		if (code == (BPF_JMP | BPF_CALL) ||
4172 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
4173 			if (code == (BPF_JMP | BPF_CALL_ARGS))
4174 				insns[i].code = BPF_JMP | BPF_CALL;
4175 			if (!bpf_dump_raw_ok(f_cred))
4176 				insns[i].imm = 0;
4177 			continue;
4178 		}
4179 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
4180 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
4181 			continue;
4182 		}
4183 
4184 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
4185 			continue;
4186 
4187 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
4188 		map = bpf_map_from_imm(prog, imm, &off, &type);
4189 		if (map) {
4190 			insns[i].src_reg = type;
4191 			insns[i].imm = map->id;
4192 			insns[i + 1].imm = off;
4193 			continue;
4194 		}
4195 	}
4196 
4197 	return insns;
4198 }
4199 
4200 static int set_info_rec_size(struct bpf_prog_info *info)
4201 {
4202 	/*
4203 	 * Ensure each info.*_rec_size matches the record size the kernel
4204 	 * expects.  Alternatively, a zero *_rec_size is allowed, but only
4205 	 * when the matching *_cnt is also zero.
4206 	 *
4207 	 * In the zero case the kernel writes the expected *_rec_size back
4208 	 * into info, so user space can learn the record sizes it should
4209 	 * use on a subsequent call.
4210 	 */
4211 
4212 	if ((info->nr_func_info || info->func_info_rec_size) &&
4213 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
4214 		return -EINVAL;
4215 
4216 	if ((info->nr_line_info || info->line_info_rec_size) &&
4217 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
4218 		return -EINVAL;
4219 
4220 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
4221 	    info->jited_line_info_rec_size != sizeof(__u64))
4222 		return -EINVAL;
4223 
4224 	info->func_info_rec_size = sizeof(struct bpf_func_info);
4225 	info->line_info_rec_size = sizeof(struct bpf_line_info);
4226 	info->jited_line_info_rec_size = sizeof(__u64);
4227 
4228 	return 0;
4229 }
4230 
4231 static int bpf_prog_get_info_by_fd(struct file *file,
4232 				   struct bpf_prog *prog,
4233 				   const union bpf_attr *attr,
4234 				   union bpf_attr __user *uattr)
4235 {
4236 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4237 	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
4238 	struct bpf_prog_info info;
4239 	u32 info_len = attr->info.info_len;
4240 	struct bpf_prog_kstats stats;
4241 	char __user *uinsns;
4242 	u32 ulen;
4243 	int err;
4244 
4245 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4246 	if (err)
4247 		return err;
4248 	info_len = min_t(u32, sizeof(info), info_len);
4249 
4250 	memset(&info, 0, sizeof(info));
4251 	if (copy_from_user(&info, uinfo, info_len))
4252 		return -EFAULT;
4253 
4254 	info.type = prog->type;
4255 	info.id = prog->aux->id;
4256 	info.load_time = prog->aux->load_time;
4257 	info.created_by_uid = from_kuid_munged(current_user_ns(),
4258 					       prog->aux->user->uid);
4259 	info.gpl_compatible = prog->gpl_compatible;
4260 
4261 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
4262 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
4263 
4264 	mutex_lock(&prog->aux->used_maps_mutex);
4265 	ulen = info.nr_map_ids;
4266 	info.nr_map_ids = prog->aux->used_map_cnt;
4267 	ulen = min_t(u32, info.nr_map_ids, ulen);
4268 	if (ulen) {
4269 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
4270 		u32 i;
4271 
4272 		for (i = 0; i < ulen; i++)
4273 			if (put_user(prog->aux->used_maps[i]->id,
4274 				     &user_map_ids[i])) {
4275 				mutex_unlock(&prog->aux->used_maps_mutex);
4276 				return -EFAULT;
4277 			}
4278 	}
4279 	mutex_unlock(&prog->aux->used_maps_mutex);
4280 
4281 	err = set_info_rec_size(&info);
4282 	if (err)
4283 		return err;
4284 
4285 	bpf_prog_get_stats(prog, &stats);
4286 	info.run_time_ns = stats.nsecs;
4287 	info.run_cnt = stats.cnt;
4288 	info.recursion_misses = stats.misses;
4289 
4290 	info.verified_insns = prog->aux->verified_insns;
4291 
4292 	if (!bpf_capable()) {
4293 		info.jited_prog_len = 0;
4294 		info.xlated_prog_len = 0;
4295 		info.nr_jited_ksyms = 0;
4296 		info.nr_jited_func_lens = 0;
4297 		info.nr_func_info = 0;
4298 		info.nr_line_info = 0;
4299 		info.nr_jited_line_info = 0;
4300 		goto done;
4301 	}
4302 
4303 	ulen = info.xlated_prog_len;
4304 	info.xlated_prog_len = bpf_prog_insn_size(prog);
4305 	if (info.xlated_prog_len && ulen) {
4306 		struct bpf_insn *insns_sanitized;
4307 		bool fault;
4308 
4309 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
4310 			info.xlated_prog_insns = 0;
4311 			goto done;
4312 		}
4313 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
4314 		if (!insns_sanitized)
4315 			return -ENOMEM;
4316 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
4317 		ulen = min_t(u32, info.xlated_prog_len, ulen);
4318 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
4319 		kfree(insns_sanitized);
4320 		if (fault)
4321 			return -EFAULT;
4322 	}
4323 
4324 	if (bpf_prog_is_offloaded(prog->aux)) {
4325 		err = bpf_prog_offload_info_fill(&info, prog);
4326 		if (err)
4327 			return err;
4328 		goto done;
4329 	}
4330 
4331 	/* NOTE: the code below is intentionally skipped for offloaded
4332 	 * programs; bpf_prog_offload_info_fill() fills in the equivalent
4333 	 * fields for the offload case.
4334 	 */
4335 	ulen = info.jited_prog_len;
4336 	if (prog->aux->func_cnt) {
4337 		u32 i;
4338 
4339 		info.jited_prog_len = 0;
4340 		for (i = 0; i < prog->aux->func_cnt; i++)
4341 			info.jited_prog_len += prog->aux->func[i]->jited_len;
4342 	} else {
4343 		info.jited_prog_len = prog->jited_len;
4344 	}
4345 
4346 	if (info.jited_prog_len && ulen) {
4347 		if (bpf_dump_raw_ok(file->f_cred)) {
4348 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
4349 			ulen = min_t(u32, info.jited_prog_len, ulen);
4350 
4351 			/* for multi-function programs, copy the JITed
4352 			 * instructions for all the functions
4353 			 */
4354 			if (prog->aux->func_cnt) {
4355 				u32 len, free, i;
4356 				u8 *img;
4357 
4358 				free = ulen;
4359 				for (i = 0; i < prog->aux->func_cnt; i++) {
4360 					len = prog->aux->func[i]->jited_len;
4361 					len = min_t(u32, len, free);
4362 					img = (u8 *) prog->aux->func[i]->bpf_func;
4363 					if (copy_to_user(uinsns, img, len))
4364 						return -EFAULT;
4365 					uinsns += len;
4366 					free -= len;
4367 					if (!free)
4368 						break;
4369 				}
4370 			} else {
4371 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
4372 					return -EFAULT;
4373 			}
4374 		} else {
4375 			info.jited_prog_insns = 0;
4376 		}
4377 	}
4378 
4379 	ulen = info.nr_jited_ksyms;
4380 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
4381 	if (ulen) {
4382 		if (bpf_dump_raw_ok(file->f_cred)) {
4383 			unsigned long ksym_addr;
4384 			u64 __user *user_ksyms;
4385 			u32 i;
4386 
4387 			/* copy the address of the kernel symbol
4388 			 * corresponding to each function
4389 			 */
4390 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
4391 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
4392 			if (prog->aux->func_cnt) {
4393 				for (i = 0; i < ulen; i++) {
4394 					ksym_addr = (unsigned long)
4395 						prog->aux->func[i]->bpf_func;
4396 					if (put_user((u64) ksym_addr,
4397 						     &user_ksyms[i]))
4398 						return -EFAULT;
4399 				}
4400 			} else {
4401 				ksym_addr = (unsigned long) prog->bpf_func;
4402 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
4403 					return -EFAULT;
4404 			}
4405 		} else {
4406 			info.jited_ksyms = 0;
4407 		}
4408 	}
4409 
4410 	ulen = info.nr_jited_func_lens;
4411 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
4412 	if (ulen) {
4413 		if (bpf_dump_raw_ok(file->f_cred)) {
4414 			u32 __user *user_lens;
4415 			u32 func_len, i;
4416 
4417 			/* copy the JITed image lengths for each function */
4418 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
4419 			user_lens = u64_to_user_ptr(info.jited_func_lens);
4420 			if (prog->aux->func_cnt) {
4421 				for (i = 0; i < ulen; i++) {
4422 					func_len =
4423 						prog->aux->func[i]->jited_len;
4424 					if (put_user(func_len, &user_lens[i]))
4425 						return -EFAULT;
4426 				}
4427 			} else {
4428 				func_len = prog->jited_len;
4429 				if (put_user(func_len, &user_lens[0]))
4430 					return -EFAULT;
4431 			}
4432 		} else {
4433 			info.jited_func_lens = 0;
4434 		}
4435 	}
4436 
4437 	if (prog->aux->btf)
4438 		info.btf_id = btf_obj_id(prog->aux->btf);
4439 	info.attach_btf_id = prog->aux->attach_btf_id;
4440 	if (attach_btf)
4441 		info.attach_btf_obj_id = btf_obj_id(attach_btf);
4442 
4443 	ulen = info.nr_func_info;
4444 	info.nr_func_info = prog->aux->func_info_cnt;
4445 	if (info.nr_func_info && ulen) {
4446 		char __user *user_finfo;
4447 
4448 		user_finfo = u64_to_user_ptr(info.func_info);
4449 		ulen = min_t(u32, info.nr_func_info, ulen);
4450 		if (copy_to_user(user_finfo, prog->aux->func_info,
4451 				 info.func_info_rec_size * ulen))
4452 			return -EFAULT;
4453 	}
4454 
4455 	ulen = info.nr_line_info;
4456 	info.nr_line_info = prog->aux->nr_linfo;
4457 	if (info.nr_line_info && ulen) {
4458 		__u8 __user *user_linfo;
4459 
4460 		user_linfo = u64_to_user_ptr(info.line_info);
4461 		ulen = min_t(u32, info.nr_line_info, ulen);
4462 		if (copy_to_user(user_linfo, prog->aux->linfo,
4463 				 info.line_info_rec_size * ulen))
4464 			return -EFAULT;
4465 	}
4466 
4467 	ulen = info.nr_jited_line_info;
4468 	if (prog->aux->jited_linfo)
4469 		info.nr_jited_line_info = prog->aux->nr_linfo;
4470 	else
4471 		info.nr_jited_line_info = 0;
4472 	if (info.nr_jited_line_info && ulen) {
4473 		if (bpf_dump_raw_ok(file->f_cred)) {
4474 			unsigned long line_addr;
4475 			__u64 __user *user_linfo;
4476 			u32 i;
4477 
4478 			user_linfo = u64_to_user_ptr(info.jited_line_info);
4479 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
4480 			for (i = 0; i < ulen; i++) {
4481 				line_addr = (unsigned long)prog->aux->jited_linfo[i];
4482 				if (put_user((__u64)line_addr, &user_linfo[i]))
4483 					return -EFAULT;
4484 			}
4485 		} else {
4486 			info.jited_line_info = 0;
4487 		}
4488 	}
4489 
4490 	ulen = info.nr_prog_tags;
4491 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
4492 	if (ulen) {
4493 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
4494 		u32 i;
4495 
4496 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
4497 		ulen = min_t(u32, info.nr_prog_tags, ulen);
4498 		if (prog->aux->func_cnt) {
4499 			for (i = 0; i < ulen; i++) {
4500 				if (copy_to_user(user_prog_tags[i],
4501 						 prog->aux->func[i]->tag,
4502 						 BPF_TAG_SIZE))
4503 					return -EFAULT;
4504 			}
4505 		} else {
4506 			if (copy_to_user(user_prog_tags[0],
4507 					 prog->tag, BPF_TAG_SIZE))
4508 				return -EFAULT;
4509 		}
4510 	}
4511 
4512 done:
4513 	if (copy_to_user(uinfo, &info, info_len) ||
4514 	    put_user(info_len, &uattr->info.info_len))
4515 		return -EFAULT;
4516 
4517 	return 0;
4518 }
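
/*
 * Illustrative user-space sketch of BPF_OBJ_GET_INFO_BY_FD on a
 * program fd.  prog_fd is a placeholder; unset fields in info stay
 * zero, so no instruction or map-ID buffers are requested here:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		printf("prog id %u, %u xlated bytes\n",
 *		       info.id, info.xlated_prog_len);
 */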
4519 
4520 static int bpf_map_get_info_by_fd(struct file *file,
4521 				  struct bpf_map *map,
4522 				  const union bpf_attr *attr,
4523 				  union bpf_attr __user *uattr)
4524 {
4525 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4526 	struct bpf_map_info info;
4527 	u32 info_len = attr->info.info_len;
4528 	int err;
4529 
4530 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4531 	if (err)
4532 		return err;
4533 	info_len = min_t(u32, sizeof(info), info_len);
4534 
4535 	memset(&info, 0, sizeof(info));
4536 	info.type = map->map_type;
4537 	info.id = map->id;
4538 	info.key_size = map->key_size;
4539 	info.value_size = map->value_size;
4540 	info.max_entries = map->max_entries;
4541 	info.map_flags = map->map_flags;
4542 	info.map_extra = map->map_extra;
4543 	memcpy(info.name, map->name, sizeof(map->name));
4544 
4545 	if (map->btf) {
4546 		info.btf_id = btf_obj_id(map->btf);
4547 		info.btf_key_type_id = map->btf_key_type_id;
4548 		info.btf_value_type_id = map->btf_value_type_id;
4549 	}
4550 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4551 
4552 	if (bpf_map_is_offloaded(map)) {
4553 		err = bpf_map_offload_info_fill(&info, map);
4554 		if (err)
4555 			return err;
4556 	}
4557 
4558 	if (copy_to_user(uinfo, &info, info_len) ||
4559 	    put_user(info_len, &uattr->info.info_len))
4560 		return -EFAULT;
4561 
4562 	return 0;
4563 }
4564 
4565 static int bpf_btf_get_info_by_fd(struct file *file,
4566 				  struct btf *btf,
4567 				  const union bpf_attr *attr,
4568 				  union bpf_attr __user *uattr)
4569 {
4570 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4571 	u32 info_len = attr->info.info_len;
4572 	int err;
4573 
4574 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
4575 	if (err)
4576 		return err;
4577 
4578 	return btf_get_info_by_fd(btf, attr, uattr);
4579 }
4580 
4581 static int bpf_link_get_info_by_fd(struct file *file,
4582 				  struct bpf_link *link,
4583 				  const union bpf_attr *attr,
4584 				  union bpf_attr __user *uattr)
4585 {
4586 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
4587 	struct bpf_link_info info;
4588 	u32 info_len = attr->info.info_len;
4589 	int err;
4590 
4591 	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
4592 	if (err)
4593 		return err;
4594 	info_len = min_t(u32, sizeof(info), info_len);
4595 
4596 	memset(&info, 0, sizeof(info));
4597 	if (copy_from_user(&info, uinfo, info_len))
4598 		return -EFAULT;
4599 
4600 	info.type = link->type;
4601 	info.id = link->id;
4602 	if (link->prog)
4603 		info.prog_id = link->prog->aux->id;
4604 
4605 	if (link->ops->fill_link_info) {
4606 		err = link->ops->fill_link_info(link, &info);
4607 		if (err)
4608 			return err;
4609 	}
4610 
4611 	if (copy_to_user(uinfo, &info, info_len) ||
4612 	    put_user(info_len, &uattr->info.info_len))
4613 		return -EFAULT;
4614 
4615 	return 0;
4616 }
4617 
4619 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
4620 
4621 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
4622 				  union bpf_attr __user *uattr)
4623 {
4624 	int ufd = attr->info.bpf_fd;
4625 	struct fd f;
4626 	int err;
4627 
4628 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
4629 		return -EINVAL;
4630 
4631 	f = fdget(ufd);
4632 	if (!f.file)
4633 		return -EBADFD;
4634 
4635 	if (f.file->f_op == &bpf_prog_fops)
4636 		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
4637 					      uattr);
4638 	else if (f.file->f_op == &bpf_map_fops)
4639 		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
4640 					     uattr);
4641 	else if (f.file->f_op == &btf_fops)
4642 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
4643 	else if (f.file->f_op == &bpf_link_fops)
4644 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
4645 					      attr, uattr);
4646 	else
4647 		err = -EINVAL;
4648 
4649 	fdput(f);
4650 	return err;
4651 }
4652 
4653 #define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
4654 
4655 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
4656 {
4657 	if (CHECK_ATTR(BPF_BTF_LOAD))
4658 		return -EINVAL;
4659 
4660 	if (!bpf_capable())
4661 		return -EPERM;
4662 
4663 	return btf_new_fd(attr, uattr, uattr_size);
4664 }
4665 
4666 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
4667 
4668 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
4669 {
4670 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
4671 		return -EINVAL;
4672 
4673 	if (!capable(CAP_SYS_ADMIN))
4674 		return -EPERM;
4675 
4676 	return btf_get_fd_by_id(attr->btf_id);
4677 }
4678 
4679 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
4680 				    union bpf_attr __user *uattr,
4681 				    u32 prog_id, u32 fd_type,
4682 				    const char *buf, u64 probe_offset,
4683 				    u64 probe_addr)
4684 {
4685 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
4686 	u32 len = buf ? strlen(buf) : 0, input_len;
4687 	int err = 0;
4688 
4689 	if (put_user(len, &uattr->task_fd_query.buf_len))
4690 		return -EFAULT;
4691 	input_len = attr->task_fd_query.buf_len;
4692 	if (input_len && ubuf) {
4693 		if (!len) {
4694 			/* nothing to copy, just make ubuf NULL terminated */
4695 			char zero = '\0';
4696 
4697 			if (put_user(zero, ubuf))
4698 				return -EFAULT;
4699 		} else if (input_len >= len + 1) {
4700 			/* ubuf can hold the string with NULL terminator */
4701 			if (copy_to_user(ubuf, buf, len + 1))
4702 				return -EFAULT;
4703 		} else {
4704 			/* ubuf cannot hold the string with NULL terminator,
4705 			 * do a partial copy with NULL terminator.
4706 			 */
4707 			char zero = '\0';
4708 
4709 			err = -ENOSPC;
4710 			if (copy_to_user(ubuf, buf, input_len - 1))
4711 				return -EFAULT;
4712 			if (put_user(zero, ubuf + input_len - 1))
4713 				return -EFAULT;
4714 		}
4715 	}
4716 
4717 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
4718 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
4719 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
4720 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
4721 		return -EFAULT;
4722 
4723 	return err;
4724 }
4725 
4726 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
4727 
4728 static int bpf_task_fd_query(const union bpf_attr *attr,
4729 			     union bpf_attr __user *uattr)
4730 {
4731 	pid_t pid = attr->task_fd_query.pid;
4732 	u32 fd = attr->task_fd_query.fd;
4733 	const struct perf_event *event;
4734 	struct task_struct *task;
4735 	struct file *file;
4736 	int err;
4737 
4738 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
4739 		return -EINVAL;
4740 
4741 	if (!capable(CAP_SYS_ADMIN))
4742 		return -EPERM;
4743 
4744 	if (attr->task_fd_query.flags != 0)
4745 		return -EINVAL;
4746 
4747 	rcu_read_lock();
4748 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
4749 	rcu_read_unlock();
4750 	if (!task)
4751 		return -ENOENT;
4752 
4753 	err = 0;
4754 	file = fget_task(task, fd);
4755 	put_task_struct(task);
4756 	if (!file)
4757 		return -EBADF;
4758 
4759 	if (file->f_op == &bpf_link_fops) {
4760 		struct bpf_link *link = file->private_data;
4761 
4762 		if (link->ops == &bpf_raw_tp_link_lops) {
4763 			struct bpf_raw_tp_link *raw_tp =
4764 				container_of(link, struct bpf_raw_tp_link, link);
4765 			struct bpf_raw_event_map *btp = raw_tp->btp;
4766 
4767 			err = bpf_task_fd_query_copy(attr, uattr,
4768 						     raw_tp->link.prog->aux->id,
4769 						     BPF_FD_TYPE_RAW_TRACEPOINT,
4770 						     btp->tp->name, 0, 0);
4771 			goto put_file;
4772 		}
4773 		goto out_not_supp;
4774 	}
4775 
4776 	event = perf_get_event(file);
4777 	if (!IS_ERR(event)) {
4778 		u64 probe_offset, probe_addr;
4779 		u32 prog_id, fd_type;
4780 		const char *buf;
4781 
4782 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
4783 					      &buf, &probe_offset,
4784 					      &probe_addr);
4785 		if (!err)
4786 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
4787 						     fd_type, buf,
4788 						     probe_offset,
4789 						     probe_addr);
4790 		goto put_file;
4791 	}
4792 
4793 out_not_supp:
4794 	err = -ENOTSUPP;
4795 put_file:
4796 	fput(file);
4797 	return err;
4798 }
4799 
4800 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
4801 
4802 #define BPF_DO_BATCH(fn, ...)			\
4803 	do {					\
4804 		if (!fn) {			\
4805 			err = -ENOTSUPP;	\
4806 			goto err_put;		\
4807 		}				\
4808 		err = fn(__VA_ARGS__);		\
4809 	} while (0)
4810 
4811 static int bpf_map_do_batch(const union bpf_attr *attr,
4812 			    union bpf_attr __user *uattr,
4813 			    int cmd)
4814 {
4815 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
4816 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4817 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4818 	struct bpf_map *map;
4819 	int err, ufd;
4820 	struct fd f;
4821 
4822 	if (CHECK_ATTR(BPF_MAP_BATCH))
4823 		return -EINVAL;
4824 
4825 	ufd = attr->batch.map_fd;
4826 	f = fdget(ufd);
4827 	map = __bpf_map_get(f);
4828 	if (IS_ERR(map))
4829 		return PTR_ERR(map);
4830 	if (has_write)
4831 		bpf_map_write_active_inc(map);
4832 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4833 		err = -EPERM;
4834 		goto err_put;
4835 	}
4836 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4837 		err = -EPERM;
4838 		goto err_put;
4839 	}
4840 
4841 	if (cmd == BPF_MAP_LOOKUP_BATCH)
4842 		BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr);
4843 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4844 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr);
4845 	else if (cmd == BPF_MAP_UPDATE_BATCH)
4846 		BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr);
4847 	else
4848 		BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
4849 err_put:
4850 	if (has_write)
4851 		bpf_map_write_active_dec(map);
4852 	fdput(f);
4853 	return err;
4854 }
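
/*
 * Illustrative user-space sketch of BPF_MAP_LOOKUP_BATCH.  map_fd is a
 * placeholder; a hash map with __u32 keys and __u64 values is assumed,
 * for which the opaque batch cursor is a __u32:
 *
 *	__u32 keys[128], out_batch;
 *	__u64 vals[128];
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
 *	attr.batch.keys      = (__u64)(unsigned long)keys;
 *	attr.batch.values    = (__u64)(unsigned long)vals;
 *	attr.batch.count     = 128;
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *
 * On return attr.batch.count holds the number of elements copied; a
 * follow-up call passes the previous cursor via attr.batch.in_batch to
 * resume where this batch stopped.
 */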
4855 
4856 #define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
4857 static int link_create(union bpf_attr *attr, bpfptr_t uattr)
4858 {
4859 	enum bpf_prog_type ptype;
4860 	struct bpf_prog *prog;
4861 	int ret;
4862 
4863 	if (CHECK_ATTR(BPF_LINK_CREATE))
4864 		return -EINVAL;
4865 
4866 	if (attr->link_create.attach_type == BPF_STRUCT_OPS)
4867 		return bpf_struct_ops_link_create(attr);
4868 
4869 	prog = bpf_prog_get(attr->link_create.prog_fd);
4870 	if (IS_ERR(prog))
4871 		return PTR_ERR(prog);
4872 
4873 	ret = bpf_prog_attach_check_attach_type(prog,
4874 						attr->link_create.attach_type);
4875 	if (ret)
4876 		goto out;
4877 
4878 	switch (prog->type) {
4879 	case BPF_PROG_TYPE_EXT:
4880 		break;
4881 	case BPF_PROG_TYPE_NETFILTER:
4882 		if (attr->link_create.attach_type != BPF_NETFILTER) {
4883 			ret = -EINVAL;
4884 			goto out;
4885 		}
4886 		break;
4887 	case BPF_PROG_TYPE_PERF_EVENT:
4888 	case BPF_PROG_TYPE_TRACEPOINT:
4889 		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
4890 			ret = -EINVAL;
4891 			goto out;
4892 		}
4893 		break;
4894 	case BPF_PROG_TYPE_KPROBE:
4895 		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
4896 		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
4897 			ret = -EINVAL;
4898 			goto out;
4899 		}
4900 		break;
4901 	case BPF_PROG_TYPE_SCHED_CLS:
4902 		if (attr->link_create.attach_type != BPF_TCX_INGRESS &&
4903 		    attr->link_create.attach_type != BPF_TCX_EGRESS) {
4904 			ret = -EINVAL;
4905 			goto out;
4906 		}
4907 		break;
4908 	default:
4909 		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4910 		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4911 			ret = -EINVAL;
4912 			goto out;
4913 		}
4914 		break;
4915 	}
4916 
4917 	switch (prog->type) {
4918 	case BPF_PROG_TYPE_CGROUP_SKB:
4919 	case BPF_PROG_TYPE_CGROUP_SOCK:
4920 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4921 	case BPF_PROG_TYPE_SOCK_OPS:
4922 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4923 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4924 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4925 		ret = cgroup_bpf_link_attach(attr, prog);
4926 		break;
4927 	case BPF_PROG_TYPE_EXT:
4928 		ret = bpf_tracing_prog_attach(prog,
4929 					      attr->link_create.target_fd,
4930 					      attr->link_create.target_btf_id,
4931 					      attr->link_create.tracing.cookie);
4932 		break;
4933 	case BPF_PROG_TYPE_LSM:
4934 	case BPF_PROG_TYPE_TRACING:
4935 		if (attr->link_create.attach_type != prog->expected_attach_type) {
4936 			ret = -EINVAL;
4937 			goto out;
4938 		}
4939 		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
4940 			ret = bpf_raw_tp_link_attach(prog, NULL);
4941 		else if (prog->expected_attach_type == BPF_TRACE_ITER)
4942 			ret = bpf_iter_link_attach(attr, uattr, prog);
4943 		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
4944 			ret = cgroup_bpf_link_attach(attr, prog);
4945 		else
4946 			ret = bpf_tracing_prog_attach(prog,
4947 						      attr->link_create.target_fd,
4948 						      attr->link_create.target_btf_id,
4949 						      attr->link_create.tracing.cookie);
4950 		break;
4951 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4952 	case BPF_PROG_TYPE_SK_LOOKUP:
4953 		ret = netns_bpf_link_create(attr, prog);
4954 		break;
4955 #ifdef CONFIG_NET
4956 	case BPF_PROG_TYPE_XDP:
4957 		ret = bpf_xdp_link_attach(attr, prog);
4958 		break;
4959 	case BPF_PROG_TYPE_SCHED_CLS:
4960 		ret = tcx_link_attach(attr, prog);
4961 		break;
4962 	case BPF_PROG_TYPE_NETFILTER:
4963 		ret = bpf_nf_link_attach(attr, prog);
4964 		break;
4965 #endif
4966 	case BPF_PROG_TYPE_PERF_EVENT:
4967 	case BPF_PROG_TYPE_TRACEPOINT:
4968 		ret = bpf_perf_link_attach(attr, prog);
4969 		break;
4970 	case BPF_PROG_TYPE_KPROBE:
4971 		if (attr->link_create.attach_type == BPF_PERF_EVENT)
4972 			ret = bpf_perf_link_attach(attr, prog);
4973 		else
4974 			ret = bpf_kprobe_multi_link_attach(attr, prog);
4975 		break;
4976 	default:
4977 		ret = -EINVAL;
4978 	}
4979 
4980 out:
4981 	if (ret < 0)
4982 		bpf_prog_put(prog);
4983 	return ret;
4984 }
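
/*
 * Illustrative user-space sketch of BPF_LINK_CREATE for a cgroup
 * attachment.  prog_fd and cgroup_fd are placeholders:
 *
 *	union bpf_attr attr = {};
 *	int link_fd;
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * Unlike BPF_PROG_ATTACH, the attachment is owned by the returned
 * link_fd and is torn down when the last reference to the link goes
 * away (or explicitly via BPF_LINK_DETACH).
 */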
4985 
4986 static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
4987 {
4988 	struct bpf_map *new_map, *old_map = NULL;
4989 	int ret;
4990 
4991 	new_map = bpf_map_get(attr->link_update.new_map_fd);
4992 	if (IS_ERR(new_map))
4993 		return PTR_ERR(new_map);
4994 
4995 	if (attr->link_update.flags & BPF_F_REPLACE) {
4996 		old_map = bpf_map_get(attr->link_update.old_map_fd);
4997 		if (IS_ERR(old_map)) {
4998 			ret = PTR_ERR(old_map);
4999 			goto out_put;
5000 		}
5001 	} else if (attr->link_update.old_map_fd) {
5002 		ret = -EINVAL;
5003 		goto out_put;
5004 	}
5005 
5006 	ret = link->ops->update_map(link, new_map, old_map);
5007 
5008 	if (old_map)
5009 		bpf_map_put(old_map);
5010 out_put:
5011 	bpf_map_put(new_map);
5012 	return ret;
5013 }
5014 
5015 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
5016 
5017 static int link_update(union bpf_attr *attr)
5018 {
5019 	struct bpf_prog *old_prog = NULL, *new_prog;
5020 	struct bpf_link *link;
5021 	u32 flags;
5022 	int ret;
5023 
5024 	if (CHECK_ATTR(BPF_LINK_UPDATE))
5025 		return -EINVAL;
5026 
5027 	flags = attr->link_update.flags;
5028 	if (flags & ~BPF_F_REPLACE)
5029 		return -EINVAL;
5030 
5031 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
5032 	if (IS_ERR(link))
5033 		return PTR_ERR(link);
5034 
5035 	if (link->ops->update_map) {
5036 		ret = link_update_map(link, attr);
5037 		goto out_put_link;
5038 	}
5039 
5040 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
5041 	if (IS_ERR(new_prog)) {
5042 		ret = PTR_ERR(new_prog);
5043 		goto out_put_link;
5044 	}
5045 
5046 	if (flags & BPF_F_REPLACE) {
5047 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
5048 		if (IS_ERR(old_prog)) {
5049 			ret = PTR_ERR(old_prog);
5050 			old_prog = NULL;
5051 			goto out_put_progs;
5052 		}
5053 	} else if (attr->link_update.old_prog_fd) {
5054 		ret = -EINVAL;
5055 		goto out_put_progs;
5056 	}
5057 
5058 	if (link->ops->update_prog)
5059 		ret = link->ops->update_prog(link, new_prog, old_prog);
5060 	else
5061 		ret = -EINVAL;
5062 
5063 out_put_progs:
5064 	if (old_prog)
5065 		bpf_prog_put(old_prog);
5066 	if (ret)
5067 		bpf_prog_put(new_prog);
5068 out_put_link:
5069 	bpf_link_put_direct(link);
5070 	return ret;
5071 }
5072 
5073 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
5074 
5075 static int link_detach(union bpf_attr *attr)
5076 {
5077 	struct bpf_link *link;
5078 	int ret;
5079 
5080 	if (CHECK_ATTR(BPF_LINK_DETACH))
5081 		return -EINVAL;
5082 
5083 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
5084 	if (IS_ERR(link))
5085 		return PTR_ERR(link);
5086 
5087 	if (link->ops->detach)
5088 		ret = link->ops->detach(link);
5089 	else
5090 		ret = -EOPNOTSUPP;
5091 
5092 	bpf_link_put_direct(link);
5093 	return ret;
5094 }
5095 
5096 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
5097 {
5098 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
5099 }
5100 
5101 struct bpf_link *bpf_link_by_id(u32 id)
5102 {
5103 	struct bpf_link *link;
5104 
5105 	if (!id)
5106 		return ERR_PTR(-ENOENT);
5107 
5108 	spin_lock_bh(&link_idr_lock);
5109 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
5110 	link = idr_find(&link_idr, id);
5111 	if (link) {
5112 		if (link->id)
5113 			link = bpf_link_inc_not_zero(link);
5114 		else
5115 			link = ERR_PTR(-EAGAIN);
5116 	} else {
5117 		link = ERR_PTR(-ENOENT);
5118 	}
5119 	spin_unlock_bh(&link_idr_lock);
5120 	return link;
5121 }
5122 
5123 struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
5124 {
5125 	struct bpf_link *link;
5126 
5127 	spin_lock_bh(&link_idr_lock);
5128 again:
5129 	link = idr_get_next(&link_idr, id);
5130 	if (link) {
5131 		link = bpf_link_inc_not_zero(link);
5132 		if (IS_ERR(link)) {
5133 			(*id)++;
5134 			goto again;
5135 		}
5136 	}
5137 	spin_unlock_bh(&link_idr_lock);
5138 
5139 	return link;
5140 }
5141 
5142 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
5143 
5144 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
5145 {
5146 	struct bpf_link *link;
5147 	u32 id = attr->link_id;
5148 	int fd;
5149 
5150 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
5151 		return -EINVAL;
5152 
5153 	if (!capable(CAP_SYS_ADMIN))
5154 		return -EPERM;
5155 
5156 	link = bpf_link_by_id(id);
5157 	if (IS_ERR(link))
5158 		return PTR_ERR(link);
5159 
5160 	fd = bpf_link_new_fd(link);
5161 	if (fd < 0)
5162 		bpf_link_put_direct(link);
5163 
5164 	return fd;
5165 }
5166 
5167 DEFINE_MUTEX(bpf_stats_enabled_mutex);
5168 
5169 static int bpf_stats_release(struct inode *inode, struct file *file)
5170 {
5171 	mutex_lock(&bpf_stats_enabled_mutex);
5172 	static_key_slow_dec(&bpf_stats_enabled_key.key);
5173 	mutex_unlock(&bpf_stats_enabled_mutex);
5174 	return 0;
5175 }
5176 
5177 static const struct file_operations bpf_stats_fops = {
5178 	.release = bpf_stats_release,
5179 };
5180 
5181 static int bpf_enable_runtime_stats(void)
5182 {
5183 	int fd;
5184 
5185 	mutex_lock(&bpf_stats_enabled_mutex);
5186 
5187 	/* Set a very high limit to avoid overflow */
5188 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
5189 		mutex_unlock(&bpf_stats_enabled_mutex);
5190 		return -EBUSY;
5191 	}
5192 
5193 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
5194 	if (fd >= 0)
5195 		static_key_slow_inc(&bpf_stats_enabled_key.key);
5196 
5197 	mutex_unlock(&bpf_stats_enabled_mutex);
5198 	return fd;
5199 }
5200 
5201 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
5202 
5203 static int bpf_enable_stats(union bpf_attr *attr)
5204 {
5205 
5207 		return -EINVAL;
5208 
5209 	if (!capable(CAP_SYS_ADMIN))
5210 		return -EPERM;
5211 
5212 	switch (attr->enable_stats.type) {
5213 	case BPF_STATS_RUN_TIME:
5214 		return bpf_enable_runtime_stats();
5215 	default:
5216 		break;
5217 	}
5218 	return -EINVAL;
5219 }
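
/*
 * Illustrative user-space sketch of BPF_ENABLE_STATS:
 *
 *	union bpf_attr attr = {};
 *	int stats_fd;
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *
 * Run-time accounting stays enabled until stats_fd is closed; the
 * per-program counters are then reported as run_time_ns, run_cnt and
 * recursion_misses via BPF_OBJ_GET_INFO_BY_FD.
 */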
5220 
5221 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
5222 
5223 static int bpf_iter_create(union bpf_attr *attr)
5224 {
5225 	struct bpf_link *link;
5226 	int err;
5227 
5228 	if (CHECK_ATTR(BPF_ITER_CREATE))
5229 		return -EINVAL;
5230 
5231 	if (attr->iter_create.flags)
5232 		return -EINVAL;
5233 
5234 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
5235 	if (IS_ERR(link))
5236 		return PTR_ERR(link);
5237 
5238 	err = bpf_iter_new_fd(link);
5239 	bpf_link_put_direct(link);
5240 
5241 	return err;
5242 }
5243 
5244 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
5245 
5246 static int bpf_prog_bind_map(union bpf_attr *attr)
5247 {
5248 	struct bpf_prog *prog;
5249 	struct bpf_map *map;
5250 	struct bpf_map **used_maps_old, **used_maps_new;
5251 	int i, ret = 0;
5252 
5253 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
5254 		return -EINVAL;
5255 
5256 	if (attr->prog_bind_map.flags)
5257 		return -EINVAL;
5258 
5259 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
5260 	if (IS_ERR(prog))
5261 		return PTR_ERR(prog);
5262 
5263 	map = bpf_map_get(attr->prog_bind_map.map_fd);
5264 	if (IS_ERR(map)) {
5265 		ret = PTR_ERR(map);
5266 		goto out_prog_put;
5267 	}
5268 
5269 	mutex_lock(&prog->aux->used_maps_mutex);
5270 
5271 	used_maps_old = prog->aux->used_maps;
5272 
5273 	for (i = 0; i < prog->aux->used_map_cnt; i++)
5274 		if (used_maps_old[i] == map) {
5275 			bpf_map_put(map);
5276 			goto out_unlock;
5277 		}
5278 
5279 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
5280 				      sizeof(used_maps_new[0]),
5281 				      GFP_KERNEL);
5282 	if (!used_maps_new) {
5283 		ret = -ENOMEM;
5284 		goto out_unlock;
5285 	}
5286 
5287 	memcpy(used_maps_new, used_maps_old,
5288 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
5289 	used_maps_new[prog->aux->used_map_cnt] = map;
5290 
5291 	prog->aux->used_map_cnt++;
5292 	prog->aux->used_maps = used_maps_new;
5293 
5294 	kfree(used_maps_old);
5295 
5296 out_unlock:
5297 	mutex_unlock(&prog->aux->used_maps_mutex);
5298 
5299 	if (ret)
5300 		bpf_map_put(map);
5301 out_prog_put:
5302 	bpf_prog_put(prog);
5303 	return ret;
5304 }
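
/*
 * Illustrative user-space sketch of BPF_PROG_BIND_MAP.  prog_fd and
 * map_fd are placeholders:
 *
 *	union bpf_attr attr = {};
 *	int err;
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd  = map_fd;
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *
 * Binding ties the map's lifetime to the program, which is useful for
 * metadata maps the program never references from its instructions.
 */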
5305 
5306 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
5307 {
5308 	union bpf_attr attr;
5309 	int err;
5310 
5311 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
5312 	if (err)
5313 		return err;
5314 	size = min_t(u32, size, sizeof(attr));
5315 
5316 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
5317 	memset(&attr, 0, sizeof(attr));
5318 	if (copy_from_bpfptr(&attr, uattr, size) != 0)
5319 		return -EFAULT;
5320 
5321 	err = security_bpf(cmd, &attr, size);
5322 	if (err < 0)
5323 		return err;
5324 
5325 	switch (cmd) {
5326 	case BPF_MAP_CREATE:
5327 		err = map_create(&attr);
5328 		break;
5329 	case BPF_MAP_LOOKUP_ELEM:
5330 		err = map_lookup_elem(&attr);
5331 		break;
5332 	case BPF_MAP_UPDATE_ELEM:
5333 		err = map_update_elem(&attr, uattr);
5334 		break;
5335 	case BPF_MAP_DELETE_ELEM:
5336 		err = map_delete_elem(&attr, uattr);
5337 		break;
5338 	case BPF_MAP_GET_NEXT_KEY:
5339 		err = map_get_next_key(&attr);
5340 		break;
5341 	case BPF_MAP_FREEZE:
5342 		err = map_freeze(&attr);
5343 		break;
5344 	case BPF_PROG_LOAD:
5345 		err = bpf_prog_load(&attr, uattr, size);
5346 		break;
5347 	case BPF_OBJ_PIN:
5348 		err = bpf_obj_pin(&attr);
5349 		break;
5350 	case BPF_OBJ_GET:
5351 		err = bpf_obj_get(&attr);
5352 		break;
5353 	case BPF_PROG_ATTACH:
5354 		err = bpf_prog_attach(&attr);
5355 		break;
5356 	case BPF_PROG_DETACH:
5357 		err = bpf_prog_detach(&attr);
5358 		break;
5359 	case BPF_PROG_QUERY:
5360 		err = bpf_prog_query(&attr, uattr.user);
5361 		break;
5362 	case BPF_PROG_TEST_RUN:
5363 		err = bpf_prog_test_run(&attr, uattr.user);
5364 		break;
5365 	case BPF_PROG_GET_NEXT_ID:
5366 		err = bpf_obj_get_next_id(&attr, uattr.user,
5367 					  &prog_idr, &prog_idr_lock);
5368 		break;
5369 	case BPF_MAP_GET_NEXT_ID:
5370 		err = bpf_obj_get_next_id(&attr, uattr.user,
5371 					  &map_idr, &map_idr_lock);
5372 		break;
5373 	case BPF_BTF_GET_NEXT_ID:
5374 		err = bpf_obj_get_next_id(&attr, uattr.user,
5375 					  &btf_idr, &btf_idr_lock);
5376 		break;
5377 	case BPF_PROG_GET_FD_BY_ID:
5378 		err = bpf_prog_get_fd_by_id(&attr);
5379 		break;
5380 	case BPF_MAP_GET_FD_BY_ID:
5381 		err = bpf_map_get_fd_by_id(&attr);
5382 		break;
5383 	case BPF_OBJ_GET_INFO_BY_FD:
5384 		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
5385 		break;
5386 	case BPF_RAW_TRACEPOINT_OPEN:
5387 		err = bpf_raw_tracepoint_open(&attr);
5388 		break;
5389 	case BPF_BTF_LOAD:
5390 		err = bpf_btf_load(&attr, uattr, size);
5391 		break;
5392 	case BPF_BTF_GET_FD_BY_ID:
5393 		err = bpf_btf_get_fd_by_id(&attr);
5394 		break;
5395 	case BPF_TASK_FD_QUERY:
5396 		err = bpf_task_fd_query(&attr, uattr.user);
5397 		break;
5398 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
5399 		err = map_lookup_and_delete_elem(&attr);
5400 		break;
5401 	case BPF_MAP_LOOKUP_BATCH:
5402 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
5403 		break;
5404 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
5405 		err = bpf_map_do_batch(&attr, uattr.user,
5406 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
5407 		break;
5408 	case BPF_MAP_UPDATE_BATCH:
5409 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
5410 		break;
5411 	case BPF_MAP_DELETE_BATCH:
5412 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
5413 		break;
5414 	case BPF_LINK_CREATE:
5415 		err = link_create(&attr, uattr);
5416 		break;
5417 	case BPF_LINK_UPDATE:
5418 		err = link_update(&attr);
5419 		break;
5420 	case BPF_LINK_GET_FD_BY_ID:
5421 		err = bpf_link_get_fd_by_id(&attr);
5422 		break;
5423 	case BPF_LINK_GET_NEXT_ID:
5424 		err = bpf_obj_get_next_id(&attr, uattr.user,
5425 					  &link_idr, &link_idr_lock);
5426 		break;
5427 	case BPF_ENABLE_STATS:
5428 		err = bpf_enable_stats(&attr);
5429 		break;
5430 	case BPF_ITER_CREATE:
5431 		err = bpf_iter_create(&attr);
5432 		break;
5433 	case BPF_LINK_DETACH:
5434 		err = link_detach(&attr);
5435 		break;
5436 	case BPF_PROG_BIND_MAP:
5437 		err = bpf_prog_bind_map(&attr);
5438 		break;
5439 	default:
5440 		err = -EINVAL;
5441 		break;
5442 	}
5443 
5444 	return err;
5445 }
5446 
5447 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
5448 {
5449 	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
5450 }
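
/*
 * libc does not provide a wrapper for bpf(2); a minimal user-space
 * wrapper, as typically open-coded or provided by libbpf, looks like:
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */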
5451 
5452 static bool syscall_prog_is_valid_access(int off, int size,
5453 					 enum bpf_access_type type,
5454 					 const struct bpf_prog *prog,
5455 					 struct bpf_insn_access_aux *info)
5456 {
5457 	if (off < 0 || off >= U16_MAX)
5458 		return false;
5459 	if (off % size != 0)
5460 		return false;
5461 	return true;
5462 }
5463 
5464 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
5465 {
5466 	switch (cmd) {
5467 	case BPF_MAP_CREATE:
5468 	case BPF_MAP_DELETE_ELEM:
5469 	case BPF_MAP_UPDATE_ELEM:
5470 	case BPF_MAP_FREEZE:
5471 	case BPF_MAP_GET_FD_BY_ID:
5472 	case BPF_PROG_LOAD:
5473 	case BPF_BTF_LOAD:
5474 	case BPF_LINK_CREATE:
5475 	case BPF_RAW_TRACEPOINT_OPEN:
5476 		break;
5477 	default:
5478 		return -EINVAL;
5479 	}
5480 	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
5481 }
5482 
5484 /* To shut up -Wmissing-prototypes.
5485  * This function is used by the kernel light skeleton
5486  * to load bpf programs when modules are loaded or during kernel boot.
5487  * See tools/lib/bpf/skel_internal.h
5488  */
5489 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
5490 
5491 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
5492 {
5493 	struct bpf_prog * __maybe_unused prog;
5494 	struct bpf_tramp_run_ctx __maybe_unused run_ctx;
5495 
5496 	switch (cmd) {
5497 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
5498 	case BPF_PROG_TEST_RUN:
5499 		if (attr->test.data_in || attr->test.data_out ||
5500 		    attr->test.ctx_out || attr->test.duration ||
5501 		    attr->test.repeat || attr->test.flags)
5502 			return -EINVAL;
5503 
5504 		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
5505 		if (IS_ERR(prog))
5506 			return PTR_ERR(prog);
5507 
5508 		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
5509 		    attr->test.ctx_size_in > U16_MAX) {
5510 			bpf_prog_put(prog);
5511 			return -EINVAL;
5512 		}
5513 
5514 		run_ctx.bpf_cookie = 0;
5515 		run_ctx.saved_run_ctx = NULL;
5516 		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
5517 			/* recursion detected */
5518 			bpf_prog_put(prog);
5519 			return -EBUSY;
5520 		}
5521 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
5522 		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
5523 						&run_ctx);
5524 		bpf_prog_put(prog);
5525 		return 0;
5526 #endif
5527 	default:
5528 		return ____bpf_sys_bpf(cmd, attr, size);
5529 	}
5530 }
5531 EXPORT_SYMBOL(kern_sys_bpf);
5532 
5533 static const struct bpf_func_proto bpf_sys_bpf_proto = {
5534 	.func		= bpf_sys_bpf,
5535 	.gpl_only	= false,
5536 	.ret_type	= RET_INTEGER,
5537 	.arg1_type	= ARG_ANYTHING,
5538 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
5539 	.arg3_type	= ARG_CONST_SIZE,
5540 };
5541 
5542 const struct bpf_func_proto * __weak
5543 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5544 {
5545 	return bpf_base_func_proto(func_id);
5546 }
5547 
5548 BPF_CALL_1(bpf_sys_close, u32, fd)
5549 {
5550 	/* When a bpf program calls this helper, there must not be an
5551 	 * fdget() outstanding without a matching, completed fdput().
5552 	 * This helper is therefore allowed only in the call chain:
5553 	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
5554 	 */
5555 	return close_fd(fd);
5556 }
5557 
5558 static const struct bpf_func_proto bpf_sys_close_proto = {
5559 	.func		= bpf_sys_close,
5560 	.gpl_only	= false,
5561 	.ret_type	= RET_INTEGER,
5562 	.arg1_type	= ARG_ANYTHING,
5563 };
5564 
5565 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
5566 {
5567 	if (flags)
5568 		return -EINVAL;
5569 
5570 	if (name_sz <= 1 || name[name_sz - 1])
5571 		return -EINVAL;
5572 
5573 	if (!bpf_dump_raw_ok(current_cred()))
5574 		return -EPERM;
5575 
5576 	*res = kallsyms_lookup_name(name);
5577 	return *res ? 0 : -ENOENT;
5578 }
5579 
5580 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
5581 	.func		= bpf_kallsyms_lookup_name,
5582 	.gpl_only	= false,
5583 	.ret_type	= RET_INTEGER,
5584 	.arg1_type	= ARG_PTR_TO_MEM,
5585 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
5586 	.arg3_type	= ARG_ANYTHING,
5587 	.arg4_type	= ARG_PTR_TO_LONG,
5588 };
5589 
5590 static const struct bpf_func_proto *
5591 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5592 {
5593 	switch (func_id) {
5594 	case BPF_FUNC_sys_bpf:
5595 		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
5596 	case BPF_FUNC_btf_find_by_name_kind:
5597 		return &bpf_btf_find_by_name_kind_proto;
5598 	case BPF_FUNC_sys_close:
5599 		return &bpf_sys_close_proto;
5600 	case BPF_FUNC_kallsyms_lookup_name:
5601 		return &bpf_kallsyms_lookup_name_proto;
5602 	default:
5603 		return tracing_prog_func_proto(func_id, prog);
5604 	}
5605 }
5606 
5607 const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
5608 	.get_func_proto  = syscall_prog_func_proto,
5609 	.is_valid_access = syscall_prog_is_valid_access,
5610 };
5611 
5612 const struct bpf_prog_ops bpf_syscall_prog_ops = {
5613 	.test_run = bpf_prog_test_run_syscall,
5614 };
5615 
5616 #ifdef CONFIG_SYSCTL
5617 static int bpf_stats_handler(struct ctl_table *table, int write,
5618 			     void *buffer, size_t *lenp, loff_t *ppos)
5619 {
5620 	struct static_key *key = (struct static_key *)table->data;
5621 	static int saved_val;
5622 	int val, ret;
5623 	struct ctl_table tmp = {
5624 		.data   = &val,
5625 		.maxlen = sizeof(val),
5626 		.mode   = table->mode,
5627 		.extra1 = SYSCTL_ZERO,
5628 		.extra2 = SYSCTL_ONE,
5629 	};
5630 
5631 	if (write && !capable(CAP_SYS_ADMIN))
5632 		return -EPERM;
5633 
5634 	mutex_lock(&bpf_stats_enabled_mutex);
5635 	val = saved_val;
5636 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5637 	if (write && !ret && val != saved_val) {
5638 		if (val)
5639 			static_key_slow_inc(key);
5640 		else
5641 			static_key_slow_dec(key);
5642 		saved_val = val;
5643 	}
5644 	mutex_unlock(&bpf_stats_enabled_mutex);
5645 	return ret;
5646 }
5647 
5648 void __weak unpriv_ebpf_notify(int new_state)
5649 {
5650 }
5651 
5652 static int bpf_unpriv_handler(struct ctl_table *table, int write,
5653 			      void *buffer, size_t *lenp, loff_t *ppos)
5654 {
5655 	int ret, unpriv_enable = *(int *)table->data;
5656 	bool locked_state = unpriv_enable == 1;
5657 	struct ctl_table tmp = *table;
5658 
5659 	if (write && !capable(CAP_SYS_ADMIN))
5660 		return -EPERM;
5661 
5662 	tmp.data = &unpriv_enable;
5663 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
5664 	if (write && !ret) {
5665 		if (locked_state && unpriv_enable != 1)
5666 			return -EPERM;
5667 		*(int *)table->data = unpriv_enable;
5668 	}
5669 
5670 	if (write)
5671 		unpriv_ebpf_notify(unpriv_enable);
5672 
5673 	return ret;
5674 }
5675 
5676 static struct ctl_table bpf_syscall_table[] = {
5677 	{
5678 		.procname	= "unprivileged_bpf_disabled",
5679 		.data		= &sysctl_unprivileged_bpf_disabled,
5680 		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
5681 		.mode		= 0644,
5682 		.proc_handler	= bpf_unpriv_handler,
5683 		.extra1		= SYSCTL_ZERO,
5684 		.extra2		= SYSCTL_TWO,
5685 	},
5686 	{
5687 		.procname	= "bpf_stats_enabled",
5688 		.data		= &bpf_stats_enabled_key.key,
5689 		.mode		= 0644,
5690 		.proc_handler	= bpf_stats_handler,
5691 	},
5692 	{ }
5693 };
5694 
5695 static int __init bpf_syscall_sysctl_init(void)
5696 {
5697 	register_sysctl_init("kernel", bpf_syscall_table);
5698 	return 0;
5699 }
5700 late_initcall(bpf_syscall_sysctl_init);
5701 #endif /* CONFIG_SYSCTL */
5702