xref: /openbmc/linux/kernel/bpf/syscall.c (revision cff11abeca78aa782378401ca2800bd2194aa14e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf_trace.h>
6 #include <linux/bpf_lirc.h>
7 #include <linux/btf.h>
8 #include <linux/syscalls.h>
9 #include <linux/slab.h>
10 #include <linux/sched/signal.h>
11 #include <linux/vmalloc.h>
12 #include <linux/mmzone.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/fdtable.h>
15 #include <linux/file.h>
16 #include <linux/fs.h>
17 #include <linux/license.h>
18 #include <linux/filter.h>
19 #include <linux/version.h>
20 #include <linux/kernel.h>
21 #include <linux/idr.h>
22 #include <linux/cred.h>
23 #include <linux/timekeeping.h>
24 #include <linux/ctype.h>
25 #include <linux/nospec.h>
26 #include <linux/audit.h>
27 #include <uapi/linux/btf.h>
28 #include <asm/pgtable.h>
29 #include <linux/bpf_lsm.h>
30 #include <linux/poll.h>
31 #include <linux/bpf-netns.h>
32 
33 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
34 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
35 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
36 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
37 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
38 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
39 			IS_FD_HASH(map))
40 
41 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
42 
43 DEFINE_PER_CPU(int, bpf_prog_active);
44 static DEFINE_IDR(prog_idr);
45 static DEFINE_SPINLOCK(prog_idr_lock);
46 static DEFINE_IDR(map_idr);
47 static DEFINE_SPINLOCK(map_idr_lock);
48 static DEFINE_IDR(link_idr);
49 static DEFINE_SPINLOCK(link_idr_lock);
50 
51 int sysctl_unprivileged_bpf_disabled __read_mostly;
52 
53 static const struct bpf_map_ops * const bpf_map_types[] = {
54 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
55 #define BPF_MAP_TYPE(_id, _ops) \
56 	[_id] = &_ops,
57 #define BPF_LINK_TYPE(_id, _name)
58 #include <linux/bpf_types.h>
59 #undef BPF_PROG_TYPE
60 #undef BPF_MAP_TYPE
61 #undef BPF_LINK_TYPE
62 };
63 
64 /*
65  * If we're handed a bigger struct than we know of, ensure all the unknown bits
66  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
67  * we don't know about yet.
68  *
69  * There is a TOCTOU window between this check and the following
70  * copy_from_user() call. However, this is not a concern since this function
71  * is only meant to future-proof against unknown trailing bits.
72  */
73 int bpf_check_uarg_tail_zero(void __user *uaddr,
74 			     size_t expected_size,
75 			     size_t actual_size)
76 {
77 	unsigned char __user *addr;
78 	unsigned char __user *end;
79 	unsigned char val;
80 	int err;
81 
82 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
83 		return -E2BIG;
84 
85 	if (unlikely(!access_ok(uaddr, actual_size)))
86 		return -EFAULT;
87 
88 	if (actual_size <= expected_size)
89 		return 0;
90 
91 	addr = uaddr + expected_size;
92 	end  = uaddr + actual_size;
93 
94 	for (; addr < end; addr++) {
95 		err = get_user(val, addr);
96 		if (err)
97 			return err;
98 		if (val)
99 			return -E2BIG;
100 	}
101 
102 	return 0;
103 }
104 
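/*
 * Editor's note: a minimal sketch of the usual caller pattern for the check
 * above (hypothetical helper and struct foo; the real callers are the syscall
 * handlers in this file). Unknown trailing bytes must be zero, after which
 * only the known-size prefix is copied:
 *
 *	static int copy_foo_from_user(struct foo *dst, void __user *uaddr,
 *				      u32 usize)
 *	{
 *		int err = bpf_check_uarg_tail_zero(uaddr, sizeof(*dst), usize);
 *
 *		if (err)
 *			return err;
 *		// Older userspace may pass a shorter struct; copy only what
 *		// was provided, the rest stays zeroed.
 *		memset(dst, 0, sizeof(*dst));
 *		if (copy_from_user(dst, uaddr, min_t(u32, sizeof(*dst), usize)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */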
105 const struct bpf_map_ops bpf_map_offload_ops = {
106 	.map_alloc = bpf_map_offload_map_alloc,
107 	.map_free = bpf_map_offload_map_free,
108 	.map_check_btf = map_check_no_btf,
109 };
110 
111 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
112 {
113 	const struct bpf_map_ops *ops;
114 	u32 type = attr->map_type;
115 	struct bpf_map *map;
116 	int err;
117 
118 	if (type >= ARRAY_SIZE(bpf_map_types))
119 		return ERR_PTR(-EINVAL);
120 	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
121 	ops = bpf_map_types[type];
122 	if (!ops)
123 		return ERR_PTR(-EINVAL);
124 
125 	if (ops->map_alloc_check) {
126 		err = ops->map_alloc_check(attr);
127 		if (err)
128 			return ERR_PTR(err);
129 	}
130 	if (attr->map_ifindex)
131 		ops = &bpf_map_offload_ops;
132 	map = ops->map_alloc(attr);
133 	if (IS_ERR(map))
134 		return map;
135 	map->ops = ops;
136 	map->map_type = type;
137 	return map;
138 }
139 
140 static u32 bpf_map_value_size(struct bpf_map *map)
141 {
142 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
143 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
144 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
145 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
146 		return round_up(map->value_size, 8) * num_possible_cpus();
147 	else if (IS_FD_MAP(map))
148 		return sizeof(u32);
149 	else
150 		return map->value_size;
151 }
152 
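/*
 * Editor's note: a worked example of the per-CPU sizing above. For a
 * BPF_MAP_TYPE_PERCPU_ARRAY with value_size = 12 on a machine with 4
 * possible CPUs, userspace must supply round_up(12, 8) * 4 = 16 * 4 = 64
 * bytes per element: one 8-byte-aligned slot per possible (not online) CPU.
 */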
153 static void maybe_wait_bpf_programs(struct bpf_map *map)
154 {
155 	/* Wait for any running BPF programs to complete so that
156 	 * userspace, when we return to it, knows that all programs
157 	 * that could be running use the new map value.
158 	 */
159 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
160 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
161 		synchronize_rcu();
162 }
163 
164 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
165 				void *value, __u64 flags)
166 {
167 	int err;
168 
169 	/* Need to create a kthread, thus must support schedule */
170 	if (bpf_map_is_dev_bound(map)) {
171 		return bpf_map_offload_update_elem(map, key, value, flags);
172 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
173 		   map->map_type == BPF_MAP_TYPE_SOCKHASH ||
174 		   map->map_type == BPF_MAP_TYPE_SOCKMAP ||
175 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
176 		return map->ops->map_update_elem(map, key, value, flags);
177 	} else if (IS_FD_PROG_ARRAY(map)) {
178 		return bpf_fd_array_map_update_elem(map, f.file, key, value,
179 						    flags);
180 	}
181 
182 	bpf_disable_instrumentation();
183 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
184 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
185 		err = bpf_percpu_hash_update(map, key, value, flags);
186 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
187 		err = bpf_percpu_array_update(map, key, value, flags);
188 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
189 		err = bpf_percpu_cgroup_storage_update(map, key, value,
190 						       flags);
191 	} else if (IS_FD_ARRAY(map)) {
192 		rcu_read_lock();
193 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
194 						   flags);
195 		rcu_read_unlock();
196 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
197 		rcu_read_lock();
198 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
199 						  flags);
200 		rcu_read_unlock();
201 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
202 		/* rcu_read_lock() is not needed */
203 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
204 							 flags);
205 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
206 		   map->map_type == BPF_MAP_TYPE_STACK) {
207 		err = map->ops->map_push_elem(map, value, flags);
208 	} else {
209 		rcu_read_lock();
210 		err = map->ops->map_update_elem(map, key, value, flags);
211 		rcu_read_unlock();
212 	}
213 	bpf_enable_instrumentation();
214 	maybe_wait_bpf_programs(map);
215 
216 	return err;
217 }
218 
219 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
220 			      __u64 flags)
221 {
222 	void *ptr;
223 	int err;
224 
225 	if (bpf_map_is_dev_bound(map))
226 		return bpf_map_offload_lookup_elem(map, key, value);
227 
228 	bpf_disable_instrumentation();
229 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
230 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
231 		err = bpf_percpu_hash_copy(map, key, value);
232 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
233 		err = bpf_percpu_array_copy(map, key, value);
234 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
235 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
236 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
237 		err = bpf_stackmap_copy(map, key, value);
238 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
239 		err = bpf_fd_array_map_lookup_elem(map, key, value);
240 	} else if (IS_FD_HASH(map)) {
241 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
242 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
243 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
244 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
245 		   map->map_type == BPF_MAP_TYPE_STACK) {
246 		err = map->ops->map_peek_elem(map, value);
247 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
248 		/* struct_ops map requires directly updating "value" */
249 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
250 	} else {
251 		rcu_read_lock();
252 		if (map->ops->map_lookup_elem_sys_only)
253 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
254 		else
255 			ptr = map->ops->map_lookup_elem(map, key);
256 		if (IS_ERR(ptr)) {
257 			err = PTR_ERR(ptr);
258 		} else if (!ptr) {
259 			err = -ENOENT;
260 		} else {
261 			err = 0;
262 			if (flags & BPF_F_LOCK)
263 				/* lock 'ptr' and copy everything but lock */
264 				copy_map_value_locked(map, value, ptr, true);
265 			else
266 				copy_map_value(map, value, ptr);
267 			/* mask lock, since value wasn't zero-initialized */
268 			check_and_init_map_lock(map, value);
269 		}
270 		rcu_read_unlock();
271 	}
272 
273 	bpf_enable_instrumentation();
274 	maybe_wait_bpf_programs(map);
275 
276 	return err;
277 }
278 
279 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
280 {
281 	/* We really just want to fail instead of triggering the OOM killer
282 	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
283 	 * which is used for lower-order allocation requests.
284 	 *
285 	 * It has been observed that higher-order allocation requests done by
286 	 * vmalloc with __GFP_NORETRY set might fail due to not trying to
287 	 * reclaim memory from the page cache, thus we set
288 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
289 	 */
290 
291 	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
292 	unsigned int flags = 0;
293 	unsigned long align = 1;
294 	void *area;
295 
296 	if (size >= SIZE_MAX)
297 		return NULL;
298 
299 	/* kmalloc()'ed memory can't be mmap()'ed */
300 	if (mmapable) {
301 		BUG_ON(!PAGE_ALIGNED(size));
302 		align = SHMLBA;
303 		flags = VM_USERMAP;
304 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
305 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
306 				    numa_node);
307 		if (area != NULL)
308 			return area;
309 	}
310 
311 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
312 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
313 			flags, numa_node, __builtin_return_address(0));
314 }
315 
316 void *bpf_map_area_alloc(u64 size, int numa_node)
317 {
318 	return __bpf_map_area_alloc(size, numa_node, false);
319 }
320 
321 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
322 {
323 	return __bpf_map_area_alloc(size, numa_node, true);
324 }
325 
326 void bpf_map_area_free(void *area)
327 {
328 	kvfree(area);
329 }
330 
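/*
 * Editor's note: a minimal sketch of how a map implementation might size and
 * pair these calls (hypothetical struct my_map; compare the real array map):
 *
 *	u64 size = sizeof(struct my_map) +
 *		   (u64)attr->max_entries * round_up(attr->value_size, 8);
 *	struct my_map *m = bpf_map_area_alloc(size, bpf_map_attr_numa_node(attr));
 *
 *	if (!m)
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	bpf_map_area_free(m);	// kvfree() frees both kmalloc and vmalloc areas
 */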
331 static u32 bpf_map_flags_retain_permanent(u32 flags)
332 {
333 	/* Some map creation flags are not tied to the map object but
334 	 * rather to the map fd instead, so they have no meaning upon
335 	 * map object inspection since multiple file descriptors with
336 	 * different (access) properties can exist here. Thus, given
337 	 * this has zero meaning for the map itself, let's clear these
338 	 * from here.
339 	 */
340 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
341 }
342 
343 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
344 {
345 	map->map_type = attr->map_type;
346 	map->key_size = attr->key_size;
347 	map->value_size = attr->value_size;
348 	map->max_entries = attr->max_entries;
349 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
350 	map->numa_node = bpf_map_attr_numa_node(attr);
351 }
352 
353 static int bpf_charge_memlock(struct user_struct *user, u32 pages)
354 {
355 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
356 
357 	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
358 		atomic_long_sub(pages, &user->locked_vm);
359 		return -EPERM;
360 	}
361 	return 0;
362 }
363 
364 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
365 {
366 	if (user)
367 		atomic_long_sub(pages, &user->locked_vm);
368 }
369 
370 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
371 {
372 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
373 	struct user_struct *user;
374 	int ret;
375 
376 	if (size >= U32_MAX - PAGE_SIZE)
377 		return -E2BIG;
378 
379 	user = get_current_user();
380 	ret = bpf_charge_memlock(user, pages);
381 	if (ret) {
382 		free_uid(user);
383 		return ret;
384 	}
385 
386 	mem->pages = pages;
387 	mem->user = user;
388 
389 	return 0;
390 }
391 
392 void bpf_map_charge_finish(struct bpf_map_memory *mem)
393 {
394 	bpf_uncharge_memlock(mem->user, mem->pages);
395 	free_uid(mem->user);
396 }
397 
398 void bpf_map_charge_move(struct bpf_map_memory *dst,
399 			 struct bpf_map_memory *src)
400 {
401 	*dst = *src;
402 
403 	/* Make sure src will not be used for the redundant uncharging. */
404 	memset(src, 0, sizeof(struct bpf_map_memory));
405 }
406 
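/*
 * Editor's note: a sketch of the intended charge lifecycle during map
 * creation (hypothetical map_alloc; the in-tree map implementations follow
 * this shape):
 *
 *	struct bpf_map_memory mem;
 *	int err = bpf_map_charge_init(&mem, total_bytes);
 *
 *	if (err)
 *		return ERR_PTR(err);
 *	map = bpf_map_area_alloc(total_bytes, numa_node);
 *	if (!map) {
 *		bpf_map_charge_finish(&mem);	// undo the charge on failure
 *		return ERR_PTR(-ENOMEM);
 *	}
 *	bpf_map_charge_move(&map->memory, &mem);	// map now owns the charge
 */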
407 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
408 {
409 	int ret;
410 
411 	ret = bpf_charge_memlock(map->memory.user, pages);
412 	if (ret)
413 		return ret;
414 	map->memory.pages += pages;
415 	return ret;
416 }
417 
418 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
419 {
420 	bpf_uncharge_memlock(map->memory.user, pages);
421 	map->memory.pages -= pages;
422 }
423 
424 static int bpf_map_alloc_id(struct bpf_map *map)
425 {
426 	int id;
427 
428 	idr_preload(GFP_KERNEL);
429 	spin_lock_bh(&map_idr_lock);
430 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
431 	if (id > 0)
432 		map->id = id;
433 	spin_unlock_bh(&map_idr_lock);
434 	idr_preload_end();
435 
436 	if (WARN_ON_ONCE(!id))
437 		return -ENOSPC;
438 
439 	return id > 0 ? 0 : id;
440 }
441 
442 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
443 {
444 	unsigned long flags;
445 
446 	/* Offloaded maps are removed from the IDR store when their device
447 	 * disappears - even if someone holds an fd to them they are unusable,
448 	 * the memory is gone, all ops will fail; they are simply waiting for
449 	 * refcnt to drop to be freed.
450 	 */
451 	if (!map->id)
452 		return;
453 
454 	if (do_idr_lock)
455 		spin_lock_irqsave(&map_idr_lock, flags);
456 	else
457 		__acquire(&map_idr_lock);
458 
459 	idr_remove(&map_idr, map->id);
460 	map->id = 0;
461 
462 	if (do_idr_lock)
463 		spin_unlock_irqrestore(&map_idr_lock, flags);
464 	else
465 		__release(&map_idr_lock);
466 }
467 
468 /* called from workqueue */
469 static void bpf_map_free_deferred(struct work_struct *work)
470 {
471 	struct bpf_map *map = container_of(work, struct bpf_map, work);
472 	struct bpf_map_memory mem;
473 
474 	bpf_map_charge_move(&mem, &map->memory);
475 	security_bpf_map_free(map);
476 	/* implementation dependent freeing */
477 	map->ops->map_free(map);
478 	bpf_map_charge_finish(&mem);
479 }
480 
481 static void bpf_map_put_uref(struct bpf_map *map)
482 {
483 	if (atomic64_dec_and_test(&map->usercnt)) {
484 		if (map->ops->map_release_uref)
485 			map->ops->map_release_uref(map);
486 	}
487 }
488 
489 /* decrement map refcnt and schedule it for freeing via workqueue
490  * (underlying map implementation ops->map_free() might sleep)
491  */
492 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
493 {
494 	if (atomic64_dec_and_test(&map->refcnt)) {
495 		/* bpf_map_free_id() must be called first */
496 		bpf_map_free_id(map, do_idr_lock);
497 		btf_put(map->btf);
498 		INIT_WORK(&map->work, bpf_map_free_deferred);
499 		schedule_work(&map->work);
500 	}
501 }
502 
503 void bpf_map_put(struct bpf_map *map)
504 {
505 	__bpf_map_put(map, true);
506 }
507 EXPORT_SYMBOL_GPL(bpf_map_put);
508 
509 void bpf_map_put_with_uref(struct bpf_map *map)
510 {
511 	bpf_map_put_uref(map);
512 	bpf_map_put(map);
513 }
514 
515 static int bpf_map_release(struct inode *inode, struct file *filp)
516 {
517 	struct bpf_map *map = filp->private_data;
518 
519 	if (map->ops->map_release)
520 		map->ops->map_release(map, filp);
521 
522 	bpf_map_put_with_uref(map);
523 	return 0;
524 }
525 
526 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
527 {
528 	fmode_t mode = f.file->f_mode;
529 
530 	/* Our per-fd file permissions may have been overridden by the
531 	 * map's global permissions on the syscall side (map freezing).
532 	 */
533 	if (READ_ONCE(map->frozen))
534 		mode &= ~FMODE_CAN_WRITE;
535 	return mode;
536 }
537 
538 #ifdef CONFIG_PROC_FS
539 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
540 {
541 	const struct bpf_map *map = filp->private_data;
542 	const struct bpf_array *array;
543 	u32 type = 0, jited = 0;
544 
545 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
546 		array = container_of(map, struct bpf_array, map);
547 		type  = array->aux->type;
548 		jited = array->aux->jited;
549 	}
550 
551 	seq_printf(m,
552 		   "map_type:\t%u\n"
553 		   "key_size:\t%u\n"
554 		   "value_size:\t%u\n"
555 		   "max_entries:\t%u\n"
556 		   "map_flags:\t%#x\n"
557 		   "memlock:\t%llu\n"
558 		   "map_id:\t%u\n"
559 		   "frozen:\t%u\n",
560 		   map->map_type,
561 		   map->key_size,
562 		   map->value_size,
563 		   map->max_entries,
564 		   map->map_flags,
565 		   map->memory.pages * 1ULL << PAGE_SHIFT,
566 		   map->id,
567 		   READ_ONCE(map->frozen));
568 	if (type) {
569 		seq_printf(m, "owner_prog_type:\t%u\n", type);
570 		seq_printf(m, "owner_jited:\t%u\n", jited);
571 	}
572 }
573 #endif
574 
575 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
576 			      loff_t *ppos)
577 {
578 	/* We need this handler such that alloc_file() enables
579 	 * f_mode with FMODE_CAN_READ.
580 	 */
581 	return -EINVAL;
582 }
583 
584 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
585 			       size_t siz, loff_t *ppos)
586 {
587 	/* We need this handler such that alloc_file() enables
588 	 * f_mode with FMODE_CAN_WRITE.
589 	 */
590 	return -EINVAL;
591 }
592 
593 /* called for any extra memory-mapped regions (except initial) */
594 static void bpf_map_mmap_open(struct vm_area_struct *vma)
595 {
596 	struct bpf_map *map = vma->vm_file->private_data;
597 
598 	if (vma->vm_flags & VM_MAYWRITE) {
599 		mutex_lock(&map->freeze_mutex);
600 		map->writecnt++;
601 		mutex_unlock(&map->freeze_mutex);
602 	}
603 }
604 
605 /* called for all unmapped memory regions (including initial) */
606 static void bpf_map_mmap_close(struct vm_area_struct *vma)
607 {
608 	struct bpf_map *map = vma->vm_file->private_data;
609 
610 	if (vma->vm_flags & VM_MAYWRITE) {
611 		mutex_lock(&map->freeze_mutex);
612 		map->writecnt--;
613 		mutex_unlock(&map->freeze_mutex);
614 	}
615 }
616 
617 static const struct vm_operations_struct bpf_map_default_vmops = {
618 	.open		= bpf_map_mmap_open,
619 	.close		= bpf_map_mmap_close,
620 };
621 
622 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
623 {
624 	struct bpf_map *map = filp->private_data;
625 	int err;
626 
627 	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
628 		return -ENOTSUPP;
629 
630 	if (!(vma->vm_flags & VM_SHARED))
631 		return -EINVAL;
632 
633 	mutex_lock(&map->freeze_mutex);
634 
635 	if (vma->vm_flags & VM_WRITE) {
636 		if (map->frozen) {
637 			err = -EPERM;
638 			goto out;
639 		}
640 		/* map is meant to be read-only, so do not allow mapping as
641 		 * writable, because it would be possible to leak a writable page
642 		 * reference and allow user-space to still modify it after
643 		 * freezing, while the verifier assumes the contents do not change
644 		 */
645 		if (map->map_flags & BPF_F_RDONLY_PROG) {
646 			err = -EACCES;
647 			goto out;
648 		}
649 	}
650 
651 	/* set default open/close callbacks */
652 	vma->vm_ops = &bpf_map_default_vmops;
653 	vma->vm_private_data = map;
654 	vma->vm_flags &= ~VM_MAYEXEC;
655 	if (!(vma->vm_flags & VM_WRITE))
656 		/* disallow re-mapping with PROT_WRITE */
657 		vma->vm_flags &= ~VM_MAYWRITE;
658 
659 	err = map->ops->map_mmap(map, vma);
660 	if (err)
661 		goto out;
662 
663 	if (vma->vm_flags & VM_MAYWRITE)
664 		map->writecnt++;
665 out:
666 	mutex_unlock(&map->freeze_mutex);
667 	return err;
668 }
669 
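/*
 * Editor's note: a minimal userspace sketch (not kernel code) of mapping a
 * BPF_F_MMAPABLE array map; map_fd and len are assumptions, error handling
 * elided:
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       map_fd, 0);
 *
 * MAP_SHARED is mandatory (see the VM_SHARED check above), and a PROT_WRITE
 * mapping is refused once the map is frozen or if it was created with
 * BPF_F_RDONLY_PROG.
 */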
670 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
671 {
672 	struct bpf_map *map = filp->private_data;
673 
674 	if (map->ops->map_poll)
675 		return map->ops->map_poll(map, filp, pts);
676 
677 	return EPOLLERR;
678 }
679 
680 const struct file_operations bpf_map_fops = {
681 #ifdef CONFIG_PROC_FS
682 	.show_fdinfo	= bpf_map_show_fdinfo,
683 #endif
684 	.release	= bpf_map_release,
685 	.read		= bpf_dummy_read,
686 	.write		= bpf_dummy_write,
687 	.mmap		= bpf_map_mmap,
688 	.poll		= bpf_map_poll,
689 };
690 
691 int bpf_map_new_fd(struct bpf_map *map, int flags)
692 {
693 	int ret;
694 
695 	ret = security_bpf_map(map, OPEN_FMODE(flags));
696 	if (ret < 0)
697 		return ret;
698 
699 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
700 				flags | O_CLOEXEC);
701 }
702 
703 int bpf_get_file_flag(int flags)
704 {
705 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
706 		return -EINVAL;
707 	if (flags & BPF_F_RDONLY)
708 		return O_RDONLY;
709 	if (flags & BPF_F_WRONLY)
710 		return O_WRONLY;
711 	return O_RDWR;
712 }
713 
714 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
715 #define CHECK_ATTR(CMD) \
716 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
717 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
718 		   sizeof(*attr) - \
719 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
720 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
721 
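/*
 * Editor's note: for illustration, CHECK_ATTR(BPF_MAP_FREEZE), with
 * BPF_MAP_FREEZE_LAST_FIELD defined as map_fd further below, runs
 * memchr_inv() over every byte of 'union bpf_attr' past attr->map_fd; any
 * non-zero byte means userspace set a field this kernel does not know
 * about, and the command is rejected with -EINVAL.
 */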
722 /* dst and src must each be at least "size" bytes.
723  * Return strlen on success and < 0 on error.
724  */
725 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
726 {
727 	const char *end = src + size;
728 	const char *orig_src = src;
729 
730 	memset(dst, 0, size);
731 	/* Copy all isalnum(), '_' and '.' chars. */
732 	while (src < end && *src) {
733 		if (!isalnum(*src) &&
734 		    *src != '_' && *src != '.')
735 			return -EINVAL;
736 		*dst++ = *src++;
737 	}
738 
739 	/* No '\0' found within "size" bytes */
740 	if (src == end)
741 		return -EINVAL;
742 
743 	return src - orig_src;
744 }
745 
746 int map_check_no_btf(const struct bpf_map *map,
747 		     const struct btf *btf,
748 		     const struct btf_type *key_type,
749 		     const struct btf_type *value_type)
750 {
751 	return -ENOTSUPP;
752 }
753 
754 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
755 			 u32 btf_key_id, u32 btf_value_id)
756 {
757 	const struct btf_type *key_type, *value_type;
758 	u32 key_size, value_size;
759 	int ret = 0;
760 
761 	/* Some maps allow key to be unspecified. */
762 	if (btf_key_id) {
763 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
764 		if (!key_type || key_size != map->key_size)
765 			return -EINVAL;
766 	} else {
767 		key_type = btf_type_by_id(btf, 0);
768 		if (!map->ops->map_check_btf)
769 			return -EINVAL;
770 	}
771 
772 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
773 	if (!value_type || value_size != map->value_size)
774 		return -EINVAL;
775 
776 	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
777 
778 	if (map_value_has_spin_lock(map)) {
779 		if (map->map_flags & BPF_F_RDONLY_PROG)
780 			return -EACCES;
781 		if (map->map_type != BPF_MAP_TYPE_HASH &&
782 		    map->map_type != BPF_MAP_TYPE_ARRAY &&
783 		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
784 		    map->map_type != BPF_MAP_TYPE_SK_STORAGE)
785 			return -ENOTSUPP;
786 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
787 		    map->value_size) {
788 			WARN_ONCE(1,
789 				  "verifier bug spin_lock_off %d value_size %d\n",
790 				  map->spin_lock_off, map->value_size);
791 			return -EFAULT;
792 		}
793 	}
794 
795 	if (map->ops->map_check_btf)
796 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
797 
798 	return ret;
799 }
800 
801 #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
802 /* called via syscall */
803 static int map_create(union bpf_attr *attr)
804 {
805 	int numa_node = bpf_map_attr_numa_node(attr);
806 	struct bpf_map_memory mem;
807 	struct bpf_map *map;
808 	int f_flags;
809 	int err;
810 
811 	err = CHECK_ATTR(BPF_MAP_CREATE);
812 	if (err)
813 		return -EINVAL;
814 
815 	if (attr->btf_vmlinux_value_type_id) {
816 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
817 		    attr->btf_key_type_id || attr->btf_value_type_id)
818 			return -EINVAL;
819 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
820 		return -EINVAL;
821 	}
822 
823 	f_flags = bpf_get_file_flag(attr->map_flags);
824 	if (f_flags < 0)
825 		return f_flags;
826 
827 	if (numa_node != NUMA_NO_NODE &&
828 	    ((unsigned int)numa_node >= nr_node_ids ||
829 	     !node_online(numa_node)))
830 		return -EINVAL;
831 
832 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
833 	map = find_and_alloc_map(attr);
834 	if (IS_ERR(map))
835 		return PTR_ERR(map);
836 
837 	err = bpf_obj_name_cpy(map->name, attr->map_name,
838 			       sizeof(attr->map_name));
839 	if (err < 0)
840 		goto free_map;
841 
842 	atomic64_set(&map->refcnt, 1);
843 	atomic64_set(&map->usercnt, 1);
844 	mutex_init(&map->freeze_mutex);
845 
846 	map->spin_lock_off = -EINVAL;
847 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
848 	    /* Even if the map's value is a kernel struct,
849 	     * the bpf_prog.o must have BTF to begin with
850 	     * to figure out the corresponding kernel
851 	     * counterpart.  Thus, attr->btf_fd also has
852 	     * to be valid.
853 	     */
854 	    attr->btf_vmlinux_value_type_id) {
855 		struct btf *btf;
856 
857 		btf = btf_get_by_fd(attr->btf_fd);
858 		if (IS_ERR(btf)) {
859 			err = PTR_ERR(btf);
860 			goto free_map;
861 		}
862 		map->btf = btf;
863 
864 		if (attr->btf_value_type_id) {
865 			err = map_check_btf(map, btf, attr->btf_key_type_id,
866 					    attr->btf_value_type_id);
867 			if (err)
868 				goto free_map;
869 		}
870 
871 		map->btf_key_type_id = attr->btf_key_type_id;
872 		map->btf_value_type_id = attr->btf_value_type_id;
873 		map->btf_vmlinux_value_type_id =
874 			attr->btf_vmlinux_value_type_id;
875 	}
876 
877 	err = security_bpf_map_alloc(map);
878 	if (err)
879 		goto free_map;
880 
881 	err = bpf_map_alloc_id(map);
882 	if (err)
883 		goto free_map_sec;
884 
885 	err = bpf_map_new_fd(map, f_flags);
886 	if (err < 0) {
887 		/* failed to allocate fd.
888 		 * bpf_map_put_with_uref() is needed because the above
889 		 * bpf_map_alloc_id() has published the map
890 		 * to the userspace and the userspace may
891 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
892 		 */
893 		bpf_map_put_with_uref(map);
894 		return err;
895 	}
896 
897 	return err;
898 
899 free_map_sec:
900 	security_bpf_map_free(map);
901 free_map:
902 	btf_put(map->btf);
903 	bpf_map_charge_move(&mem, &map->memory);
904 	map->ops->map_free(map);
905 	bpf_map_charge_finish(&mem);
906 	return err;
907 }
908 
909 /* if error is returned, fd is released.
910  * On success caller should complete fd access with matching fdput()
911  */
912 struct bpf_map *__bpf_map_get(struct fd f)
913 {
914 	if (!f.file)
915 		return ERR_PTR(-EBADF);
916 	if (f.file->f_op != &bpf_map_fops) {
917 		fdput(f);
918 		return ERR_PTR(-EINVAL);
919 	}
920 
921 	return f.file->private_data;
922 }
923 
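/*
 * Editor's note: a sketch of the calling convention described above, as used
 * by the map_*_elem() handlers later in this file:
 *
 *	struct fd f = fdget(ufd);
 *	struct bpf_map *map = __bpf_map_get(f);
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);	// fd already released on error
 *	...
 *	fdput(f);			// caller releases on success
 */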
924 void bpf_map_inc(struct bpf_map *map)
925 {
926 	atomic64_inc(&map->refcnt);
927 }
928 EXPORT_SYMBOL_GPL(bpf_map_inc);
929 
930 void bpf_map_inc_with_uref(struct bpf_map *map)
931 {
932 	atomic64_inc(&map->refcnt);
933 	atomic64_inc(&map->usercnt);
934 }
935 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
936 
937 struct bpf_map *bpf_map_get(u32 ufd)
938 {
939 	struct fd f = fdget(ufd);
940 	struct bpf_map *map;
941 
942 	map = __bpf_map_get(f);
943 	if (IS_ERR(map))
944 		return map;
945 
946 	bpf_map_inc(map);
947 	fdput(f);
948 
949 	return map;
950 }
951 
952 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
953 {
954 	struct fd f = fdget(ufd);
955 	struct bpf_map *map;
956 
957 	map = __bpf_map_get(f);
958 	if (IS_ERR(map))
959 		return map;
960 
961 	bpf_map_inc_with_uref(map);
962 	fdput(f);
963 
964 	return map;
965 }
966 
967 /* map_idr_lock should have been held */
968 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
969 {
970 	int refold;
971 
972 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
973 	if (!refold)
974 		return ERR_PTR(-ENOENT);
975 	if (uref)
976 		atomic64_inc(&map->usercnt);
977 
978 	return map;
979 }
980 
981 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
982 {
983 	spin_lock_bh(&map_idr_lock);
984 	map = __bpf_map_inc_not_zero(map, false);
985 	spin_unlock_bh(&map_idr_lock);
986 
987 	return map;
988 }
989 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
990 
991 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
992 {
993 	return -ENOTSUPP;
994 }
995 
996 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
997 {
998 	if (key_size)
999 		return memdup_user(ukey, key_size);
1000 
1001 	if (ukey)
1002 		return ERR_PTR(-EINVAL);
1003 
1004 	return NULL;
1005 }
1006 
1007 /* last field in 'union bpf_attr' used by this command */
1008 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1009 
1010 static int map_lookup_elem(union bpf_attr *attr)
1011 {
1012 	void __user *ukey = u64_to_user_ptr(attr->key);
1013 	void __user *uvalue = u64_to_user_ptr(attr->value);
1014 	int ufd = attr->map_fd;
1015 	struct bpf_map *map;
1016 	void *key, *value;
1017 	u32 value_size;
1018 	struct fd f;
1019 	int err;
1020 
1021 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1022 		return -EINVAL;
1023 
1024 	if (attr->flags & ~BPF_F_LOCK)
1025 		return -EINVAL;
1026 
1027 	f = fdget(ufd);
1028 	map = __bpf_map_get(f);
1029 	if (IS_ERR(map))
1030 		return PTR_ERR(map);
1031 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1032 		err = -EPERM;
1033 		goto err_put;
1034 	}
1035 
1036 	if ((attr->flags & BPF_F_LOCK) &&
1037 	    !map_value_has_spin_lock(map)) {
1038 		err = -EINVAL;
1039 		goto err_put;
1040 	}
1041 
1042 	key = __bpf_copy_key(ukey, map->key_size);
1043 	if (IS_ERR(key)) {
1044 		err = PTR_ERR(key);
1045 		goto err_put;
1046 	}
1047 
1048 	value_size = bpf_map_value_size(map);
1049 
1050 	err = -ENOMEM;
1051 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1052 	if (!value)
1053 		goto free_key;
1054 
1055 	err = bpf_map_copy_value(map, key, value, attr->flags);
1056 	if (err)
1057 		goto free_value;
1058 
1059 	err = -EFAULT;
1060 	if (copy_to_user(uvalue, value, value_size) != 0)
1061 		goto free_value;
1062 
1063 	err = 0;
1064 
1065 free_value:
1066 	kfree(value);
1067 free_key:
1068 	kfree(key);
1069 err_put:
1070 	fdput(f);
1071 	return err;
1072 }
1073 
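/*
 * Editor's note: a minimal userspace sketch (not kernel code) of invoking
 * this command through the bpf(2) syscall, assuming a map with 4-byte keys
 * and values; map_fd and use() are assumptions, error handling elided. For
 * per-CPU maps the value buffer must instead be
 * round_up(value_size, 8) * num_possible_cpus bytes:
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	union bpf_attr attr = {};
 *	__u32 key = 0, value;
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	if (!syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
 *		use(value);	// hypothetical consumer
 */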
1074 
1075 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1076 
1077 static int map_update_elem(union bpf_attr *attr)
1078 {
1079 	void __user *ukey = u64_to_user_ptr(attr->key);
1080 	void __user *uvalue = u64_to_user_ptr(attr->value);
1081 	int ufd = attr->map_fd;
1082 	struct bpf_map *map;
1083 	void *key, *value;
1084 	u32 value_size;
1085 	struct fd f;
1086 	int err;
1087 
1088 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1089 		return -EINVAL;
1090 
1091 	f = fdget(ufd);
1092 	map = __bpf_map_get(f);
1093 	if (IS_ERR(map))
1094 		return PTR_ERR(map);
1095 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1096 		err = -EPERM;
1097 		goto err_put;
1098 	}
1099 
1100 	if ((attr->flags & BPF_F_LOCK) &&
1101 	    !map_value_has_spin_lock(map)) {
1102 		err = -EINVAL;
1103 		goto err_put;
1104 	}
1105 
1106 	key = __bpf_copy_key(ukey, map->key_size);
1107 	if (IS_ERR(key)) {
1108 		err = PTR_ERR(key);
1109 		goto err_put;
1110 	}
1111 
1112 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1113 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1114 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
1115 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
1116 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
1117 	else
1118 		value_size = map->value_size;
1119 
1120 	err = -ENOMEM;
1121 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1122 	if (!value)
1123 		goto free_key;
1124 
1125 	err = -EFAULT;
1126 	if (copy_from_user(value, uvalue, value_size) != 0)
1127 		goto free_value;
1128 
1129 	err = bpf_map_update_value(map, f, key, value, attr->flags);
1130 
1131 free_value:
1132 	kfree(value);
1133 free_key:
1134 	kfree(key);
1135 err_put:
1136 	fdput(f);
1137 	return err;
1138 }
1139 
1140 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1141 
1142 static int map_delete_elem(union bpf_attr *attr)
1143 {
1144 	void __user *ukey = u64_to_user_ptr(attr->key);
1145 	int ufd = attr->map_fd;
1146 	struct bpf_map *map;
1147 	struct fd f;
1148 	void *key;
1149 	int err;
1150 
1151 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1152 		return -EINVAL;
1153 
1154 	f = fdget(ufd);
1155 	map = __bpf_map_get(f);
1156 	if (IS_ERR(map))
1157 		return PTR_ERR(map);
1158 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1159 		err = -EPERM;
1160 		goto err_put;
1161 	}
1162 
1163 	key = __bpf_copy_key(ukey, map->key_size);
1164 	if (IS_ERR(key)) {
1165 		err = PTR_ERR(key);
1166 		goto err_put;
1167 	}
1168 
1169 	if (bpf_map_is_dev_bound(map)) {
1170 		err = bpf_map_offload_delete_elem(map, key);
1171 		goto out;
1172 	} else if (IS_FD_PROG_ARRAY(map) ||
1173 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1174 		/* These maps require sleepable context */
1175 		err = map->ops->map_delete_elem(map, key);
1176 		goto out;
1177 	}
1178 
1179 	bpf_disable_instrumentation();
1180 	rcu_read_lock();
1181 	err = map->ops->map_delete_elem(map, key);
1182 	rcu_read_unlock();
1183 	bpf_enable_instrumentation();
1184 	maybe_wait_bpf_programs(map);
1185 out:
1186 	kfree(key);
1187 err_put:
1188 	fdput(f);
1189 	return err;
1190 }
1191 
1192 /* last field in 'union bpf_attr' used by this command */
1193 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1194 
1195 static int map_get_next_key(union bpf_attr *attr)
1196 {
1197 	void __user *ukey = u64_to_user_ptr(attr->key);
1198 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1199 	int ufd = attr->map_fd;
1200 	struct bpf_map *map;
1201 	void *key, *next_key;
1202 	struct fd f;
1203 	int err;
1204 
1205 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1206 		return -EINVAL;
1207 
1208 	f = fdget(ufd);
1209 	map = __bpf_map_get(f);
1210 	if (IS_ERR(map))
1211 		return PTR_ERR(map);
1212 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1213 		err = -EPERM;
1214 		goto err_put;
1215 	}
1216 
1217 	if (ukey) {
1218 		key = __bpf_copy_key(ukey, map->key_size);
1219 		if (IS_ERR(key)) {
1220 			err = PTR_ERR(key);
1221 			goto err_put;
1222 		}
1223 	} else {
1224 		key = NULL;
1225 	}
1226 
1227 	err = -ENOMEM;
1228 	next_key = kmalloc(map->key_size, GFP_USER);
1229 	if (!next_key)
1230 		goto free_key;
1231 
1232 	if (bpf_map_is_dev_bound(map)) {
1233 		err = bpf_map_offload_get_next_key(map, key, next_key);
1234 		goto out;
1235 	}
1236 
1237 	rcu_read_lock();
1238 	err = map->ops->map_get_next_key(map, key, next_key);
1239 	rcu_read_unlock();
1240 out:
1241 	if (err)
1242 		goto free_next_key;
1243 
1244 	err = -EFAULT;
1245 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1246 		goto free_next_key;
1247 
1248 	err = 0;
1249 
1250 free_next_key:
1251 	kfree(next_key);
1252 free_key:
1253 	kfree(key);
1254 err_put:
1255 	fdput(f);
1256 	return err;
1257 }
1258 
1259 int generic_map_delete_batch(struct bpf_map *map,
1260 			     const union bpf_attr *attr,
1261 			     union bpf_attr __user *uattr)
1262 {
1263 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1264 	u32 cp, max_count;
1265 	int err = 0;
1266 	void *key;
1267 
1268 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1269 		return -EINVAL;
1270 
1271 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1272 	    !map_value_has_spin_lock(map)) {
1273 		return -EINVAL;
1274 	}
1275 
1276 	max_count = attr->batch.count;
1277 	if (!max_count)
1278 		return 0;
1279 
1280 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1281 	if (!key)
1282 		return -ENOMEM;
1283 
1284 	for (cp = 0; cp < max_count; cp++) {
1285 		err = -EFAULT;
1286 		if (copy_from_user(key, keys + cp * map->key_size,
1287 				   map->key_size))
1288 			break;
1289 
1290 		if (bpf_map_is_dev_bound(map)) {
1291 			err = bpf_map_offload_delete_elem(map, key);
1292 			break;
1293 		}
1294 
1295 		bpf_disable_instrumentation();
1296 		rcu_read_lock();
1297 		err = map->ops->map_delete_elem(map, key);
1298 		rcu_read_unlock();
1299 		bpf_enable_instrumentation();
1300 		maybe_wait_bpf_programs(map);
1301 		if (err)
1302 			break;
1303 	}
1304 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1305 		err = -EFAULT;
1306 
1307 	kfree(key);
1308 	return err;
1309 }
1310 
1311 int generic_map_update_batch(struct bpf_map *map,
1312 			     const union bpf_attr *attr,
1313 			     union bpf_attr __user *uattr)
1314 {
1315 	void __user *values = u64_to_user_ptr(attr->batch.values);
1316 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1317 	u32 value_size, cp, max_count;
1318 	int ufd = attr->map_fd;
1319 	void *key, *value;
1320 	struct fd f;
1321 	int err = 0;
1322 
1323 	f = fdget(ufd);
1324 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1325 		return -EINVAL;
1326 
1327 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1328 	    !map_value_has_spin_lock(map)) {
1329 		return -EINVAL;
1330 	}
1331 
1332 	value_size = bpf_map_value_size(map);
1333 
1334 	max_count = attr->batch.count;
1335 	if (!max_count)
1336 		return 0;
1337 
1338 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1339 	if (!key)
1340 		return -ENOMEM;
1341 
1342 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1343 	if (!value) {
1344 		kfree(key);
1345 		return -ENOMEM;
1346 	}
1347 
1348 	for (cp = 0; cp < max_count; cp++) {
1349 		err = -EFAULT;
1350 		if (copy_from_user(key, keys + cp * map->key_size,
1351 		    map->key_size) ||
1352 		    copy_from_user(value, values + cp * value_size, value_size))
1353 			break;
1354 
1355 		err = bpf_map_update_value(map, f, key, value,
1356 					   attr->batch.elem_flags);
1357 
1358 		if (err)
1359 			break;
1360 	}
1361 
1362 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1363 		err = -EFAULT;
1364 
1365 	kfree(value);
1366 	kfree(key);
1367 	return err;
1368 }
1369 
1370 #define MAP_LOOKUP_RETRIES 3
1371 
1372 int generic_map_lookup_batch(struct bpf_map *map,
1373 				    const union bpf_attr *attr,
1374 				    union bpf_attr __user *uattr)
1375 {
1376 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1377 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1378 	void __user *values = u64_to_user_ptr(attr->batch.values);
1379 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1380 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1381 	int err, retry = MAP_LOOKUP_RETRIES;
1382 	u32 value_size, cp, max_count;
1383 
1384 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1385 		return -EINVAL;
1386 
1387 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1388 	    !map_value_has_spin_lock(map))
1389 		return -EINVAL;
1390 
1391 	value_size = bpf_map_value_size(map);
1392 
1393 	max_count = attr->batch.count;
1394 	if (!max_count)
1395 		return 0;
1396 
1397 	if (put_user(0, &uattr->batch.count))
1398 		return -EFAULT;
1399 
1400 	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1401 	if (!buf_prevkey)
1402 		return -ENOMEM;
1403 
1404 	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1405 	if (!buf) {
1406 		kfree(buf_prevkey);
1407 		return -ENOMEM;
1408 	}
1409 
1410 	err = -EFAULT;
1411 	prev_key = NULL;
1412 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1413 		goto free_buf;
1414 	key = buf;
1415 	value = key + map->key_size;
1416 	if (ubatch)
1417 		prev_key = buf_prevkey;
1418 
1419 	for (cp = 0; cp < max_count;) {
1420 		rcu_read_lock();
1421 		err = map->ops->map_get_next_key(map, prev_key, key);
1422 		rcu_read_unlock();
1423 		if (err)
1424 			break;
1425 		err = bpf_map_copy_value(map, key, value,
1426 					 attr->batch.elem_flags);
1427 
1428 		if (err == -ENOENT) {
1429 			if (retry) {
1430 				retry--;
1431 				continue;
1432 			}
1433 			err = -EINTR;
1434 			break;
1435 		}
1436 
1437 		if (err)
1438 			goto free_buf;
1439 
1440 		if (copy_to_user(keys + cp * map->key_size, key,
1441 				 map->key_size)) {
1442 			err = -EFAULT;
1443 			goto free_buf;
1444 		}
1445 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1446 			err = -EFAULT;
1447 			goto free_buf;
1448 		}
1449 
1450 		if (!prev_key)
1451 			prev_key = buf_prevkey;
1452 
1453 		swap(prev_key, key);
1454 		retry = MAP_LOOKUP_RETRIES;
1455 		cp++;
1456 	}
1457 
1458 	if (err == -EFAULT)
1459 		goto free_buf;
1460 
1461 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1462 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1463 		err = -EFAULT;
1464 
1465 free_buf:
1466 	kfree(buf_prevkey);
1467 	kfree(buf);
1468 	return err;
1469 }
1470 
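/*
 * Editor's note: a minimal userspace sketch (not kernel code) of driving the
 * batch interface above; map_fd, n, keys[] and values[] are assumptions,
 * error handling elided. The opaque resume token is key-sized for this
 * generic implementation:
 *
 *	union bpf_attr attr = {};
 *	char token[KEY_SIZE];	// hypothetical KEY_SIZE == map key_size
 *
 *	attr.batch.map_fd    = map_fd;
 *	attr.batch.in_batch  = 0;	// NULL: start from the first key
 *	attr.batch.out_batch = (__u64)(unsigned long)token;
 *	attr.batch.keys      = (__u64)(unsigned long)keys;
 *	attr.batch.values    = (__u64)(unsigned long)values;
 *	for (;;) {
 *		attr.batch.count = n;	// in: capacity, out: elements copied
 *		int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH,
 *				  &attr, sizeof(attr));
 *		// consume attr.batch.count elements from keys[]/values[]
 *		if (err)	// -ENOENT signals end of map
 *			break;
 *		attr.batch.in_batch = attr.batch.out_batch;	// resume here
 *	}
 */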
1471 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1472 
1473 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1474 {
1475 	void __user *ukey = u64_to_user_ptr(attr->key);
1476 	void __user *uvalue = u64_to_user_ptr(attr->value);
1477 	int ufd = attr->map_fd;
1478 	struct bpf_map *map;
1479 	void *key, *value;
1480 	u32 value_size;
1481 	struct fd f;
1482 	int err;
1483 
1484 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1485 		return -EINVAL;
1486 
1487 	f = fdget(ufd);
1488 	map = __bpf_map_get(f);
1489 	if (IS_ERR(map))
1490 		return PTR_ERR(map);
1491 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1492 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1493 		err = -EPERM;
1494 		goto err_put;
1495 	}
1496 
1497 	key = __bpf_copy_key(ukey, map->key_size);
1498 	if (IS_ERR(key)) {
1499 		err = PTR_ERR(key);
1500 		goto err_put;
1501 	}
1502 
1503 	value_size = map->value_size;
1504 
1505 	err = -ENOMEM;
1506 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1507 	if (!value)
1508 		goto free_key;
1509 
1510 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1511 	    map->map_type == BPF_MAP_TYPE_STACK) {
1512 		err = map->ops->map_pop_elem(map, value);
1513 	} else {
1514 		err = -ENOTSUPP;
1515 	}
1516 
1517 	if (err)
1518 		goto free_value;
1519 
1520 	if (copy_to_user(uvalue, value, value_size) != 0) {
1521 		err = -EFAULT;
1522 		goto free_value;
1523 	}
1524 
1525 	err = 0;
1526 
1527 free_value:
1528 	kfree(value);
1529 free_key:
1530 	kfree(key);
1531 err_put:
1532 	fdput(f);
1533 	return err;
1534 }
1535 
1536 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1537 
1538 static int map_freeze(const union bpf_attr *attr)
1539 {
1540 	int err = 0, ufd = attr->map_fd;
1541 	struct bpf_map *map;
1542 	struct fd f;
1543 
1544 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1545 		return -EINVAL;
1546 
1547 	f = fdget(ufd);
1548 	map = __bpf_map_get(f);
1549 	if (IS_ERR(map))
1550 		return PTR_ERR(map);
1551 
1552 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1553 		fdput(f);
1554 		return -ENOTSUPP;
1555 	}
1556 
1557 	mutex_lock(&map->freeze_mutex);
1558 
1559 	if (map->writecnt) {
1560 		err = -EBUSY;
1561 		goto err_put;
1562 	}
1563 	if (READ_ONCE(map->frozen)) {
1564 		err = -EBUSY;
1565 		goto err_put;
1566 	}
1567 	if (!bpf_capable()) {
1568 		err = -EPERM;
1569 		goto err_put;
1570 	}
1571 
1572 	WRITE_ONCE(map->frozen, true);
1573 err_put:
1574 	mutex_unlock(&map->freeze_mutex);
1575 	fdput(f);
1576 	return err;
1577 }
1578 
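/*
 * Editor's note: a minimal userspace sketch (not kernel code); freezing
 * makes the map read-only from the syscall side while programs may still
 * read (or, unless BPF_F_RDONLY_PROG, write) it. map_fd and handle_error()
 * are assumptions:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	if (syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr)))
 *		// fails with EBUSY while a writable mmap() of the map exists
 *		handle_error();	// hypothetical
 */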
1579 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1580 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1581 	[_id] = & _name ## _prog_ops,
1582 #define BPF_MAP_TYPE(_id, _ops)
1583 #define BPF_LINK_TYPE(_id, _name)
1584 #include <linux/bpf_types.h>
1585 #undef BPF_PROG_TYPE
1586 #undef BPF_MAP_TYPE
1587 #undef BPF_LINK_TYPE
1588 };
1589 
1590 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1591 {
1592 	const struct bpf_prog_ops *ops;
1593 
1594 	if (type >= ARRAY_SIZE(bpf_prog_types))
1595 		return -EINVAL;
1596 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1597 	ops = bpf_prog_types[type];
1598 	if (!ops)
1599 		return -EINVAL;
1600 
1601 	if (!bpf_prog_is_dev_bound(prog->aux))
1602 		prog->aux->ops = ops;
1603 	else
1604 		prog->aux->ops = &bpf_offload_prog_ops;
1605 	prog->type = type;
1606 	return 0;
1607 }
1608 
1609 enum bpf_audit {
1610 	BPF_AUDIT_LOAD,
1611 	BPF_AUDIT_UNLOAD,
1612 	BPF_AUDIT_MAX,
1613 };
1614 
1615 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1616 	[BPF_AUDIT_LOAD]   = "LOAD",
1617 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1618 };
1619 
1620 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1621 {
1622 	struct audit_context *ctx = NULL;
1623 	struct audit_buffer *ab;
1624 
1625 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1626 		return;
1627 	if (audit_enabled == AUDIT_OFF)
1628 		return;
1629 	if (op == BPF_AUDIT_LOAD)
1630 		ctx = audit_context();
1631 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1632 	if (unlikely(!ab))
1633 		return;
1634 	audit_log_format(ab, "prog-id=%u op=%s",
1635 			 prog->aux->id, bpf_audit_str[op]);
1636 	audit_log_end(ab);
1637 }
1638 
1639 int __bpf_prog_charge(struct user_struct *user, u32 pages)
1640 {
1641 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1642 	unsigned long user_bufs;
1643 
1644 	if (user) {
1645 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1646 		if (user_bufs > memlock_limit) {
1647 			atomic_long_sub(pages, &user->locked_vm);
1648 			return -EPERM;
1649 		}
1650 	}
1651 
1652 	return 0;
1653 }
1654 
1655 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1656 {
1657 	if (user)
1658 		atomic_long_sub(pages, &user->locked_vm);
1659 }
1660 
1661 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1662 {
1663 	struct user_struct *user = get_current_user();
1664 	int ret;
1665 
1666 	ret = __bpf_prog_charge(user, prog->pages);
1667 	if (ret) {
1668 		free_uid(user);
1669 		return ret;
1670 	}
1671 
1672 	prog->aux->user = user;
1673 	return 0;
1674 }
1675 
1676 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1677 {
1678 	struct user_struct *user = prog->aux->user;
1679 
1680 	__bpf_prog_uncharge(user, prog->pages);
1681 	free_uid(user);
1682 }
1683 
1684 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1685 {
1686 	int id;
1687 
1688 	idr_preload(GFP_KERNEL);
1689 	spin_lock_bh(&prog_idr_lock);
1690 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1691 	if (id > 0)
1692 		prog->aux->id = id;
1693 	spin_unlock_bh(&prog_idr_lock);
1694 	idr_preload_end();
1695 
1696 	/* id is in [1, INT_MAX) */
1697 	if (WARN_ON_ONCE(!id))
1698 		return -ENOSPC;
1699 
1700 	return id > 0 ? 0 : id;
1701 }
1702 
1703 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1704 {
1705 	/* cBPF to eBPF migrations are currently not in the idr store.
1706 	 * Offloaded programs are removed from the store when their device
1707 	 * disappears - even if someone grabs an fd to them they are unusable,
1708 	 * simply waiting for refcnt to drop to be freed.
1709 	 */
1710 	if (!prog->aux->id)
1711 		return;
1712 
1713 	if (do_idr_lock)
1714 		spin_lock_bh(&prog_idr_lock);
1715 	else
1716 		__acquire(&prog_idr_lock);
1717 
1718 	idr_remove(&prog_idr, prog->aux->id);
1719 	prog->aux->id = 0;
1720 
1721 	if (do_idr_lock)
1722 		spin_unlock_bh(&prog_idr_lock);
1723 	else
1724 		__release(&prog_idr_lock);
1725 }
1726 
1727 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1728 {
1729 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1730 
1731 	kvfree(aux->func_info);
1732 	kfree(aux->func_info_aux);
1733 	bpf_prog_uncharge_memlock(aux->prog);
1734 	security_bpf_prog_free(aux);
1735 	bpf_prog_free(aux->prog);
1736 }
1737 
1738 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1739 {
1740 	bpf_prog_kallsyms_del_all(prog);
1741 	btf_put(prog->aux->btf);
1742 	bpf_prog_free_linfo(prog);
1743 
1744 	if (deferred)
1745 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1746 	else
1747 		__bpf_prog_put_rcu(&prog->aux->rcu);
1748 }
1749 
1750 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1751 {
1752 	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1753 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1754 		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1755 		/* bpf_prog_free_id() must be called first */
1756 		bpf_prog_free_id(prog, do_idr_lock);
1757 		__bpf_prog_put_noref(prog, true);
1758 	}
1759 }
1760 
1761 void bpf_prog_put(struct bpf_prog *prog)
1762 {
1763 	__bpf_prog_put(prog, true);
1764 }
1765 EXPORT_SYMBOL_GPL(bpf_prog_put);
1766 
1767 static int bpf_prog_release(struct inode *inode, struct file *filp)
1768 {
1769 	struct bpf_prog *prog = filp->private_data;
1770 
1771 	bpf_prog_put(prog);
1772 	return 0;
1773 }
1774 
1775 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1776 			       struct bpf_prog_stats *stats)
1777 {
1778 	u64 nsecs = 0, cnt = 0;
1779 	int cpu;
1780 
1781 	for_each_possible_cpu(cpu) {
1782 		const struct bpf_prog_stats *st;
1783 		unsigned int start;
1784 		u64 tnsecs, tcnt;
1785 
1786 		st = per_cpu_ptr(prog->aux->stats, cpu);
1787 		do {
1788 			start = u64_stats_fetch_begin_irq(&st->syncp);
1789 			tnsecs = st->nsecs;
1790 			tcnt = st->cnt;
1791 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1792 		nsecs += tnsecs;
1793 		cnt += tcnt;
1794 	}
1795 	stats->nsecs = nsecs;
1796 	stats->cnt = cnt;
1797 }
1798 
1799 #ifdef CONFIG_PROC_FS
1800 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1801 {
1802 	const struct bpf_prog *prog = filp->private_data;
1803 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1804 	struct bpf_prog_stats stats;
1805 
1806 	bpf_prog_get_stats(prog, &stats);
1807 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1808 	seq_printf(m,
1809 		   "prog_type:\t%u\n"
1810 		   "prog_jited:\t%u\n"
1811 		   "prog_tag:\t%s\n"
1812 		   "memlock:\t%llu\n"
1813 		   "prog_id:\t%u\n"
1814 		   "run_time_ns:\t%llu\n"
1815 		   "run_cnt:\t%llu\n",
1816 		   prog->type,
1817 		   prog->jited,
1818 		   prog_tag,
1819 		   prog->pages * 1ULL << PAGE_SHIFT,
1820 		   prog->aux->id,
1821 		   stats.nsecs,
1822 		   stats.cnt);
1823 }
1824 #endif
1825 
1826 const struct file_operations bpf_prog_fops = {
1827 #ifdef CONFIG_PROC_FS
1828 	.show_fdinfo	= bpf_prog_show_fdinfo,
1829 #endif
1830 	.release	= bpf_prog_release,
1831 	.read		= bpf_dummy_read,
1832 	.write		= bpf_dummy_write,
1833 };
1834 
1835 int bpf_prog_new_fd(struct bpf_prog *prog)
1836 {
1837 	int ret;
1838 
1839 	ret = security_bpf_prog(prog);
1840 	if (ret < 0)
1841 		return ret;
1842 
1843 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1844 				O_RDWR | O_CLOEXEC);
1845 }
1846 
1847 static struct bpf_prog *____bpf_prog_get(struct fd f)
1848 {
1849 	if (!f.file)
1850 		return ERR_PTR(-EBADF);
1851 	if (f.file->f_op != &bpf_prog_fops) {
1852 		fdput(f);
1853 		return ERR_PTR(-EINVAL);
1854 	}
1855 
1856 	return f.file->private_data;
1857 }
1858 
1859 void bpf_prog_add(struct bpf_prog *prog, int i)
1860 {
1861 	atomic64_add(i, &prog->aux->refcnt);
1862 }
1863 EXPORT_SYMBOL_GPL(bpf_prog_add);
1864 
1865 void bpf_prog_sub(struct bpf_prog *prog, int i)
1866 {
1867 	/* Only to be used for undoing previous bpf_prog_add() in some
1868 	 * error path. We still know that another entity in our call
1869 	 * path holds a reference to the program, thus atomic_sub() can
1870 	 * be safely used in such cases!
1871 	 */
1872 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1873 }
1874 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1875 
1876 void bpf_prog_inc(struct bpf_prog *prog)
1877 {
1878 	atomic64_inc(&prog->aux->refcnt);
1879 }
1880 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1881 
1882 /* prog_idr_lock should have been held */
1883 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1884 {
1885 	int refold;
1886 
1887 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1888 
1889 	if (!refold)
1890 		return ERR_PTR(-ENOENT);
1891 
1892 	return prog;
1893 }
1894 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1895 
1896 bool bpf_prog_get_ok(struct bpf_prog *prog,
1897 			    enum bpf_prog_type *attach_type, bool attach_drv)
1898 {
1899 	/* not an attachment, just a refcount inc, always allow */
1900 	if (!attach_type)
1901 		return true;
1902 
1903 	if (prog->type != *attach_type)
1904 		return false;
1905 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1906 		return false;
1907 
1908 	return true;
1909 }
1910 
1911 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1912 				       bool attach_drv)
1913 {
1914 	struct fd f = fdget(ufd);
1915 	struct bpf_prog *prog;
1916 
1917 	prog = ____bpf_prog_get(f);
1918 	if (IS_ERR(prog))
1919 		return prog;
1920 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1921 		prog = ERR_PTR(-EINVAL);
1922 		goto out;
1923 	}
1924 
1925 	bpf_prog_inc(prog);
1926 out:
1927 	fdput(f);
1928 	return prog;
1929 }
1930 
1931 struct bpf_prog *bpf_prog_get(u32 ufd)
1932 {
1933 	return __bpf_prog_get(ufd, NULL, false);
1934 }
1935 
1936 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1937 				       bool attach_drv)
1938 {
1939 	return __bpf_prog_get(ufd, &type, attach_drv);
1940 }
1941 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
1942 
1943 /* Initially all BPF programs could be loaded w/o specifying
1944  * expected_attach_type. Later for some of them specifying expected_attach_type
1945  * at load time became required so that program could be validated properly.
1946  * Programs of types that are allowed to be loaded both w/ and w/o (for
1947  * backward compatibility) expected_attach_type, should have the default attach
1948  * type assigned to expected_attach_type for the latter case, so that it can be
1949  * validated later at attach time.
1950  *
1951  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1952  * prog type requires it but has some attach types that have to be backward
1953  * compatible.
1954  */
1955 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1956 {
1957 	switch (attr->prog_type) {
1958 	case BPF_PROG_TYPE_CGROUP_SOCK:
1959 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1960 		 * exist so checking for non-zero is the way to go here.
1961 		 */
1962 		if (!attr->expected_attach_type)
1963 			attr->expected_attach_type =
1964 				BPF_CGROUP_INET_SOCK_CREATE;
1965 		break;
1966 	}
1967 }
1968 
1969 static int
1970 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1971 			   enum bpf_attach_type expected_attach_type,
1972 			   u32 btf_id, u32 prog_fd)
1973 {
1974 	if (btf_id) {
1975 		if (btf_id > BTF_MAX_TYPE)
1976 			return -EINVAL;
1977 
1978 		switch (prog_type) {
1979 		case BPF_PROG_TYPE_TRACING:
1980 		case BPF_PROG_TYPE_LSM:
1981 		case BPF_PROG_TYPE_STRUCT_OPS:
1982 		case BPF_PROG_TYPE_EXT:
1983 			break;
1984 		default:
1985 			return -EINVAL;
1986 		}
1987 	}
1988 
1989 	if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
1990 	    prog_type != BPF_PROG_TYPE_EXT)
1991 		return -EINVAL;
1992 
1993 	switch (prog_type) {
1994 	case BPF_PROG_TYPE_CGROUP_SOCK:
1995 		switch (expected_attach_type) {
1996 		case BPF_CGROUP_INET_SOCK_CREATE:
1997 		case BPF_CGROUP_INET4_POST_BIND:
1998 		case BPF_CGROUP_INET6_POST_BIND:
1999 			return 0;
2000 		default:
2001 			return -EINVAL;
2002 		}
2003 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2004 		switch (expected_attach_type) {
2005 		case BPF_CGROUP_INET4_BIND:
2006 		case BPF_CGROUP_INET6_BIND:
2007 		case BPF_CGROUP_INET4_CONNECT:
2008 		case BPF_CGROUP_INET6_CONNECT:
2009 		case BPF_CGROUP_INET4_GETPEERNAME:
2010 		case BPF_CGROUP_INET6_GETPEERNAME:
2011 		case BPF_CGROUP_INET4_GETSOCKNAME:
2012 		case BPF_CGROUP_INET6_GETSOCKNAME:
2013 		case BPF_CGROUP_UDP4_SENDMSG:
2014 		case BPF_CGROUP_UDP6_SENDMSG:
2015 		case BPF_CGROUP_UDP4_RECVMSG:
2016 		case BPF_CGROUP_UDP6_RECVMSG:
2017 			return 0;
2018 		default:
2019 			return -EINVAL;
2020 		}
2021 	case BPF_PROG_TYPE_CGROUP_SKB:
2022 		switch (expected_attach_type) {
2023 		case BPF_CGROUP_INET_INGRESS:
2024 		case BPF_CGROUP_INET_EGRESS:
2025 			return 0;
2026 		default:
2027 			return -EINVAL;
2028 		}
2029 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2030 		switch (expected_attach_type) {
2031 		case BPF_CGROUP_SETSOCKOPT:
2032 		case BPF_CGROUP_GETSOCKOPT:
2033 			return 0;
2034 		default:
2035 			return -EINVAL;
2036 		}
2037 	case BPF_PROG_TYPE_EXT:
2038 		if (expected_attach_type)
2039 			return -EINVAL;
2040 		/* fallthrough */
2041 	default:
2042 		return 0;
2043 	}
2044 }
2045 
2046 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2047 {
2048 	switch (prog_type) {
2049 	case BPF_PROG_TYPE_SCHED_CLS:
2050 	case BPF_PROG_TYPE_SCHED_ACT:
2051 	case BPF_PROG_TYPE_XDP:
2052 	case BPF_PROG_TYPE_LWT_IN:
2053 	case BPF_PROG_TYPE_LWT_OUT:
2054 	case BPF_PROG_TYPE_LWT_XMIT:
2055 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2056 	case BPF_PROG_TYPE_SK_SKB:
2057 	case BPF_PROG_TYPE_SK_MSG:
2058 	case BPF_PROG_TYPE_LIRC_MODE2:
2059 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2060 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2061 	case BPF_PROG_TYPE_CGROUP_SOCK:
2062 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2063 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2064 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2065 	case BPF_PROG_TYPE_SOCK_OPS:
2066 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2067 		return true;
2068 	case BPF_PROG_TYPE_CGROUP_SKB:
2069 		/* always unpriv */
2070 	case BPF_PROG_TYPE_SK_REUSEPORT:
2071 		/* equivalent to SOCKET_FILTER; needs CAP_BPF only */
2072 	default:
2073 		return false;
2074 	}
2075 }
2076 
2077 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2078 {
2079 	switch (prog_type) {
2080 	case BPF_PROG_TYPE_KPROBE:
2081 	case BPF_PROG_TYPE_TRACEPOINT:
2082 	case BPF_PROG_TYPE_PERF_EVENT:
2083 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2084 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2085 	case BPF_PROG_TYPE_TRACING:
2086 	case BPF_PROG_TYPE_LSM:
2087 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2088 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2089 		return true;
2090 	default:
2091 		return false;
2092 	}
2093 }
2094 
2095 /* last field in 'union bpf_attr' used by this command */
2096 #define	BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2097 
2098 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2099 {
2100 	enum bpf_prog_type type = attr->prog_type;
2101 	struct bpf_prog *prog;
2102 	int err;
2103 	char license[128];
2104 	bool is_gpl;
2105 
2106 	if (CHECK_ATTR(BPF_PROG_LOAD))
2107 		return -EINVAL;
2108 
2109 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2110 				 BPF_F_ANY_ALIGNMENT |
2111 				 BPF_F_TEST_STATE_FREQ |
2112 				 BPF_F_TEST_RND_HI32))
2113 		return -EINVAL;
2114 
2115 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2116 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2117 	    !bpf_capable())
2118 		return -EPERM;
2119 
2120 	/* copy eBPF program license from user space */
2121 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2122 			      sizeof(license) - 1) < 0)
2123 		return -EFAULT;
2124 	license[sizeof(license) - 1] = 0;
2125 
2126 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2127 	is_gpl = license_is_gpl_compatible(license);
2128 
2129 	if (attr->insn_cnt == 0 ||
2130 	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2131 		return -E2BIG;
2132 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2133 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2134 	    !bpf_capable())
2135 		return -EPERM;
2136 
2137 	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
2138 		return -EPERM;
2139 	if (is_perfmon_prog_type(type) && !perfmon_capable())
2140 		return -EPERM;
2141 
2142 	bpf_prog_load_fixup_attach_type(attr);
2143 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2144 				       attr->attach_btf_id,
2145 				       attr->attach_prog_fd))
2146 		return -EINVAL;
2147 
2148 	/* plain bpf_prog allocation */
2149 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2150 	if (!prog)
2151 		return -ENOMEM;
2152 
2153 	prog->expected_attach_type = attr->expected_attach_type;
2154 	prog->aux->attach_btf_id = attr->attach_btf_id;
2155 	if (attr->attach_prog_fd) {
2156 		struct bpf_prog *tgt_prog;
2157 
2158 		tgt_prog = bpf_prog_get(attr->attach_prog_fd);
2159 		if (IS_ERR(tgt_prog)) {
2160 			err = PTR_ERR(tgt_prog);
2161 			goto free_prog_nouncharge;
2162 		}
2163 		prog->aux->linked_prog = tgt_prog;
2164 	}
2165 
2166 	prog->aux->offload_requested = !!attr->prog_ifindex;
2167 
2168 	err = security_bpf_prog_alloc(prog->aux);
2169 	if (err)
2170 		goto free_prog_nouncharge;
2171 
2172 	err = bpf_prog_charge_memlock(prog);
2173 	if (err)
2174 		goto free_prog_sec;
2175 
2176 	prog->len = attr->insn_cnt;
2177 
2178 	err = -EFAULT;
2179 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2180 			   bpf_prog_insn_size(prog)) != 0)
2181 		goto free_prog;
2182 
2183 	prog->orig_prog = NULL;
2184 	prog->jited = 0;
2185 
2186 	atomic64_set(&prog->aux->refcnt, 1);
2187 	prog->gpl_compatible = is_gpl ? 1 : 0;
2188 
2189 	if (bpf_prog_is_dev_bound(prog->aux)) {
2190 		err = bpf_prog_offload_init(prog, attr);
2191 		if (err)
2192 			goto free_prog;
2193 	}
2194 
2195 	/* find program type: socket_filter vs tracing_filter */
2196 	err = find_prog_type(type, prog);
2197 	if (err < 0)
2198 		goto free_prog;
2199 
2200 	prog->aux->load_time = ktime_get_boottime_ns();
2201 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2202 			       sizeof(attr->prog_name));
2203 	if (err < 0)
2204 		goto free_prog;
2205 
2206 	/* run eBPF verifier */
2207 	err = bpf_check(&prog, attr, uattr);
2208 	if (err < 0)
2209 		goto free_used_maps;
2210 
2211 	prog = bpf_prog_select_runtime(prog, &err);
2212 	if (err < 0)
2213 		goto free_used_maps;
2214 
2215 	err = bpf_prog_alloc_id(prog);
2216 	if (err)
2217 		goto free_used_maps;
2218 
2219 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2220 	 * effectively publicly exposed. However, retrieving via
2221 	 * bpf_prog_get_fd_by_id() will take another reference,
2222 	 * so it cannot vanish underneath us.
2223 	 *
2224 	 * Only for the time /after/ successful bpf_prog_new_fd()
2225 	 * and before returning to userspace, we might just hold
2226 	 * one reference and any parallel close on that fd could
2227 	 * rip everything out. Hence, below notifications must
2228 	 * happen before bpf_prog_new_fd().
2229 	 *
2230 	 * Also, any failure handling from this point onwards must
2231 	 * be using bpf_prog_put() given the program is exposed.
2232 	 */
2233 	bpf_prog_kallsyms_add(prog);
2234 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2235 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2236 
2237 	err = bpf_prog_new_fd(prog);
2238 	if (err < 0)
2239 		bpf_prog_put(prog);
2240 	return err;
2241 
2242 free_used_maps:
2243 	/* In case we have subprogs, we need to wait for a grace
2244 	 * period before we can tear down JIT memory since symbols
2245 	 * are already exposed under kallsyms.
2246 	 */
2247 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2248 	return err;
2249 free_prog:
2250 	bpf_prog_uncharge_memlock(prog);
2251 free_prog_sec:
2252 	security_bpf_prog_free(prog->aux);
2253 free_prog_nouncharge:
2254 	bpf_prog_free(prog);
2255 	return err;
2256 }
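
/* Illustrative sketch of the user-space side of BPF_PROG_LOAD (assumes the
 * standard <linux/bpf.h> UAPI; prog_fd naming is a placeholder and error
 * handling is elided):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },	// return r0 (== 0)
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */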
2257 
2258 #define BPF_OBJ_LAST_FIELD file_flags
2259 
2260 static int bpf_obj_pin(const union bpf_attr *attr)
2261 {
2262 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2263 		return -EINVAL;
2264 
2265 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2266 }
2267 
2268 static int bpf_obj_get(const union bpf_attr *attr)
2269 {
2270 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2271 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2272 		return -EINVAL;
2273 
2274 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2275 				attr->file_flags);
2276 }
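
/* Illustrative sketch (path and fd names are placeholders): pinning an
 * object into bpffs and re-opening it later:
 *
 *	union bpf_attr attr = {};
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd   = prog_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;	// must be zero for BPF_OBJ_GET, see above
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */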
2277 
2278 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2279 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2280 {
2281 	atomic64_set(&link->refcnt, 1);
2282 	link->type = type;
2283 	link->id = 0;
2284 	link->ops = ops;
2285 	link->prog = prog;
2286 }
2287 
2288 static void bpf_link_free_id(int id)
2289 {
2290 	if (!id)
2291 		return;
2292 
2293 	spin_lock_bh(&link_idr_lock);
2294 	idr_remove(&link_idr, id);
2295 	spin_unlock_bh(&link_idr_lock);
2296 }
2297 
2298 /* Clean up bpf_link and the corresponding anon_inode file and FD. After
2299  * the anon_inode is created, bpf_link can't simply be kfree()'d due to the
2300  * deferred anon_inode release() call. This helper marks bpf_link as
2301  * defunct, releases the anon_inode file and puts the reserved FD. bpf_prog's
2302  * refcnt is not decremented; that is the responsibility of the calling code
2303  * that failed to complete bpf_link initialization.
2304  */
2305 void bpf_link_cleanup(struct bpf_link_primer *primer)
2306 {
2307 	primer->link->prog = NULL;
2308 	bpf_link_free_id(primer->id);
2309 	fput(primer->file);
2310 	put_unused_fd(primer->fd);
2311 }
2312 
2313 void bpf_link_inc(struct bpf_link *link)
2314 {
2315 	atomic64_inc(&link->refcnt);
2316 }
2317 
2318 /* bpf_link_free is guaranteed to be called from process context */
2319 static void bpf_link_free(struct bpf_link *link)
2320 {
2321 	bpf_link_free_id(link->id);
2322 	if (link->prog) {
2323 		/* detach BPF program, clean up used resources */
2324 		link->ops->release(link);
2325 		bpf_prog_put(link->prog);
2326 	}
2327 	/* free bpf_link and its containing memory */
2328 	link->ops->dealloc(link);
2329 }
2330 
2331 static void bpf_link_put_deferred(struct work_struct *work)
2332 {
2333 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2334 
2335 	bpf_link_free(link);
2336 }
2337 
2338 /* bpf_link_put can be called from atomic context, but ensures that resources
2339  * are freed from process context
2340  */
2341 void bpf_link_put(struct bpf_link *link)
2342 {
2343 	if (!atomic64_dec_and_test(&link->refcnt))
2344 		return;
2345 
2346 	if (in_atomic()) {
2347 		INIT_WORK(&link->work, bpf_link_put_deferred);
2348 		schedule_work(&link->work);
2349 	} else {
2350 		bpf_link_free(link);
2351 	}
2352 }
2353 
2354 static int bpf_link_release(struct inode *inode, struct file *filp)
2355 {
2356 	struct bpf_link *link = filp->private_data;
2357 
2358 	bpf_link_put(link);
2359 	return 0;
2360 }
2361 
2362 #ifdef CONFIG_PROC_FS
2363 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2364 #define BPF_MAP_TYPE(_id, _ops)
2365 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2366 static const char *bpf_link_type_strs[] = {
2367 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2368 #include <linux/bpf_types.h>
2369 };
2370 #undef BPF_PROG_TYPE
2371 #undef BPF_MAP_TYPE
2372 #undef BPF_LINK_TYPE
2373 
2374 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2375 {
2376 	const struct bpf_link *link = filp->private_data;
2377 	const struct bpf_prog *prog = link->prog;
2378 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2379 
2380 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2381 	seq_printf(m,
2382 		   "link_type:\t%s\n"
2383 		   "link_id:\t%u\n"
2384 		   "prog_tag:\t%s\n"
2385 		   "prog_id:\t%u\n",
2386 		   bpf_link_type_strs[link->type],
2387 		   link->id,
2388 		   prog_tag,
2389 		   prog->aux->id);
2390 	if (link->ops->show_fdinfo)
2391 		link->ops->show_fdinfo(link, m);
2392 }
2393 #endif
2394 
2395 static const struct file_operations bpf_link_fops = {
2396 #ifdef CONFIG_PROC_FS
2397 	.show_fdinfo	= bpf_link_show_fdinfo,
2398 #endif
2399 	.release	= bpf_link_release,
2400 	.read		= bpf_dummy_read,
2401 	.write		= bpf_dummy_write,
2402 };
2403 
2404 static int bpf_link_alloc_id(struct bpf_link *link)
2405 {
2406 	int id;
2407 
2408 	idr_preload(GFP_KERNEL);
2409 	spin_lock_bh(&link_idr_lock);
2410 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2411 	spin_unlock_bh(&link_idr_lock);
2412 	idr_preload_end();
2413 
2414 	return id;
2415 }
2416 
2417 /* Prepare bpf_link to be exposed to user-space by allocating an anon_inode
2418  * file, reserving an unused FD and allocating an ID from link_idr. This is
2419  * to be paired with bpf_link_settle() to install the FD and ID and expose
2420  * bpf_link to user-space if bpf_link is successfully attached. If not,
2421  * bpf_link and the pre-allocated resources are to be freed with a
2422  * bpf_link_cleanup() call; all transient state is passed around in
2423  * struct bpf_link_primer. This is the preferred way to create and
2424  * initialize bpf_link, especially when there are complicated and expensive
2425  * operations in between creating the bpf_link and attaching it to a BPF
2426  * hook. With bpf_link_prime() and bpf_link_settle(), kernel code using
2427  * bpf_link doesn't have to perform expensive (and potentially failing)
2428  * rollback operations in the rare case that file, FD, or ID allocation fails.
2429  */
2430 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2431 {
2432 	struct file *file;
2433 	int fd, id;
2434 
2435 	fd = get_unused_fd_flags(O_CLOEXEC);
2436 	if (fd < 0)
2437 		return fd;
2438 
2440 	id = bpf_link_alloc_id(link);
2441 	if (id < 0) {
2442 		put_unused_fd(fd);
2443 		return id;
2444 	}
2445 
2446 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2447 	if (IS_ERR(file)) {
2448 		bpf_link_free_id(id);
2449 		put_unused_fd(fd);
2450 		return PTR_ERR(file);
2451 	}
2452 
2453 	primer->link = link;
2454 	primer->file = file;
2455 	primer->fd = fd;
2456 	primer->id = id;
2457 	return 0;
2458 }
2459 
2460 int bpf_link_settle(struct bpf_link_primer *primer)
2461 {
2462 	/* make bpf_link fetchable by ID */
2463 	spin_lock_bh(&link_idr_lock);
2464 	primer->link->id = primer->id;
2465 	spin_unlock_bh(&link_idr_lock);
2466 	/* make bpf_link fetchable by FD */
2467 	fd_install(primer->fd, primer->file);
2468 	/* pass through installed FD */
2469 	return primer->fd;
2470 }
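
/* Illustrative sketch of the prime/attach/settle pattern (it mirrors
 * bpf_tracing_prog_attach() below; attach_to_hook() is a placeholder for
 * the hook-specific attach step):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// link was never exposed
 *		goto out;
 *	}
 *	err = attach_to_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// undoes file, FD and ID
 *		goto out;
 *	}
 *	return bpf_link_settle(&primer);	// installs FD, publishes ID
 */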
2471 
2472 int bpf_link_new_fd(struct bpf_link *link)
2473 {
2474 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2475 }
2476 
2477 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2478 {
2479 	struct fd f = fdget(ufd);
2480 	struct bpf_link *link;
2481 
2482 	if (!f.file)
2483 		return ERR_PTR(-EBADF);
2484 	if (f.file->f_op != &bpf_link_fops) {
2485 		fdput(f);
2486 		return ERR_PTR(-EINVAL);
2487 	}
2488 
2489 	link = f.file->private_data;
2490 	bpf_link_inc(link);
2491 	fdput(f);
2492 
2493 	return link;
2494 }
2495 
2496 struct bpf_tracing_link {
2497 	struct bpf_link link;
2498 	enum bpf_attach_type attach_type;
2499 };
2500 
2501 static void bpf_tracing_link_release(struct bpf_link *link)
2502 {
2503 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
2504 }
2505 
2506 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2507 {
2508 	struct bpf_tracing_link *tr_link =
2509 		container_of(link, struct bpf_tracing_link, link);
2510 
2511 	kfree(tr_link);
2512 }
2513 
2514 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2515 					 struct seq_file *seq)
2516 {
2517 	struct bpf_tracing_link *tr_link =
2518 		container_of(link, struct bpf_tracing_link, link);
2519 
2520 	seq_printf(seq,
2521 		   "attach_type:\t%d\n",
2522 		   tr_link->attach_type);
2523 }
2524 
2525 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2526 					   struct bpf_link_info *info)
2527 {
2528 	struct bpf_tracing_link *tr_link =
2529 		container_of(link, struct bpf_tracing_link, link);
2530 
2531 	info->tracing.attach_type = tr_link->attach_type;
2532 
2533 	return 0;
2534 }
2535 
2536 static const struct bpf_link_ops bpf_tracing_link_lops = {
2537 	.release = bpf_tracing_link_release,
2538 	.dealloc = bpf_tracing_link_dealloc,
2539 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2540 	.fill_link_info = bpf_tracing_link_fill_link_info,
2541 };
2542 
2543 static int bpf_tracing_prog_attach(struct bpf_prog *prog)
2544 {
2545 	struct bpf_link_primer link_primer;
2546 	struct bpf_tracing_link *link;
2547 	int err;
2548 
2549 	switch (prog->type) {
2550 	case BPF_PROG_TYPE_TRACING:
2551 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2552 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2553 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2554 			err = -EINVAL;
2555 			goto out_put_prog;
2556 		}
2557 		break;
2558 	case BPF_PROG_TYPE_EXT:
2559 		if (prog->expected_attach_type != 0) {
2560 			err = -EINVAL;
2561 			goto out_put_prog;
2562 		}
2563 		break;
2564 	case BPF_PROG_TYPE_LSM:
2565 		if (prog->expected_attach_type != BPF_LSM_MAC) {
2566 			err = -EINVAL;
2567 			goto out_put_prog;
2568 		}
2569 		break;
2570 	default:
2571 		err = -EINVAL;
2572 		goto out_put_prog;
2573 	}
2574 
2575 	link = kzalloc(sizeof(*link), GFP_USER);
2576 	if (!link) {
2577 		err = -ENOMEM;
2578 		goto out_put_prog;
2579 	}
2580 	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2581 		      &bpf_tracing_link_lops, prog);
2582 	link->attach_type = prog->expected_attach_type;
2583 
2584 	err = bpf_link_prime(&link->link, &link_primer);
2585 	if (err) {
2586 		kfree(link);
2587 		goto out_put_prog;
2588 	}
2589 
2590 	err = bpf_trampoline_link_prog(prog);
2591 	if (err) {
2592 		bpf_link_cleanup(&link_primer);
2593 		goto out_put_prog;
2594 	}
2595 
2596 	return bpf_link_settle(&link_primer);
2597 out_put_prog:
2598 	bpf_prog_put(prog);
2599 	return err;
2600 }
2601 
2602 struct bpf_raw_tp_link {
2603 	struct bpf_link link;
2604 	struct bpf_raw_event_map *btp;
2605 };
2606 
2607 static void bpf_raw_tp_link_release(struct bpf_link *link)
2608 {
2609 	struct bpf_raw_tp_link *raw_tp =
2610 		container_of(link, struct bpf_raw_tp_link, link);
2611 
2612 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2613 	bpf_put_raw_tracepoint(raw_tp->btp);
2614 }
2615 
2616 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2617 {
2618 	struct bpf_raw_tp_link *raw_tp =
2619 		container_of(link, struct bpf_raw_tp_link, link);
2620 
2621 	kfree(raw_tp);
2622 }
2623 
2624 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2625 					struct seq_file *seq)
2626 {
2627 	struct bpf_raw_tp_link *raw_tp_link =
2628 		container_of(link, struct bpf_raw_tp_link, link);
2629 
2630 	seq_printf(seq,
2631 		   "tp_name:\t%s\n",
2632 		   raw_tp_link->btp->tp->name);
2633 }
2634 
2635 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2636 					  struct bpf_link_info *info)
2637 {
2638 	struct bpf_raw_tp_link *raw_tp_link =
2639 		container_of(link, struct bpf_raw_tp_link, link);
2640 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2641 	const char *tp_name = raw_tp_link->btp->tp->name;
2642 	u32 ulen = info->raw_tracepoint.tp_name_len;
2643 	size_t tp_len = strlen(tp_name);
2644 
2645 	if (ulen && !ubuf)
2646 		return -EINVAL;
2647 
2648 	info->raw_tracepoint.tp_name_len = tp_len + 1;
2649 
2650 	if (!ubuf)
2651 		return 0;
2652 
2653 	if (ulen >= tp_len + 1) {
2654 		if (copy_to_user(ubuf, tp_name, tp_len + 1))
2655 			return -EFAULT;
2656 	} else {
2657 		char zero = '\0';
2658 
2659 		if (copy_to_user(ubuf, tp_name, ulen - 1))
2660 			return -EFAULT;
2661 		if (put_user(zero, ubuf + ulen - 1))
2662 			return -EFAULT;
2663 		return -ENOSPC;
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2670 	.release = bpf_raw_tp_link_release,
2671 	.dealloc = bpf_raw_tp_link_dealloc,
2672 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2673 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
2674 };
2675 
2676 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2677 
2678 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2679 {
2680 	struct bpf_link_primer link_primer;
2681 	struct bpf_raw_tp_link *link;
2682 	struct bpf_raw_event_map *btp;
2683 	struct bpf_prog *prog;
2684 	const char *tp_name;
2685 	char buf[128];
2686 	int err;
2687 
2688 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2689 		return -EINVAL;
2690 
2691 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2692 	if (IS_ERR(prog))
2693 		return PTR_ERR(prog);
2694 
2695 	switch (prog->type) {
2696 	case BPF_PROG_TYPE_TRACING:
2697 	case BPF_PROG_TYPE_EXT:
2698 	case BPF_PROG_TYPE_LSM:
2699 		if (attr->raw_tracepoint.name) {
2700 			/* The attach point for this category of programs
2701 			 * should be specified via btf_id during program load.
2702 			 */
2703 			err = -EINVAL;
2704 			goto out_put_prog;
2705 		}
2706 		if (prog->type == BPF_PROG_TYPE_TRACING &&
2707 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2708 			tp_name = prog->aux->attach_func_name;
2709 			break;
2710 		}
2711 		return bpf_tracing_prog_attach(prog);
2712 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2713 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2714 		if (strncpy_from_user(buf,
2715 				      u64_to_user_ptr(attr->raw_tracepoint.name),
2716 				      sizeof(buf) - 1) < 0) {
2717 			err = -EFAULT;
2718 			goto out_put_prog;
2719 		}
2720 		buf[sizeof(buf) - 1] = 0;
2721 		tp_name = buf;
2722 		break;
2723 	default:
2724 		err = -EINVAL;
2725 		goto out_put_prog;
2726 	}
2727 
2728 	btp = bpf_get_raw_tracepoint(tp_name);
2729 	if (!btp) {
2730 		err = -ENOENT;
2731 		goto out_put_prog;
2732 	}
2733 
2734 	link = kzalloc(sizeof(*link), GFP_USER);
2735 	if (!link) {
2736 		err = -ENOMEM;
2737 		goto out_put_btp;
2738 	}
2739 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2740 		      &bpf_raw_tp_link_lops, prog);
2741 	link->btp = btp;
2742 
2743 	err = bpf_link_prime(&link->link, &link_primer);
2744 	if (err) {
2745 		kfree(link);
2746 		goto out_put_btp;
2747 	}
2748 
2749 	err = bpf_probe_register(link->btp, prog);
2750 	if (err) {
2751 		bpf_link_cleanup(&link_primer);
2752 		goto out_put_btp;
2753 	}
2754 
2755 	return bpf_link_settle(&link_primer);
2756 
2757 out_put_btp:
2758 	bpf_put_raw_tracepoint(btp);
2759 out_put_prog:
2760 	bpf_prog_put(prog);
2761 	return err;
2762 }
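
/* Illustrative sketch (tracepoint and fd names are placeholders): attaching
 * a BPF_PROG_TYPE_RAW_TRACEPOINT program by tracepoint name:
 *
 *	union bpf_attr attr = {};
 *	attr.raw_tracepoint.name    = (__u64)(unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	int link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			      &attr, sizeof(attr));
 */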
2763 
2764 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2765 					     enum bpf_attach_type attach_type)
2766 {
2767 	switch (prog->type) {
2768 	case BPF_PROG_TYPE_CGROUP_SOCK:
2769 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2770 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2771 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2772 	case BPF_PROG_TYPE_CGROUP_SKB:
2773 		if (!capable(CAP_NET_ADMIN))
2774 			/* cg-skb progs can be loaded by unpriv user.
2775 			 * check permissions at attach time.
2776 			 */
2777 			return -EPERM;
2778 		return prog->enforce_expected_attach_type &&
2779 			prog->expected_attach_type != attach_type ?
2780 			-EINVAL : 0;
2781 	default:
2782 		return 0;
2783 	}
2784 }
2785 
2786 static enum bpf_prog_type
2787 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2788 {
2789 	switch (attach_type) {
2790 	case BPF_CGROUP_INET_INGRESS:
2791 	case BPF_CGROUP_INET_EGRESS:
2792 		return BPF_PROG_TYPE_CGROUP_SKB;
2794 	case BPF_CGROUP_INET_SOCK_CREATE:
2795 	case BPF_CGROUP_INET4_POST_BIND:
2796 	case BPF_CGROUP_INET6_POST_BIND:
2797 		return BPF_PROG_TYPE_CGROUP_SOCK;
2798 	case BPF_CGROUP_INET4_BIND:
2799 	case BPF_CGROUP_INET6_BIND:
2800 	case BPF_CGROUP_INET4_CONNECT:
2801 	case BPF_CGROUP_INET6_CONNECT:
2802 	case BPF_CGROUP_INET4_GETPEERNAME:
2803 	case BPF_CGROUP_INET6_GETPEERNAME:
2804 	case BPF_CGROUP_INET4_GETSOCKNAME:
2805 	case BPF_CGROUP_INET6_GETSOCKNAME:
2806 	case BPF_CGROUP_UDP4_SENDMSG:
2807 	case BPF_CGROUP_UDP6_SENDMSG:
2808 	case BPF_CGROUP_UDP4_RECVMSG:
2809 	case BPF_CGROUP_UDP6_RECVMSG:
2810 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2811 	case BPF_CGROUP_SOCK_OPS:
2812 		return BPF_PROG_TYPE_SOCK_OPS;
2813 	case BPF_CGROUP_DEVICE:
2814 		return BPF_PROG_TYPE_CGROUP_DEVICE;
2815 	case BPF_SK_MSG_VERDICT:
2816 		return BPF_PROG_TYPE_SK_MSG;
2817 	case BPF_SK_SKB_STREAM_PARSER:
2818 	case BPF_SK_SKB_STREAM_VERDICT:
2819 		return BPF_PROG_TYPE_SK_SKB;
2820 	case BPF_LIRC_MODE2:
2821 		return BPF_PROG_TYPE_LIRC_MODE2;
2822 	case BPF_FLOW_DISSECTOR:
2823 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
2824 	case BPF_CGROUP_SYSCTL:
2825 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
2826 	case BPF_CGROUP_GETSOCKOPT:
2827 	case BPF_CGROUP_SETSOCKOPT:
2828 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2829 	case BPF_TRACE_ITER:
2830 		return BPF_PROG_TYPE_TRACING;
2831 	default:
2832 		return BPF_PROG_TYPE_UNSPEC;
2833 	}
2834 }
2835 
2836 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2837 
2838 #define BPF_F_ATTACH_MASK \
2839 	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2840 
2841 static int bpf_prog_attach(const union bpf_attr *attr)
2842 {
2843 	enum bpf_prog_type ptype;
2844 	struct bpf_prog *prog;
2845 	int ret;
2846 
2847 	if (CHECK_ATTR(BPF_PROG_ATTACH))
2848 		return -EINVAL;
2849 
2850 	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
2851 		return -EINVAL;
2852 
2853 	ptype = attach_type_to_prog_type(attr->attach_type);
2854 	if (ptype == BPF_PROG_TYPE_UNSPEC)
2855 		return -EINVAL;
2856 
2857 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
2858 	if (IS_ERR(prog))
2859 		return PTR_ERR(prog);
2860 
2861 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
2862 		bpf_prog_put(prog);
2863 		return -EINVAL;
2864 	}
2865 
2866 	switch (ptype) {
2867 	case BPF_PROG_TYPE_SK_SKB:
2868 	case BPF_PROG_TYPE_SK_MSG:
2869 		ret = sock_map_get_from_fd(attr, prog);
2870 		break;
2871 	case BPF_PROG_TYPE_LIRC_MODE2:
2872 		ret = lirc_prog_attach(attr, prog);
2873 		break;
2874 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2875 		ret = netns_bpf_prog_attach(attr, prog);
2876 		break;
2877 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2878 	case BPF_PROG_TYPE_CGROUP_SKB:
2879 	case BPF_PROG_TYPE_CGROUP_SOCK:
2880 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2881 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2882 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2883 	case BPF_PROG_TYPE_SOCK_OPS:
2884 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
2885 		break;
2886 	default:
2887 		ret = -EINVAL;
2888 	}
2889 
2890 	if (ret)
2891 		bpf_prog_put(prog);
2892 	return ret;
2893 }
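
/* Illustrative sketch (fd names are placeholders): attaching a cgroup skb
 * program to a cgroup directory fd:
 *
 *	union bpf_attr attr = {};
 *	attr.target_fd     = cgroup_fd;	// e.g. open("/sys/fs/cgroup/...")
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */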
2894 
2895 #define BPF_PROG_DETACH_LAST_FIELD attach_type
2896 
2897 static int bpf_prog_detach(const union bpf_attr *attr)
2898 {
2899 	enum bpf_prog_type ptype;
2900 
2901 	if (CHECK_ATTR(BPF_PROG_DETACH))
2902 		return -EINVAL;
2903 
2904 	ptype = attach_type_to_prog_type(attr->attach_type);
2905 
2906 	switch (ptype) {
2907 	case BPF_PROG_TYPE_SK_MSG:
2908 	case BPF_PROG_TYPE_SK_SKB:
2909 		return sock_map_get_from_fd(attr, NULL);
2910 	case BPF_PROG_TYPE_LIRC_MODE2:
2911 		return lirc_prog_detach(attr);
2912 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2913 		if (!capable(CAP_NET_ADMIN))
2914 			return -EPERM;
2915 		return netns_bpf_prog_detach(attr);
2916 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2917 	case BPF_PROG_TYPE_CGROUP_SKB:
2918 	case BPF_PROG_TYPE_CGROUP_SOCK:
2919 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2920 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2921 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2922 	case BPF_PROG_TYPE_SOCK_OPS:
2923 		return cgroup_bpf_prog_detach(attr, ptype);
2924 	default:
2925 		return -EINVAL;
2926 	}
2927 }
2928 
2929 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
2930 
2931 static int bpf_prog_query(const union bpf_attr *attr,
2932 			  union bpf_attr __user *uattr)
2933 {
2934 	if (!capable(CAP_NET_ADMIN))
2935 		return -EPERM;
2936 	if (CHECK_ATTR(BPF_PROG_QUERY))
2937 		return -EINVAL;
2938 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
2939 		return -EINVAL;
2940 
2941 	switch (attr->query.attach_type) {
2942 	case BPF_CGROUP_INET_INGRESS:
2943 	case BPF_CGROUP_INET_EGRESS:
2944 	case BPF_CGROUP_INET_SOCK_CREATE:
2945 	case BPF_CGROUP_INET4_BIND:
2946 	case BPF_CGROUP_INET6_BIND:
2947 	case BPF_CGROUP_INET4_POST_BIND:
2948 	case BPF_CGROUP_INET6_POST_BIND:
2949 	case BPF_CGROUP_INET4_CONNECT:
2950 	case BPF_CGROUP_INET6_CONNECT:
2951 	case BPF_CGROUP_INET4_GETPEERNAME:
2952 	case BPF_CGROUP_INET6_GETPEERNAME:
2953 	case BPF_CGROUP_INET4_GETSOCKNAME:
2954 	case BPF_CGROUP_INET6_GETSOCKNAME:
2955 	case BPF_CGROUP_UDP4_SENDMSG:
2956 	case BPF_CGROUP_UDP6_SENDMSG:
2957 	case BPF_CGROUP_UDP4_RECVMSG:
2958 	case BPF_CGROUP_UDP6_RECVMSG:
2959 	case BPF_CGROUP_SOCK_OPS:
2960 	case BPF_CGROUP_DEVICE:
2961 	case BPF_CGROUP_SYSCTL:
2962 	case BPF_CGROUP_GETSOCKOPT:
2963 	case BPF_CGROUP_SETSOCKOPT:
2964 		return cgroup_bpf_prog_query(attr, uattr);
2965 	case BPF_LIRC_MODE2:
2966 		return lirc_prog_query(attr, uattr);
2967 	case BPF_FLOW_DISSECTOR:
2968 		return netns_bpf_prog_query(attr, uattr);
2969 	default:
2970 		return -EINVAL;
2971 	}
2972 }
2973 
2974 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
2975 
2976 static int bpf_prog_test_run(const union bpf_attr *attr,
2977 			     union bpf_attr __user *uattr)
2978 {
2979 	struct bpf_prog *prog;
2980 	int ret = -ENOTSUPP;
2981 
2982 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
2983 		return -EINVAL;
2984 
2985 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
2986 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
2987 		return -EINVAL;
2988 
2989 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
2990 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
2991 		return -EINVAL;
2992 
2993 	prog = bpf_prog_get(attr->test.prog_fd);
2994 	if (IS_ERR(prog))
2995 		return PTR_ERR(prog);
2996 
2997 	if (prog->aux->ops->test_run)
2998 		ret = prog->aux->ops->test_run(prog, attr, uattr);
2999 
3000 	bpf_prog_put(prog);
3001 	return ret;
3002 }
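
/* Illustrative sketch (buffer names are placeholders): exercising a program
 * with BPF_PROG_TEST_RUN and reading back its return value:
 *
 *	union bpf_attr attr = {};
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat       = 1;
 *	if (!syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
 *		printf("retval=%u duration=%uns\n",
 *		       attr.test.retval, attr.test.duration);
 */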
3003 
3004 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3005 
3006 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3007 			       union bpf_attr __user *uattr,
3008 			       struct idr *idr,
3009 			       spinlock_t *lock)
3010 {
3011 	u32 next_id = attr->start_id;
3012 	int err = 0;
3013 
3014 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3015 		return -EINVAL;
3016 
3017 	if (!capable(CAP_SYS_ADMIN))
3018 		return -EPERM;
3019 
3020 	next_id++;
3021 	spin_lock_bh(lock);
3022 	if (!idr_get_next(idr, &next_id))
3023 		err = -ENOENT;
3024 	spin_unlock_bh(lock);
3025 
3026 	if (!err)
3027 		err = put_user(next_id, &uattr->next_id);
3028 
3029 	return err;
3030 }
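
/* Illustrative sketch: user-space iteration over all program IDs via
 * BPF_PROG_GET_NEXT_ID (requires CAP_SYS_ADMIN, see above); the loop ends
 * with -ENOENT once the highest ID has been visited:
 *
 *	union bpf_attr attr = {};
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		__u32 id = attr.next_id;
 *		// ... e.g. BPF_PROG_GET_FD_BY_ID with attr.prog_id = id ...
 *		attr.start_id = id;
 *	}
 */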
3031 
3032 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3033 {
3034 	struct bpf_map *map;
3035 
3036 	spin_lock_bh(&map_idr_lock);
3037 again:
3038 	map = idr_get_next(&map_idr, id);
3039 	if (map) {
3040 		map = __bpf_map_inc_not_zero(map, false);
3041 		if (IS_ERR(map)) {
3042 			(*id)++;
3043 			goto again;
3044 		}
3045 	}
3046 	spin_unlock_bh(&map_idr_lock);
3047 
3048 	return map;
3049 }
3050 
3051 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3052 
3053 struct bpf_prog *bpf_prog_by_id(u32 id)
3054 {
3055 	struct bpf_prog *prog;
3056 
3057 	if (!id)
3058 		return ERR_PTR(-ENOENT);
3059 
3060 	spin_lock_bh(&prog_idr_lock);
3061 	prog = idr_find(&prog_idr, id);
3062 	if (prog)
3063 		prog = bpf_prog_inc_not_zero(prog);
3064 	else
3065 		prog = ERR_PTR(-ENOENT);
3066 	spin_unlock_bh(&prog_idr_lock);
3067 	return prog;
3068 }
3069 
3070 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3071 {
3072 	struct bpf_prog *prog;
3073 	u32 id = attr->prog_id;
3074 	int fd;
3075 
3076 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3077 		return -EINVAL;
3078 
3079 	if (!capable(CAP_SYS_ADMIN))
3080 		return -EPERM;
3081 
3082 	prog = bpf_prog_by_id(id);
3083 	if (IS_ERR(prog))
3084 		return PTR_ERR(prog);
3085 
3086 	fd = bpf_prog_new_fd(prog);
3087 	if (fd < 0)
3088 		bpf_prog_put(prog);
3089 
3090 	return fd;
3091 }
3092 
3093 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3094 
3095 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3096 {
3097 	struct bpf_map *map;
3098 	u32 id = attr->map_id;
3099 	int f_flags;
3100 	int fd;
3101 
3102 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3103 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3104 		return -EINVAL;
3105 
3106 	if (!capable(CAP_SYS_ADMIN))
3107 		return -EPERM;
3108 
3109 	f_flags = bpf_get_file_flag(attr->open_flags);
3110 	if (f_flags < 0)
3111 		return f_flags;
3112 
3113 	spin_lock_bh(&map_idr_lock);
3114 	map = idr_find(&map_idr, id);
3115 	if (map)
3116 		map = __bpf_map_inc_not_zero(map, true);
3117 	else
3118 		map = ERR_PTR(-ENOENT);
3119 	spin_unlock_bh(&map_idr_lock);
3120 
3121 	if (IS_ERR(map))
3122 		return PTR_ERR(map);
3123 
3124 	fd = bpf_map_new_fd(map, f_flags);
3125 	if (fd < 0)
3126 		bpf_map_put_with_uref(map);
3127 
3128 	return fd;
3129 }
3130 
3131 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3132 					      unsigned long addr, u32 *off,
3133 					      u32 *type)
3134 {
3135 	const struct bpf_map *map;
3136 	int i;
3137 
3138 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3139 		map = prog->aux->used_maps[i];
3140 		if (map == (void *)addr) {
3141 			*type = BPF_PSEUDO_MAP_FD;
3142 			return map;
3143 		}
3144 		if (!map->ops->map_direct_value_meta)
3145 			continue;
3146 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3147 			*type = BPF_PSEUDO_MAP_VALUE;
3148 			return map;
3149 		}
3150 	}
3151 
3152 	return NULL;
3153 }
3154 
3155 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
3156 {
3157 	const struct bpf_map *map;
3158 	struct bpf_insn *insns;
3159 	u32 off, type;
3160 	u64 imm;
3161 	int i;
3162 
3163 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3164 			GFP_USER);
3165 	if (!insns)
3166 		return insns;
3167 
3168 	for (i = 0; i < prog->len; i++) {
3169 		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
3170 			insns[i].code = BPF_JMP | BPF_CALL;
3171 			insns[i].imm = BPF_FUNC_tail_call;
3172 			/* fall-through */
3173 		}
3174 		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
3175 		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
3176 			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
3177 				insns[i].code = BPF_JMP | BPF_CALL;
3178 			if (!bpf_dump_raw_ok())
3179 				insns[i].imm = 0;
3180 			continue;
3181 		}
3182 
3183 		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
3184 			continue;
3185 
3186 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3187 		map = bpf_map_from_imm(prog, imm, &off, &type);
3188 		if (map) {
3189 			insns[i].src_reg = type;
3190 			insns[i].imm = map->id;
3191 			insns[i + 1].imm = off;
3192 			continue;
3193 		}
3194 	}
3195 
3196 	return insns;
3197 }
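
/* As a result of the rewrite above, a dumped ld_imm64 that originally held
 * a raw map pointer reaches user-space as
 *
 *	insns[i].src_reg = BPF_PSEUDO_MAP_FD or BPF_PSEUDO_MAP_VALUE;
 *	insns[i].imm     = map->id;
 *	insns[i + 1].imm = offset into the map value (0 for BPF_PSEUDO_MAP_FD);
 *
 * so raw map pointers are never exposed through instruction dumps.
 */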
3198 
3199 static int set_info_rec_size(struct bpf_prog_info *info)
3200 {
3201 	/*
3202 	 * Ensure info.*_rec_size is the same as the size the kernel expects,
3203 	 *
3204 	 * or
3205 	 *
3206 	 * only allow a zero *_rec_size if both _rec_size and _cnt are
3207 	 * zero.  In that case, the kernel will write the expected
3208 	 * _rec_size back into the info.
3209 	 */
3210 
3211 	if ((info->nr_func_info || info->func_info_rec_size) &&
3212 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3213 		return -EINVAL;
3214 
3215 	if ((info->nr_line_info || info->line_info_rec_size) &&
3216 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3217 		return -EINVAL;
3218 
3219 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3220 	    info->jited_line_info_rec_size != sizeof(__u64))
3221 		return -EINVAL;
3222 
3223 	info->func_info_rec_size = sizeof(struct bpf_func_info);
3224 	info->line_info_rec_size = sizeof(struct bpf_line_info);
3225 	info->jited_line_info_rec_size = sizeof(__u64);
3226 
3227 	return 0;
3228 }
3229 
3230 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
3231 				   const union bpf_attr *attr,
3232 				   union bpf_attr __user *uattr)
3233 {
3234 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3235 	struct bpf_prog_info info;
3236 	u32 info_len = attr->info.info_len;
3237 	struct bpf_prog_stats stats;
3238 	char __user *uinsns;
3239 	u32 ulen;
3240 	int err;
3241 
3242 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3243 	if (err)
3244 		return err;
3245 	info_len = min_t(u32, sizeof(info), info_len);
3246 
3247 	memset(&info, 0, sizeof(info));
3248 	if (copy_from_user(&info, uinfo, info_len))
3249 		return -EFAULT;
3250 
3251 	info.type = prog->type;
3252 	info.id = prog->aux->id;
3253 	info.load_time = prog->aux->load_time;
3254 	info.created_by_uid = from_kuid_munged(current_user_ns(),
3255 					       prog->aux->user->uid);
3256 	info.gpl_compatible = prog->gpl_compatible;
3257 
3258 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3259 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3260 
3261 	ulen = info.nr_map_ids;
3262 	info.nr_map_ids = prog->aux->used_map_cnt;
3263 	ulen = min_t(u32, info.nr_map_ids, ulen);
3264 	if (ulen) {
3265 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3266 		u32 i;
3267 
3268 		for (i = 0; i < ulen; i++)
3269 			if (put_user(prog->aux->used_maps[i]->id,
3270 				     &user_map_ids[i]))
3271 				return -EFAULT;
3272 	}
3273 
3274 	err = set_info_rec_size(&info);
3275 	if (err)
3276 		return err;
3277 
3278 	bpf_prog_get_stats(prog, &stats);
3279 	info.run_time_ns = stats.nsecs;
3280 	info.run_cnt = stats.cnt;
3281 
3282 	if (!bpf_capable()) {
3283 		info.jited_prog_len = 0;
3284 		info.xlated_prog_len = 0;
3285 		info.nr_jited_ksyms = 0;
3286 		info.nr_jited_func_lens = 0;
3287 		info.nr_func_info = 0;
3288 		info.nr_line_info = 0;
3289 		info.nr_jited_line_info = 0;
3290 		goto done;
3291 	}
3292 
3293 	ulen = info.xlated_prog_len;
3294 	info.xlated_prog_len = bpf_prog_insn_size(prog);
3295 	if (info.xlated_prog_len && ulen) {
3296 		struct bpf_insn *insns_sanitized;
3297 		bool fault;
3298 
3299 		if (prog->blinded && !bpf_dump_raw_ok()) {
3300 			info.xlated_prog_insns = 0;
3301 			goto done;
3302 		}
3303 		insns_sanitized = bpf_insn_prepare_dump(prog);
3304 		if (!insns_sanitized)
3305 			return -ENOMEM;
3306 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3307 		ulen = min_t(u32, info.xlated_prog_len, ulen);
3308 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3309 		kfree(insns_sanitized);
3310 		if (fault)
3311 			return -EFAULT;
3312 	}
3313 
3314 	if (bpf_prog_is_dev_bound(prog->aux)) {
3315 		err = bpf_prog_offload_info_fill(&info, prog);
3316 		if (err)
3317 			return err;
3318 		goto done;
3319 	}
3320 
3321 	/* NOTE: the following code is supposed to be skipped for offload.
3322 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3323 	 * for offload.
3324 	 */
3325 	ulen = info.jited_prog_len;
3326 	if (prog->aux->func_cnt) {
3327 		u32 i;
3328 
3329 		info.jited_prog_len = 0;
3330 		for (i = 0; i < prog->aux->func_cnt; i++)
3331 			info.jited_prog_len += prog->aux->func[i]->jited_len;
3332 	} else {
3333 		info.jited_prog_len = prog->jited_len;
3334 	}
3335 
3336 	if (info.jited_prog_len && ulen) {
3337 		if (bpf_dump_raw_ok()) {
3338 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
3339 			ulen = min_t(u32, info.jited_prog_len, ulen);
3340 
3341 			/* for multi-function programs, copy the JITed
3342 			 * instructions for all the functions
3343 			 */
3344 			if (prog->aux->func_cnt) {
3345 				u32 len, free, i;
3346 				u8 *img;
3347 
3348 				free = ulen;
3349 				for (i = 0; i < prog->aux->func_cnt; i++) {
3350 					len = prog->aux->func[i]->jited_len;
3351 					len = min_t(u32, len, free);
3352 					img = (u8 *) prog->aux->func[i]->bpf_func;
3353 					if (copy_to_user(uinsns, img, len))
3354 						return -EFAULT;
3355 					uinsns += len;
3356 					free -= len;
3357 					if (!free)
3358 						break;
3359 				}
3360 			} else {
3361 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
3362 					return -EFAULT;
3363 			}
3364 		} else {
3365 			info.jited_prog_insns = 0;
3366 		}
3367 	}
3368 
3369 	ulen = info.nr_jited_ksyms;
3370 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3371 	if (ulen) {
3372 		if (bpf_dump_raw_ok()) {
3373 			unsigned long ksym_addr;
3374 			u64 __user *user_ksyms;
3375 			u32 i;
3376 
3377 			/* copy the address of the kernel symbol
3378 			 * corresponding to each function
3379 			 */
3380 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3381 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3382 			if (prog->aux->func_cnt) {
3383 				for (i = 0; i < ulen; i++) {
3384 					ksym_addr = (unsigned long)
3385 						prog->aux->func[i]->bpf_func;
3386 					if (put_user((u64) ksym_addr,
3387 						     &user_ksyms[i]))
3388 						return -EFAULT;
3389 				}
3390 			} else {
3391 				ksym_addr = (unsigned long) prog->bpf_func;
3392 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
3393 					return -EFAULT;
3394 			}
3395 		} else {
3396 			info.jited_ksyms = 0;
3397 		}
3398 	}
3399 
3400 	ulen = info.nr_jited_func_lens;
3401 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3402 	if (ulen) {
3403 		if (bpf_dump_raw_ok()) {
3404 			u32 __user *user_lens;
3405 			u32 func_len, i;
3406 
3407 			/* copy the JITed image lengths for each function */
3408 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3409 			user_lens = u64_to_user_ptr(info.jited_func_lens);
3410 			if (prog->aux->func_cnt) {
3411 				for (i = 0; i < ulen; i++) {
3412 					func_len =
3413 						prog->aux->func[i]->jited_len;
3414 					if (put_user(func_len, &user_lens[i]))
3415 						return -EFAULT;
3416 				}
3417 			} else {
3418 				func_len = prog->jited_len;
3419 				if (put_user(func_len, &user_lens[0]))
3420 					return -EFAULT;
3421 			}
3422 		} else {
3423 			info.jited_func_lens = 0;
3424 		}
3425 	}
3426 
3427 	if (prog->aux->btf)
3428 		info.btf_id = btf_id(prog->aux->btf);
3429 
3430 	ulen = info.nr_func_info;
3431 	info.nr_func_info = prog->aux->func_info_cnt;
3432 	if (info.nr_func_info && ulen) {
3433 		char __user *user_finfo;
3434 
3435 		user_finfo = u64_to_user_ptr(info.func_info);
3436 		ulen = min_t(u32, info.nr_func_info, ulen);
3437 		if (copy_to_user(user_finfo, prog->aux->func_info,
3438 				 info.func_info_rec_size * ulen))
3439 			return -EFAULT;
3440 	}
3441 
3442 	ulen = info.nr_line_info;
3443 	info.nr_line_info = prog->aux->nr_linfo;
3444 	if (info.nr_line_info && ulen) {
3445 		__u8 __user *user_linfo;
3446 
3447 		user_linfo = u64_to_user_ptr(info.line_info);
3448 		ulen = min_t(u32, info.nr_line_info, ulen);
3449 		if (copy_to_user(user_linfo, prog->aux->linfo,
3450 				 info.line_info_rec_size * ulen))
3451 			return -EFAULT;
3452 	}
3453 
3454 	ulen = info.nr_jited_line_info;
3455 	if (prog->aux->jited_linfo)
3456 		info.nr_jited_line_info = prog->aux->nr_linfo;
3457 	else
3458 		info.nr_jited_line_info = 0;
3459 	if (info.nr_jited_line_info && ulen) {
3460 		if (bpf_dump_raw_ok()) {
3461 			__u64 __user *user_linfo;
3462 			u32 i;
3463 
3464 			user_linfo = u64_to_user_ptr(info.jited_line_info);
3465 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
3466 			for (i = 0; i < ulen; i++) {
3467 				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3468 					     &user_linfo[i]))
3469 					return -EFAULT;
3470 			}
3471 		} else {
3472 			info.jited_line_info = 0;
3473 		}
3474 	}
3475 
3476 	ulen = info.nr_prog_tags;
3477 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3478 	if (ulen) {
3479 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3480 		u32 i;
3481 
3482 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
3483 		ulen = min_t(u32, info.nr_prog_tags, ulen);
3484 		if (prog->aux->func_cnt) {
3485 			for (i = 0; i < ulen; i++) {
3486 				if (copy_to_user(user_prog_tags[i],
3487 						 prog->aux->func[i]->tag,
3488 						 BPF_TAG_SIZE))
3489 					return -EFAULT;
3490 			}
3491 		} else {
3492 			if (copy_to_user(user_prog_tags[0],
3493 					 prog->tag, BPF_TAG_SIZE))
3494 				return -EFAULT;
3495 		}
3496 	}
3497 
3498 done:
3499 	if (copy_to_user(uinfo, &info, info_len) ||
3500 	    put_user(info_len, &uattr->info.info_len))
3501 		return -EFAULT;
3502 
3503 	return 0;
3504 }
3505 
3506 static int bpf_map_get_info_by_fd(struct bpf_map *map,
3507 				  const union bpf_attr *attr,
3508 				  union bpf_attr __user *uattr)
3509 {
3510 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3511 	struct bpf_map_info info;
3512 	u32 info_len = attr->info.info_len;
3513 	int err;
3514 
3515 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3516 	if (err)
3517 		return err;
3518 	info_len = min_t(u32, sizeof(info), info_len);
3519 
3520 	memset(&info, 0, sizeof(info));
3521 	info.type = map->map_type;
3522 	info.id = map->id;
3523 	info.key_size = map->key_size;
3524 	info.value_size = map->value_size;
3525 	info.max_entries = map->max_entries;
3526 	info.map_flags = map->map_flags;
3527 	memcpy(info.name, map->name, sizeof(map->name));
3528 
3529 	if (map->btf) {
3530 		info.btf_id = btf_id(map->btf);
3531 		info.btf_key_type_id = map->btf_key_type_id;
3532 		info.btf_value_type_id = map->btf_value_type_id;
3533 	}
3534 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3535 
3536 	if (bpf_map_is_dev_bound(map)) {
3537 		err = bpf_map_offload_info_fill(&info, map);
3538 		if (err)
3539 			return err;
3540 	}
3541 
3542 	if (copy_to_user(uinfo, &info, info_len) ||
3543 	    put_user(info_len, &uattr->info.info_len))
3544 		return -EFAULT;
3545 
3546 	return 0;
3547 }
3548 
3549 static int bpf_btf_get_info_by_fd(struct btf *btf,
3550 				  const union bpf_attr *attr,
3551 				  union bpf_attr __user *uattr)
3552 {
3553 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3554 	u32 info_len = attr->info.info_len;
3555 	int err;
3556 
3557 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3558 	if (err)
3559 		return err;
3560 
3561 	return btf_get_info_by_fd(btf, attr, uattr);
3562 }
3563 
3564 static int bpf_link_get_info_by_fd(struct bpf_link *link,
3565 				  const union bpf_attr *attr,
3566 				  union bpf_attr __user *uattr)
3567 {
3568 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3569 	struct bpf_link_info info;
3570 	u32 info_len = attr->info.info_len;
3571 	int err;
3572 
3573 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3574 	if (err)
3575 		return err;
3576 	info_len = min_t(u32, sizeof(info), info_len);
3577 
3578 	memset(&info, 0, sizeof(info));
3579 	if (copy_from_user(&info, uinfo, info_len))
3580 		return -EFAULT;
3581 
3582 	info.type = link->type;
3583 	info.id = link->id;
3584 	info.prog_id = link->prog->aux->id;
3585 
3586 	if (link->ops->fill_link_info) {
3587 		err = link->ops->fill_link_info(link, &info);
3588 		if (err)
3589 			return err;
3590 	}
3591 
3592 	if (copy_to_user(uinfo, &info, info_len) ||
3593 	    put_user(info_len, &uattr->info.info_len))
3594 		return -EFAULT;
3595 
3596 	return 0;
3597 }
3598 
3600 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3601 
3602 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3603 				  union bpf_attr __user *uattr)
3604 {
3605 	int ufd = attr->info.bpf_fd;
3606 	struct fd f;
3607 	int err;
3608 
3609 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3610 		return -EINVAL;
3611 
3612 	f = fdget(ufd);
3613 	if (!f.file)
3614 		return -EBADFD;
3615 
3616 	if (f.file->f_op == &bpf_prog_fops)
3617 		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
3618 					      uattr);
3619 	else if (f.file->f_op == &bpf_map_fops)
3620 		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
3621 					     uattr);
3622 	else if (f.file->f_op == &btf_fops)
3623 		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
3624 	else if (f.file->f_op == &bpf_link_fops)
3625 		err = bpf_link_get_info_by_fd(f.file->private_data,
3626 					      attr, uattr);
3627 	else
3628 		err = -EINVAL;
3629 
3630 	fdput(f);
3631 	return err;
3632 }
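
/* Illustrative sketch (fd name is a placeholder): querying program metadata
 * through BPF_OBJ_GET_INFO_BY_FD:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		printf("id=%u name=%s\n", info.id, info.name);
 */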
3633 
3634 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3635 
3636 static int bpf_btf_load(const union bpf_attr *attr)
3637 {
3638 	if (CHECK_ATTR(BPF_BTF_LOAD))
3639 		return -EINVAL;
3640 
3641 	if (!bpf_capable())
3642 		return -EPERM;
3643 
3644 	return btf_new_fd(attr);
3645 }
3646 
3647 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3648 
3649 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3650 {
3651 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3652 		return -EINVAL;
3653 
3654 	if (!capable(CAP_SYS_ADMIN))
3655 		return -EPERM;
3656 
3657 	return btf_get_fd_by_id(attr->btf_id);
3658 }
3659 
3660 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3661 				    union bpf_attr __user *uattr,
3662 				    u32 prog_id, u32 fd_type,
3663 				    const char *buf, u64 probe_offset,
3664 				    u64 probe_addr)
3665 {
3666 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3667 	u32 len = buf ? strlen(buf) : 0, input_len;
3668 	int err = 0;
3669 
3670 	if (put_user(len, &uattr->task_fd_query.buf_len))
3671 		return -EFAULT;
3672 	input_len = attr->task_fd_query.buf_len;
3673 	if (input_len && ubuf) {
3674 		if (!len) {
3675 			/* nothing to copy, just make ubuf NULL terminated */
3676 			char zero = '\0';
3677 
3678 			if (put_user(zero, ubuf))
3679 				return -EFAULT;
3680 		} else if (input_len >= len + 1) {
3681 			/* ubuf can hold the string with NULL terminator */
3682 			if (copy_to_user(ubuf, buf, len + 1))
3683 				return -EFAULT;
3684 		} else {
3685 			/* ubuf cannot hold the string with NULL terminator,
3686 			 * do a partial copy with NULL terminator.
3687 			 */
3688 			char zero = '\0';
3689 
3690 			err = -ENOSPC;
3691 			if (copy_to_user(ubuf, buf, input_len - 1))
3692 				return -EFAULT;
3693 			if (put_user(zero, ubuf + input_len - 1))
3694 				return -EFAULT;
3695 		}
3696 	}
3697 
3698 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3699 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3700 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3701 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3702 		return -EFAULT;
3703 
3704 	return err;
3705 }
3706 
3707 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3708 
3709 static int bpf_task_fd_query(const union bpf_attr *attr,
3710 			     union bpf_attr __user *uattr)
3711 {
3712 	pid_t pid = attr->task_fd_query.pid;
3713 	u32 fd = attr->task_fd_query.fd;
3714 	const struct perf_event *event;
3715 	struct files_struct *files;
3716 	struct task_struct *task;
3717 	struct file *file;
3718 	int err;
3719 
3720 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3721 		return -EINVAL;
3722 
3723 	if (!capable(CAP_SYS_ADMIN))
3724 		return -EPERM;
3725 
3726 	if (attr->task_fd_query.flags != 0)
3727 		return -EINVAL;
3728 
3729 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3730 	if (!task)
3731 		return -ENOENT;
3732 
3733 	files = get_files_struct(task);
3734 	put_task_struct(task);
3735 	if (!files)
3736 		return -ENOENT;
3737 
3738 	err = 0;
3739 	spin_lock(&files->file_lock);
3740 	file = fcheck_files(files, fd);
3741 	if (!file)
3742 		err = -EBADF;
3743 	else
3744 		get_file(file);
3745 	spin_unlock(&files->file_lock);
3746 	put_files_struct(files);
3747 
3748 	if (err)
3749 		goto out;
3750 
3751 	if (file->f_op == &bpf_link_fops) {
3752 		struct bpf_link *link = file->private_data;
3753 
3754 		if (link->ops == &bpf_raw_tp_link_lops) {
3755 			struct bpf_raw_tp_link *raw_tp =
3756 				container_of(link, struct bpf_raw_tp_link, link);
3757 			struct bpf_raw_event_map *btp = raw_tp->btp;
3758 
3759 			err = bpf_task_fd_query_copy(attr, uattr,
3760 						     raw_tp->link.prog->aux->id,
3761 						     BPF_FD_TYPE_RAW_TRACEPOINT,
3762 						     btp->tp->name, 0, 0);
3763 			goto put_file;
3764 		}
3765 		goto out_not_supp;
3766 	}
3767 
3768 	event = perf_get_event(file);
3769 	if (!IS_ERR(event)) {
3770 		u64 probe_offset, probe_addr;
3771 		u32 prog_id, fd_type;
3772 		const char *buf;
3773 
3774 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3775 					      &buf, &probe_offset,
3776 					      &probe_addr);
3777 		if (!err)
3778 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3779 						     fd_type, buf,
3780 						     probe_offset,
3781 						     probe_addr);
3782 		goto put_file;
3783 	}
3784 
3785 out_not_supp:
3786 	err = -ENOTSUPP;
3787 put_file:
3788 	fput(file);
3789 out:
3790 	return err;
3791 }
3792 
3793 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3794 
3795 #define BPF_DO_BATCH(fn)			\
3796 	do {					\
3797 		if (!fn) {			\
3798 			err = -ENOTSUPP;	\
3799 			goto err_put;		\
3800 		}				\
3801 		err = fn(map, attr, uattr);	\
3802 	} while (0)
3803 
3804 static int bpf_map_do_batch(const union bpf_attr *attr,
3805 			    union bpf_attr __user *uattr,
3806 			    int cmd)
3807 {
3808 	struct bpf_map *map;
3809 	int err, ufd;
3810 	struct fd f;
3811 
3812 	if (CHECK_ATTR(BPF_MAP_BATCH))
3813 		return -EINVAL;
3814 
3815 	ufd = attr->batch.map_fd;
3816 	f = fdget(ufd);
3817 	map = __bpf_map_get(f);
3818 	if (IS_ERR(map))
3819 		return PTR_ERR(map);
3820 
3821 	if ((cmd == BPF_MAP_LOOKUP_BATCH ||
3822 	     cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) &&
3823 	    !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
3824 		err = -EPERM;
3825 		goto err_put;
3826 	}
3827 
3828 	if (cmd != BPF_MAP_LOOKUP_BATCH &&
3829 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
3830 		err = -EPERM;
3831 		goto err_put;
3832 	}
3833 
3834 	if (cmd == BPF_MAP_LOOKUP_BATCH)
3835 		BPF_DO_BATCH(map->ops->map_lookup_batch);
3836 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
3837 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
3838 	else if (cmd == BPF_MAP_UPDATE_BATCH)
3839 		BPF_DO_BATCH(map->ops->map_update_batch);
3840 	else
3841 		BPF_DO_BATCH(map->ops->map_delete_batch);
3842 
3843 err_put:
3844 	fdput(f);
3845 	return err;
3846 }
3847 
3848 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3849 {
3850 	if (attr->link_create.attach_type == BPF_TRACE_ITER &&
3851 	    prog->expected_attach_type == BPF_TRACE_ITER)
3852 		return bpf_iter_link_attach(attr, prog);
3853 
3854 	return -EINVAL;
3855 }
3856 
3857 #define BPF_LINK_CREATE_LAST_FIELD link_create.flags
3858 static int link_create(union bpf_attr *attr)
3859 {
3860 	enum bpf_prog_type ptype;
3861 	struct bpf_prog *prog;
3862 	int ret;
3863 
3864 	if (CHECK_ATTR(BPF_LINK_CREATE))
3865 		return -EINVAL;
3866 
3867 	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
3868 	if (ptype == BPF_PROG_TYPE_UNSPEC)
3869 		return -EINVAL;
3870 
3871 	prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
3872 	if (IS_ERR(prog))
3873 		return PTR_ERR(prog);
3874 
3875 	ret = bpf_prog_attach_check_attach_type(prog,
3876 						attr->link_create.attach_type);
3877 	if (ret)
3878 		goto err_out;
3879 
3880 	switch (ptype) {
3881 	case BPF_PROG_TYPE_CGROUP_SKB:
3882 	case BPF_PROG_TYPE_CGROUP_SOCK:
3883 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3884 	case BPF_PROG_TYPE_SOCK_OPS:
3885 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3886 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3887 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3888 		ret = cgroup_bpf_link_attach(attr, prog);
3889 		break;
3890 	case BPF_PROG_TYPE_TRACING:
3891 		ret = tracing_bpf_link_attach(attr, prog);
3892 		break;
3893 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3894 		ret = netns_bpf_link_create(attr, prog);
3895 		break;
3896 	default:
3897 		ret = -EINVAL;
3898 	}
3899 
3900 err_out:
3901 	if (ret < 0)
3902 		bpf_prog_put(prog);
3903 	return ret;
3904 }
3905 
3906 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
3907 
3908 static int link_update(union bpf_attr *attr)
3909 {
3910 	struct bpf_prog *old_prog = NULL, *new_prog;
3911 	struct bpf_link *link;
3912 	u32 flags;
3913 	int ret;
3914 
3915 	if (CHECK_ATTR(BPF_LINK_UPDATE))
3916 		return -EINVAL;
3917 
3918 	flags = attr->link_update.flags;
3919 	if (flags & ~BPF_F_REPLACE)
3920 		return -EINVAL;
3921 
3922 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
3923 	if (IS_ERR(link))
3924 		return PTR_ERR(link);
3925 
3926 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
3927 	if (IS_ERR(new_prog)) {
3928 		ret = PTR_ERR(new_prog);
3929 		goto out_put_link;
3930 	}
3931 
3932 	if (flags & BPF_F_REPLACE) {
3933 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
3934 		if (IS_ERR(old_prog)) {
3935 			ret = PTR_ERR(old_prog);
3936 			old_prog = NULL;
3937 			goto out_put_progs;
3938 		}
3939 	} else if (attr->link_update.old_prog_fd) {
3940 		ret = -EINVAL;
3941 		goto out_put_progs;
3942 	}
3943 
3944 	if (link->ops->update_prog)
3945 		ret = link->ops->update_prog(link, new_prog, old_prog);
3946 	else
3947 		ret = -EINVAL;
3948 
3949 out_put_progs:
3950 	if (old_prog)
3951 		bpf_prog_put(old_prog);
3952 	if (ret)
3953 		bpf_prog_put(new_prog);
3954 out_put_link:
3955 	bpf_link_put(link);
3956 	return ret;
3957 }
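
/* Illustrative sketch (fd names are placeholders): atomically swapping the
 * program behind a link while verifying the currently attached one:
 *
 *	union bpf_attr attr = {};
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags       = BPF_F_REPLACE;
 *	syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */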
3958 
3959 static int bpf_link_inc_not_zero(struct bpf_link *link)
3960 {
3961 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
3962 }
3963 
#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd, err;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&link_idr_lock);
	link = idr_find(&link_idr, id);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	if (link) {
		if (link->id)
			err = bpf_link_inc_not_zero(link);
		else
			err = -EAGAIN;
	} else {
		err = -ENOENT;
	}
	spin_unlock_bh(&link_idr_lock);

	if (err)
		return err;

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

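/* BPF run-time statistics are reference counted through anonymous
 * "bpf-stats" fds: creating one (see bpf_enable_runtime_stats())
 * increments the bpf_stats_enabled static key, and the final fput()
 * decrements it again here, all under bpf_stats_enabled_mutex.
 */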
DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

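/* Allocate the fd that keeps run-time stats enabled for its lifetime.
 * The INT_MAX / 2 cap only guards the static key's counter against
 * overflow from an absurd number of concurrent users.
 */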
static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

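/* BPF_ENABLE_STATS command (CAP_SYS_ADMIN only). User-space sketch
 * (illustrative only, not part of this file):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.enable_stats.type = BPF_STATS_RUN_TIME;
 *	stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
 *	... per-program run_time_ns/run_cnt accounting stays enabled
 *	until stats_fd (and any other such fd) is closed ...
 */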
#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

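/* BPF_ITER_CREATE command: create a readable fd from an attached
 * iterator link (see tracing_bpf_link_attach() above). The reference
 * taken on the link fd here is only held across bpf_iter_new_fd(),
 * which pins the link from the new file itself.
 */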
#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

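/* bpf(2) syscall entry point: verify that any attr bytes beyond what
 * this kernel knows about are zero, copy the attr in, consult the LSM
 * and dispatch on cmd. When kernel.unprivileged_bpf_disabled is set,
 * callers without bpf_capable() are rejected outright.
 *
 * Minimal user-space wrapper (sketch, assuming <sys/syscall.h> and
 * <linux/bpf.h>; glibc provides no wrapper for bpf(2)):
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */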
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}