xref: /openbmc/linux/kernel/bpf/syscall.c (revision 711aab1d)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 #include <linux/bpf.h>
13 #include <linux/bpf_trace.h>
14 #include <linux/syscalls.h>
15 #include <linux/slab.h>
16 #include <linux/sched/signal.h>
17 #include <linux/vmalloc.h>
18 #include <linux/mmzone.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/file.h>
21 #include <linux/license.h>
22 #include <linux/filter.h>
23 #include <linux/version.h>
24 #include <linux/kernel.h>
25 #include <linux/idr.h>
26 
27 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
28 			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
29 			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
30 			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
31 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
32 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
33 
34 DEFINE_PER_CPU(int, bpf_prog_active);
35 static DEFINE_IDR(prog_idr);
36 static DEFINE_SPINLOCK(prog_idr_lock);
37 static DEFINE_IDR(map_idr);
38 static DEFINE_SPINLOCK(map_idr_lock);
39 
40 int sysctl_unprivileged_bpf_disabled __read_mostly;
41 
42 static const struct bpf_map_ops * const bpf_map_types[] = {
43 #define BPF_PROG_TYPE(_id, _ops)
44 #define BPF_MAP_TYPE(_id, _ops) \
45 	[_id] = &_ops,
46 #include <linux/bpf_types.h>
47 #undef BPF_PROG_TYPE
48 #undef BPF_MAP_TYPE
49 };
50 
51 /*
52  * If we're handed a bigger struct than we know of, ensure all the unknown bits
53  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
54  * we don't know about yet.
55  *
56  * There is a ToCToU between this function call and the following
57  * copy_from_user() call. However, this is not a concern since this check
58  * only exists to future-proof against unknown trailing bits.
59  */
60 static int check_uarg_tail_zero(void __user *uaddr,
61 				size_t expected_size,
62 				size_t actual_size)
63 {
64 	unsigned char __user *addr;
65 	unsigned char __user *end;
66 	unsigned char val;
67 	int err;
68 
69 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
70 		return -E2BIG;
71 
72 	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
73 		return -EFAULT;
74 
75 	if (actual_size <= expected_size)
76 		return 0;
77 
78 	addr = uaddr + expected_size;
79 	end  = uaddr + actual_size;
80 
81 	for (; addr < end; addr++) {
82 		err = get_user(val, addr);
83 		if (err)
84 			return err;
85 		if (val)
86 			return -E2BIG;
87 	}
88 
89 	return 0;
90 }
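/*
 * The user-visible contract being enforced: a binary built against a newer
 * uapi header may pass a larger union bpf_attr, but every byte past the
 * size this kernel knows about must be zero.  A rough user-space sketch
 * (illustrative only; assumes the uapi <linux/bpf.h> and a raw syscall(2)
 * wrapper):
 *
 *	union bpf_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));		// zero any trailing bytes
 *	attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *	attr.key_size    = 4;
 *	attr.value_size  = 8;
 *	attr.max_entries = 16;
 *	// accepted even if sizeof(attr) is larger than this kernel expects
 *	syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// setting a field this kernel does not know about would instead
 *	// fail the command with -E2BIG
 */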
91 
92 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
93 {
94 	struct bpf_map *map;
95 
96 	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
97 	    !bpf_map_types[attr->map_type])
98 		return ERR_PTR(-EINVAL);
99 
100 	map = bpf_map_types[attr->map_type]->map_alloc(attr);
101 	if (IS_ERR(map))
102 		return map;
103 	map->ops = bpf_map_types[attr->map_type];
104 	map->map_type = attr->map_type;
105 	return map;
106 }
107 
108 void *bpf_map_area_alloc(size_t size, int numa_node)
109 {
110 	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
111 	 * trigger under memory pressure; we really just want to
112 	 * fail instead.
113 	 */
114 	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
115 	void *area;
116 
117 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
118 		area = kmalloc_node(size, GFP_USER | flags, numa_node);
119 		if (area != NULL)
120 			return area;
121 	}
122 
123 	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
124 					   __builtin_return_address(0));
125 }
126 
127 void bpf_map_area_free(void *area)
128 {
129 	kvfree(area);
130 }
131 
132 int bpf_map_precharge_memlock(u32 pages)
133 {
134 	struct user_struct *user = get_current_user();
135 	unsigned long memlock_limit, cur;
136 
137 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
138 	cur = atomic_long_read(&user->locked_vm);
139 	free_uid(user);
140 	if (cur + pages > memlock_limit)
141 		return -EPERM;
142 	return 0;
143 }
144 
145 static int bpf_map_charge_memlock(struct bpf_map *map)
146 {
147 	struct user_struct *user = get_current_user();
148 	unsigned long memlock_limit;
149 
150 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
151 
152 	atomic_long_add(map->pages, &user->locked_vm);
153 
154 	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
155 		atomic_long_sub(map->pages, &user->locked_vm);
156 		free_uid(user);
157 		return -EPERM;
158 	}
159 	map->user = user;
160 	return 0;
161 }
162 
163 static void bpf_map_uncharge_memlock(struct bpf_map *map)
164 {
165 	struct user_struct *user = map->user;
166 
167 	atomic_long_sub(map->pages, &user->locked_vm);
168 	free_uid(user);
169 }
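/*
 * Map memory is charged against the creating user's RLIMIT_MEMLOCK (in
 * pages), so a user-space loader that creates large maps typically raises
 * that limit first.  A rough sketch of the user-space side (illustrative
 * only; assumes <sys/resource.h>, and raising the hard limit needs
 * CAP_SYS_RESOURCE):
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	if (setrlimit(RLIMIT_MEMLOCK, &r))
 *		perror("setrlimit(RLIMIT_MEMLOCK)");
 */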
170 
171 static int bpf_map_alloc_id(struct bpf_map *map)
172 {
173 	int id;
174 
175 	spin_lock_bh(&map_idr_lock);
176 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
177 	if (id > 0)
178 		map->id = id;
179 	spin_unlock_bh(&map_idr_lock);
180 
181 	if (WARN_ON_ONCE(!id))
182 		return -ENOSPC;
183 
184 	return id > 0 ? 0 : id;
185 }
186 
187 static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
188 {
189 	if (do_idr_lock)
190 		spin_lock_bh(&map_idr_lock);
191 	else
192 		__acquire(&map_idr_lock);
193 
194 	idr_remove(&map_idr, map->id);
195 
196 	if (do_idr_lock)
197 		spin_unlock_bh(&map_idr_lock);
198 	else
199 		__release(&map_idr_lock);
200 }
201 
202 /* called from workqueue */
203 static void bpf_map_free_deferred(struct work_struct *work)
204 {
205 	struct bpf_map *map = container_of(work, struct bpf_map, work);
206 
207 	bpf_map_uncharge_memlock(map);
208 	/* implementation dependent freeing */
209 	map->ops->map_free(map);
210 }
211 
212 static void bpf_map_put_uref(struct bpf_map *map)
213 {
214 	if (atomic_dec_and_test(&map->usercnt)) {
215 		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
216 			bpf_fd_array_map_clear(map);
217 	}
218 }
219 
220 /* decrement map refcnt and schedule it for freeing via workqueue
221  * (underlying map implementation ops->map_free() might sleep)
222  */
223 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
224 {
225 	if (atomic_dec_and_test(&map->refcnt)) {
226 		/* bpf_map_free_id() must be called first */
227 		bpf_map_free_id(map, do_idr_lock);
228 		INIT_WORK(&map->work, bpf_map_free_deferred);
229 		schedule_work(&map->work);
230 	}
231 }
232 
233 void bpf_map_put(struct bpf_map *map)
234 {
235 	__bpf_map_put(map, true);
236 }
237 
238 void bpf_map_put_with_uref(struct bpf_map *map)
239 {
240 	bpf_map_put_uref(map);
241 	bpf_map_put(map);
242 }
243 
244 static int bpf_map_release(struct inode *inode, struct file *filp)
245 {
246 	struct bpf_map *map = filp->private_data;
247 
248 	if (map->ops->map_release)
249 		map->ops->map_release(map, filp);
250 
251 	bpf_map_put_with_uref(map);
252 	return 0;
253 }
254 
255 #ifdef CONFIG_PROC_FS
256 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
257 {
258 	const struct bpf_map *map = filp->private_data;
259 	const struct bpf_array *array;
260 	u32 owner_prog_type = 0;
261 	u32 owner_jited = 0;
262 
263 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
264 		array = container_of(map, struct bpf_array, map);
265 		owner_prog_type = array->owner_prog_type;
266 		owner_jited = array->owner_jited;
267 	}
268 
269 	seq_printf(m,
270 		   "map_type:\t%u\n"
271 		   "key_size:\t%u\n"
272 		   "value_size:\t%u\n"
273 		   "max_entries:\t%u\n"
274 		   "map_flags:\t%#x\n"
275 		   "memlock:\t%llu\n",
276 		   map->map_type,
277 		   map->key_size,
278 		   map->value_size,
279 		   map->max_entries,
280 		   map->map_flags,
281 		   map->pages * 1ULL << PAGE_SHIFT);
282 
283 	if (owner_prog_type) {
284 		seq_printf(m, "owner_prog_type:\t%u\n",
285 			   owner_prog_type);
286 		seq_printf(m, "owner_jited:\t%u\n",
287 			   owner_jited);
288 	}
289 }
290 #endif
291 
292 static const struct file_operations bpf_map_fops = {
293 #ifdef CONFIG_PROC_FS
294 	.show_fdinfo	= bpf_map_show_fdinfo,
295 #endif
296 	.release	= bpf_map_release,
297 };
298 
299 int bpf_map_new_fd(struct bpf_map *map)
300 {
301 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
302 				O_RDWR | O_CLOEXEC);
303 }
304 
305 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
306 #define CHECK_ATTR(CMD) \
307 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
308 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
309 		   sizeof(*attr) - \
310 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
311 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
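/*
 * For example, with BPF_MAP_CREATE_LAST_FIELD defined as numa_node below,
 * CHECK_ATTR(BPF_MAP_CREATE) expands (roughly) to:
 *
 *	memchr_inv((void *)&attr->numa_node + sizeof(attr->numa_node), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, numa_node) -
 *		   sizeof(attr->numa_node)) != NULL
 *
 * i.e. it evaluates to true, and the command is rejected with -EINVAL,
 * whenever any byte after the command's last known field is non-zero.
 */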
312 
313 #define BPF_MAP_CREATE_LAST_FIELD numa_node
314 /* called via syscall */
315 static int map_create(union bpf_attr *attr)
316 {
317 	int numa_node = bpf_map_attr_numa_node(attr);
318 	struct bpf_map *map;
319 	int err;
320 
321 	err = CHECK_ATTR(BPF_MAP_CREATE);
322 	if (err)
323 		return -EINVAL;
324 
325 	if (numa_node != NUMA_NO_NODE &&
326 	    ((unsigned int)numa_node >= nr_node_ids ||
327 	     !node_online(numa_node)))
328 		return -EINVAL;
329 
330 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
331 	map = find_and_alloc_map(attr);
332 	if (IS_ERR(map))
333 		return PTR_ERR(map);
334 
335 	atomic_set(&map->refcnt, 1);
336 	atomic_set(&map->usercnt, 1);
337 
338 	err = bpf_map_charge_memlock(map);
339 	if (err)
340 		goto free_map_nouncharge;
341 
342 	err = bpf_map_alloc_id(map);
343 	if (err)
344 		goto free_map;
345 
346 	err = bpf_map_new_fd(map);
347 	if (err < 0) {
348 		/* failed to allocate fd.
349 		 * bpf_map_put() is needed because the above
350 		 * bpf_map_alloc_id() has published the map
351 		 * to userspace, and userspace may
352 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
353 		 */
354 		bpf_map_put(map);
355 		return err;
356 	}
357 
358 	trace_bpf_map_create(map, err);
359 	return err;
360 
361 free_map:
362 	bpf_map_uncharge_memlock(map);
363 free_map_nouncharge:
364 	map->ops->map_free(map);
365 	return err;
366 }
367 
368 /* if an error is returned, the fd is released.
369  * On success, the caller should complete fd access with a matching fdput()
370  */
371 struct bpf_map *__bpf_map_get(struct fd f)
372 {
373 	if (!f.file)
374 		return ERR_PTR(-EBADF);
375 	if (f.file->f_op != &bpf_map_fops) {
376 		fdput(f);
377 		return ERR_PTR(-EINVAL);
378 	}
379 
380 	return f.file->private_data;
381 }
382 
383 /* prog's and map's refcnt limit */
384 #define BPF_MAX_REFCNT 32768
385 
386 struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
387 {
388 	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
389 		atomic_dec(&map->refcnt);
390 		return ERR_PTR(-EBUSY);
391 	}
392 	if (uref)
393 		atomic_inc(&map->usercnt);
394 	return map;
395 }
396 
397 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
398 {
399 	struct fd f = fdget(ufd);
400 	struct bpf_map *map;
401 
402 	map = __bpf_map_get(f);
403 	if (IS_ERR(map))
404 		return map;
405 
406 	map = bpf_map_inc(map, true);
407 	fdput(f);
408 
409 	return map;
410 }
411 
412 /* map_idr_lock should have been held */
413 static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
414 					    bool uref)
415 {
416 	int refold;
417 
418 	refold = __atomic_add_unless(&map->refcnt, 1, 0);
419 
420 	if (refold >= BPF_MAX_REFCNT) {
421 		__bpf_map_put(map, false);
422 		return ERR_PTR(-EBUSY);
423 	}
424 
425 	if (!refold)
426 		return ERR_PTR(-ENOENT);
427 
428 	if (uref)
429 		atomic_inc(&map->usercnt);
430 
431 	return map;
432 }
433 
434 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
435 {
436 	return -ENOTSUPP;
437 }
438 
439 /* last field in 'union bpf_attr' used by this command */
440 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
441 
442 static int map_lookup_elem(union bpf_attr *attr)
443 {
444 	void __user *ukey = u64_to_user_ptr(attr->key);
445 	void __user *uvalue = u64_to_user_ptr(attr->value);
446 	int ufd = attr->map_fd;
447 	struct bpf_map *map;
448 	void *key, *value, *ptr;
449 	u32 value_size;
450 	struct fd f;
451 	int err;
452 
453 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
454 		return -EINVAL;
455 
456 	f = fdget(ufd);
457 	map = __bpf_map_get(f);
458 	if (IS_ERR(map))
459 		return PTR_ERR(map);
460 
461 	key = memdup_user(ukey, map->key_size);
462 	if (IS_ERR(key)) {
463 		err = PTR_ERR(key);
464 		goto err_put;
465 	}
466 
467 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
468 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
469 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
470 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
471 	else if (IS_FD_MAP(map))
472 		value_size = sizeof(u32);
473 	else
474 		value_size = map->value_size;
475 
476 	err = -ENOMEM;
477 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
478 	if (!value)
479 		goto free_key;
480 
481 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
482 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
483 		err = bpf_percpu_hash_copy(map, key, value);
484 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
485 		err = bpf_percpu_array_copy(map, key, value);
486 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
487 		err = bpf_stackmap_copy(map, key, value);
488 	} else if (IS_FD_ARRAY(map)) {
489 		err = bpf_fd_array_map_lookup_elem(map, key, value);
490 	} else if (IS_FD_HASH(map)) {
491 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
492 	} else {
493 		rcu_read_lock();
494 		ptr = map->ops->map_lookup_elem(map, key);
495 		if (ptr)
496 			memcpy(value, ptr, value_size);
497 		rcu_read_unlock();
498 		err = ptr ? 0 : -ENOENT;
499 	}
500 
501 	if (err)
502 		goto free_value;
503 
504 	err = -EFAULT;
505 	if (copy_to_user(uvalue, value, value_size) != 0)
506 		goto free_value;
507 
508 	trace_bpf_map_lookup_elem(map, ufd, key, value);
509 	err = 0;
510 
511 free_value:
512 	kfree(value);
513 free_key:
514 	kfree(key);
515 err_put:
516 	fdput(f);
517 	return err;
518 }
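/*
 * Rough user-space sketch of a lookup (illustrative only; map_fd is a map
 * file descriptor obtained earlier and MAX_CPUS is a caller-chosen upper
 * bound).  For the per-cpu map types the value buffer must hold one value
 * per possible CPU, each slot rounded up to 8 bytes, matching the
 * value_size computed above:
 *
 *	__u32 key = 0;
 *	__u64 values[MAX_CPUS];
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)values;
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)))
 *		perror("BPF_MAP_LOOKUP_ELEM");
 */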
519 
520 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
521 
522 static int map_update_elem(union bpf_attr *attr)
523 {
524 	void __user *ukey = u64_to_user_ptr(attr->key);
525 	void __user *uvalue = u64_to_user_ptr(attr->value);
526 	int ufd = attr->map_fd;
527 	struct bpf_map *map;
528 	void *key, *value;
529 	u32 value_size;
530 	struct fd f;
531 	int err;
532 
533 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
534 		return -EINVAL;
535 
536 	f = fdget(ufd);
537 	map = __bpf_map_get(f);
538 	if (IS_ERR(map))
539 		return PTR_ERR(map);
540 
541 	key = memdup_user(ukey, map->key_size);
542 	if (IS_ERR(key)) {
543 		err = PTR_ERR(key);
544 		goto err_put;
545 	}
546 
547 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
548 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
549 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
550 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
551 	else
552 		value_size = map->value_size;
553 
554 	err = -ENOMEM;
555 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
556 	if (!value)
557 		goto free_key;
558 
559 	err = -EFAULT;
560 	if (copy_from_user(value, uvalue, value_size) != 0)
561 		goto free_value;
562 
563 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
564 	 * inside a bpf map update or delete, otherwise deadlocks are possible
565 	 */
566 	preempt_disable();
567 	__this_cpu_inc(bpf_prog_active);
568 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
569 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
570 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
571 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
572 		err = bpf_percpu_array_update(map, key, value, attr->flags);
573 	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
574 		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
575 		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
576 		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
577 		rcu_read_lock();
578 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
579 						   attr->flags);
580 		rcu_read_unlock();
581 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
582 		rcu_read_lock();
583 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
584 						  attr->flags);
585 		rcu_read_unlock();
586 	} else {
587 		rcu_read_lock();
588 		err = map->ops->map_update_elem(map, key, value, attr->flags);
589 		rcu_read_unlock();
590 	}
591 	__this_cpu_dec(bpf_prog_active);
592 	preempt_enable();
593 
594 	if (!err)
595 		trace_bpf_map_update_elem(map, ufd, key, value);
596 free_value:
597 	kfree(value);
598 free_key:
599 	kfree(key);
600 err_put:
601 	fdput(f);
602 	return err;
603 }
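/*
 * The update flags come from the uapi: BPF_ANY creates or replaces the
 * element, BPF_NOEXIST only creates it (-EEXIST if it already exists) and
 * BPF_EXIST only replaces it (-ENOENT if it does not).  Rough user-space
 * sketch (illustrative only; map_fd obtained earlier):
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd = map_fd;
 *	attr.key    = (__u64)(unsigned long)&key;
 *	attr.value  = (__u64)(unsigned long)&value;
 *	attr.flags  = BPF_ANY;
 *	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
 *		perror("BPF_MAP_UPDATE_ELEM");
 */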
604 
605 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
606 
607 static int map_delete_elem(union bpf_attr *attr)
608 {
609 	void __user *ukey = u64_to_user_ptr(attr->key);
610 	int ufd = attr->map_fd;
611 	struct bpf_map *map;
612 	struct fd f;
613 	void *key;
614 	int err;
615 
616 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
617 		return -EINVAL;
618 
619 	f = fdget(ufd);
620 	map = __bpf_map_get(f);
621 	if (IS_ERR(map))
622 		return PTR_ERR(map);
623 
624 	key = memdup_user(ukey, map->key_size);
625 	if (IS_ERR(key)) {
626 		err = PTR_ERR(key);
627 		goto err_put;
628 	}
629 
630 	preempt_disable();
631 	__this_cpu_inc(bpf_prog_active);
632 	rcu_read_lock();
633 	err = map->ops->map_delete_elem(map, key);
634 	rcu_read_unlock();
635 	__this_cpu_dec(bpf_prog_active);
636 	preempt_enable();
637 
638 	if (!err)
639 		trace_bpf_map_delete_elem(map, ufd, key);
640 	kfree(key);
641 err_put:
642 	fdput(f);
643 	return err;
644 }
645 
646 /* last field in 'union bpf_attr' used by this command */
647 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
648 
649 static int map_get_next_key(union bpf_attr *attr)
650 {
651 	void __user *ukey = u64_to_user_ptr(attr->key);
652 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
653 	int ufd = attr->map_fd;
654 	struct bpf_map *map;
655 	void *key, *next_key;
656 	struct fd f;
657 	int err;
658 
659 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
660 		return -EINVAL;
661 
662 	f = fdget(ufd);
663 	map = __bpf_map_get(f);
664 	if (IS_ERR(map))
665 		return PTR_ERR(map);
666 
667 	if (ukey) {
668 		key = memdup_user(ukey, map->key_size);
669 		if (IS_ERR(key)) {
670 			err = PTR_ERR(key);
671 			goto err_put;
672 		}
673 	} else {
674 		key = NULL;
675 	}
676 
677 	err = -ENOMEM;
678 	next_key = kmalloc(map->key_size, GFP_USER);
679 	if (!next_key)
680 		goto free_key;
681 
682 	rcu_read_lock();
683 	err = map->ops->map_get_next_key(map, key, next_key);
684 	rcu_read_unlock();
685 	if (err)
686 		goto free_next_key;
687 
688 	err = -EFAULT;
689 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
690 		goto free_next_key;
691 
692 	trace_bpf_map_next_key(map, ufd, key, next_key);
693 	err = 0;
694 
695 free_next_key:
696 	kfree(next_key);
697 free_key:
698 	kfree(key);
699 err_put:
700 	fdput(f);
701 	return err;
702 }
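/*
 * BPF_MAP_GET_NEXT_KEY is the building block for iterating a map from user
 * space: pass key == NULL (attr.key = 0) to get the first key, then feed
 * each returned key back in until the call fails with -ENOENT.  Rough
 * sketch for a map with u32 keys (illustrative only; map_fd obtained
 * earlier):
 *
 *	__u32 key, next_key;
 *	union bpf_attr attr = {};
 *
 *	attr.map_fd   = map_fd;
 *	attr.next_key = (__u64)(unsigned long)&next_key;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		// ... look up or delete next_key here ...
 *		key = next_key;
 *		attr.key = (__u64)(unsigned long)&key;
 *	}
 */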
703 
704 static const struct bpf_verifier_ops * const bpf_prog_types[] = {
705 #define BPF_PROG_TYPE(_id, _ops) \
706 	[_id] = &_ops,
707 #define BPF_MAP_TYPE(_id, _ops)
708 #include <linux/bpf_types.h>
709 #undef BPF_PROG_TYPE
710 #undef BPF_MAP_TYPE
711 };
712 
713 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
714 {
715 	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
716 		return -EINVAL;
717 
718 	prog->aux->ops = bpf_prog_types[type];
719 	prog->type = type;
720 	return 0;
721 }
722 
723 /* drop refcnt on maps used by eBPF program and free auxiliary data */
724 static void free_used_maps(struct bpf_prog_aux *aux)
725 {
726 	int i;
727 
728 	for (i = 0; i < aux->used_map_cnt; i++)
729 		bpf_map_put(aux->used_maps[i]);
730 
731 	kfree(aux->used_maps);
732 }
733 
734 int __bpf_prog_charge(struct user_struct *user, u32 pages)
735 {
736 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
737 	unsigned long user_bufs;
738 
739 	if (user) {
740 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
741 		if (user_bufs > memlock_limit) {
742 			atomic_long_sub(pages, &user->locked_vm);
743 			return -EPERM;
744 		}
745 	}
746 
747 	return 0;
748 }
749 
750 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
751 {
752 	if (user)
753 		atomic_long_sub(pages, &user->locked_vm);
754 }
755 
756 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
757 {
758 	struct user_struct *user = get_current_user();
759 	int ret;
760 
761 	ret = __bpf_prog_charge(user, prog->pages);
762 	if (ret) {
763 		free_uid(user);
764 		return ret;
765 	}
766 
767 	prog->aux->user = user;
768 	return 0;
769 }
770 
771 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
772 {
773 	struct user_struct *user = prog->aux->user;
774 
775 	__bpf_prog_uncharge(user, prog->pages);
776 	free_uid(user);
777 }
778 
779 static int bpf_prog_alloc_id(struct bpf_prog *prog)
780 {
781 	int id;
782 
783 	spin_lock_bh(&prog_idr_lock);
784 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
785 	if (id > 0)
786 		prog->aux->id = id;
787 	spin_unlock_bh(&prog_idr_lock);
788 
789 	/* id is in [1, INT_MAX) */
790 	if (WARN_ON_ONCE(!id))
791 		return -ENOSPC;
792 
793 	return id > 0 ? 0 : id;
794 }
795 
796 static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
797 {
798 	/* cBPF to eBPF migrations are currently not in the idr store. */
799 	if (!prog->aux->id)
800 		return;
801 
802 	if (do_idr_lock)
803 		spin_lock_bh(&prog_idr_lock);
804 	else
805 		__acquire(&prog_idr_lock);
806 
807 	idr_remove(&prog_idr, prog->aux->id);
808 
809 	if (do_idr_lock)
810 		spin_unlock_bh(&prog_idr_lock);
811 	else
812 		__release(&prog_idr_lock);
813 }
814 
815 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
816 {
817 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
818 
819 	free_used_maps(aux);
820 	bpf_prog_uncharge_memlock(aux->prog);
821 	bpf_prog_free(aux->prog);
822 }
823 
824 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
825 {
826 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
827 		trace_bpf_prog_put_rcu(prog);
828 		/* bpf_prog_free_id() must be called first */
829 		bpf_prog_free_id(prog, do_idr_lock);
830 		bpf_prog_kallsyms_del(prog);
831 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
832 	}
833 }
834 
835 void bpf_prog_put(struct bpf_prog *prog)
836 {
837 	__bpf_prog_put(prog, true);
838 }
839 EXPORT_SYMBOL_GPL(bpf_prog_put);
840 
841 static int bpf_prog_release(struct inode *inode, struct file *filp)
842 {
843 	struct bpf_prog *prog = filp->private_data;
844 
845 	bpf_prog_put(prog);
846 	return 0;
847 }
848 
849 #ifdef CONFIG_PROC_FS
850 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
851 {
852 	const struct bpf_prog *prog = filp->private_data;
853 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
854 
855 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
856 	seq_printf(m,
857 		   "prog_type:\t%u\n"
858 		   "prog_jited:\t%u\n"
859 		   "prog_tag:\t%s\n"
860 		   "memlock:\t%llu\n",
861 		   prog->type,
862 		   prog->jited,
863 		   prog_tag,
864 		   prog->pages * 1ULL << PAGE_SHIFT);
865 }
866 #endif
867 
868 static const struct file_operations bpf_prog_fops = {
869 #ifdef CONFIG_PROC_FS
870 	.show_fdinfo	= bpf_prog_show_fdinfo,
871 #endif
872 	.release	= bpf_prog_release,
873 };
874 
875 int bpf_prog_new_fd(struct bpf_prog *prog)
876 {
877 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
878 				O_RDWR | O_CLOEXEC);
879 }
880 
881 static struct bpf_prog *____bpf_prog_get(struct fd f)
882 {
883 	if (!f.file)
884 		return ERR_PTR(-EBADF);
885 	if (f.file->f_op != &bpf_prog_fops) {
886 		fdput(f);
887 		return ERR_PTR(-EINVAL);
888 	}
889 
890 	return f.file->private_data;
891 }
892 
893 struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
894 {
895 	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
896 		atomic_sub(i, &prog->aux->refcnt);
897 		return ERR_PTR(-EBUSY);
898 	}
899 	return prog;
900 }
901 EXPORT_SYMBOL_GPL(bpf_prog_add);
902 
903 void bpf_prog_sub(struct bpf_prog *prog, int i)
904 {
905 	/* Only to be used for undoing previous bpf_prog_add() in some
906 	 * error path. We still know that another entity in our call
907 	 * path holds a reference to the program, thus atomic_sub() can
908 	 * be safely used in such cases!
909 	 */
910 	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
911 }
912 EXPORT_SYMBOL_GPL(bpf_prog_sub);
913 
914 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
915 {
916 	return bpf_prog_add(prog, 1);
917 }
918 EXPORT_SYMBOL_GPL(bpf_prog_inc);
919 
920 /* prog_idr_lock should have been held */
921 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
922 {
923 	int refold;
924 
925 	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
926 
927 	if (refold >= BPF_MAX_REFCNT) {
928 		__bpf_prog_put(prog, false);
929 		return ERR_PTR(-EBUSY);
930 	}
931 
932 	if (!refold)
933 		return ERR_PTR(-ENOENT);
934 
935 	return prog;
936 }
937 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
938 
939 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
940 {
941 	struct fd f = fdget(ufd);
942 	struct bpf_prog *prog;
943 
944 	prog = ____bpf_prog_get(f);
945 	if (IS_ERR(prog))
946 		return prog;
947 	if (type && prog->type != *type) {
948 		prog = ERR_PTR(-EINVAL);
949 		goto out;
950 	}
951 
952 	prog = bpf_prog_inc(prog);
953 out:
954 	fdput(f);
955 	return prog;
956 }
957 
958 struct bpf_prog *bpf_prog_get(u32 ufd)
959 {
960 	return __bpf_prog_get(ufd, NULL);
961 }
962 
963 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
964 {
965 	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
966 
967 	if (!IS_ERR(prog))
968 		trace_bpf_prog_get_type(prog);
969 	return prog;
970 }
971 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
972 
973 /* last field in 'union bpf_attr' used by this command */
974 #define	BPF_PROG_LOAD_LAST_FIELD prog_flags
975 
976 static int bpf_prog_load(union bpf_attr *attr)
977 {
978 	enum bpf_prog_type type = attr->prog_type;
979 	struct bpf_prog *prog;
980 	int err;
981 	char license[128];
982 	bool is_gpl;
983 
984 	if (CHECK_ATTR(BPF_PROG_LOAD))
985 		return -EINVAL;
986 
987 	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
988 		return -EINVAL;
989 
990 	/* copy eBPF program license from user space */
991 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
992 			      sizeof(license) - 1) < 0)
993 		return -EFAULT;
994 	license[sizeof(license) - 1] = 0;
995 
996 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
997 	is_gpl = license_is_gpl_compatible(license);
998 
999 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
1000 		return -E2BIG;
1001 
1002 	if (type == BPF_PROG_TYPE_KPROBE &&
1003 	    attr->kern_version != LINUX_VERSION_CODE)
1004 		return -EINVAL;
1005 
1006 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
1007 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
1008 	    !capable(CAP_SYS_ADMIN))
1009 		return -EPERM;
1010 
1011 	/* plain bpf_prog allocation */
1012 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
1013 	if (!prog)
1014 		return -ENOMEM;
1015 
1016 	err = bpf_prog_charge_memlock(prog);
1017 	if (err)
1018 		goto free_prog_nouncharge;
1019 
1020 	prog->len = attr->insn_cnt;
1021 
1022 	err = -EFAULT;
1023 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
1024 			   bpf_prog_insn_size(prog)) != 0)
1025 		goto free_prog;
1026 
1027 	prog->orig_prog = NULL;
1028 	prog->jited = 0;
1029 
1030 	atomic_set(&prog->aux->refcnt, 1);
1031 	prog->gpl_compatible = is_gpl ? 1 : 0;
1032 
1033 	/* find program type: socket_filter vs tracing_filter */
1034 	err = find_prog_type(type, prog);
1035 	if (err < 0)
1036 		goto free_prog;
1037 
1038 	/* run eBPF verifier */
1039 	err = bpf_check(&prog, attr);
1040 	if (err < 0)
1041 		goto free_used_maps;
1042 
1043 	/* eBPF program is ready to be JITed */
1044 	prog = bpf_prog_select_runtime(prog, &err);
1045 	if (err < 0)
1046 		goto free_used_maps;
1047 
1048 	err = bpf_prog_alloc_id(prog);
1049 	if (err)
1050 		goto free_used_maps;
1051 
1052 	err = bpf_prog_new_fd(prog);
1053 	if (err < 0) {
1054 		/* failed to allocate fd.
1055 		 * bpf_prog_put() is needed because the above
1056 		 * bpf_prog_alloc_id() has published the prog
1057 		 * to userspace, and userspace may
1058 		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
1059 		 */
1060 		bpf_prog_put(prog);
1061 		return err;
1062 	}
1063 
1064 	bpf_prog_kallsyms_add(prog);
1065 	trace_bpf_prog_load(prog, err);
1066 	return err;
1067 
1068 free_used_maps:
1069 	free_used_maps(prog->aux);
1070 free_prog:
1071 	bpf_prog_uncharge_memlock(prog);
1072 free_prog_nouncharge:
1073 	bpf_prog_free(prog);
1074 	return err;
1075 }
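/*
 * Rough user-space sketch of loading a trivial "return 0" socket filter
 * (illustrative only; assumes the uapi <linux/bpf.h> definitions of
 * struct bpf_insn and the BPF_* opcode/register macros):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {};
 *	int prog_fd;
 *
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = (__u64)(unsigned long)insns;
 *	attr.insn_cnt  = 2;
 *	attr.license   = (__u64)(unsigned long)"GPL";
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */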
1076 
1077 #define BPF_OBJ_LAST_FIELD bpf_fd
1078 
1079 static int bpf_obj_pin(const union bpf_attr *attr)
1080 {
1081 	if (CHECK_ATTR(BPF_OBJ))
1082 		return -EINVAL;
1083 
1084 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
1085 }
1086 
1087 static int bpf_obj_get(const union bpf_attr *attr)
1088 {
1089 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
1090 		return -EINVAL;
1091 
1092 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
1093 }
1094 
1095 #ifdef CONFIG_CGROUP_BPF
1096 
1097 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
1098 
1099 static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
1100 {
1101 	struct bpf_prog *prog = NULL;
1102 	int ufd = attr->target_fd;
1103 	struct bpf_map *map;
1104 	struct fd f;
1105 	int err;
1106 
1107 	f = fdget(ufd);
1108 	map = __bpf_map_get(f);
1109 	if (IS_ERR(map))
1110 		return PTR_ERR(map);
1111 
1112 	if (attach) {
1113 		prog = bpf_prog_get_type(attr->attach_bpf_fd,
1114 					 BPF_PROG_TYPE_SK_SKB);
1115 		if (IS_ERR(prog)) {
1116 			fdput(f);
1117 			return PTR_ERR(prog);
1118 		}
1119 	}
1120 
1121 	err = sock_map_prog(map, prog, attr->attach_type);
1122 	if (err) {
1123 		fdput(f);
1124 		if (prog)
1125 			bpf_prog_put(prog);
1126 		return err;
1127 	}
1128 
1129 	fdput(f);
1130 	return 0;
1131 }
1132 
1133 static int bpf_prog_attach(const union bpf_attr *attr)
1134 {
1135 	enum bpf_prog_type ptype;
1136 	struct bpf_prog *prog;
1137 	struct cgroup *cgrp;
1138 	int ret;
1139 
1140 	if (!capable(CAP_NET_ADMIN))
1141 		return -EPERM;
1142 
1143 	if (CHECK_ATTR(BPF_PROG_ATTACH))
1144 		return -EINVAL;
1145 
1146 	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
1147 		return -EINVAL;
1148 
1149 	switch (attr->attach_type) {
1150 	case BPF_CGROUP_INET_INGRESS:
1151 	case BPF_CGROUP_INET_EGRESS:
1152 		ptype = BPF_PROG_TYPE_CGROUP_SKB;
1153 		break;
1154 	case BPF_CGROUP_INET_SOCK_CREATE:
1155 		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
1156 		break;
1157 	case BPF_CGROUP_SOCK_OPS:
1158 		ptype = BPF_PROG_TYPE_SOCK_OPS;
1159 		break;
1160 	case BPF_SK_SKB_STREAM_PARSER:
1161 	case BPF_SK_SKB_STREAM_VERDICT:
1162 		return sockmap_get_from_fd(attr, true);
1163 	default:
1164 		return -EINVAL;
1165 	}
1166 
1167 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
1168 	if (IS_ERR(prog))
1169 		return PTR_ERR(prog);
1170 
1171 	cgrp = cgroup_get_from_fd(attr->target_fd);
1172 	if (IS_ERR(cgrp)) {
1173 		bpf_prog_put(prog);
1174 		return PTR_ERR(cgrp);
1175 	}
1176 
1177 	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
1178 				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
1179 	if (ret)
1180 		bpf_prog_put(prog);
1181 	cgroup_put(cgrp);
1182 
1183 	return ret;
1184 }
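/*
 * Rough user-space sketch of attaching a loaded BPF_PROG_TYPE_CGROUP_SKB
 * program to a cgroup's ingress hook (illustrative only; prog_fd comes
 * from BPF_PROG_LOAD and the cgroup2 path is just an example):
 *
 *	union bpf_attr attr = {};
 *	int cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);
 *
 *	attr.target_fd     = cg_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = 0;			// or BPF_F_ALLOW_OVERRIDE
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)))
 *		perror("BPF_PROG_ATTACH");
 */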
1185 
1186 #define BPF_PROG_DETACH_LAST_FIELD attach_type
1187 
1188 static int bpf_prog_detach(const union bpf_attr *attr)
1189 {
1190 	struct cgroup *cgrp;
1191 	int ret;
1192 
1193 	if (!capable(CAP_NET_ADMIN))
1194 		return -EPERM;
1195 
1196 	if (CHECK_ATTR(BPF_PROG_DETACH))
1197 		return -EINVAL;
1198 
1199 	switch (attr->attach_type) {
1200 	case BPF_CGROUP_INET_INGRESS:
1201 	case BPF_CGROUP_INET_EGRESS:
1202 	case BPF_CGROUP_INET_SOCK_CREATE:
1203 	case BPF_CGROUP_SOCK_OPS:
1204 		cgrp = cgroup_get_from_fd(attr->target_fd);
1205 		if (IS_ERR(cgrp))
1206 			return PTR_ERR(cgrp);
1207 
1208 		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
1209 		cgroup_put(cgrp);
1210 		break;
1211 	case BPF_SK_SKB_STREAM_PARSER:
1212 	case BPF_SK_SKB_STREAM_VERDICT:
1213 		ret = sockmap_get_from_fd(attr, false);
1214 		break;
1215 	default:
1216 		return -EINVAL;
1217 	}
1218 
1219 	return ret;
1220 }
1221 
1222 #endif /* CONFIG_CGROUP_BPF */
1223 
1224 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
1225 
1226 static int bpf_prog_test_run(const union bpf_attr *attr,
1227 			     union bpf_attr __user *uattr)
1228 {
1229 	struct bpf_prog *prog;
1230 	int ret = -ENOTSUPP;
1231 
1232 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
1233 		return -EINVAL;
1234 
1235 	prog = bpf_prog_get(attr->test.prog_fd);
1236 	if (IS_ERR(prog))
1237 		return PTR_ERR(prog);
1238 
1239 	if (prog->aux->ops->test_run)
1240 		ret = prog->aux->ops->test_run(prog, attr, uattr);
1241 
1242 	bpf_prog_put(prog);
1243 	return ret;
1244 }
1245 
1246 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
1247 
1248 static int bpf_obj_get_next_id(const union bpf_attr *attr,
1249 			       union bpf_attr __user *uattr,
1250 			       struct idr *idr,
1251 			       spinlock_t *lock)
1252 {
1253 	u32 next_id = attr->start_id;
1254 	int err = 0;
1255 
1256 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
1257 		return -EINVAL;
1258 
1259 	if (!capable(CAP_SYS_ADMIN))
1260 		return -EPERM;
1261 
1262 	next_id++;
1263 	spin_lock_bh(lock);
1264 	if (!idr_get_next(idr, &next_id))
1265 		err = -ENOENT;
1266 	spin_unlock_bh(lock);
1267 
1268 	if (!err)
1269 		err = put_user(next_id, &uattr->next_id);
1270 
1271 	return err;
1272 }
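/*
 * Together with BPF_PROG_GET_FD_BY_ID / BPF_MAP_GET_FD_BY_ID this lets a
 * CAP_SYS_ADMIN process enumerate every loaded object.  Rough sketch for
 * programs (illustrative only):
 *
 *	union bpf_attr attr = {}, id_attr = {};
 *	int fd;
 *
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		id_attr.prog_id = attr.next_id;
 *		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &id_attr,
 *			     sizeof(id_attr));
 *		// ... inspect fd (e.g. BPF_OBJ_GET_INFO_BY_FD), then close it
 *		attr.start_id = attr.next_id;	// continue after this id
 *	}
 */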
1273 
1274 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
1275 
1276 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
1277 {
1278 	struct bpf_prog *prog;
1279 	u32 id = attr->prog_id;
1280 	int fd;
1281 
1282 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
1283 		return -EINVAL;
1284 
1285 	if (!capable(CAP_SYS_ADMIN))
1286 		return -EPERM;
1287 
1288 	spin_lock_bh(&prog_idr_lock);
1289 	prog = idr_find(&prog_idr, id);
1290 	if (prog)
1291 		prog = bpf_prog_inc_not_zero(prog);
1292 	else
1293 		prog = ERR_PTR(-ENOENT);
1294 	spin_unlock_bh(&prog_idr_lock);
1295 
1296 	if (IS_ERR(prog))
1297 		return PTR_ERR(prog);
1298 
1299 	fd = bpf_prog_new_fd(prog);
1300 	if (fd < 0)
1301 		bpf_prog_put(prog);
1302 
1303 	return fd;
1304 }
1305 
1306 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
1307 
1308 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
1309 {
1310 	struct bpf_map *map;
1311 	u32 id = attr->map_id;
1312 	int fd;
1313 
1314 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
1315 		return -EINVAL;
1316 
1317 	if (!capable(CAP_SYS_ADMIN))
1318 		return -EPERM;
1319 
1320 	spin_lock_bh(&map_idr_lock);
1321 	map = idr_find(&map_idr, id);
1322 	if (map)
1323 		map = bpf_map_inc_not_zero(map, true);
1324 	else
1325 		map = ERR_PTR(-ENOENT);
1326 	spin_unlock_bh(&map_idr_lock);
1327 
1328 	if (IS_ERR(map))
1329 		return PTR_ERR(map);
1330 
1331 	fd = bpf_map_new_fd(map);
1332 	if (fd < 0)
1333 		bpf_map_put(map);
1334 
1335 	return fd;
1336 }
1337 
1338 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1339 				   const union bpf_attr *attr,
1340 				   union bpf_attr __user *uattr)
1341 {
1342 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1343 	struct bpf_prog_info info = {};
1344 	u32 info_len = attr->info.info_len;
1345 	char __user *uinsns;
1346 	u32 ulen;
1347 	int err;
1348 
1349 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1350 	if (err)
1351 		return err;
1352 	info_len = min_t(u32, sizeof(info), info_len);
1353 
1354 	if (copy_from_user(&info, uinfo, info_len))
1355 		return -EFAULT;
1356 
1357 	info.type = prog->type;
1358 	info.id = prog->aux->id;
1359 
1360 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
1361 
1362 	if (!capable(CAP_SYS_ADMIN)) {
1363 		info.jited_prog_len = 0;
1364 		info.xlated_prog_len = 0;
1365 		goto done;
1366 	}
1367 
1368 	ulen = info.jited_prog_len;
1369 	info.jited_prog_len = prog->jited_len;
1370 	if (info.jited_prog_len && ulen) {
1371 		uinsns = u64_to_user_ptr(info.jited_prog_insns);
1372 		ulen = min_t(u32, info.jited_prog_len, ulen);
1373 		if (copy_to_user(uinsns, prog->bpf_func, ulen))
1374 			return -EFAULT;
1375 	}
1376 
1377 	ulen = info.xlated_prog_len;
1378 	info.xlated_prog_len = bpf_prog_insn_size(prog);
1379 	if (info.xlated_prog_len && ulen) {
1380 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
1381 		ulen = min_t(u32, info.xlated_prog_len, ulen);
1382 		if (copy_to_user(uinsns, prog->insnsi, ulen))
1383 			return -EFAULT;
1384 	}
1385 
1386 done:
1387 	if (copy_to_user(uinfo, &info, info_len) ||
1388 	    put_user(info_len, &uattr->info.info_len))
1389 		return -EFAULT;
1390 
1391 	return 0;
1392 }
1393 
1394 static int bpf_map_get_info_by_fd(struct bpf_map *map,
1395 				  const union bpf_attr *attr,
1396 				  union bpf_attr __user *uattr)
1397 {
1398 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
1399 	struct bpf_map_info info = {};
1400 	u32 info_len = attr->info.info_len;
1401 	int err;
1402 
1403 	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
1404 	if (err)
1405 		return err;
1406 	info_len = min_t(u32, sizeof(info), info_len);
1407 
1408 	info.type = map->map_type;
1409 	info.id = map->id;
1410 	info.key_size = map->key_size;
1411 	info.value_size = map->value_size;
1412 	info.max_entries = map->max_entries;
1413 	info.map_flags = map->map_flags;
1414 
1415 	if (copy_to_user(uinfo, &info, info_len) ||
1416 	    put_user(info_len, &uattr->info.info_len))
1417 		return -EFAULT;
1418 
1419 	return 0;
1420 }
1421 
1422 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
1423 
1424 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
1425 				  union bpf_attr __user *uattr)
1426 {
1427 	int ufd = attr->info.bpf_fd;
1428 	struct fd f;
1429 	int err;
1430 
1431 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
1432 		return -EINVAL;
1433 
1434 	f = fdget(ufd);
1435 	if (!f.file)
1436 		return -EBADFD;
1437 
1438 	if (f.file->f_op == &bpf_prog_fops)
1439 		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
1440 					      uattr);
1441 	else if (f.file->f_op == &bpf_map_fops)
1442 		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
1443 					     uattr);
1444 	else
1445 		err = -EINVAL;
1446 
1447 	fdput(f);
1448 	return err;
1449 }
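/*
 * Rough user-space sketch of querying a program's metadata (illustrative
 * only; prog_fd obtained earlier, and the same pattern works for maps with
 * struct bpf_map_info):
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info_len = sizeof(info);
 *	attr.info.info     = (__u64)(unsigned long)&info;
 *	if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *		printf("prog id %u type %u\n", info.id, info.type);
 */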
1450 
1451 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
1452 {
1453 	union bpf_attr attr = {};
1454 	int err;
1455 
1456 	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
1457 		return -EPERM;
1458 
1459 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
1460 	if (err)
1461 		return err;
1462 	size = min_t(u32, size, sizeof(attr));
1463 
1464 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
1465 	if (copy_from_user(&attr, uattr, size) != 0)
1466 		return -EFAULT;
1467 
1468 	switch (cmd) {
1469 	case BPF_MAP_CREATE:
1470 		err = map_create(&attr);
1471 		break;
1472 	case BPF_MAP_LOOKUP_ELEM:
1473 		err = map_lookup_elem(&attr);
1474 		break;
1475 	case BPF_MAP_UPDATE_ELEM:
1476 		err = map_update_elem(&attr);
1477 		break;
1478 	case BPF_MAP_DELETE_ELEM:
1479 		err = map_delete_elem(&attr);
1480 		break;
1481 	case BPF_MAP_GET_NEXT_KEY:
1482 		err = map_get_next_key(&attr);
1483 		break;
1484 	case BPF_PROG_LOAD:
1485 		err = bpf_prog_load(&attr);
1486 		break;
1487 	case BPF_OBJ_PIN:
1488 		err = bpf_obj_pin(&attr);
1489 		break;
1490 	case BPF_OBJ_GET:
1491 		err = bpf_obj_get(&attr);
1492 		break;
1493 #ifdef CONFIG_CGROUP_BPF
1494 	case BPF_PROG_ATTACH:
1495 		err = bpf_prog_attach(&attr);
1496 		break;
1497 	case BPF_PROG_DETACH:
1498 		err = bpf_prog_detach(&attr);
1499 		break;
1500 #endif
1501 	case BPF_PROG_TEST_RUN:
1502 		err = bpf_prog_test_run(&attr, uattr);
1503 		break;
1504 	case BPF_PROG_GET_NEXT_ID:
1505 		err = bpf_obj_get_next_id(&attr, uattr,
1506 					  &prog_idr, &prog_idr_lock);
1507 		break;
1508 	case BPF_MAP_GET_NEXT_ID:
1509 		err = bpf_obj_get_next_id(&attr, uattr,
1510 					  &map_idr, &map_idr_lock);
1511 		break;
1512 	case BPF_PROG_GET_FD_BY_ID:
1513 		err = bpf_prog_get_fd_by_id(&attr);
1514 		break;
1515 	case BPF_MAP_GET_FD_BY_ID:
1516 		err = bpf_map_get_fd_by_id(&attr);
1517 		break;
1518 	case BPF_OBJ_GET_INFO_BY_FD:
1519 		err = bpf_obj_get_info_by_fd(&attr, uattr);
1520 		break;
1521 	default:
1522 		err = -EINVAL;
1523 		break;
1524 	}
1525 
1526 	return err;
1527 }
1528