xref: /openbmc/linux/kernel/bpf/syscall.c (revision 110e6f26)
1 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  *
7  * This program is distributed in the hope that it will be useful, but
8  * WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10  * General Public License for more details.
11  */
12 #include <linux/bpf.h>
13 #include <linux/syscalls.h>
14 #include <linux/slab.h>
15 #include <linux/anon_inodes.h>
16 #include <linux/file.h>
17 #include <linux/license.h>
18 #include <linux/filter.h>
19 #include <linux/version.h>
20 
21 DEFINE_PER_CPU(int, bpf_prog_active);
22 
23 int sysctl_unprivileged_bpf_disabled __read_mostly;
24 
25 static LIST_HEAD(bpf_map_types);
26 
27 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
28 {
29 	struct bpf_map_type_list *tl;
30 	struct bpf_map *map;
31 
32 	list_for_each_entry(tl, &bpf_map_types, list_node) {
33 		if (tl->type == attr->map_type) {
34 			map = tl->ops->map_alloc(attr);
35 			if (IS_ERR(map))
36 				return map;
37 			map->ops = tl->ops;
38 			map->map_type = attr->map_type;
39 			return map;
40 		}
41 	}
42 	return ERR_PTR(-EINVAL);
43 }
44 
45 /* boot time registration of different map implementations */
46 void bpf_register_map_type(struct bpf_map_type_list *tl)
47 {
48 	list_add(&tl->list_node, &bpf_map_types);
49 }
50 
51 int bpf_map_precharge_memlock(u32 pages)
52 {
53 	struct user_struct *user = get_current_user();
54 	unsigned long memlock_limit, cur;
55 
56 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
57 	cur = atomic_long_read(&user->locked_vm);
58 	free_uid(user);
59 	if (cur + pages > memlock_limit)
60 		return -EPERM;
61 	return 0;
62 }
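
/* A usage note (inferred from callers, not stated in this file): 'pages' is
 * the object size converted to whole pages, i.e. a map implementation would
 * typically pass something like round_up(total_bytes, PAGE_SIZE) >> PAGE_SHIFT.
 * This helper only checks headroom against RLIMIT_MEMLOCK; nothing is
 * actually charged yet.
 */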
63 
64 static int bpf_map_charge_memlock(struct bpf_map *map)
65 {
66 	struct user_struct *user = get_current_user();
67 	unsigned long memlock_limit;
68 
69 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
70 
71 	atomic_long_add(map->pages, &user->locked_vm);
72 
73 	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
74 		atomic_long_sub(map->pages, &user->locked_vm);
75 		free_uid(user);
76 		return -EPERM;
77 	}
78 	map->user = user;
79 	return 0;
80 }
81 
82 static void bpf_map_uncharge_memlock(struct bpf_map *map)
83 {
84 	struct user_struct *user = map->user;
85 
86 	atomic_long_sub(map->pages, &user->locked_vm);
87 	free_uid(user);
88 }
89 
90 /* called from workqueue */
91 static void bpf_map_free_deferred(struct work_struct *work)
92 {
93 	struct bpf_map *map = container_of(work, struct bpf_map, work);
94 
95 	bpf_map_uncharge_memlock(map);
96 	/* implementation-dependent freeing */
97 	map->ops->map_free(map);
98 }
99 
100 static void bpf_map_put_uref(struct bpf_map *map)
101 {
102 	if (atomic_dec_and_test(&map->usercnt)) {
103 		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
104 			bpf_fd_array_map_clear(map);
105 	}
106 }
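
/* A note on the two counters (a best-effort reading of the code above):
 * usercnt tracks user-visible references such as fds and pinned paths, while
 * refcnt additionally covers kernel-internal users, e.g. programs that hold
 * the map.  Clearing a prog_array when its last user reference goes away
 * breaks the reference cycle between the prog_array and the programs stored
 * in it, so both can eventually be freed.
 */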
107 
108 /* decrement map refcnt and schedule it for freeing via workqueue
109  * (underlying map implementation ops->map_free() might sleep)
110  */
111 void bpf_map_put(struct bpf_map *map)
112 {
113 	if (atomic_dec_and_test(&map->refcnt)) {
114 		INIT_WORK(&map->work, bpf_map_free_deferred);
115 		schedule_work(&map->work);
116 	}
117 }
118 
119 void bpf_map_put_with_uref(struct bpf_map *map)
120 {
121 	bpf_map_put_uref(map);
122 	bpf_map_put(map);
123 }
124 
125 static int bpf_map_release(struct inode *inode, struct file *filp)
126 {
127 	bpf_map_put_with_uref(filp->private_data);
128 	return 0;
129 }
130 
131 #ifdef CONFIG_PROC_FS
132 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
133 {
134 	const struct bpf_map *map = filp->private_data;
135 
136 	seq_printf(m,
137 		   "map_type:\t%u\n"
138 		   "key_size:\t%u\n"
139 		   "value_size:\t%u\n"
140 		   "max_entries:\t%u\n"
141 		   "map_flags:\t%#x\n",
142 		   map->map_type,
143 		   map->key_size,
144 		   map->value_size,
145 		   map->max_entries,
146 		   map->map_flags);
147 }
148 #endif
149 
150 static const struct file_operations bpf_map_fops = {
151 #ifdef CONFIG_PROC_FS
152 	.show_fdinfo	= bpf_map_show_fdinfo,
153 #endif
154 	.release	= bpf_map_release,
155 };
156 
157 int bpf_map_new_fd(struct bpf_map *map)
158 {
159 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
160 				O_RDWR | O_CLOEXEC);
161 }
162 
163 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
164 #define CHECK_ATTR(CMD) \
165 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
166 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
167 		   sizeof(*attr) - \
168 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
169 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
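
/* For example, with BPF_MAP_CREATE_LAST_FIELD defined as map_flags below,
 * CHECK_ATTR(BPF_MAP_CREATE) is true when any byte of *attr past 'map_flags'
 * is non-zero, i.e. user space set fields this command does not understand.
 */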
170 
171 #define BPF_MAP_CREATE_LAST_FIELD map_flags
172 /* called via syscall */
173 static int map_create(union bpf_attr *attr)
174 {
175 	struct bpf_map *map;
176 	int err;
177 
178 	err = CHECK_ATTR(BPF_MAP_CREATE);
179 	if (err)
180 		return -EINVAL;
181 
182 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
183 	map = find_and_alloc_map(attr);
184 	if (IS_ERR(map))
185 		return PTR_ERR(map);
186 
187 	atomic_set(&map->refcnt, 1);
188 	atomic_set(&map->usercnt, 1);
189 
190 	err = bpf_map_charge_memlock(map);
191 	if (err)
192 		goto free_map;
193 
194 	err = bpf_map_new_fd(map);
195 	if (err < 0)
196 		/* failed to allocate fd */
197 		goto free_map;
198 
199 	return err;
200 
201 free_map:
202 	map->ops->map_free(map);
203 	return err;
204 }
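
/* Illustrative user-space sketch (not part of this file): creating a map
 * with the raw bpf(2) syscall.  The wrapper name bpf_create_map() is made
 * up for the example; the attr fields and the syscall itself come from the
 * UAPI.  Unused attr fields must stay zero (hence the memset), otherwise
 * CHECK_ATTR() above rejects the command with -EINVAL.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int bpf_create_map(enum bpf_map_type type, unsigned int key_size,
 *				  unsigned int value_size, unsigned int max_entries)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type	 = type;
 *		attr.key_size	 = key_size;
 *		attr.value_size	 = value_size;
 *		attr.max_entries = max_entries;
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */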
205 
206 /* If an error is returned, the fd is released.
207  * On success the caller should complete fd access with a matching fdput()
208  */
209 struct bpf_map *__bpf_map_get(struct fd f)
210 {
211 	if (!f.file)
212 		return ERR_PTR(-EBADF);
213 	if (f.file->f_op != &bpf_map_fops) {
214 		fdput(f);
215 		return ERR_PTR(-EINVAL);
216 	}
217 
218 	return f.file->private_data;
219 }
220 
221 void bpf_map_inc(struct bpf_map *map, bool uref)
222 {
223 	atomic_inc(&map->refcnt);
224 	if (uref)
225 		atomic_inc(&map->usercnt);
226 }
227 
228 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
229 {
230 	struct fd f = fdget(ufd);
231 	struct bpf_map *map;
232 
233 	map = __bpf_map_get(f);
234 	if (IS_ERR(map))
235 		return map;
236 
237 	bpf_map_inc(map, true);
238 	fdput(f);
239 
240 	return map;
241 }
242 
243 /* helper to convert user pointers passed inside __aligned_u64 fields */
244 static void __user *u64_to_ptr(__u64 val)
245 {
246 	return (void __user *) (unsigned long) val;
247 }
248 
249 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
250 {
251 	return -ENOTSUPP;
252 }
253 
254 /* last field in 'union bpf_attr' used by this command */
255 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value
256 
257 static int map_lookup_elem(union bpf_attr *attr)
258 {
259 	void __user *ukey = u64_to_ptr(attr->key);
260 	void __user *uvalue = u64_to_ptr(attr->value);
261 	int ufd = attr->map_fd;
262 	struct bpf_map *map;
263 	void *key, *value, *ptr;
264 	u32 value_size;
265 	struct fd f;
266 	int err;
267 
268 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
269 		return -EINVAL;
270 
271 	f = fdget(ufd);
272 	map = __bpf_map_get(f);
273 	if (IS_ERR(map))
274 		return PTR_ERR(map);
275 
276 	err = -ENOMEM;
277 	key = kmalloc(map->key_size, GFP_USER);
278 	if (!key)
279 		goto err_put;
280 
281 	err = -EFAULT;
282 	if (copy_from_user(key, ukey, map->key_size) != 0)
283 		goto free_key;
284 
285 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
286 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
287 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
288 	else
289 		value_size = map->value_size;
290 
291 	err = -ENOMEM;
292 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
293 	if (!value)
294 		goto free_key;
295 
296 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
297 		err = bpf_percpu_hash_copy(map, key, value);
298 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
299 		err = bpf_percpu_array_copy(map, key, value);
300 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
301 		err = bpf_stackmap_copy(map, key, value);
302 	} else {
303 		rcu_read_lock();
304 		ptr = map->ops->map_lookup_elem(map, key);
305 		if (ptr)
306 			memcpy(value, ptr, value_size);
307 		rcu_read_unlock();
308 		err = ptr ? 0 : -ENOENT;
309 	}
310 
311 	if (err)
312 		goto free_value;
313 
314 	err = -EFAULT;
315 	if (copy_to_user(uvalue, value, value_size) != 0)
316 		goto free_value;
317 
318 	err = 0;
319 
320 free_value:
321 	kfree(value);
322 free_key:
323 	kfree(key);
324 err_put:
325 	fdput(f);
326 	return err;
327 }
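
/* Matching user-space sketch (illustrative): looking up one element.  The
 * ptr_to_u64() helper is made up here as the inverse of the kernel-side
 * u64_to_ptr() above.  'value' must point at value_size bytes, or at
 * round_up(value_size, 8) * num_possible_cpus() bytes for the per-cpu map
 * types handled above.
 *
 *	static __u64 ptr_to_u64(const void *ptr)
 *	{
 *		return (__u64) (unsigned long) ptr;
 *	}
 *
 *	static int bpf_lookup_elem(int fd, const void *key, void *value)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = fd;
 *		attr.key    = ptr_to_u64(key);
 *		attr.value  = ptr_to_u64(value);
 *
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */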
328 
329 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
330 
331 static int map_update_elem(union bpf_attr *attr)
332 {
333 	void __user *ukey = u64_to_ptr(attr->key);
334 	void __user *uvalue = u64_to_ptr(attr->value);
335 	int ufd = attr->map_fd;
336 	struct bpf_map *map;
337 	void *key, *value;
338 	u32 value_size;
339 	struct fd f;
340 	int err;
341 
342 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
343 		return -EINVAL;
344 
345 	f = fdget(ufd);
346 	map = __bpf_map_get(f);
347 	if (IS_ERR(map))
348 		return PTR_ERR(map);
349 
350 	err = -ENOMEM;
351 	key = kmalloc(map->key_size, GFP_USER);
352 	if (!key)
353 		goto err_put;
354 
355 	err = -EFAULT;
356 	if (copy_from_user(key, ukey, map->key_size) != 0)
357 		goto free_key;
358 
359 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
360 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
361 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
362 	else
363 		value_size = map->value_size;
364 
365 	err = -ENOMEM;
366 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
367 	if (!value)
368 		goto free_key;
369 
370 	err = -EFAULT;
371 	if (copy_from_user(value, uvalue, value_size) != 0)
372 		goto free_value;
373 
374 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
375 	 * inside a bpf map update or delete; otherwise deadlocks are possible
376 	 */
377 	preempt_disable();
378 	__this_cpu_inc(bpf_prog_active);
379 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
380 		err = bpf_percpu_hash_update(map, key, value, attr->flags);
381 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
382 		err = bpf_percpu_array_update(map, key, value, attr->flags);
383 	} else {
384 		rcu_read_lock();
385 		err = map->ops->map_update_elem(map, key, value, attr->flags);
386 		rcu_read_unlock();
387 	}
388 	__this_cpu_dec(bpf_prog_active);
389 	preempt_enable();
390 
391 free_value:
392 	kfree(value);
393 free_key:
394 	kfree(key);
395 err_put:
396 	fdput(f);
397 	return err;
398 }
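
/* User-space counterpart (illustrative fragment, reusing the hypothetical
 * ptr_to_u64() from the lookup sketch): an update additionally passes
 * attr.flags, which the UAPI defines as BPF_ANY (create or update),
 * BPF_NOEXIST (create only) or BPF_EXIST (update only).
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd = fd;
 *	attr.key    = ptr_to_u64(key);
 *	attr.value  = ptr_to_u64(value);
 *	attr.flags  = BPF_ANY;
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */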
399 
400 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
401 
402 static int map_delete_elem(union bpf_attr *attr)
403 {
404 	void __user *ukey = u64_to_ptr(attr->key);
405 	int ufd = attr->map_fd;
406 	struct bpf_map *map;
407 	struct fd f;
408 	void *key;
409 	int err;
410 
411 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
412 		return -EINVAL;
413 
414 	f = fdget(ufd);
415 	map = __bpf_map_get(f);
416 	if (IS_ERR(map))
417 		return PTR_ERR(map);
418 
419 	err = -ENOMEM;
420 	key = kmalloc(map->key_size, GFP_USER);
421 	if (!key)
422 		goto err_put;
423 
424 	err = -EFAULT;
425 	if (copy_from_user(key, ukey, map->key_size) != 0)
426 		goto free_key;
427 
428 	preempt_disable();
429 	__this_cpu_inc(bpf_prog_active);
430 	rcu_read_lock();
431 	err = map->ops->map_delete_elem(map, key);
432 	rcu_read_unlock();
433 	__this_cpu_dec(bpf_prog_active);
434 	preempt_enable();
435 
436 free_key:
437 	kfree(key);
438 err_put:
439 	fdput(f);
440 	return err;
441 }
442 
443 /* last field in 'union bpf_attr' used by this command */
444 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
445 
446 static int map_get_next_key(union bpf_attr *attr)
447 {
448 	void __user *ukey = u64_to_ptr(attr->key);
449 	void __user *unext_key = u64_to_ptr(attr->next_key);
450 	int ufd = attr->map_fd;
451 	struct bpf_map *map;
452 	void *key, *next_key;
453 	struct fd f;
454 	int err;
455 
456 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
457 		return -EINVAL;
458 
459 	f = fdget(ufd);
460 	map = __bpf_map_get(f);
461 	if (IS_ERR(map))
462 		return PTR_ERR(map);
463 
464 	err = -ENOMEM;
465 	key = kmalloc(map->key_size, GFP_USER);
466 	if (!key)
467 		goto err_put;
468 
469 	err = -EFAULT;
470 	if (copy_from_user(key, ukey, map->key_size) != 0)
471 		goto free_key;
472 
473 	err = -ENOMEM;
474 	next_key = kmalloc(map->key_size, GFP_USER);
475 	if (!next_key)
476 		goto free_key;
477 
478 	rcu_read_lock();
479 	err = map->ops->map_get_next_key(map, key, next_key);
480 	rcu_read_unlock();
481 	if (err)
482 		goto free_next_key;
483 
484 	err = -EFAULT;
485 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
486 		goto free_next_key;
487 
488 	err = 0;
489 
490 free_next_key:
491 	kfree(next_key);
492 free_key:
493 	kfree(key);
494 err_put:
495 	fdput(f);
496 	return err;
497 }
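
/* Typical user-space iteration (illustrative fragment using ptr_to_u64()):
 * BPF_MAP_GET_NEXT_KEY walks the keys.  The hash and array map types return
 * their first key when given a non-existent key, so the walk can start from
 * an arbitrary value; each iteration consumes next_key and continues from
 * it, and the loop ends when the syscall fails with errno == ENOENT.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.map_fd   = fd;
 *	attr.key      = ptr_to_u64(&key);
 *	attr.next_key = ptr_to_u64(&next_key);
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0)
 *		key = next_key;
 */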
498 
499 static LIST_HEAD(bpf_prog_types);
500 
501 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
502 {
503 	struct bpf_prog_type_list *tl;
504 
505 	list_for_each_entry(tl, &bpf_prog_types, list_node) {
506 		if (tl->type == type) {
507 			prog->aux->ops = tl->ops;
508 			prog->type = type;
509 			return 0;
510 		}
511 	}
512 
513 	return -EINVAL;
514 }
515 
516 void bpf_register_prog_type(struct bpf_prog_type_list *tl)
517 {
518 	list_add(&tl->list_node, &bpf_prog_types);
519 }
520 
521 /* fixup insn->imm field of bpf_call instructions:
522  * if (insn->imm == BPF_FUNC_map_lookup_elem)
523  *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
524  * else if (insn->imm == BPF_FUNC_map_update_elem)
525  *      insn->imm = bpf_map_update_elem - __bpf_call_base;
526  * else ...
527  *
528  * this function is called after eBPF program passed verification
529  */
530 static void fixup_bpf_calls(struct bpf_prog *prog)
531 {
532 	const struct bpf_func_proto *fn;
533 	int i;
534 
535 	for (i = 0; i < prog->len; i++) {
536 		struct bpf_insn *insn = &prog->insnsi[i];
537 
538 		if (insn->code == (BPF_JMP | BPF_CALL)) {
539 			/* we reach here when the program has bpf_call instructions
540 			 * and it passed bpf_check(), which means that
541 			 * ops->get_func_proto must have been supplied; check it
542 			 */
543 			BUG_ON(!prog->aux->ops->get_func_proto);
544 
545 			if (insn->imm == BPF_FUNC_get_route_realm)
546 				prog->dst_needed = 1;
547 			if (insn->imm == BPF_FUNC_get_prandom_u32)
548 				bpf_user_rnd_init_once();
549 			if (insn->imm == BPF_FUNC_tail_call) {
550 				/* mark bpf_tail_call as different opcode
551 				 * to avoid conditional branch in
552 				 * interpreter for every normal call
553 				 * and to prevent accidental JITing by
554 				 * JIT compiler that doesn't support
555 				 * bpf_tail_call yet
556 				 */
557 				insn->imm = 0;
558 				insn->code |= BPF_X;
559 				continue;
560 			}
561 
562 			fn = prog->aux->ops->get_func_proto(insn->imm);
563 			/* all functions that have a prototype and that the verifier
564 			 * allowed programs to call must be real in-kernel functions
565 			 */
566 			BUG_ON(!fn->func);
567 			insn->imm = fn->func - __bpf_call_base;
568 		}
569 	}
570 }
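
/* Concretely (an editorial reading of the rewrite above): after this pass a
 * tail call site carries code == BPF_JMP | BPF_CALL | BPF_X with imm == 0,
 * so the interpreter can dispatch it as its own opcode, and a JIT that
 * predates tail calls will not recognize it and the program falls back to
 * the interpreter rather than being miscompiled.
 */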
571 
572 /* drop refcnt on maps used by eBPF program and free auxiliary data */
573 static void free_used_maps(struct bpf_prog_aux *aux)
574 {
575 	int i;
576 
577 	for (i = 0; i < aux->used_map_cnt; i++)
578 		bpf_map_put(aux->used_maps[i]);
579 
580 	kfree(aux->used_maps);
581 }
582 
583 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
584 {
585 	struct user_struct *user = get_current_user();
586 	unsigned long memlock_limit;
587 
588 	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
589 
590 	atomic_long_add(prog->pages, &user->locked_vm);
591 	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
592 		atomic_long_sub(prog->pages, &user->locked_vm);
593 		free_uid(user);
594 		return -EPERM;
595 	}
596 	prog->aux->user = user;
597 	return 0;
598 }
599 
600 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
601 {
602 	struct user_struct *user = prog->aux->user;
603 
604 	atomic_long_sub(prog->pages, &user->locked_vm);
605 	free_uid(user);
606 }
607 
608 static void __prog_put_common(struct rcu_head *rcu)
609 {
610 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
611 
612 	free_used_maps(aux);
613 	bpf_prog_uncharge_memlock(aux->prog);
614 	bpf_prog_free(aux->prog);
615 }
616 
617 /* version of bpf_prog_put() that is called after a grace period */
618 void bpf_prog_put_rcu(struct bpf_prog *prog)
619 {
620 	if (atomic_dec_and_test(&prog->aux->refcnt))
621 		call_rcu(&prog->aux->rcu, __prog_put_common);
622 }
623 
624 void bpf_prog_put(struct bpf_prog *prog)
625 {
626 	if (atomic_dec_and_test(&prog->aux->refcnt))
627 		__prog_put_common(&prog->aux->rcu);
628 }
629 EXPORT_SYMBOL_GPL(bpf_prog_put);
630 
631 static int bpf_prog_release(struct inode *inode, struct file *filp)
632 {
633 	struct bpf_prog *prog = filp->private_data;
634 
635 	bpf_prog_put_rcu(prog);
636 	return 0;
637 }
638 
639 static const struct file_operations bpf_prog_fops = {
640 	.release	= bpf_prog_release,
641 };
642 
643 int bpf_prog_new_fd(struct bpf_prog *prog)
644 {
645 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
646 				O_RDWR | O_CLOEXEC);
647 }
648 
649 static struct bpf_prog *__bpf_prog_get(struct fd f)
650 {
651 	if (!f.file)
652 		return ERR_PTR(-EBADF);
653 	if (f.file->f_op != &bpf_prog_fops) {
654 		fdput(f);
655 		return ERR_PTR(-EINVAL);
656 	}
657 
658 	return f.file->private_data;
659 }
660 
661 /* called by sockets/tracing/seccomp before attaching a program to an event;
662  * pairs with bpf_prog_put()
663  */
664 struct bpf_prog *bpf_prog_get(u32 ufd)
665 {
666 	struct fd f = fdget(ufd);
667 	struct bpf_prog *prog;
668 
669 	prog = __bpf_prog_get(f);
670 	if (IS_ERR(prog))
671 		return prog;
672 
673 	atomic_inc(&prog->aux->refcnt);
674 	fdput(f);
675 
676 	return prog;
677 }
678 EXPORT_SYMBOL_GPL(bpf_prog_get);
679 
680 /* last field in 'union bpf_attr' used by this command */
681 #define	BPF_PROG_LOAD_LAST_FIELD kern_version
682 
683 static int bpf_prog_load(union bpf_attr *attr)
684 {
685 	enum bpf_prog_type type = attr->prog_type;
686 	struct bpf_prog *prog;
687 	int err;
688 	char license[128];
689 	bool is_gpl;
690 
691 	if (CHECK_ATTR(BPF_PROG_LOAD))
692 		return -EINVAL;
693 
694 	/* copy eBPF program license from user space */
695 	if (strncpy_from_user(license, u64_to_ptr(attr->license),
696 			      sizeof(license) - 1) < 0)
697 		return -EFAULT;
698 	license[sizeof(license) - 1] = 0;
699 
700 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
701 	is_gpl = license_is_gpl_compatible(license);
702 
703 	if (attr->insn_cnt >= BPF_MAXINSNS)
704 		return -EINVAL;
705 
706 	if (type == BPF_PROG_TYPE_KPROBE &&
707 	    attr->kern_version != LINUX_VERSION_CODE)
708 		return -EINVAL;
709 
710 	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
711 		return -EPERM;
712 
713 	/* plain bpf_prog allocation */
714 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
715 	if (!prog)
716 		return -ENOMEM;
717 
718 	err = bpf_prog_charge_memlock(prog);
719 	if (err)
720 		goto free_prog_nouncharge;
721 
722 	prog->len = attr->insn_cnt;
723 
724 	err = -EFAULT;
725 	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
726 			   prog->len * sizeof(struct bpf_insn)) != 0)
727 		goto free_prog;
728 
729 	prog->orig_prog = NULL;
730 	prog->jited = 0;
731 
732 	atomic_set(&prog->aux->refcnt, 1);
733 	prog->gpl_compatible = is_gpl ? 1 : 0;
734 
735 	/* find program type: socket_filter vs tracing_filter */
736 	err = find_prog_type(type, prog);
737 	if (err < 0)
738 		goto free_prog;
739 
740 	/* run eBPF verifier */
741 	err = bpf_check(&prog, attr);
742 	if (err < 0)
743 		goto free_used_maps;
744 
745 	/* fixup BPF_CALL->imm field */
746 	fixup_bpf_calls(prog);
747 
748 	/* eBPF program is ready to be JITed */
749 	err = bpf_prog_select_runtime(prog);
750 	if (err < 0)
751 		goto free_used_maps;
752 
753 	err = bpf_prog_new_fd(prog);
754 	if (err < 0)
755 		/* failed to allocate fd */
756 		goto free_used_maps;
757 
758 	return err;
759 
760 free_used_maps:
761 	free_used_maps(prog->aux);
762 free_prog:
763 	bpf_prog_uncharge_memlock(prog);
764 free_prog_nouncharge:
765 	bpf_prog_free(prog);
766 	return err;
767 }
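
/* Illustrative user-space sketch (not part of this file): loading a minimal
 * socket filter whose two instructions are "r0 = 0" and "exit".  The log
 * fields are left zero, which disables the verifier log; kern_version is
 * only required for BPF_PROG_TYPE_KPROBE, as checked above.  ptr_to_u64()
 * is the hypothetical helper from the earlier sketches.
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr;
 *	int prog_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *	attr.insns     = ptr_to_u64(insns);
 *	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
 *	attr.license   = ptr_to_u64("GPL");
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */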
768 
769 #define BPF_OBJ_LAST_FIELD bpf_fd
770 
771 static int bpf_obj_pin(const union bpf_attr *attr)
772 {
773 	if (CHECK_ATTR(BPF_OBJ))
774 		return -EINVAL;
775 
776 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
777 }
778 
779 static int bpf_obj_get(const union bpf_attr *attr)
780 {
781 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
782 		return -EINVAL;
783 
784 	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
785 }
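
/* User-space sketch (illustrative): pinning an fd and re-opening it later.
 * The path "/sys/fs/bpf/my_map" is only an example name; it must live on a
 * mounted bpf filesystem.  Note that BPF_OBJ_GET requires bpf_fd == 0, as
 * enforced above.
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_map");
 *	attr.bpf_fd   = map_fd;
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = ptr_to_u64("/sys/fs/bpf/my_map");
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */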
786 
787 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
788 {
789 	union bpf_attr attr = {};
790 	int err;
791 
792 	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
793 		return -EPERM;
794 
795 	if (!access_ok(VERIFY_READ, uattr, 1))
796 		return -EFAULT;
797 
798 	if (size > PAGE_SIZE)	/* silly large */
799 		return -E2BIG;
800 
801 	/* If we're handed a bigger struct than we know of,
802 	 * ensure all the unknown bits are 0 - i.e. new
803 	 * user-space does not rely on any kernel feature
804 	 * extensions we don't know about yet.
805 	 */
806 	if (size > sizeof(attr)) {
807 		unsigned char __user *addr;
808 		unsigned char __user *end;
809 		unsigned char val;
810 
811 		addr = (void __user *)uattr + sizeof(attr);
812 		end  = (void __user *)uattr + size;
813 
814 		for (; addr < end; addr++) {
815 			err = get_user(val, addr);
816 			if (err)
817 				return err;
818 			if (val)
819 				return -E2BIG;
820 		}
821 		size = sizeof(attr);
822 	}
823 
824 	/* copy attributes from user space; this may be less than sizeof(attr) */
825 	if (copy_from_user(&attr, uattr, size) != 0)
826 		return -EFAULT;
827 
828 	switch (cmd) {
829 	case BPF_MAP_CREATE:
830 		err = map_create(&attr);
831 		break;
832 	case BPF_MAP_LOOKUP_ELEM:
833 		err = map_lookup_elem(&attr);
834 		break;
835 	case BPF_MAP_UPDATE_ELEM:
836 		err = map_update_elem(&attr);
837 		break;
838 	case BPF_MAP_DELETE_ELEM:
839 		err = map_delete_elem(&attr);
840 		break;
841 	case BPF_MAP_GET_NEXT_KEY:
842 		err = map_get_next_key(&attr);
843 		break;
844 	case BPF_PROG_LOAD:
845 		err = bpf_prog_load(&attr);
846 		break;
847 	case BPF_OBJ_PIN:
848 		err = bpf_obj_pin(&attr);
849 		break;
850 	case BPF_OBJ_GET:
851 		err = bpf_obj_get(&attr);
852 		break;
853 	default:
854 		err = -EINVAL;
855 		break;
856 	}
857 
858 	return err;
859 }
860