xref: /openbmc/linux/kernel/bpf/syscall.c (revision a8fe58ce)
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
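
/* A map implementation registers itself at boot via an initcall; a minimal
 * sketch modeled on kernel/bpf/arraymap.c (the ops structure and names here
 * are illustrative):
 *
 *	static struct bpf_map_type_list array_type __read_mostly = {
 *		.ops	= &array_ops,
 *		.type	= BPF_MAP_TYPE_ARRAY,
 *	};
 *
 *	static int __init register_array_map(void)
 *	{
 *		bpf_register_map_type(&array_type);
 *		return 0;
 *	}
 *	late_initcall(register_array_map);
 */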

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

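/* For example, with BPF_MAP_CREATE_LAST_FIELD defined as max_entries below,
 * CHECK_ATTR(BPF_MAP_CREATE) expands (roughly) to:
 *
 *	memchr_inv((void *) &attr->max_entries + sizeof(attr->max_entries), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, max_entries) -
 *		   sizeof(attr->max_entries)) != NULL
 *
 * i.e. it is true iff any byte past the command's last used field is non-zero.
 */
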
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}
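
/* From user space, a map is created with the BPF_MAP_CREATE command; a
 * minimal sketch (assuming the <linux/bpf.h> uapi definitions and a raw
 * syscall(2), since libc provides no bpf() wrapper):
 *
 *	union bpf_attr attr = {
 *		.map_type	= BPF_MAP_TYPE_HASH,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u64),
 *		.max_entries	= 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success the return value is a new O_CLOEXEC file descriptor referring
 * to the map.
 */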

/* if an error is returned, the fd is released.
 * On success, the caller should complete fd access with a matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
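
/* User space performs the opposite conversion when filling the attribute;
 * a common sketch (the helper name is illustrative):
 *
 *	static __u64 ptr_to_u64(const void *ptr)
 *	{
 *		return (__u64) (unsigned long) ptr;
 *	}
 *
 *	attr.key = ptr_to_u64(&key);
 */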

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
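
/* A matching user-space lookup; a minimal sketch (ptr_to_u64() as above):
 *
 *	__u32 key = 1;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd	= map_fd,
 *		.key	= ptr_to_u64(&key),
 *		.value	= ptr_to_u64(&value),
 *	};
 *
 *	if (syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)) == 0)
 *		use(value);
 *
 * (use() stands for the caller's own handling.) A return of -1 with errno
 * ENOENT means no element exists for that key.
 */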

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock(),
	 * and all map accessors rely on that fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
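
/* A user-space update passes the desired semantics in 'flags'; a minimal
 * sketch (BPF_ANY creates or replaces, BPF_NOEXIST only creates,
 * BPF_EXIST only replaces):
 *
 *	__u32 key = 1;
 *	__u64 value = 42;
 *	union bpf_attr attr = {
 *		.map_fd	= map_fd,
 *		.key	= ptr_to_u64(&key),
 *		.value	= ptr_to_u64(&value),
 *		.flags	= BPF_ANY,
 *	};
 *
 *	int err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */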

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
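
/* BPF_MAP_GET_NEXT_KEY enables a full walk of the map from user space; a
 * minimal sketch (starting from a key that is unlikely to exist; exact
 * behaviour for a nonexistent start key depends on the map implementation,
 * and handle() stands for the caller's own processing):
 *
 *	__u32 key = -1, next_key;
 *	union bpf_attr attr = {
 *		.map_fd		= map_fd,
 *		.key		= ptr_to_u64(&key),
 *		.next_key	= ptr_to_u64(&next_key),
 *	};
 *
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *		handle(next_key);
 *		key = next_key;
 *	}
 *
 * The loop ends with errno ENOENT once the last key has been visited.
 */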

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program has passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call instructions
			 * and it passed bpf_check(), which means that
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * a JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the verifier
			 * allowed programs to call must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
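
/* A helper call instruction before fixup carries the helper's enum value in
 * insn->imm; a sketch of the transformation for a map lookup:
 *
 *	before:	insn = { .code = BPF_JMP | BPF_CALL,
 *			 .imm  = BPF_FUNC_map_lookup_elem };
 *	after:	insn->imm = (long) bpf_map_lookup_elem - (long) __bpf_call_base;
 *
 * so the interpreter can later invoke the helper at
 * __bpf_call_base + insn->imm.
 */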

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* called by sockets/tracing/seccomp before attaching a program to an event;
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
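
/* Loading a minimal program from user space; a sketch of a two-instruction
 * socket filter that does r0 = 0 and exits (raw struct bpf_insn
 * initializers, with opcode macros from the uapi headers):
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {
 *		.prog_type	= BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns		= ptr_to_u64(insns),
 *		.insn_cnt	= 2,
 *		.license	= ptr_to_u64("GPL"),
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */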

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
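
/* Pinning gives an fd a name in the bpf filesystem so the object can outlive
 * the process and be reopened later; a minimal sketch (assuming bpffs is
 * mounted at /sys/fs/bpf, and "my_map" is an illustrative name):
 *
 *	union bpf_attr attr = {
 *		.pathname	= ptr_to_u64("/sys/fs/bpf/my_map"),
 *		.bpf_fd		= map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */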

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}