1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf_trace.h> 6 #include <linux/bpf_lirc.h> 7 #include <linux/btf.h> 8 #include <linux/syscalls.h> 9 #include <linux/slab.h> 10 #include <linux/sched/signal.h> 11 #include <linux/vmalloc.h> 12 #include <linux/mmzone.h> 13 #include <linux/anon_inodes.h> 14 #include <linux/fdtable.h> 15 #include <linux/file.h> 16 #include <linux/fs.h> 17 #include <linux/license.h> 18 #include <linux/filter.h> 19 #include <linux/version.h> 20 #include <linux/kernel.h> 21 #include <linux/idr.h> 22 #include <linux/cred.h> 23 #include <linux/timekeeping.h> 24 #include <linux/ctype.h> 25 #include <linux/nospec.h> 26 #include <linux/audit.h> 27 #include <uapi/linux/btf.h> 28 29 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 30 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 31 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 32 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) 33 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 34 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ 35 IS_FD_HASH(map)) 36 37 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 38 39 DEFINE_PER_CPU(int, bpf_prog_active); 40 static DEFINE_IDR(prog_idr); 41 static DEFINE_SPINLOCK(prog_idr_lock); 42 static DEFINE_IDR(map_idr); 43 static DEFINE_SPINLOCK(map_idr_lock); 44 45 int sysctl_unprivileged_bpf_disabled __read_mostly; 46 47 static const struct bpf_map_ops * const bpf_map_types[] = { 48 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 49 #define BPF_MAP_TYPE(_id, _ops) \ 50 [_id] = &_ops, 51 #include <linux/bpf_types.h> 52 #undef BPF_PROG_TYPE 53 #undef BPF_MAP_TYPE 54 }; 55 56 /* 57 * If we're handed a bigger struct than we know of, ensure all the unknown bits 58 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 59 * we don't know about yet. 60 * 61 * There is a ToCToU between this function call and the following 62 * copy_from_user() call. However, this is not a concern since this function is 63 * meant to be a future-proofing of bits. 
64 */ 65 int bpf_check_uarg_tail_zero(void __user *uaddr, 66 size_t expected_size, 67 size_t actual_size) 68 { 69 unsigned char __user *addr; 70 unsigned char __user *end; 71 unsigned char val; 72 int err; 73 74 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 75 return -E2BIG; 76 77 if (unlikely(!access_ok(uaddr, actual_size))) 78 return -EFAULT; 79 80 if (actual_size <= expected_size) 81 return 0; 82 83 addr = uaddr + expected_size; 84 end = uaddr + actual_size; 85 86 for (; addr < end; addr++) { 87 err = get_user(val, addr); 88 if (err) 89 return err; 90 if (val) 91 return -E2BIG; 92 } 93 94 return 0; 95 } 96 97 const struct bpf_map_ops bpf_map_offload_ops = { 98 .map_alloc = bpf_map_offload_map_alloc, 99 .map_free = bpf_map_offload_map_free, 100 .map_check_btf = map_check_no_btf, 101 }; 102 103 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) 104 { 105 const struct bpf_map_ops *ops; 106 u32 type = attr->map_type; 107 struct bpf_map *map; 108 int err; 109 110 if (type >= ARRAY_SIZE(bpf_map_types)) 111 return ERR_PTR(-EINVAL); 112 type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types)); 113 ops = bpf_map_types[type]; 114 if (!ops) 115 return ERR_PTR(-EINVAL); 116 117 if (ops->map_alloc_check) { 118 err = ops->map_alloc_check(attr); 119 if (err) 120 return ERR_PTR(err); 121 } 122 if (attr->map_ifindex) 123 ops = &bpf_map_offload_ops; 124 map = ops->map_alloc(attr); 125 if (IS_ERR(map)) 126 return map; 127 map->ops = ops; 128 map->map_type = type; 129 return map; 130 } 131 132 static u32 bpf_map_value_size(struct bpf_map *map) 133 { 134 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 135 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 136 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 137 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 138 return round_up(map->value_size, 8) * num_possible_cpus(); 139 else if (IS_FD_MAP(map)) 140 return sizeof(u32); 141 else 142 return map->value_size; 143 } 144 145 static void maybe_wait_bpf_programs(struct bpf_map *map) 146 { 147 /* Wait for any running BPF programs to complete so that 148 * userspace, when we return to it, knows that all programs 149 * that could be running use the new map value. 
150 */ 151 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 152 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 153 synchronize_rcu(); 154 } 155 156 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key, 157 void *value, __u64 flags) 158 { 159 int err; 160 161 /* Need to create a kthread, thus must support schedule */ 162 if (bpf_map_is_dev_bound(map)) { 163 return bpf_map_offload_update_elem(map, key, value, flags); 164 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 165 map->map_type == BPF_MAP_TYPE_SOCKHASH || 166 map->map_type == BPF_MAP_TYPE_SOCKMAP || 167 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 168 return map->ops->map_update_elem(map, key, value, flags); 169 } else if (IS_FD_PROG_ARRAY(map)) { 170 return bpf_fd_array_map_update_elem(map, f.file, key, value, 171 flags); 172 } 173 174 /* must increment bpf_prog_active to avoid kprobe+bpf triggering from 175 * inside bpf map update or delete otherwise deadlocks are possible 176 */ 177 preempt_disable(); 178 __this_cpu_inc(bpf_prog_active); 179 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 180 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 181 err = bpf_percpu_hash_update(map, key, value, flags); 182 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 183 err = bpf_percpu_array_update(map, key, value, flags); 184 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 185 err = bpf_percpu_cgroup_storage_update(map, key, value, 186 flags); 187 } else if (IS_FD_ARRAY(map)) { 188 rcu_read_lock(); 189 err = bpf_fd_array_map_update_elem(map, f.file, key, value, 190 flags); 191 rcu_read_unlock(); 192 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 193 rcu_read_lock(); 194 err = bpf_fd_htab_map_update_elem(map, f.file, key, value, 195 flags); 196 rcu_read_unlock(); 197 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 198 /* rcu_read_lock() is not needed */ 199 err = bpf_fd_reuseport_array_update_elem(map, key, value, 200 flags); 201 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 202 map->map_type == BPF_MAP_TYPE_STACK) { 203 err = map->ops->map_push_elem(map, value, flags); 204 } else { 205 rcu_read_lock(); 206 err = map->ops->map_update_elem(map, key, value, flags); 207 rcu_read_unlock(); 208 } 209 __this_cpu_dec(bpf_prog_active); 210 preempt_enable(); 211 maybe_wait_bpf_programs(map); 212 213 return err; 214 } 215 216 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, 217 __u64 flags) 218 { 219 void *ptr; 220 int err; 221 222 if (bpf_map_is_dev_bound(map)) 223 return bpf_map_offload_lookup_elem(map, key, value); 224 225 preempt_disable(); 226 this_cpu_inc(bpf_prog_active); 227 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 228 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 229 err = bpf_percpu_hash_copy(map, key, value); 230 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 231 err = bpf_percpu_array_copy(map, key, value); 232 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 233 err = bpf_percpu_cgroup_storage_copy(map, key, value); 234 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 235 err = bpf_stackmap_copy(map, key, value); 236 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { 237 err = bpf_fd_array_map_lookup_elem(map, key, value); 238 } else if (IS_FD_HASH(map)) { 239 err = bpf_fd_htab_map_lookup_elem(map, key, value); 240 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 241 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 242 } else if (map->map_type == BPF_MAP_TYPE_QUEUE 
|| 243 map->map_type == BPF_MAP_TYPE_STACK) { 244 err = map->ops->map_peek_elem(map, value); 245 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 246 /* struct_ops map requires directly updating "value" */ 247 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); 248 } else { 249 rcu_read_lock(); 250 if (map->ops->map_lookup_elem_sys_only) 251 ptr = map->ops->map_lookup_elem_sys_only(map, key); 252 else 253 ptr = map->ops->map_lookup_elem(map, key); 254 if (IS_ERR(ptr)) { 255 err = PTR_ERR(ptr); 256 } else if (!ptr) { 257 err = -ENOENT; 258 } else { 259 err = 0; 260 if (flags & BPF_F_LOCK) 261 /* lock 'ptr' and copy everything but lock */ 262 copy_map_value_locked(map, value, ptr, true); 263 else 264 copy_map_value(map, value, ptr); 265 /* mask lock, since value wasn't zero inited */ 266 check_and_init_map_lock(map, value); 267 } 268 rcu_read_unlock(); 269 } 270 271 this_cpu_dec(bpf_prog_active); 272 preempt_enable(); 273 maybe_wait_bpf_programs(map); 274 275 return err; 276 } 277 278 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) 279 { 280 /* We really just want to fail instead of triggering OOM killer 281 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 282 * which is used for lower order allocation requests. 283 * 284 * It has been observed that higher order allocation requests done by 285 * vmalloc with __GFP_NORETRY being set might fail due to not trying 286 * to reclaim memory from the page cache, thus we set 287 * __GFP_RETRY_MAYFAIL to avoid such situations. 288 */ 289 290 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 291 void *area; 292 293 if (size >= SIZE_MAX) 294 return NULL; 295 296 /* kmalloc()'ed memory can't be mmap()'ed */ 297 if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 298 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, 299 numa_node); 300 if (area != NULL) 301 return area; 302 } 303 if (mmapable) { 304 BUG_ON(!PAGE_ALIGNED(size)); 305 return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL | 306 __GFP_RETRY_MAYFAIL | flags); 307 } 308 return __vmalloc_node_flags_caller(size, numa_node, 309 GFP_KERNEL | __GFP_RETRY_MAYFAIL | 310 flags, __builtin_return_address(0)); 311 } 312 313 void *bpf_map_area_alloc(u64 size, int numa_node) 314 { 315 return __bpf_map_area_alloc(size, numa_node, false); 316 } 317 318 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) 319 { 320 return __bpf_map_area_alloc(size, numa_node, true); 321 } 322 323 void bpf_map_area_free(void *area) 324 { 325 kvfree(area); 326 } 327 328 static u32 bpf_map_flags_retain_permanent(u32 flags) 329 { 330 /* Some map creation flags are not tied to the map object but 331 * rather to the map fd instead, so they have no meaning upon 332 * map object inspection since multiple file descriptors with 333 * different (access) properties can exist here. Thus, given 334 * this has zero meaning for the map itself, lets clear these 335 * from here. 
336 */ 337 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 338 } 339 340 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 341 { 342 map->map_type = attr->map_type; 343 map->key_size = attr->key_size; 344 map->value_size = attr->value_size; 345 map->max_entries = attr->max_entries; 346 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 347 map->numa_node = bpf_map_attr_numa_node(attr); 348 } 349 350 static int bpf_charge_memlock(struct user_struct *user, u32 pages) 351 { 352 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 353 354 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { 355 atomic_long_sub(pages, &user->locked_vm); 356 return -EPERM; 357 } 358 return 0; 359 } 360 361 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) 362 { 363 if (user) 364 atomic_long_sub(pages, &user->locked_vm); 365 } 366 367 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size) 368 { 369 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; 370 struct user_struct *user; 371 int ret; 372 373 if (size >= U32_MAX - PAGE_SIZE) 374 return -E2BIG; 375 376 user = get_current_user(); 377 ret = bpf_charge_memlock(user, pages); 378 if (ret) { 379 free_uid(user); 380 return ret; 381 } 382 383 mem->pages = pages; 384 mem->user = user; 385 386 return 0; 387 } 388 389 void bpf_map_charge_finish(struct bpf_map_memory *mem) 390 { 391 bpf_uncharge_memlock(mem->user, mem->pages); 392 free_uid(mem->user); 393 } 394 395 void bpf_map_charge_move(struct bpf_map_memory *dst, 396 struct bpf_map_memory *src) 397 { 398 *dst = *src; 399 400 /* Make sure src will not be used for the redundant uncharging. */ 401 memset(src, 0, sizeof(struct bpf_map_memory)); 402 } 403 404 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) 405 { 406 int ret; 407 408 ret = bpf_charge_memlock(map->memory.user, pages); 409 if (ret) 410 return ret; 411 map->memory.pages += pages; 412 return ret; 413 } 414 415 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) 416 { 417 bpf_uncharge_memlock(map->memory.user, pages); 418 map->memory.pages -= pages; 419 } 420 421 static int bpf_map_alloc_id(struct bpf_map *map) 422 { 423 int id; 424 425 idr_preload(GFP_KERNEL); 426 spin_lock_bh(&map_idr_lock); 427 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 428 if (id > 0) 429 map->id = id; 430 spin_unlock_bh(&map_idr_lock); 431 idr_preload_end(); 432 433 if (WARN_ON_ONCE(!id)) 434 return -ENOSPC; 435 436 return id > 0 ? 0 : id; 437 } 438 439 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) 440 { 441 unsigned long flags; 442 443 /* Offloaded maps are removed from the IDR store when their device 444 * disappears - even if someone holds an fd to them they are unusable, 445 * the memory is gone, all ops will fail; they are simply waiting for 446 * refcnt to drop to be freed. 
447 */ 448 if (!map->id) 449 return; 450 451 if (do_idr_lock) 452 spin_lock_irqsave(&map_idr_lock, flags); 453 else 454 __acquire(&map_idr_lock); 455 456 idr_remove(&map_idr, map->id); 457 map->id = 0; 458 459 if (do_idr_lock) 460 spin_unlock_irqrestore(&map_idr_lock, flags); 461 else 462 __release(&map_idr_lock); 463 } 464 465 /* called from workqueue */ 466 static void bpf_map_free_deferred(struct work_struct *work) 467 { 468 struct bpf_map *map = container_of(work, struct bpf_map, work); 469 struct bpf_map_memory mem; 470 471 bpf_map_charge_move(&mem, &map->memory); 472 security_bpf_map_free(map); 473 /* implementation dependent freeing */ 474 map->ops->map_free(map); 475 bpf_map_charge_finish(&mem); 476 } 477 478 static void bpf_map_put_uref(struct bpf_map *map) 479 { 480 if (atomic64_dec_and_test(&map->usercnt)) { 481 if (map->ops->map_release_uref) 482 map->ops->map_release_uref(map); 483 } 484 } 485 486 /* decrement map refcnt and schedule it for freeing via workqueue 487 * (unrelying map implementation ops->map_free() might sleep) 488 */ 489 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) 490 { 491 if (atomic64_dec_and_test(&map->refcnt)) { 492 /* bpf_map_free_id() must be called first */ 493 bpf_map_free_id(map, do_idr_lock); 494 btf_put(map->btf); 495 INIT_WORK(&map->work, bpf_map_free_deferred); 496 schedule_work(&map->work); 497 } 498 } 499 500 void bpf_map_put(struct bpf_map *map) 501 { 502 __bpf_map_put(map, true); 503 } 504 EXPORT_SYMBOL_GPL(bpf_map_put); 505 506 void bpf_map_put_with_uref(struct bpf_map *map) 507 { 508 bpf_map_put_uref(map); 509 bpf_map_put(map); 510 } 511 512 static int bpf_map_release(struct inode *inode, struct file *filp) 513 { 514 struct bpf_map *map = filp->private_data; 515 516 if (map->ops->map_release) 517 map->ops->map_release(map, filp); 518 519 bpf_map_put_with_uref(map); 520 return 0; 521 } 522 523 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 524 { 525 fmode_t mode = f.file->f_mode; 526 527 /* Our file permissions may have been overridden by global 528 * map permissions facing syscall side. 529 */ 530 if (READ_ONCE(map->frozen)) 531 mode &= ~FMODE_CAN_WRITE; 532 return mode; 533 } 534 535 #ifdef CONFIG_PROC_FS 536 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 537 { 538 const struct bpf_map *map = filp->private_data; 539 const struct bpf_array *array; 540 u32 type = 0, jited = 0; 541 542 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { 543 array = container_of(map, struct bpf_array, map); 544 type = array->aux->type; 545 jited = array->aux->jited; 546 } 547 548 seq_printf(m, 549 "map_type:\t%u\n" 550 "key_size:\t%u\n" 551 "value_size:\t%u\n" 552 "max_entries:\t%u\n" 553 "map_flags:\t%#x\n" 554 "memlock:\t%llu\n" 555 "map_id:\t%u\n" 556 "frozen:\t%u\n", 557 map->map_type, 558 map->key_size, 559 map->value_size, 560 map->max_entries, 561 map->map_flags, 562 map->memory.pages * 1ULL << PAGE_SHIFT, 563 map->id, 564 READ_ONCE(map->frozen)); 565 if (type) { 566 seq_printf(m, "owner_prog_type:\t%u\n", type); 567 seq_printf(m, "owner_jited:\t%u\n", jited); 568 } 569 } 570 #endif 571 572 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 573 loff_t *ppos) 574 { 575 /* We need this handler such that alloc_file() enables 576 * f_mode with FMODE_CAN_READ. 
577 */ 578 return -EINVAL; 579 } 580 581 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 582 size_t siz, loff_t *ppos) 583 { 584 /* We need this handler such that alloc_file() enables 585 * f_mode with FMODE_CAN_WRITE. 586 */ 587 return -EINVAL; 588 } 589 590 /* called for any extra memory-mapped regions (except initial) */ 591 static void bpf_map_mmap_open(struct vm_area_struct *vma) 592 { 593 struct bpf_map *map = vma->vm_file->private_data; 594 595 bpf_map_inc_with_uref(map); 596 597 if (vma->vm_flags & VM_WRITE) { 598 mutex_lock(&map->freeze_mutex); 599 map->writecnt++; 600 mutex_unlock(&map->freeze_mutex); 601 } 602 } 603 604 /* called for all unmapped memory region (including initial) */ 605 static void bpf_map_mmap_close(struct vm_area_struct *vma) 606 { 607 struct bpf_map *map = vma->vm_file->private_data; 608 609 if (vma->vm_flags & VM_WRITE) { 610 mutex_lock(&map->freeze_mutex); 611 map->writecnt--; 612 mutex_unlock(&map->freeze_mutex); 613 } 614 615 bpf_map_put_with_uref(map); 616 } 617 618 static const struct vm_operations_struct bpf_map_default_vmops = { 619 .open = bpf_map_mmap_open, 620 .close = bpf_map_mmap_close, 621 }; 622 623 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) 624 { 625 struct bpf_map *map = filp->private_data; 626 int err; 627 628 if (!map->ops->map_mmap || map_value_has_spin_lock(map)) 629 return -ENOTSUPP; 630 631 if (!(vma->vm_flags & VM_SHARED)) 632 return -EINVAL; 633 634 mutex_lock(&map->freeze_mutex); 635 636 if ((vma->vm_flags & VM_WRITE) && map->frozen) { 637 err = -EPERM; 638 goto out; 639 } 640 641 /* set default open/close callbacks */ 642 vma->vm_ops = &bpf_map_default_vmops; 643 vma->vm_private_data = map; 644 645 err = map->ops->map_mmap(map, vma); 646 if (err) 647 goto out; 648 649 bpf_map_inc_with_uref(map); 650 651 if (vma->vm_flags & VM_WRITE) 652 map->writecnt++; 653 out: 654 mutex_unlock(&map->freeze_mutex); 655 return err; 656 } 657 658 const struct file_operations bpf_map_fops = { 659 #ifdef CONFIG_PROC_FS 660 .show_fdinfo = bpf_map_show_fdinfo, 661 #endif 662 .release = bpf_map_release, 663 .read = bpf_dummy_read, 664 .write = bpf_dummy_write, 665 .mmap = bpf_map_mmap, 666 }; 667 668 int bpf_map_new_fd(struct bpf_map *map, int flags) 669 { 670 int ret; 671 672 ret = security_bpf_map(map, OPEN_FMODE(flags)); 673 if (ret < 0) 674 return ret; 675 676 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 677 flags | O_CLOEXEC); 678 } 679 680 int bpf_get_file_flag(int flags) 681 { 682 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) 683 return -EINVAL; 684 if (flags & BPF_F_RDONLY) 685 return O_RDONLY; 686 if (flags & BPF_F_WRONLY) 687 return O_WRONLY; 688 return O_RDWR; 689 } 690 691 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 692 #define CHECK_ATTR(CMD) \ 693 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 694 sizeof(attr->CMD##_LAST_FIELD), 0, \ 695 sizeof(*attr) - \ 696 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 697 sizeof(attr->CMD##_LAST_FIELD)) != NULL 698 699 /* dst and src must have at least "size" number of bytes. 700 * Return strlen on success and < 0 on error. 701 */ 702 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 703 { 704 const char *end = src + size; 705 const char *orig_src = src; 706 707 memset(dst, 0, size); 708 /* Copy all isalnum(), '_' and '.' chars. 
*/ 709 while (src < end && *src) { 710 if (!isalnum(*src) && 711 *src != '_' && *src != '.') 712 return -EINVAL; 713 *dst++ = *src++; 714 } 715 716 /* No '\0' found in "size" number of bytes */ 717 if (src == end) 718 return -EINVAL; 719 720 return src - orig_src; 721 } 722 723 int map_check_no_btf(const struct bpf_map *map, 724 const struct btf *btf, 725 const struct btf_type *key_type, 726 const struct btf_type *value_type) 727 { 728 return -ENOTSUPP; 729 } 730 731 static int map_check_btf(struct bpf_map *map, const struct btf *btf, 732 u32 btf_key_id, u32 btf_value_id) 733 { 734 const struct btf_type *key_type, *value_type; 735 u32 key_size, value_size; 736 int ret = 0; 737 738 /* Some maps allow key to be unspecified. */ 739 if (btf_key_id) { 740 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 741 if (!key_type || key_size != map->key_size) 742 return -EINVAL; 743 } else { 744 key_type = btf_type_by_id(btf, 0); 745 if (!map->ops->map_check_btf) 746 return -EINVAL; 747 } 748 749 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 750 if (!value_type || value_size != map->value_size) 751 return -EINVAL; 752 753 map->spin_lock_off = btf_find_spin_lock(btf, value_type); 754 755 if (map_value_has_spin_lock(map)) { 756 if (map->map_flags & BPF_F_RDONLY_PROG) 757 return -EACCES; 758 if (map->map_type != BPF_MAP_TYPE_HASH && 759 map->map_type != BPF_MAP_TYPE_ARRAY && 760 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 761 map->map_type != BPF_MAP_TYPE_SK_STORAGE) 762 return -ENOTSUPP; 763 if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > 764 map->value_size) { 765 WARN_ONCE(1, 766 "verifier bug spin_lock_off %d value_size %d\n", 767 map->spin_lock_off, map->value_size); 768 return -EFAULT; 769 } 770 } 771 772 if (map->ops->map_check_btf) 773 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 774 775 return ret; 776 } 777 778 #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id 779 /* called via syscall */ 780 static int map_create(union bpf_attr *attr) 781 { 782 int numa_node = bpf_map_attr_numa_node(attr); 783 struct bpf_map_memory mem; 784 struct bpf_map *map; 785 int f_flags; 786 int err; 787 788 err = CHECK_ATTR(BPF_MAP_CREATE); 789 if (err) 790 return -EINVAL; 791 792 if (attr->btf_vmlinux_value_type_id) { 793 if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS || 794 attr->btf_key_type_id || attr->btf_value_type_id) 795 return -EINVAL; 796 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { 797 return -EINVAL; 798 } 799 800 f_flags = bpf_get_file_flag(attr->map_flags); 801 if (f_flags < 0) 802 return f_flags; 803 804 if (numa_node != NUMA_NO_NODE && 805 ((unsigned int)numa_node >= nr_node_ids || 806 !node_online(numa_node))) 807 return -EINVAL; 808 809 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ 810 map = find_and_alloc_map(attr); 811 if (IS_ERR(map)) 812 return PTR_ERR(map); 813 814 err = bpf_obj_name_cpy(map->name, attr->map_name, 815 sizeof(attr->map_name)); 816 if (err < 0) 817 goto free_map; 818 819 atomic64_set(&map->refcnt, 1); 820 atomic64_set(&map->usercnt, 1); 821 mutex_init(&map->freeze_mutex); 822 823 map->spin_lock_off = -EINVAL; 824 if (attr->btf_key_type_id || attr->btf_value_type_id || 825 /* Even the map's value is a kernel's struct, 826 * the bpf_prog.o must have BTF to begin with 827 * to figure out the corresponding kernel's 828 * counter part. Thus, attr->btf_fd has 829 * to be valid also. 
830 */ 831 attr->btf_vmlinux_value_type_id) { 832 struct btf *btf; 833 834 btf = btf_get_by_fd(attr->btf_fd); 835 if (IS_ERR(btf)) { 836 err = PTR_ERR(btf); 837 goto free_map; 838 } 839 map->btf = btf; 840 841 if (attr->btf_value_type_id) { 842 err = map_check_btf(map, btf, attr->btf_key_type_id, 843 attr->btf_value_type_id); 844 if (err) 845 goto free_map; 846 } 847 848 map->btf_key_type_id = attr->btf_key_type_id; 849 map->btf_value_type_id = attr->btf_value_type_id; 850 map->btf_vmlinux_value_type_id = 851 attr->btf_vmlinux_value_type_id; 852 } 853 854 err = security_bpf_map_alloc(map); 855 if (err) 856 goto free_map; 857 858 err = bpf_map_alloc_id(map); 859 if (err) 860 goto free_map_sec; 861 862 err = bpf_map_new_fd(map, f_flags); 863 if (err < 0) { 864 /* failed to allocate fd. 865 * bpf_map_put_with_uref() is needed because the above 866 * bpf_map_alloc_id() has published the map 867 * to the userspace and the userspace may 868 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 869 */ 870 bpf_map_put_with_uref(map); 871 return err; 872 } 873 874 return err; 875 876 free_map_sec: 877 security_bpf_map_free(map); 878 free_map: 879 btf_put(map->btf); 880 bpf_map_charge_move(&mem, &map->memory); 881 map->ops->map_free(map); 882 bpf_map_charge_finish(&mem); 883 return err; 884 } 885 886 /* if error is returned, fd is released. 887 * On success caller should complete fd access with matching fdput() 888 */ 889 struct bpf_map *__bpf_map_get(struct fd f) 890 { 891 if (!f.file) 892 return ERR_PTR(-EBADF); 893 if (f.file->f_op != &bpf_map_fops) { 894 fdput(f); 895 return ERR_PTR(-EINVAL); 896 } 897 898 return f.file->private_data; 899 } 900 901 void bpf_map_inc(struct bpf_map *map) 902 { 903 atomic64_inc(&map->refcnt); 904 } 905 EXPORT_SYMBOL_GPL(bpf_map_inc); 906 907 void bpf_map_inc_with_uref(struct bpf_map *map) 908 { 909 atomic64_inc(&map->refcnt); 910 atomic64_inc(&map->usercnt); 911 } 912 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); 913 914 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 915 { 916 struct fd f = fdget(ufd); 917 struct bpf_map *map; 918 919 map = __bpf_map_get(f); 920 if (IS_ERR(map)) 921 return map; 922 923 bpf_map_inc_with_uref(map); 924 fdput(f); 925 926 return map; 927 } 928 929 /* map_idr_lock should have been held */ 930 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 931 { 932 int refold; 933 934 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); 935 if (!refold) 936 return ERR_PTR(-ENOENT); 937 if (uref) 938 atomic64_inc(&map->usercnt); 939 940 return map; 941 } 942 943 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) 944 { 945 spin_lock_bh(&map_idr_lock); 946 map = __bpf_map_inc_not_zero(map, false); 947 spin_unlock_bh(&map_idr_lock); 948 949 return map; 950 } 951 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 952 953 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 954 { 955 return -ENOTSUPP; 956 } 957 958 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 959 { 960 if (key_size) 961 return memdup_user(ukey, key_size); 962 963 if (ukey) 964 return ERR_PTR(-EINVAL); 965 966 return NULL; 967 } 968 969 /* last field in 'union bpf_attr' used by this command */ 970 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 971 972 static int map_lookup_elem(union bpf_attr *attr) 973 { 974 void __user *ukey = u64_to_user_ptr(attr->key); 975 void __user *uvalue = u64_to_user_ptr(attr->value); 976 int ufd = attr->map_fd; 977 struct bpf_map *map; 978 void *key, *value; 979 u32 value_size; 980 struct fd f; 981 int err; 
982 983 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 984 return -EINVAL; 985 986 if (attr->flags & ~BPF_F_LOCK) 987 return -EINVAL; 988 989 f = fdget(ufd); 990 map = __bpf_map_get(f); 991 if (IS_ERR(map)) 992 return PTR_ERR(map); 993 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 994 err = -EPERM; 995 goto err_put; 996 } 997 998 if ((attr->flags & BPF_F_LOCK) && 999 !map_value_has_spin_lock(map)) { 1000 err = -EINVAL; 1001 goto err_put; 1002 } 1003 1004 key = __bpf_copy_key(ukey, map->key_size); 1005 if (IS_ERR(key)) { 1006 err = PTR_ERR(key); 1007 goto err_put; 1008 } 1009 1010 value_size = bpf_map_value_size(map); 1011 1012 err = -ENOMEM; 1013 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1014 if (!value) 1015 goto free_key; 1016 1017 err = bpf_map_copy_value(map, key, value, attr->flags); 1018 if (err) 1019 goto free_value; 1020 1021 err = -EFAULT; 1022 if (copy_to_user(uvalue, value, value_size) != 0) 1023 goto free_value; 1024 1025 err = 0; 1026 1027 free_value: 1028 kfree(value); 1029 free_key: 1030 kfree(key); 1031 err_put: 1032 fdput(f); 1033 return err; 1034 } 1035 1036 1037 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 1038 1039 static int map_update_elem(union bpf_attr *attr) 1040 { 1041 void __user *ukey = u64_to_user_ptr(attr->key); 1042 void __user *uvalue = u64_to_user_ptr(attr->value); 1043 int ufd = attr->map_fd; 1044 struct bpf_map *map; 1045 void *key, *value; 1046 u32 value_size; 1047 struct fd f; 1048 int err; 1049 1050 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 1051 return -EINVAL; 1052 1053 f = fdget(ufd); 1054 map = __bpf_map_get(f); 1055 if (IS_ERR(map)) 1056 return PTR_ERR(map); 1057 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1058 err = -EPERM; 1059 goto err_put; 1060 } 1061 1062 if ((attr->flags & BPF_F_LOCK) && 1063 !map_value_has_spin_lock(map)) { 1064 err = -EINVAL; 1065 goto err_put; 1066 } 1067 1068 key = __bpf_copy_key(ukey, map->key_size); 1069 if (IS_ERR(key)) { 1070 err = PTR_ERR(key); 1071 goto err_put; 1072 } 1073 1074 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 1075 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 1076 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 1077 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 1078 value_size = round_up(map->value_size, 8) * num_possible_cpus(); 1079 else 1080 value_size = map->value_size; 1081 1082 err = -ENOMEM; 1083 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1084 if (!value) 1085 goto free_key; 1086 1087 err = -EFAULT; 1088 if (copy_from_user(value, uvalue, value_size) != 0) 1089 goto free_value; 1090 1091 err = bpf_map_update_value(map, f, key, value, attr->flags); 1092 1093 free_value: 1094 kfree(value); 1095 free_key: 1096 kfree(key); 1097 err_put: 1098 fdput(f); 1099 return err; 1100 } 1101 1102 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 1103 1104 static int map_delete_elem(union bpf_attr *attr) 1105 { 1106 void __user *ukey = u64_to_user_ptr(attr->key); 1107 int ufd = attr->map_fd; 1108 struct bpf_map *map; 1109 struct fd f; 1110 void *key; 1111 int err; 1112 1113 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 1114 return -EINVAL; 1115 1116 f = fdget(ufd); 1117 map = __bpf_map_get(f); 1118 if (IS_ERR(map)) 1119 return PTR_ERR(map); 1120 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1121 err = -EPERM; 1122 goto err_put; 1123 } 1124 1125 key = __bpf_copy_key(ukey, map->key_size); 1126 if (IS_ERR(key)) { 1127 err = PTR_ERR(key); 1128 goto err_put; 1129 } 1130 1131 if (bpf_map_is_dev_bound(map)) { 1132 err = bpf_map_offload_delete_elem(map, key); 1133 goto out; 1134 } else if 
(IS_FD_PROG_ARRAY(map) || 1135 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1136 /* These maps require sleepable context */ 1137 err = map->ops->map_delete_elem(map, key); 1138 goto out; 1139 } 1140 1141 preempt_disable(); 1142 __this_cpu_inc(bpf_prog_active); 1143 rcu_read_lock(); 1144 err = map->ops->map_delete_elem(map, key); 1145 rcu_read_unlock(); 1146 __this_cpu_dec(bpf_prog_active); 1147 preempt_enable(); 1148 maybe_wait_bpf_programs(map); 1149 out: 1150 kfree(key); 1151 err_put: 1152 fdput(f); 1153 return err; 1154 } 1155 1156 /* last field in 'union bpf_attr' used by this command */ 1157 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1158 1159 static int map_get_next_key(union bpf_attr *attr) 1160 { 1161 void __user *ukey = u64_to_user_ptr(attr->key); 1162 void __user *unext_key = u64_to_user_ptr(attr->next_key); 1163 int ufd = attr->map_fd; 1164 struct bpf_map *map; 1165 void *key, *next_key; 1166 struct fd f; 1167 int err; 1168 1169 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1170 return -EINVAL; 1171 1172 f = fdget(ufd); 1173 map = __bpf_map_get(f); 1174 if (IS_ERR(map)) 1175 return PTR_ERR(map); 1176 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 1177 err = -EPERM; 1178 goto err_put; 1179 } 1180 1181 if (ukey) { 1182 key = __bpf_copy_key(ukey, map->key_size); 1183 if (IS_ERR(key)) { 1184 err = PTR_ERR(key); 1185 goto err_put; 1186 } 1187 } else { 1188 key = NULL; 1189 } 1190 1191 err = -ENOMEM; 1192 next_key = kmalloc(map->key_size, GFP_USER); 1193 if (!next_key) 1194 goto free_key; 1195 1196 if (bpf_map_is_dev_bound(map)) { 1197 err = bpf_map_offload_get_next_key(map, key, next_key); 1198 goto out; 1199 } 1200 1201 rcu_read_lock(); 1202 err = map->ops->map_get_next_key(map, key, next_key); 1203 rcu_read_unlock(); 1204 out: 1205 if (err) 1206 goto free_next_key; 1207 1208 err = -EFAULT; 1209 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1210 goto free_next_key; 1211 1212 err = 0; 1213 1214 free_next_key: 1215 kfree(next_key); 1216 free_key: 1217 kfree(key); 1218 err_put: 1219 fdput(f); 1220 return err; 1221 } 1222 1223 int generic_map_delete_batch(struct bpf_map *map, 1224 const union bpf_attr *attr, 1225 union bpf_attr __user *uattr) 1226 { 1227 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1228 u32 cp, max_count; 1229 int err = 0; 1230 void *key; 1231 1232 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1233 return -EINVAL; 1234 1235 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1236 !map_value_has_spin_lock(map)) { 1237 return -EINVAL; 1238 } 1239 1240 max_count = attr->batch.count; 1241 if (!max_count) 1242 return 0; 1243 1244 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1245 if (!key) 1246 return -ENOMEM; 1247 1248 for (cp = 0; cp < max_count; cp++) { 1249 err = -EFAULT; 1250 if (copy_from_user(key, keys + cp * map->key_size, 1251 map->key_size)) 1252 break; 1253 1254 if (bpf_map_is_dev_bound(map)) { 1255 err = bpf_map_offload_delete_elem(map, key); 1256 break; 1257 } 1258 1259 preempt_disable(); 1260 __this_cpu_inc(bpf_prog_active); 1261 rcu_read_lock(); 1262 err = map->ops->map_delete_elem(map, key); 1263 rcu_read_unlock(); 1264 __this_cpu_dec(bpf_prog_active); 1265 preempt_enable(); 1266 maybe_wait_bpf_programs(map); 1267 if (err) 1268 break; 1269 } 1270 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1271 err = -EFAULT; 1272 1273 kfree(key); 1274 return err; 1275 } 1276 1277 int generic_map_update_batch(struct bpf_map *map, 1278 const union bpf_attr *attr, 1279 union bpf_attr __user *uattr) 1280 { 1281 void __user *values = 
u64_to_user_ptr(attr->batch.values); 1282 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1283 u32 value_size, cp, max_count; 1284 int ufd = attr->map_fd; 1285 void *key, *value; 1286 struct fd f; 1287 int err = 0; 1288 1289 f = fdget(ufd); 1290 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1291 return -EINVAL; 1292 1293 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1294 !map_value_has_spin_lock(map)) { 1295 return -EINVAL; 1296 } 1297 1298 value_size = bpf_map_value_size(map); 1299 1300 max_count = attr->batch.count; 1301 if (!max_count) 1302 return 0; 1303 1304 key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1305 if (!key) 1306 return -ENOMEM; 1307 1308 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1309 if (!value) { 1310 kfree(key); 1311 return -ENOMEM; 1312 } 1313 1314 for (cp = 0; cp < max_count; cp++) { 1315 err = -EFAULT; 1316 if (copy_from_user(key, keys + cp * map->key_size, 1317 map->key_size) || 1318 copy_from_user(value, values + cp * value_size, value_size)) 1319 break; 1320 1321 err = bpf_map_update_value(map, f, key, value, 1322 attr->batch.elem_flags); 1323 1324 if (err) 1325 break; 1326 } 1327 1328 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1329 err = -EFAULT; 1330 1331 kfree(value); 1332 kfree(key); 1333 return err; 1334 } 1335 1336 #define MAP_LOOKUP_RETRIES 3 1337 1338 int generic_map_lookup_batch(struct bpf_map *map, 1339 const union bpf_attr *attr, 1340 union bpf_attr __user *uattr) 1341 { 1342 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); 1343 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1344 void __user *values = u64_to_user_ptr(attr->batch.values); 1345 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1346 void *buf, *buf_prevkey, *prev_key, *key, *value; 1347 int err, retry = MAP_LOOKUP_RETRIES; 1348 u32 value_size, cp, max_count; 1349 1350 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1351 return -EINVAL; 1352 1353 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1354 !map_value_has_spin_lock(map)) 1355 return -EINVAL; 1356 1357 value_size = bpf_map_value_size(map); 1358 1359 max_count = attr->batch.count; 1360 if (!max_count) 1361 return 0; 1362 1363 if (put_user(0, &uattr->batch.count)) 1364 return -EFAULT; 1365 1366 buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1367 if (!buf_prevkey) 1368 return -ENOMEM; 1369 1370 buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); 1371 if (!buf) { 1372 kvfree(buf_prevkey); 1373 return -ENOMEM; 1374 } 1375 1376 err = -EFAULT; 1377 prev_key = NULL; 1378 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) 1379 goto free_buf; 1380 key = buf; 1381 value = key + map->key_size; 1382 if (ubatch) 1383 prev_key = buf_prevkey; 1384 1385 for (cp = 0; cp < max_count;) { 1386 rcu_read_lock(); 1387 err = map->ops->map_get_next_key(map, prev_key, key); 1388 rcu_read_unlock(); 1389 if (err) 1390 break; 1391 err = bpf_map_copy_value(map, key, value, 1392 attr->batch.elem_flags); 1393 1394 if (err == -ENOENT) { 1395 if (retry) { 1396 retry--; 1397 continue; 1398 } 1399 err = -EINTR; 1400 break; 1401 } 1402 1403 if (err) 1404 goto free_buf; 1405 1406 if (copy_to_user(keys + cp * map->key_size, key, 1407 map->key_size)) { 1408 err = -EFAULT; 1409 goto free_buf; 1410 } 1411 if (copy_to_user(values + cp * value_size, value, value_size)) { 1412 err = -EFAULT; 1413 goto free_buf; 1414 } 1415 1416 if (!prev_key) 1417 prev_key = buf_prevkey; 1418 1419 swap(prev_key, key); 1420 retry = MAP_LOOKUP_RETRIES; 1421 cp++; 1422 } 1423 1424 if (err == 
-EFAULT) 1425 goto free_buf; 1426 1427 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || 1428 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) 1429 err = -EFAULT; 1430 1431 free_buf: 1432 kfree(buf_prevkey); 1433 kfree(buf); 1434 return err; 1435 } 1436 1437 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value 1438 1439 static int map_lookup_and_delete_elem(union bpf_attr *attr) 1440 { 1441 void __user *ukey = u64_to_user_ptr(attr->key); 1442 void __user *uvalue = u64_to_user_ptr(attr->value); 1443 int ufd = attr->map_fd; 1444 struct bpf_map *map; 1445 void *key, *value; 1446 u32 value_size; 1447 struct fd f; 1448 int err; 1449 1450 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 1451 return -EINVAL; 1452 1453 f = fdget(ufd); 1454 map = __bpf_map_get(f); 1455 if (IS_ERR(map)) 1456 return PTR_ERR(map); 1457 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1458 err = -EPERM; 1459 goto err_put; 1460 } 1461 1462 key = __bpf_copy_key(ukey, map->key_size); 1463 if (IS_ERR(key)) { 1464 err = PTR_ERR(key); 1465 goto err_put; 1466 } 1467 1468 value_size = map->value_size; 1469 1470 err = -ENOMEM; 1471 value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); 1472 if (!value) 1473 goto free_key; 1474 1475 if (map->map_type == BPF_MAP_TYPE_QUEUE || 1476 map->map_type == BPF_MAP_TYPE_STACK) { 1477 err = map->ops->map_pop_elem(map, value); 1478 } else { 1479 err = -ENOTSUPP; 1480 } 1481 1482 if (err) 1483 goto free_value; 1484 1485 if (copy_to_user(uvalue, value, value_size) != 0) 1486 goto free_value; 1487 1488 err = 0; 1489 1490 free_value: 1491 kfree(value); 1492 free_key: 1493 kfree(key); 1494 err_put: 1495 fdput(f); 1496 return err; 1497 } 1498 1499 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 1500 1501 static int map_freeze(const union bpf_attr *attr) 1502 { 1503 int err = 0, ufd = attr->map_fd; 1504 struct bpf_map *map; 1505 struct fd f; 1506 1507 if (CHECK_ATTR(BPF_MAP_FREEZE)) 1508 return -EINVAL; 1509 1510 f = fdget(ufd); 1511 map = __bpf_map_get(f); 1512 if (IS_ERR(map)) 1513 return PTR_ERR(map); 1514 1515 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1516 fdput(f); 1517 return -ENOTSUPP; 1518 } 1519 1520 mutex_lock(&map->freeze_mutex); 1521 1522 if (map->writecnt) { 1523 err = -EBUSY; 1524 goto err_put; 1525 } 1526 if (READ_ONCE(map->frozen)) { 1527 err = -EBUSY; 1528 goto err_put; 1529 } 1530 if (!capable(CAP_SYS_ADMIN)) { 1531 err = -EPERM; 1532 goto err_put; 1533 } 1534 1535 WRITE_ONCE(map->frozen, true); 1536 err_put: 1537 mutex_unlock(&map->freeze_mutex); 1538 fdput(f); 1539 return err; 1540 } 1541 1542 static const struct bpf_prog_ops * const bpf_prog_types[] = { 1543 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1544 [_id] = & _name ## _prog_ops, 1545 #define BPF_MAP_TYPE(_id, _ops) 1546 #include <linux/bpf_types.h> 1547 #undef BPF_PROG_TYPE 1548 #undef BPF_MAP_TYPE 1549 }; 1550 1551 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 1552 { 1553 const struct bpf_prog_ops *ops; 1554 1555 if (type >= ARRAY_SIZE(bpf_prog_types)) 1556 return -EINVAL; 1557 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 1558 ops = bpf_prog_types[type]; 1559 if (!ops) 1560 return -EINVAL; 1561 1562 if (!bpf_prog_is_dev_bound(prog->aux)) 1563 prog->aux->ops = ops; 1564 else 1565 prog->aux->ops = &bpf_offload_prog_ops; 1566 prog->type = type; 1567 return 0; 1568 } 1569 1570 enum bpf_audit { 1571 BPF_AUDIT_LOAD, 1572 BPF_AUDIT_UNLOAD, 1573 BPF_AUDIT_MAX, 1574 }; 1575 1576 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 1577 
[BPF_AUDIT_LOAD] = "LOAD", 1578 [BPF_AUDIT_UNLOAD] = "UNLOAD", 1579 }; 1580 1581 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 1582 { 1583 struct audit_context *ctx = NULL; 1584 struct audit_buffer *ab; 1585 1586 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 1587 return; 1588 if (audit_enabled == AUDIT_OFF) 1589 return; 1590 if (op == BPF_AUDIT_LOAD) 1591 ctx = audit_context(); 1592 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 1593 if (unlikely(!ab)) 1594 return; 1595 audit_log_format(ab, "prog-id=%u op=%s", 1596 prog->aux->id, bpf_audit_str[op]); 1597 audit_log_end(ab); 1598 } 1599 1600 int __bpf_prog_charge(struct user_struct *user, u32 pages) 1601 { 1602 unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1603 unsigned long user_bufs; 1604 1605 if (user) { 1606 user_bufs = atomic_long_add_return(pages, &user->locked_vm); 1607 if (user_bufs > memlock_limit) { 1608 atomic_long_sub(pages, &user->locked_vm); 1609 return -EPERM; 1610 } 1611 } 1612 1613 return 0; 1614 } 1615 1616 void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1617 { 1618 if (user) 1619 atomic_long_sub(pages, &user->locked_vm); 1620 } 1621 1622 static int bpf_prog_charge_memlock(struct bpf_prog *prog) 1623 { 1624 struct user_struct *user = get_current_user(); 1625 int ret; 1626 1627 ret = __bpf_prog_charge(user, prog->pages); 1628 if (ret) { 1629 free_uid(user); 1630 return ret; 1631 } 1632 1633 prog->aux->user = user; 1634 return 0; 1635 } 1636 1637 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog) 1638 { 1639 struct user_struct *user = prog->aux->user; 1640 1641 __bpf_prog_uncharge(user, prog->pages); 1642 free_uid(user); 1643 } 1644 1645 static int bpf_prog_alloc_id(struct bpf_prog *prog) 1646 { 1647 int id; 1648 1649 idr_preload(GFP_KERNEL); 1650 spin_lock_bh(&prog_idr_lock); 1651 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 1652 if (id > 0) 1653 prog->aux->id = id; 1654 spin_unlock_bh(&prog_idr_lock); 1655 idr_preload_end(); 1656 1657 /* id is in [1, INT_MAX) */ 1658 if (WARN_ON_ONCE(!id)) 1659 return -ENOSPC; 1660 1661 return id > 0 ? 0 : id; 1662 } 1663 1664 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) 1665 { 1666 /* cBPF to eBPF migrations are currently not in the idr store. 1667 * Offloaded programs are removed from the store when their device 1668 * disappears - even if someone grabs an fd to them they are unusable, 1669 * simply waiting for refcnt to drop to be freed. 
1670 */ 1671 if (!prog->aux->id) 1672 return; 1673 1674 if (do_idr_lock) 1675 spin_lock_bh(&prog_idr_lock); 1676 else 1677 __acquire(&prog_idr_lock); 1678 1679 idr_remove(&prog_idr, prog->aux->id); 1680 prog->aux->id = 0; 1681 1682 if (do_idr_lock) 1683 spin_unlock_bh(&prog_idr_lock); 1684 else 1685 __release(&prog_idr_lock); 1686 } 1687 1688 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 1689 { 1690 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 1691 1692 kvfree(aux->func_info); 1693 kfree(aux->func_info_aux); 1694 bpf_prog_uncharge_memlock(aux->prog); 1695 security_bpf_prog_free(aux); 1696 bpf_prog_free(aux->prog); 1697 } 1698 1699 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 1700 { 1701 bpf_prog_kallsyms_del_all(prog); 1702 btf_put(prog->aux->btf); 1703 bpf_prog_free_linfo(prog); 1704 1705 if (deferred) 1706 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 1707 else 1708 __bpf_prog_put_rcu(&prog->aux->rcu); 1709 } 1710 1711 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) 1712 { 1713 if (atomic64_dec_and_test(&prog->aux->refcnt)) { 1714 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 1715 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 1716 /* bpf_prog_free_id() must be called first */ 1717 bpf_prog_free_id(prog, do_idr_lock); 1718 __bpf_prog_put_noref(prog, true); 1719 } 1720 } 1721 1722 void bpf_prog_put(struct bpf_prog *prog) 1723 { 1724 __bpf_prog_put(prog, true); 1725 } 1726 EXPORT_SYMBOL_GPL(bpf_prog_put); 1727 1728 static int bpf_prog_release(struct inode *inode, struct file *filp) 1729 { 1730 struct bpf_prog *prog = filp->private_data; 1731 1732 bpf_prog_put(prog); 1733 return 0; 1734 } 1735 1736 static void bpf_prog_get_stats(const struct bpf_prog *prog, 1737 struct bpf_prog_stats *stats) 1738 { 1739 u64 nsecs = 0, cnt = 0; 1740 int cpu; 1741 1742 for_each_possible_cpu(cpu) { 1743 const struct bpf_prog_stats *st; 1744 unsigned int start; 1745 u64 tnsecs, tcnt; 1746 1747 st = per_cpu_ptr(prog->aux->stats, cpu); 1748 do { 1749 start = u64_stats_fetch_begin_irq(&st->syncp); 1750 tnsecs = st->nsecs; 1751 tcnt = st->cnt; 1752 } while (u64_stats_fetch_retry_irq(&st->syncp, start)); 1753 nsecs += tnsecs; 1754 cnt += tcnt; 1755 } 1756 stats->nsecs = nsecs; 1757 stats->cnt = cnt; 1758 } 1759 1760 #ifdef CONFIG_PROC_FS 1761 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 1762 { 1763 const struct bpf_prog *prog = filp->private_data; 1764 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 1765 struct bpf_prog_stats stats; 1766 1767 bpf_prog_get_stats(prog, &stats); 1768 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 1769 seq_printf(m, 1770 "prog_type:\t%u\n" 1771 "prog_jited:\t%u\n" 1772 "prog_tag:\t%s\n" 1773 "memlock:\t%llu\n" 1774 "prog_id:\t%u\n" 1775 "run_time_ns:\t%llu\n" 1776 "run_cnt:\t%llu\n", 1777 prog->type, 1778 prog->jited, 1779 prog_tag, 1780 prog->pages * 1ULL << PAGE_SHIFT, 1781 prog->aux->id, 1782 stats.nsecs, 1783 stats.cnt); 1784 } 1785 #endif 1786 1787 const struct file_operations bpf_prog_fops = { 1788 #ifdef CONFIG_PROC_FS 1789 .show_fdinfo = bpf_prog_show_fdinfo, 1790 #endif 1791 .release = bpf_prog_release, 1792 .read = bpf_dummy_read, 1793 .write = bpf_dummy_write, 1794 }; 1795 1796 int bpf_prog_new_fd(struct bpf_prog *prog) 1797 { 1798 int ret; 1799 1800 ret = security_bpf_prog(prog); 1801 if (ret < 0) 1802 return ret; 1803 1804 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 1805 O_RDWR | O_CLOEXEC); 1806 } 1807 1808 static struct bpf_prog 
*____bpf_prog_get(struct fd f) 1809 { 1810 if (!f.file) 1811 return ERR_PTR(-EBADF); 1812 if (f.file->f_op != &bpf_prog_fops) { 1813 fdput(f); 1814 return ERR_PTR(-EINVAL); 1815 } 1816 1817 return f.file->private_data; 1818 } 1819 1820 void bpf_prog_add(struct bpf_prog *prog, int i) 1821 { 1822 atomic64_add(i, &prog->aux->refcnt); 1823 } 1824 EXPORT_SYMBOL_GPL(bpf_prog_add); 1825 1826 void bpf_prog_sub(struct bpf_prog *prog, int i) 1827 { 1828 /* Only to be used for undoing previous bpf_prog_add() in some 1829 * error path. We still know that another entity in our call 1830 * path holds a reference to the program, thus atomic_sub() can 1831 * be safely used in such cases! 1832 */ 1833 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 1834 } 1835 EXPORT_SYMBOL_GPL(bpf_prog_sub); 1836 1837 void bpf_prog_inc(struct bpf_prog *prog) 1838 { 1839 atomic64_inc(&prog->aux->refcnt); 1840 } 1841 EXPORT_SYMBOL_GPL(bpf_prog_inc); 1842 1843 /* prog_idr_lock should have been held */ 1844 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 1845 { 1846 int refold; 1847 1848 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 1849 1850 if (!refold) 1851 return ERR_PTR(-ENOENT); 1852 1853 return prog; 1854 } 1855 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1856 1857 bool bpf_prog_get_ok(struct bpf_prog *prog, 1858 enum bpf_prog_type *attach_type, bool attach_drv) 1859 { 1860 /* not an attachment, just a refcount inc, always allow */ 1861 if (!attach_type) 1862 return true; 1863 1864 if (prog->type != *attach_type) 1865 return false; 1866 if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 1867 return false; 1868 1869 return true; 1870 } 1871 1872 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1873 bool attach_drv) 1874 { 1875 struct fd f = fdget(ufd); 1876 struct bpf_prog *prog; 1877 1878 prog = ____bpf_prog_get(f); 1879 if (IS_ERR(prog)) 1880 return prog; 1881 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 1882 prog = ERR_PTR(-EINVAL); 1883 goto out; 1884 } 1885 1886 bpf_prog_inc(prog); 1887 out: 1888 fdput(f); 1889 return prog; 1890 } 1891 1892 struct bpf_prog *bpf_prog_get(u32 ufd) 1893 { 1894 return __bpf_prog_get(ufd, NULL, false); 1895 } 1896 1897 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1898 bool attach_drv) 1899 { 1900 return __bpf_prog_get(ufd, &type, attach_drv); 1901 } 1902 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1903 1904 /* Initially all BPF programs could be loaded w/o specifying 1905 * expected_attach_type. Later for some of them specifying expected_attach_type 1906 * at load time became required so that program could be validated properly. 1907 * Programs of types that are allowed to be loaded both w/ and w/o (for 1908 * backward compatibility) expected_attach_type, should have the default attach 1909 * type assigned to expected_attach_type for the latter case, so that it can be 1910 * validated later at attach time. 1911 * 1912 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 1913 * prog type requires it but has some attach types that have to be backward 1914 * compatible. 1915 */ 1916 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 1917 { 1918 switch (attr->prog_type) { 1919 case BPF_PROG_TYPE_CGROUP_SOCK: 1920 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 1921 * exist so checking for non-zero is the way to go here. 
1922 */ 1923 if (!attr->expected_attach_type) 1924 attr->expected_attach_type = 1925 BPF_CGROUP_INET_SOCK_CREATE; 1926 break; 1927 } 1928 } 1929 1930 static int 1931 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 1932 enum bpf_attach_type expected_attach_type, 1933 u32 btf_id, u32 prog_fd) 1934 { 1935 if (btf_id) { 1936 if (btf_id > BTF_MAX_TYPE) 1937 return -EINVAL; 1938 1939 switch (prog_type) { 1940 case BPF_PROG_TYPE_TRACING: 1941 case BPF_PROG_TYPE_STRUCT_OPS: 1942 case BPF_PROG_TYPE_EXT: 1943 break; 1944 default: 1945 return -EINVAL; 1946 } 1947 } 1948 1949 if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING && 1950 prog_type != BPF_PROG_TYPE_EXT) 1951 return -EINVAL; 1952 1953 switch (prog_type) { 1954 case BPF_PROG_TYPE_CGROUP_SOCK: 1955 switch (expected_attach_type) { 1956 case BPF_CGROUP_INET_SOCK_CREATE: 1957 case BPF_CGROUP_INET4_POST_BIND: 1958 case BPF_CGROUP_INET6_POST_BIND: 1959 return 0; 1960 default: 1961 return -EINVAL; 1962 } 1963 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 1964 switch (expected_attach_type) { 1965 case BPF_CGROUP_INET4_BIND: 1966 case BPF_CGROUP_INET6_BIND: 1967 case BPF_CGROUP_INET4_CONNECT: 1968 case BPF_CGROUP_INET6_CONNECT: 1969 case BPF_CGROUP_UDP4_SENDMSG: 1970 case BPF_CGROUP_UDP6_SENDMSG: 1971 case BPF_CGROUP_UDP4_RECVMSG: 1972 case BPF_CGROUP_UDP6_RECVMSG: 1973 return 0; 1974 default: 1975 return -EINVAL; 1976 } 1977 case BPF_PROG_TYPE_CGROUP_SKB: 1978 switch (expected_attach_type) { 1979 case BPF_CGROUP_INET_INGRESS: 1980 case BPF_CGROUP_INET_EGRESS: 1981 return 0; 1982 default: 1983 return -EINVAL; 1984 } 1985 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 1986 switch (expected_attach_type) { 1987 case BPF_CGROUP_SETSOCKOPT: 1988 case BPF_CGROUP_GETSOCKOPT: 1989 return 0; 1990 default: 1991 return -EINVAL; 1992 } 1993 case BPF_PROG_TYPE_EXT: 1994 if (expected_attach_type) 1995 return -EINVAL; 1996 /* fallthrough */ 1997 default: 1998 return 0; 1999 } 2000 } 2001 2002 /* last field in 'union bpf_attr' used by this command */ 2003 #define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd 2004 2005 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) 2006 { 2007 enum bpf_prog_type type = attr->prog_type; 2008 struct bpf_prog *prog; 2009 int err; 2010 char license[128]; 2011 bool is_gpl; 2012 2013 if (CHECK_ATTR(BPF_PROG_LOAD)) 2014 return -EINVAL; 2015 2016 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2017 BPF_F_ANY_ALIGNMENT | 2018 BPF_F_TEST_STATE_FREQ | 2019 BPF_F_TEST_RND_HI32)) 2020 return -EINVAL; 2021 2022 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2023 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2024 !capable(CAP_SYS_ADMIN)) 2025 return -EPERM; 2026 2027 /* copy eBPF program license from user space */ 2028 if (strncpy_from_user(license, u64_to_user_ptr(attr->license), 2029 sizeof(license) - 1) < 0) 2030 return -EFAULT; 2031 license[sizeof(license) - 1] = 0; 2032 2033 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2034 is_gpl = license_is_gpl_compatible(license); 2035 2036 if (attr->insn_cnt == 0 || 2037 attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2038 return -E2BIG; 2039 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2040 type != BPF_PROG_TYPE_CGROUP_SKB && 2041 !capable(CAP_SYS_ADMIN)) 2042 return -EPERM; 2043 2044 bpf_prog_load_fixup_attach_type(attr); 2045 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2046 attr->attach_btf_id, 2047 attr->attach_prog_fd)) 2048 return -EINVAL; 2049 2050 /* plain bpf_prog allocation */ 2051 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2052 if (!prog) 2053 return -ENOMEM; 2054 2055 prog->expected_attach_type = attr->expected_attach_type; 2056 prog->aux->attach_btf_id = attr->attach_btf_id; 2057 if (attr->attach_prog_fd) { 2058 struct bpf_prog *tgt_prog; 2059 2060 tgt_prog = bpf_prog_get(attr->attach_prog_fd); 2061 if (IS_ERR(tgt_prog)) { 2062 err = PTR_ERR(tgt_prog); 2063 goto free_prog_nouncharge; 2064 } 2065 prog->aux->linked_prog = tgt_prog; 2066 } 2067 2068 prog->aux->offload_requested = !!attr->prog_ifindex; 2069 2070 err = security_bpf_prog_alloc(prog->aux); 2071 if (err) 2072 goto free_prog_nouncharge; 2073 2074 err = bpf_prog_charge_memlock(prog); 2075 if (err) 2076 goto free_prog_sec; 2077 2078 prog->len = attr->insn_cnt; 2079 2080 err = -EFAULT; 2081 if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), 2082 bpf_prog_insn_size(prog)) != 0) 2083 goto free_prog; 2084 2085 prog->orig_prog = NULL; 2086 prog->jited = 0; 2087 2088 atomic64_set(&prog->aux->refcnt, 1); 2089 prog->gpl_compatible = is_gpl ? 1 : 0; 2090 2091 if (bpf_prog_is_dev_bound(prog->aux)) { 2092 err = bpf_prog_offload_init(prog, attr); 2093 if (err) 2094 goto free_prog; 2095 } 2096 2097 /* find program type: socket_filter vs tracing_filter */ 2098 err = find_prog_type(type, prog); 2099 if (err < 0) 2100 goto free_prog; 2101 2102 prog->aux->load_time = ktime_get_boottime_ns(); 2103 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2104 sizeof(attr->prog_name)); 2105 if (err < 0) 2106 goto free_prog; 2107 2108 /* run eBPF verifier */ 2109 err = bpf_check(&prog, attr, uattr); 2110 if (err < 0) 2111 goto free_used_maps; 2112 2113 prog = bpf_prog_select_runtime(prog, &err); 2114 if (err < 0) 2115 goto free_used_maps; 2116 2117 err = bpf_prog_alloc_id(prog); 2118 if (err) 2119 goto free_used_maps; 2120 2121 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2122 * effectively publicly exposed. However, retrieving via 2123 * bpf_prog_get_fd_by_id() will take another reference, 2124 * therefore it cannot be gone underneath us. 2125 * 2126 * Only for the time /after/ successful bpf_prog_new_fd() 2127 * and before returning to userspace, we might just hold 2128 * one reference and any parallel close on that fd could 2129 * rip everything out. Hence, below notifications must 2130 * happen before bpf_prog_new_fd(). 2131 * 2132 * Also, any failure handling from this point onwards must 2133 * be using bpf_prog_put() given the program is exposed. 2134 */ 2135 bpf_prog_kallsyms_add(prog); 2136 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2137 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2138 2139 err = bpf_prog_new_fd(prog); 2140 if (err < 0) 2141 bpf_prog_put(prog); 2142 return err; 2143 2144 free_used_maps: 2145 /* In case we have subprogs, we need to wait for a grace 2146 * period before we can tear down JIT memory since symbols 2147 * are already exposed under kallsyms. 
2148 */ 2149 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2150 return err; 2151 free_prog: 2152 bpf_prog_uncharge_memlock(prog); 2153 free_prog_sec: 2154 security_bpf_prog_free(prog->aux); 2155 free_prog_nouncharge: 2156 bpf_prog_free(prog); 2157 return err; 2158 } 2159 2160 #define BPF_OBJ_LAST_FIELD file_flags 2161 2162 static int bpf_obj_pin(const union bpf_attr *attr) 2163 { 2164 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 2165 return -EINVAL; 2166 2167 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 2168 } 2169 2170 static int bpf_obj_get(const union bpf_attr *attr) 2171 { 2172 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2173 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 2174 return -EINVAL; 2175 2176 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 2177 attr->file_flags); 2178 } 2179 2180 static int bpf_tracing_prog_release(struct inode *inode, struct file *filp) 2181 { 2182 struct bpf_prog *prog = filp->private_data; 2183 2184 WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); 2185 bpf_prog_put(prog); 2186 return 0; 2187 } 2188 2189 static const struct file_operations bpf_tracing_prog_fops = { 2190 .release = bpf_tracing_prog_release, 2191 .read = bpf_dummy_read, 2192 .write = bpf_dummy_write, 2193 }; 2194 2195 static int bpf_tracing_prog_attach(struct bpf_prog *prog) 2196 { 2197 int tr_fd, err; 2198 2199 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 2200 prog->expected_attach_type != BPF_TRACE_FEXIT && 2201 prog->type != BPF_PROG_TYPE_EXT) { 2202 err = -EINVAL; 2203 goto out_put_prog; 2204 } 2205 2206 err = bpf_trampoline_link_prog(prog); 2207 if (err) 2208 goto out_put_prog; 2209 2210 tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops, 2211 prog, O_CLOEXEC); 2212 if (tr_fd < 0) { 2213 WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); 2214 err = tr_fd; 2215 goto out_put_prog; 2216 } 2217 return tr_fd; 2218 2219 out_put_prog: 2220 bpf_prog_put(prog); 2221 return err; 2222 } 2223 2224 struct bpf_raw_tracepoint { 2225 struct bpf_raw_event_map *btp; 2226 struct bpf_prog *prog; 2227 }; 2228 2229 static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp) 2230 { 2231 struct bpf_raw_tracepoint *raw_tp = filp->private_data; 2232 2233 if (raw_tp->prog) { 2234 bpf_probe_unregister(raw_tp->btp, raw_tp->prog); 2235 bpf_prog_put(raw_tp->prog); 2236 } 2237 bpf_put_raw_tracepoint(raw_tp->btp); 2238 kfree(raw_tp); 2239 return 0; 2240 } 2241 2242 static const struct file_operations bpf_raw_tp_fops = { 2243 .release = bpf_raw_tracepoint_release, 2244 .read = bpf_dummy_read, 2245 .write = bpf_dummy_write, 2246 }; 2247 2248 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 2249 2250 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 2251 { 2252 struct bpf_raw_tracepoint *raw_tp; 2253 struct bpf_raw_event_map *btp; 2254 struct bpf_prog *prog; 2255 const char *tp_name; 2256 char buf[128]; 2257 int tp_fd, err; 2258 2259 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 2260 return -EINVAL; 2261 2262 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 2263 if (IS_ERR(prog)) 2264 return PTR_ERR(prog); 2265 2266 if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT && 2267 prog->type != BPF_PROG_TYPE_TRACING && 2268 prog->type != BPF_PROG_TYPE_EXT && 2269 prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) { 2270 err = -EINVAL; 2271 goto out_put_prog; 2272 } 2273 2274 if (prog->type == BPF_PROG_TYPE_TRACING || 2275 prog->type == BPF_PROG_TYPE_EXT) { 2276 if (attr->raw_tracepoint.name) { 2277 /* The attach point for this 
category of programs 2278 * should be specified via btf_id during program load. 2279 */ 2280 err = -EINVAL; 2281 goto out_put_prog; 2282 } 2283 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 2284 tp_name = prog->aux->attach_func_name; 2285 else 2286 return bpf_tracing_prog_attach(prog); 2287 } else { 2288 if (strncpy_from_user(buf, 2289 u64_to_user_ptr(attr->raw_tracepoint.name), 2290 sizeof(buf) - 1) < 0) { 2291 err = -EFAULT; 2292 goto out_put_prog; 2293 } 2294 buf[sizeof(buf) - 1] = 0; 2295 tp_name = buf; 2296 } 2297 2298 btp = bpf_get_raw_tracepoint(tp_name); 2299 if (!btp) { 2300 err = -ENOENT; 2301 goto out_put_prog; 2302 } 2303 2304 raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER); 2305 if (!raw_tp) { 2306 err = -ENOMEM; 2307 goto out_put_btp; 2308 } 2309 raw_tp->btp = btp; 2310 raw_tp->prog = prog; 2311 2312 err = bpf_probe_register(raw_tp->btp, prog); 2313 if (err) 2314 goto out_free_tp; 2315 2316 tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp, 2317 O_CLOEXEC); 2318 if (tp_fd < 0) { 2319 bpf_probe_unregister(raw_tp->btp, prog); 2320 err = tp_fd; 2321 goto out_free_tp; 2322 } 2323 return tp_fd; 2324 2325 out_free_tp: 2326 kfree(raw_tp); 2327 out_put_btp: 2328 bpf_put_raw_tracepoint(btp); 2329 out_put_prog: 2330 bpf_prog_put(prog); 2331 return err; 2332 } 2333 2334 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 2335 enum bpf_attach_type attach_type) 2336 { 2337 switch (prog->type) { 2338 case BPF_PROG_TYPE_CGROUP_SOCK: 2339 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2340 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2341 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 2342 case BPF_PROG_TYPE_CGROUP_SKB: 2343 return prog->enforce_expected_attach_type && 2344 prog->expected_attach_type != attach_type ? 
2345 -EINVAL : 0; 2346 default: 2347 return 0; 2348 } 2349 } 2350 2351 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 2352 2353 #define BPF_F_ATTACH_MASK \ 2354 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 2355 2356 static int bpf_prog_attach(const union bpf_attr *attr) 2357 { 2358 enum bpf_prog_type ptype; 2359 struct bpf_prog *prog; 2360 int ret; 2361 2362 if (!capable(CAP_NET_ADMIN)) 2363 return -EPERM; 2364 2365 if (CHECK_ATTR(BPF_PROG_ATTACH)) 2366 return -EINVAL; 2367 2368 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 2369 return -EINVAL; 2370 2371 switch (attr->attach_type) { 2372 case BPF_CGROUP_INET_INGRESS: 2373 case BPF_CGROUP_INET_EGRESS: 2374 ptype = BPF_PROG_TYPE_CGROUP_SKB; 2375 break; 2376 case BPF_CGROUP_INET_SOCK_CREATE: 2377 case BPF_CGROUP_INET4_POST_BIND: 2378 case BPF_CGROUP_INET6_POST_BIND: 2379 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 2380 break; 2381 case BPF_CGROUP_INET4_BIND: 2382 case BPF_CGROUP_INET6_BIND: 2383 case BPF_CGROUP_INET4_CONNECT: 2384 case BPF_CGROUP_INET6_CONNECT: 2385 case BPF_CGROUP_UDP4_SENDMSG: 2386 case BPF_CGROUP_UDP6_SENDMSG: 2387 case BPF_CGROUP_UDP4_RECVMSG: 2388 case BPF_CGROUP_UDP6_RECVMSG: 2389 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 2390 break; 2391 case BPF_CGROUP_SOCK_OPS: 2392 ptype = BPF_PROG_TYPE_SOCK_OPS; 2393 break; 2394 case BPF_CGROUP_DEVICE: 2395 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 2396 break; 2397 case BPF_SK_MSG_VERDICT: 2398 ptype = BPF_PROG_TYPE_SK_MSG; 2399 break; 2400 case BPF_SK_SKB_STREAM_PARSER: 2401 case BPF_SK_SKB_STREAM_VERDICT: 2402 ptype = BPF_PROG_TYPE_SK_SKB; 2403 break; 2404 case BPF_LIRC_MODE2: 2405 ptype = BPF_PROG_TYPE_LIRC_MODE2; 2406 break; 2407 case BPF_FLOW_DISSECTOR: 2408 ptype = BPF_PROG_TYPE_FLOW_DISSECTOR; 2409 break; 2410 case BPF_CGROUP_SYSCTL: 2411 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 2412 break; 2413 case BPF_CGROUP_GETSOCKOPT: 2414 case BPF_CGROUP_SETSOCKOPT: 2415 ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; 2416 break; 2417 default: 2418 return -EINVAL; 2419 } 2420 2421 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 2422 if (IS_ERR(prog)) 2423 return PTR_ERR(prog); 2424 2425 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 2426 bpf_prog_put(prog); 2427 return -EINVAL; 2428 } 2429 2430 switch (ptype) { 2431 case BPF_PROG_TYPE_SK_SKB: 2432 case BPF_PROG_TYPE_SK_MSG: 2433 ret = sock_map_get_from_fd(attr, prog); 2434 break; 2435 case BPF_PROG_TYPE_LIRC_MODE2: 2436 ret = lirc_prog_attach(attr, prog); 2437 break; 2438 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2439 ret = skb_flow_dissector_bpf_prog_attach(attr, prog); 2440 break; 2441 default: 2442 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 2443 } 2444 2445 if (ret) 2446 bpf_prog_put(prog); 2447 return ret; 2448 } 2449 2450 #define BPF_PROG_DETACH_LAST_FIELD attach_type 2451 2452 static int bpf_prog_detach(const union bpf_attr *attr) 2453 { 2454 enum bpf_prog_type ptype; 2455 2456 if (!capable(CAP_NET_ADMIN)) 2457 return -EPERM; 2458 2459 if (CHECK_ATTR(BPF_PROG_DETACH)) 2460 return -EINVAL; 2461 2462 switch (attr->attach_type) { 2463 case BPF_CGROUP_INET_INGRESS: 2464 case BPF_CGROUP_INET_EGRESS: 2465 ptype = BPF_PROG_TYPE_CGROUP_SKB; 2466 break; 2467 case BPF_CGROUP_INET_SOCK_CREATE: 2468 case BPF_CGROUP_INET4_POST_BIND: 2469 case BPF_CGROUP_INET6_POST_BIND: 2470 ptype = BPF_PROG_TYPE_CGROUP_SOCK; 2471 break; 2472 case BPF_CGROUP_INET4_BIND: 2473 case BPF_CGROUP_INET6_BIND: 2474 case BPF_CGROUP_INET4_CONNECT: 2475 case BPF_CGROUP_INET6_CONNECT: 2476 case BPF_CGROUP_UDP4_SENDMSG: 2477 case BPF_CGROUP_UDP6_SENDMSG: 2478 
case BPF_CGROUP_UDP4_RECVMSG: 2479 case BPF_CGROUP_UDP6_RECVMSG: 2480 ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 2481 break; 2482 case BPF_CGROUP_SOCK_OPS: 2483 ptype = BPF_PROG_TYPE_SOCK_OPS; 2484 break; 2485 case BPF_CGROUP_DEVICE: 2486 ptype = BPF_PROG_TYPE_CGROUP_DEVICE; 2487 break; 2488 case BPF_SK_MSG_VERDICT: 2489 return sock_map_get_from_fd(attr, NULL); 2490 case BPF_SK_SKB_STREAM_PARSER: 2491 case BPF_SK_SKB_STREAM_VERDICT: 2492 return sock_map_get_from_fd(attr, NULL); 2493 case BPF_LIRC_MODE2: 2494 return lirc_prog_detach(attr); 2495 case BPF_FLOW_DISSECTOR: 2496 return skb_flow_dissector_bpf_prog_detach(attr); 2497 case BPF_CGROUP_SYSCTL: 2498 ptype = BPF_PROG_TYPE_CGROUP_SYSCTL; 2499 break; 2500 case BPF_CGROUP_GETSOCKOPT: 2501 case BPF_CGROUP_SETSOCKOPT: 2502 ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT; 2503 break; 2504 default: 2505 return -EINVAL; 2506 } 2507 2508 return cgroup_bpf_prog_detach(attr, ptype); 2509 } 2510 2511 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 2512 2513 static int bpf_prog_query(const union bpf_attr *attr, 2514 union bpf_attr __user *uattr) 2515 { 2516 if (!capable(CAP_NET_ADMIN)) 2517 return -EPERM; 2518 if (CHECK_ATTR(BPF_PROG_QUERY)) 2519 return -EINVAL; 2520 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 2521 return -EINVAL; 2522 2523 switch (attr->query.attach_type) { 2524 case BPF_CGROUP_INET_INGRESS: 2525 case BPF_CGROUP_INET_EGRESS: 2526 case BPF_CGROUP_INET_SOCK_CREATE: 2527 case BPF_CGROUP_INET4_BIND: 2528 case BPF_CGROUP_INET6_BIND: 2529 case BPF_CGROUP_INET4_POST_BIND: 2530 case BPF_CGROUP_INET6_POST_BIND: 2531 case BPF_CGROUP_INET4_CONNECT: 2532 case BPF_CGROUP_INET6_CONNECT: 2533 case BPF_CGROUP_UDP4_SENDMSG: 2534 case BPF_CGROUP_UDP6_SENDMSG: 2535 case BPF_CGROUP_UDP4_RECVMSG: 2536 case BPF_CGROUP_UDP6_RECVMSG: 2537 case BPF_CGROUP_SOCK_OPS: 2538 case BPF_CGROUP_DEVICE: 2539 case BPF_CGROUP_SYSCTL: 2540 case BPF_CGROUP_GETSOCKOPT: 2541 case BPF_CGROUP_SETSOCKOPT: 2542 break; 2543 case BPF_LIRC_MODE2: 2544 return lirc_prog_query(attr, uattr); 2545 case BPF_FLOW_DISSECTOR: 2546 return skb_flow_dissector_prog_query(attr, uattr); 2547 default: 2548 return -EINVAL; 2549 } 2550 2551 return cgroup_bpf_prog_query(attr, uattr); 2552 } 2553 2554 #define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out 2555 2556 static int bpf_prog_test_run(const union bpf_attr *attr, 2557 union bpf_attr __user *uattr) 2558 { 2559 struct bpf_prog *prog; 2560 int ret = -ENOTSUPP; 2561 2562 if (!capable(CAP_SYS_ADMIN)) 2563 return -EPERM; 2564 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 2565 return -EINVAL; 2566 2567 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 2568 (!attr->test.ctx_size_in && attr->test.ctx_in)) 2569 return -EINVAL; 2570 2571 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 2572 (!attr->test.ctx_size_out && attr->test.ctx_out)) 2573 return -EINVAL; 2574 2575 prog = bpf_prog_get(attr->test.prog_fd); 2576 if (IS_ERR(prog)) 2577 return PTR_ERR(prog); 2578 2579 if (prog->aux->ops->test_run) 2580 ret = prog->aux->ops->test_run(prog, attr, uattr); 2581 2582 bpf_prog_put(prog); 2583 return ret; 2584 } 2585 2586 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 2587 2588 static int bpf_obj_get_next_id(const union bpf_attr *attr, 2589 union bpf_attr __user *uattr, 2590 struct idr *idr, 2591 spinlock_t *lock) 2592 { 2593 u32 next_id = attr->start_id; 2594 int err = 0; 2595 2596 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 2597 return -EINVAL; 2598 2599 if (!capable(CAP_SYS_ADMIN)) 2600 return -EPERM; 2601 2602 next_id++; 2603 spin_lock_bh(lock); 
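	/* idr_get_next() walks the ID space under the lock: it returns the
	 * first object allocated at an ID >= next_id and updates next_id to
	 * that ID, so a NULL result means the caller has already seen the
	 * highest allocated ID.
	 */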
2604 if (!idr_get_next(idr, &next_id)) 2605 err = -ENOENT; 2606 spin_unlock_bh(lock); 2607 2608 if (!err) 2609 err = put_user(next_id, &uattr->next_id); 2610 2611 return err; 2612 } 2613 2614 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 2615 2616 struct bpf_prog *bpf_prog_by_id(u32 id) 2617 { 2618 struct bpf_prog *prog; 2619 2620 if (!id) 2621 return ERR_PTR(-ENOENT); 2622 2623 spin_lock_bh(&prog_idr_lock); 2624 prog = idr_find(&prog_idr, id); 2625 if (prog) 2626 prog = bpf_prog_inc_not_zero(prog); 2627 else 2628 prog = ERR_PTR(-ENOENT); 2629 spin_unlock_bh(&prog_idr_lock); 2630 return prog; 2631 } 2632 2633 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 2634 { 2635 struct bpf_prog *prog; 2636 u32 id = attr->prog_id; 2637 int fd; 2638 2639 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 2640 return -EINVAL; 2641 2642 if (!capable(CAP_SYS_ADMIN)) 2643 return -EPERM; 2644 2645 prog = bpf_prog_by_id(id); 2646 if (IS_ERR(prog)) 2647 return PTR_ERR(prog); 2648 2649 fd = bpf_prog_new_fd(prog); 2650 if (fd < 0) 2651 bpf_prog_put(prog); 2652 2653 return fd; 2654 } 2655 2656 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 2657 2658 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 2659 { 2660 struct bpf_map *map; 2661 u32 id = attr->map_id; 2662 int f_flags; 2663 int fd; 2664 2665 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 2666 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 2667 return -EINVAL; 2668 2669 if (!capable(CAP_SYS_ADMIN)) 2670 return -EPERM; 2671 2672 f_flags = bpf_get_file_flag(attr->open_flags); 2673 if (f_flags < 0) 2674 return f_flags; 2675 2676 spin_lock_bh(&map_idr_lock); 2677 map = idr_find(&map_idr, id); 2678 if (map) 2679 map = __bpf_map_inc_not_zero(map, true); 2680 else 2681 map = ERR_PTR(-ENOENT); 2682 spin_unlock_bh(&map_idr_lock); 2683 2684 if (IS_ERR(map)) 2685 return PTR_ERR(map); 2686 2687 fd = bpf_map_new_fd(map, f_flags); 2688 if (fd < 0) 2689 bpf_map_put_with_uref(map); 2690 2691 return fd; 2692 } 2693 2694 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 2695 unsigned long addr, u32 *off, 2696 u32 *type) 2697 { 2698 const struct bpf_map *map; 2699 int i; 2700 2701 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 2702 map = prog->aux->used_maps[i]; 2703 if (map == (void *)addr) { 2704 *type = BPF_PSEUDO_MAP_FD; 2705 return map; 2706 } 2707 if (!map->ops->map_direct_value_meta) 2708 continue; 2709 if (!map->ops->map_direct_value_meta(map, addr, off)) { 2710 *type = BPF_PSEUDO_MAP_VALUE; 2711 return map; 2712 } 2713 } 2714 2715 return NULL; 2716 } 2717 2718 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) 2719 { 2720 const struct bpf_map *map; 2721 struct bpf_insn *insns; 2722 u32 off, type; 2723 u64 imm; 2724 int i; 2725 2726 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 2727 GFP_USER); 2728 if (!insns) 2729 return insns; 2730 2731 for (i = 0; i < prog->len; i++) { 2732 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { 2733 insns[i].code = BPF_JMP | BPF_CALL; 2734 insns[i].imm = BPF_FUNC_tail_call; 2735 /* fall-through */ 2736 } 2737 if (insns[i].code == (BPF_JMP | BPF_CALL) || 2738 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { 2739 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) 2740 insns[i].code = BPF_JMP | BPF_CALL; 2741 if (!bpf_dump_raw_ok()) 2742 insns[i].imm = 0; 2743 continue; 2744 } 2745 2746 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) 2747 continue; 2748 2749 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 2750 map = bpf_map_from_imm(prog, imm, &off, &type); 2751 
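		/* If the 64-bit immediate matches one of the program's maps,
		 * rewrite the dumped ld_imm64 to carry the map ID (and value
		 * offset) instead of the raw kernel address, so no kernel
		 * pointers are exposed to user space.
		 */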
if (map) { 2752 insns[i].src_reg = type; 2753 insns[i].imm = map->id; 2754 insns[i + 1].imm = off; 2755 continue; 2756 } 2757 } 2758 2759 return insns; 2760 } 2761 2762 static int set_info_rec_size(struct bpf_prog_info *info) 2763 { 2764 /* 2765 * Ensure info.*_rec_size is the same as kernel expected size 2766 * 2767 * or 2768 * 2769 * Only allow zero *_rec_size if both _rec_size and _cnt are 2770 * zero. In this case, the kernel will set the expected 2771 * _rec_size back to the info. 2772 */ 2773 2774 if ((info->nr_func_info || info->func_info_rec_size) && 2775 info->func_info_rec_size != sizeof(struct bpf_func_info)) 2776 return -EINVAL; 2777 2778 if ((info->nr_line_info || info->line_info_rec_size) && 2779 info->line_info_rec_size != sizeof(struct bpf_line_info)) 2780 return -EINVAL; 2781 2782 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 2783 info->jited_line_info_rec_size != sizeof(__u64)) 2784 return -EINVAL; 2785 2786 info->func_info_rec_size = sizeof(struct bpf_func_info); 2787 info->line_info_rec_size = sizeof(struct bpf_line_info); 2788 info->jited_line_info_rec_size = sizeof(__u64); 2789 2790 return 0; 2791 } 2792 2793 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, 2794 const union bpf_attr *attr, 2795 union bpf_attr __user *uattr) 2796 { 2797 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 2798 struct bpf_prog_info info; 2799 u32 info_len = attr->info.info_len; 2800 struct bpf_prog_stats stats; 2801 char __user *uinsns; 2802 u32 ulen; 2803 int err; 2804 2805 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 2806 if (err) 2807 return err; 2808 info_len = min_t(u32, sizeof(info), info_len); 2809 2810 memset(&info, 0, sizeof(info)); 2811 if (copy_from_user(&info, uinfo, info_len)) 2812 return -EFAULT; 2813 2814 info.type = prog->type; 2815 info.id = prog->aux->id; 2816 info.load_time = prog->aux->load_time; 2817 info.created_by_uid = from_kuid_munged(current_user_ns(), 2818 prog->aux->user->uid); 2819 info.gpl_compatible = prog->gpl_compatible; 2820 2821 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 2822 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 2823 2824 ulen = info.nr_map_ids; 2825 info.nr_map_ids = prog->aux->used_map_cnt; 2826 ulen = min_t(u32, info.nr_map_ids, ulen); 2827 if (ulen) { 2828 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 2829 u32 i; 2830 2831 for (i = 0; i < ulen; i++) 2832 if (put_user(prog->aux->used_maps[i]->id, 2833 &user_map_ids[i])) 2834 return -EFAULT; 2835 } 2836 2837 err = set_info_rec_size(&info); 2838 if (err) 2839 return err; 2840 2841 bpf_prog_get_stats(prog, &stats); 2842 info.run_time_ns = stats.nsecs; 2843 info.run_cnt = stats.cnt; 2844 2845 if (!capable(CAP_SYS_ADMIN)) { 2846 info.jited_prog_len = 0; 2847 info.xlated_prog_len = 0; 2848 info.nr_jited_ksyms = 0; 2849 info.nr_jited_func_lens = 0; 2850 info.nr_func_info = 0; 2851 info.nr_line_info = 0; 2852 info.nr_jited_line_info = 0; 2853 goto done; 2854 } 2855 2856 ulen = info.xlated_prog_len; 2857 info.xlated_prog_len = bpf_prog_insn_size(prog); 2858 if (info.xlated_prog_len && ulen) { 2859 struct bpf_insn *insns_sanitized; 2860 bool fault; 2861 2862 if (prog->blinded && !bpf_dump_raw_ok()) { 2863 info.xlated_prog_insns = 0; 2864 goto done; 2865 } 2866 insns_sanitized = bpf_insn_prepare_dump(prog); 2867 if (!insns_sanitized) 2868 return -ENOMEM; 2869 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 2870 ulen = min_t(u32, info.xlated_prog_len, ulen); 2871 fault = copy_to_user(uinsns, 
insns_sanitized, ulen); 2872 kfree(insns_sanitized); 2873 if (fault) 2874 return -EFAULT; 2875 } 2876 2877 if (bpf_prog_is_dev_bound(prog->aux)) { 2878 err = bpf_prog_offload_info_fill(&info, prog); 2879 if (err) 2880 return err; 2881 goto done; 2882 } 2883 2884 /* NOTE: the following code is supposed to be skipped for offload. 2885 * bpf_prog_offload_info_fill() is the place to fill similar fields 2886 * for offload. 2887 */ 2888 ulen = info.jited_prog_len; 2889 if (prog->aux->func_cnt) { 2890 u32 i; 2891 2892 info.jited_prog_len = 0; 2893 for (i = 0; i < prog->aux->func_cnt; i++) 2894 info.jited_prog_len += prog->aux->func[i]->jited_len; 2895 } else { 2896 info.jited_prog_len = prog->jited_len; 2897 } 2898 2899 if (info.jited_prog_len && ulen) { 2900 if (bpf_dump_raw_ok()) { 2901 uinsns = u64_to_user_ptr(info.jited_prog_insns); 2902 ulen = min_t(u32, info.jited_prog_len, ulen); 2903 2904 /* for multi-function programs, copy the JITed 2905 * instructions for all the functions 2906 */ 2907 if (prog->aux->func_cnt) { 2908 u32 len, free, i; 2909 u8 *img; 2910 2911 free = ulen; 2912 for (i = 0; i < prog->aux->func_cnt; i++) { 2913 len = prog->aux->func[i]->jited_len; 2914 len = min_t(u32, len, free); 2915 img = (u8 *) prog->aux->func[i]->bpf_func; 2916 if (copy_to_user(uinsns, img, len)) 2917 return -EFAULT; 2918 uinsns += len; 2919 free -= len; 2920 if (!free) 2921 break; 2922 } 2923 } else { 2924 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 2925 return -EFAULT; 2926 } 2927 } else { 2928 info.jited_prog_insns = 0; 2929 } 2930 } 2931 2932 ulen = info.nr_jited_ksyms; 2933 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 2934 if (ulen) { 2935 if (bpf_dump_raw_ok()) { 2936 unsigned long ksym_addr; 2937 u64 __user *user_ksyms; 2938 u32 i; 2939 2940 /* copy the address of the kernel symbol 2941 * corresponding to each function 2942 */ 2943 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 2944 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 2945 if (prog->aux->func_cnt) { 2946 for (i = 0; i < ulen; i++) { 2947 ksym_addr = (unsigned long) 2948 prog->aux->func[i]->bpf_func; 2949 if (put_user((u64) ksym_addr, 2950 &user_ksyms[i])) 2951 return -EFAULT; 2952 } 2953 } else { 2954 ksym_addr = (unsigned long) prog->bpf_func; 2955 if (put_user((u64) ksym_addr, &user_ksyms[0])) 2956 return -EFAULT; 2957 } 2958 } else { 2959 info.jited_ksyms = 0; 2960 } 2961 } 2962 2963 ulen = info.nr_jited_func_lens; 2964 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 2965 if (ulen) { 2966 if (bpf_dump_raw_ok()) { 2967 u32 __user *user_lens; 2968 u32 func_len, i; 2969 2970 /* copy the JITed image lengths for each function */ 2971 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 2972 user_lens = u64_to_user_ptr(info.jited_func_lens); 2973 if (prog->aux->func_cnt) { 2974 for (i = 0; i < ulen; i++) { 2975 func_len = 2976 prog->aux->func[i]->jited_len; 2977 if (put_user(func_len, &user_lens[i])) 2978 return -EFAULT; 2979 } 2980 } else { 2981 func_len = prog->jited_len; 2982 if (put_user(func_len, &user_lens[0])) 2983 return -EFAULT; 2984 } 2985 } else { 2986 info.jited_func_lens = 0; 2987 } 2988 } 2989 2990 if (prog->aux->btf) 2991 info.btf_id = btf_id(prog->aux->btf); 2992 2993 ulen = info.nr_func_info; 2994 info.nr_func_info = prog->aux->func_info_cnt; 2995 if (info.nr_func_info && ulen) { 2996 char __user *user_finfo; 2997 2998 user_finfo = u64_to_user_ptr(info.func_info); 2999 ulen = min_t(u32, info.nr_func_info, ulen); 3000 if (copy_to_user(user_finfo, prog->aux->func_info, 3001 info.func_info_rec_size * ulen)) 3002 return -EFAULT; 3003 } 3004 3005 ulen = info.nr_line_info; 3006 info.nr_line_info = prog->aux->nr_linfo; 3007 if (info.nr_line_info && ulen) { 3008 __u8 __user *user_linfo; 3009 3010 user_linfo = u64_to_user_ptr(info.line_info); 3011 ulen = min_t(u32, info.nr_line_info, ulen); 3012 if (copy_to_user(user_linfo, prog->aux->linfo, 3013 info.line_info_rec_size * ulen)) 3014 return -EFAULT; 3015 } 3016 3017 ulen = info.nr_jited_line_info; 3018 if (prog->aux->jited_linfo) 3019 info.nr_jited_line_info = prog->aux->nr_linfo; 3020 else 3021 info.nr_jited_line_info = 0; 3022 if (info.nr_jited_line_info && ulen) { 3023 if (bpf_dump_raw_ok()) { 3024 __u64 __user *user_linfo; 3025 u32 i; 3026 3027 user_linfo = u64_to_user_ptr(info.jited_line_info); 3028 ulen = min_t(u32, info.nr_jited_line_info, ulen); 3029 for (i = 0; i < ulen; i++) { 3030 if (put_user((__u64)(long)prog->aux->jited_linfo[i], 3031 &user_linfo[i])) 3032 return -EFAULT; 3033 } 3034 } else { 3035 info.jited_line_info = 0; 3036 } 3037 } 3038 3039 ulen = info.nr_prog_tags; 3040 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 3041 if (ulen) { 3042 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 3043 u32 i; 3044 3045 user_prog_tags = u64_to_user_ptr(info.prog_tags); 3046 ulen = min_t(u32, info.nr_prog_tags, ulen); 3047 if (prog->aux->func_cnt) { 3048 for (i = 0; i < ulen; i++) { 3049 if (copy_to_user(user_prog_tags[i], 3050 prog->aux->func[i]->tag, 3051 BPF_TAG_SIZE)) 3052 return -EFAULT; 3053 } 3054 } else { 3055 if (copy_to_user(user_prog_tags[0], 3056 prog->tag, BPF_TAG_SIZE)) 3057 return -EFAULT; 3058 } 3059 } 3060 3061 done: 3062 if (copy_to_user(uinfo, &info, info_len) || 3063 put_user(info_len, &uattr->info.info_len)) 3064 return -EFAULT; 3065 3066 return 0; 3067 } 3068 3069 static int bpf_map_get_info_by_fd(struct bpf_map *map, 3070 const union bpf_attr *attr, 3071 union bpf_attr __user *uattr) 3072 { 3073 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3074 struct bpf_map_info info; 3075 u32 info_len = attr->info.info_len; 3076 int err; 3077 3078 err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); 3079 if (err) 3080 return err; 3081 info_len = min_t(u32, sizeof(info), info_len); 3082 3083 memset(&info, 0, sizeof(info)); 3084 info.type = map->map_type; 3085 info.id = map->id; 3086 info.key_size = map->key_size; 3087 info.value_size = map->value_size; 3088 info.max_entries = map->max_entries; 3089 info.map_flags = map->map_flags; 3090 memcpy(info.name, map->name, sizeof(map->name)); 3091 3092 if (map->btf) { 3093 info.btf_id = btf_id(map->btf); 3094 info.btf_key_type_id = map->btf_key_type_id; 3095 info.btf_value_type_id = map->btf_value_type_id; 3096 } 3097 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 3098 3099 if (bpf_map_is_dev_bound(map)) { 3100 err = bpf_map_offload_info_fill(&info, map); 3101 if (err) 3102 return err; 3103 } 3104 3105 if (copy_to_user(uinfo, &info, info_len) || 3106 put_user(info_len, &uattr->info.info_len)) 3107 return -EFAULT; 3108 3109 return 0; 3110 } 3111 3112 static int bpf_btf_get_info_by_fd(struct btf *btf, 3113 const union bpf_attr *attr, 3114 union bpf_attr __user *uattr) 3115 { 3116 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3117 u32 info_len = attr->info.info_len; 3118 int err; 3119 3120 err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); 3121 if (err) 3122 return err; 3123 3124 return btf_get_info_by_fd(btf, attr, uattr); 3125 } 3126 3127 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 3128 3129 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 3130 union bpf_attr __user *uattr) 3131 { 3132 int ufd = attr->info.bpf_fd; 3133 struct fd f; 3134 int err; 3135 3136 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 3137 return -EINVAL; 3138 3139 f = fdget(ufd); 3140 if (!f.file) 3141 return -EBADFD; 3142 3143 if (f.file->f_op == &bpf_prog_fops) 3144 err = bpf_prog_get_info_by_fd(f.file->private_data, attr, 3145 uattr); 3146 else if (f.file->f_op == &bpf_map_fops) 3147 err = bpf_map_get_info_by_fd(f.file->private_data, attr, 3148 uattr); 3149 else if (f.file->f_op == &btf_fops) 3150 err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); 3151 else 3152 err = -EINVAL; 3153 3154 fdput(f); 3155 return err; 3156 } 3157 3158 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level 3159 3160 static int bpf_btf_load(const union bpf_attr *attr) 3161 { 3162 if (CHECK_ATTR(BPF_BTF_LOAD)) 3163 return -EINVAL; 3164 3165 if (!capable(CAP_SYS_ADMIN)) 3166 return -EPERM; 3167 3168 return btf_new_fd(attr); 3169 } 3170 3171 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 3172 3173 static int 
bpf_btf_get_fd_by_id(const union bpf_attr *attr) 3174 { 3175 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 3176 return -EINVAL; 3177 3178 if (!capable(CAP_SYS_ADMIN)) 3179 return -EPERM; 3180 3181 return btf_get_fd_by_id(attr->btf_id); 3182 } 3183 3184 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 3185 union bpf_attr __user *uattr, 3186 u32 prog_id, u32 fd_type, 3187 const char *buf, u64 probe_offset, 3188 u64 probe_addr) 3189 { 3190 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 3191 u32 len = buf ? strlen(buf) : 0, input_len; 3192 int err = 0; 3193 3194 if (put_user(len, &uattr->task_fd_query.buf_len)) 3195 return -EFAULT; 3196 input_len = attr->task_fd_query.buf_len; 3197 if (input_len && ubuf) { 3198 if (!len) { 3199 /* nothing to copy, just make ubuf NULL terminated */ 3200 char zero = '\0'; 3201 3202 if (put_user(zero, ubuf)) 3203 return -EFAULT; 3204 } else if (input_len >= len + 1) { 3205 /* ubuf can hold the string with NULL terminator */ 3206 if (copy_to_user(ubuf, buf, len + 1)) 3207 return -EFAULT; 3208 } else { 3209 /* ubuf cannot hold the string with NULL terminator, 3210 * do a partial copy with NULL terminator. 3211 */ 3212 char zero = '\0'; 3213 3214 err = -ENOSPC; 3215 if (copy_to_user(ubuf, buf, input_len - 1)) 3216 return -EFAULT; 3217 if (put_user(zero, ubuf + input_len - 1)) 3218 return -EFAULT; 3219 } 3220 } 3221 3222 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 3223 put_user(fd_type, &uattr->task_fd_query.fd_type) || 3224 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 3225 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 3226 return -EFAULT; 3227 3228 return err; 3229 } 3230 3231 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 3232 3233 static int bpf_task_fd_query(const union bpf_attr *attr, 3234 union bpf_attr __user *uattr) 3235 { 3236 pid_t pid = attr->task_fd_query.pid; 3237 u32 fd = attr->task_fd_query.fd; 3238 const struct perf_event *event; 3239 struct files_struct *files; 3240 struct task_struct *task; 3241 struct file *file; 3242 int err; 3243 3244 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 3245 return -EINVAL; 3246 3247 if (!capable(CAP_SYS_ADMIN)) 3248 return -EPERM; 3249 3250 if (attr->task_fd_query.flags != 0) 3251 return -EINVAL; 3252 3253 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 3254 if (!task) 3255 return -ENOENT; 3256 3257 files = get_files_struct(task); 3258 put_task_struct(task); 3259 if (!files) 3260 return -ENOENT; 3261 3262 err = 0; 3263 spin_lock(&files->file_lock); 3264 file = fcheck_files(files, fd); 3265 if (!file) 3266 err = -EBADF; 3267 else 3268 get_file(file); 3269 spin_unlock(&files->file_lock); 3270 put_files_struct(files); 3271 3272 if (err) 3273 goto out; 3274 3275 if (file->f_op == &bpf_raw_tp_fops) { 3276 struct bpf_raw_tracepoint *raw_tp = file->private_data; 3277 struct bpf_raw_event_map *btp = raw_tp->btp; 3278 3279 err = bpf_task_fd_query_copy(attr, uattr, 3280 raw_tp->prog->aux->id, 3281 BPF_FD_TYPE_RAW_TRACEPOINT, 3282 btp->tp->name, 0, 0); 3283 goto put_file; 3284 } 3285 3286 event = perf_get_event(file); 3287 if (!IS_ERR(event)) { 3288 u64 probe_offset, probe_addr; 3289 u32 prog_id, fd_type; 3290 const char *buf; 3291 3292 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 3293 &buf, &probe_offset, 3294 &probe_addr); 3295 if (!err) 3296 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 3297 fd_type, buf, 3298 probe_offset, 3299 probe_addr); 3300 goto put_file; 3301 } 3302 3303 err = -ENOTSUPP; 3304 put_file: 3305 fput(file); 3306 out: 3307 
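	/* Common exit: err is 0 on success, -EBADF if the fd could not be
	 * resolved, -ENOTSUPP if it refers to neither a raw tracepoint nor a
	 * perf event, or the error hit while copying the query result back.
	 */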
return err; 3308 } 3309 3310 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 3311 3312 #define BPF_DO_BATCH(fn) \ 3313 do { \ 3314 if (!fn) { \ 3315 err = -ENOTSUPP; \ 3316 goto err_put; \ 3317 } \ 3318 err = fn(map, attr, uattr); \ 3319 } while (0) 3320 3321 static int bpf_map_do_batch(const union bpf_attr *attr, 3322 union bpf_attr __user *uattr, 3323 int cmd) 3324 { 3325 struct bpf_map *map; 3326 int err, ufd; 3327 struct fd f; 3328 3329 if (CHECK_ATTR(BPF_MAP_BATCH)) 3330 return -EINVAL; 3331 3332 ufd = attr->batch.map_fd; 3333 f = fdget(ufd); 3334 map = __bpf_map_get(f); 3335 if (IS_ERR(map)) 3336 return PTR_ERR(map); 3337 3338 if ((cmd == BPF_MAP_LOOKUP_BATCH || 3339 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) && 3340 !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 3341 err = -EPERM; 3342 goto err_put; 3343 } 3344 3345 if (cmd != BPF_MAP_LOOKUP_BATCH && 3346 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 3347 err = -EPERM; 3348 goto err_put; 3349 } 3350 3351 if (cmd == BPF_MAP_LOOKUP_BATCH) 3352 BPF_DO_BATCH(map->ops->map_lookup_batch); 3353 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 3354 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); 3355 else if (cmd == BPF_MAP_UPDATE_BATCH) 3356 BPF_DO_BATCH(map->ops->map_update_batch); 3357 else 3358 BPF_DO_BATCH(map->ops->map_delete_batch); 3359 3360 err_put: 3361 fdput(f); 3362 return err; 3363 } 3364 3365 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 3366 { 3367 union bpf_attr attr; 3368 int err; 3369 3370 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) 3371 return -EPERM; 3372 3373 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 3374 if (err) 3375 return err; 3376 size = min_t(u32, size, sizeof(attr)); 3377 3378 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 3379 memset(&attr, 0, sizeof(attr)); 3380 if (copy_from_user(&attr, uattr, size) != 0) 3381 return -EFAULT; 3382 3383 err = security_bpf(cmd, &attr, size); 3384 if (err < 0) 3385 return err; 3386 3387 switch (cmd) { 3388 case BPF_MAP_CREATE: 3389 err = map_create(&attr); 3390 break; 3391 case BPF_MAP_LOOKUP_ELEM: 3392 err = map_lookup_elem(&attr); 3393 break; 3394 case BPF_MAP_UPDATE_ELEM: 3395 err = map_update_elem(&attr); 3396 break; 3397 case BPF_MAP_DELETE_ELEM: 3398 err = map_delete_elem(&attr); 3399 break; 3400 case BPF_MAP_GET_NEXT_KEY: 3401 err = map_get_next_key(&attr); 3402 break; 3403 case BPF_MAP_FREEZE: 3404 err = map_freeze(&attr); 3405 break; 3406 case BPF_PROG_LOAD: 3407 err = bpf_prog_load(&attr, uattr); 3408 break; 3409 case BPF_OBJ_PIN: 3410 err = bpf_obj_pin(&attr); 3411 break; 3412 case BPF_OBJ_GET: 3413 err = bpf_obj_get(&attr); 3414 break; 3415 case BPF_PROG_ATTACH: 3416 err = bpf_prog_attach(&attr); 3417 break; 3418 case BPF_PROG_DETACH: 3419 err = bpf_prog_detach(&attr); 3420 break; 3421 case BPF_PROG_QUERY: 3422 err = bpf_prog_query(&attr, uattr); 3423 break; 3424 case BPF_PROG_TEST_RUN: 3425 err = bpf_prog_test_run(&attr, uattr); 3426 break; 3427 case BPF_PROG_GET_NEXT_ID: 3428 err = bpf_obj_get_next_id(&attr, uattr, 3429 &prog_idr, &prog_idr_lock); 3430 break; 3431 case BPF_MAP_GET_NEXT_ID: 3432 err = bpf_obj_get_next_id(&attr, uattr, 3433 &map_idr, &map_idr_lock); 3434 break; 3435 case BPF_BTF_GET_NEXT_ID: 3436 err = bpf_obj_get_next_id(&attr, uattr, 3437 &btf_idr, &btf_idr_lock); 3438 break; 3439 case BPF_PROG_GET_FD_BY_ID: 3440 err = bpf_prog_get_fd_by_id(&attr); 3441 break; 3442 case BPF_MAP_GET_FD_BY_ID: 3443 err = bpf_map_get_fd_by_id(&attr); 3444 break; 
3445 case BPF_OBJ_GET_INFO_BY_FD: 3446 err = bpf_obj_get_info_by_fd(&attr, uattr); 3447 break; 3448 case BPF_RAW_TRACEPOINT_OPEN: 3449 err = bpf_raw_tracepoint_open(&attr); 3450 break; 3451 case BPF_BTF_LOAD: 3452 err = bpf_btf_load(&attr); 3453 break; 3454 case BPF_BTF_GET_FD_BY_ID: 3455 err = bpf_btf_get_fd_by_id(&attr); 3456 break; 3457 case BPF_TASK_FD_QUERY: 3458 err = bpf_task_fd_query(&attr, uattr); 3459 break; 3460 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 3461 err = map_lookup_and_delete_elem(&attr); 3462 break; 3463 case BPF_MAP_LOOKUP_BATCH: 3464 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH); 3465 break; 3466 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 3467 err = bpf_map_do_batch(&attr, uattr, 3468 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 3469 break; 3470 case BPF_MAP_UPDATE_BATCH: 3471 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH); 3472 break; 3473 case BPF_MAP_DELETE_BATCH: 3474 err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH); 3475 break; 3476 default: 3477 err = -EINVAL; 3478 break; 3479 } 3480 3481 return err; 3482 } 3483
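/*
 * Illustrative userspace sketch (not part of the kernel build): a minimal
 * walk of the command dispatcher above, assuming a hand-rolled sys_bpf()
 * wrapper around syscall(__NR_bpf, ...). It enumerates loaded programs with
 * BPF_PROG_GET_NEXT_ID, turns each ID into an fd with BPF_PROG_GET_FD_BY_ID
 * and fetches struct bpf_prog_info via BPF_OBJ_GET_INFO_BY_FD; all three
 * handlers require CAP_SYS_ADMIN.
 *
 *	#include <linux/bpf.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *			   unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 *	int main(void)
 *	{
 *		__u32 id = 0;
 *
 *		for (;;) {
 *			union bpf_attr attr;
 *			struct bpf_prog_info info;
 *			int fd;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.start_id = id;
 *			if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *				break;	// -ENOENT: no more programs
 *			id = attr.next_id;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.prog_id = id;
 *			fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *			if (fd < 0)
 *				continue;
 *
 *			memset(&info, 0, sizeof(info));
 *			memset(&attr, 0, sizeof(attr));
 *			attr.info.bpf_fd = fd;
 *			attr.info.info_len = sizeof(info);
 *			attr.info.info = (__u64)(unsigned long)&info;
 *			if (!sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
 *				printf("prog id %u type %u name %s\n",
 *				       info.id, info.type, info.name);
 *			close(fd);
 *		}
 *		return 0;
 *	}
 */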