/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK	(BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
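
/* The table above is populated by the X-macro include of <linux/bpf_types.h>:
 * each BPF_MAP_TYPE(id, ops) entry there expands to an initializer such as
 * (illustrative)
 *
 *	[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
 *
 * so indexing the array with attr->map_type yields the ops for that map type,
 * or NULL for types not built into this kernel.
 */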

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = attr->map_flags;
	map->numa_node = bpf_map_attr_numa_node(attr);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
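
/* Map memory is accounted against the creating user's RLIMIT_MEMLOCK in
 * units of pages (map->pages), as charged/uncharged above.  Rough,
 * illustrative example: with 4 KiB pages, a map whose backing storage
 * needs 1 MiB contributes 256 to user->locked_vm, and creation fails
 * with -EPERM once the total would exceed
 * rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT.
 */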

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif
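
/* With CONFIG_PROC_FS, the fdinfo handler above makes a map fd readable via
 * /proc/<pid>/fdinfo/<fd>.  Illustrative output (values made up):
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	1024
 *	map_flags:	0x0
 *	memlock:	12288
 */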

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);

	/* Copy all isalnum() and '_' char */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}
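
/* For reference, a minimal (illustrative, untested) user-space call into
 * map_create() below looks roughly like:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * All fields past BPF_MAP_CREATE_LAST_FIELD must stay zeroed, which is
 * exactly what CHECK_ATTR() verifies.
 */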

#define BPF_MAP_CREATE_LAST_FIELD map_ifindex
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
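
/* For the per-cpu map types handled below, the value seen by user space is a
 * concatenation of one value per possible CPU, with each slot padded to a
 * multiple of 8 bytes.  Illustrative example: value_size == 12 and 8 possible
 * CPUs means the lookup/update buffer must be round_up(12, 8) * 8 == 128
 * bytes.
 */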

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_update_elem(map, key, value, attr->flags);
		goto out;
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		int i;

		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		for (i = 0; i < prog->aux->func_cnt; i++)
			bpf_prog_kallsyms_del(prog->aux->func[i]);
		bpf_prog_kallsyms_del(prog);

		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	}
}

static int
bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
				enum bpf_attach_type expected_attach_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return 0;
		default:
			return -EINVAL;
		}
	default:
		return 0;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD expected_attach_type

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type))
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->expected_attach_type = attr->expected_attach_type;

	prog->aux->offload_requested = !!attr->prog_ifindex;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boot_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	if (!prog->bpf_func)
		prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

struct bpf_raw_tracepoint {
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
};

static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
{
	struct bpf_raw_tracepoint *raw_tp = filp->private_data;

	if (raw_tp->prog) {
		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
		bpf_prog_put(raw_tp->prog);
	}
	kfree(raw_tp);
	return 0;
}

static const struct file_operations bpf_raw_tp_fops = {
	.release	= bpf_raw_tracepoint_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};
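
/* Lifecycle sketch for BPF_RAW_TRACEPOINT_OPEN (handled below): the
 * tracepoint is looked up by name, a BPF_PROG_TYPE_RAW_TRACEPOINT program is
 * taken by fd and registered on it, and an anonymous "bpf-raw-tracepoint" fd
 * is returned to user space.  Closing that fd runs
 * bpf_raw_tracepoint_release() above, which unregisters the program and
 * drops its reference.
 */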

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_raw_tracepoint *raw_tp;
	struct bpf_raw_event_map *btp;
	struct bpf_prog *prog;
	char tp_name[128];
	int tp_fd, err;

	if (strncpy_from_user(tp_name, u64_to_user_ptr(attr->raw_tracepoint.name),
			      sizeof(tp_name) - 1) < 0)
		return -EFAULT;
	tp_name[sizeof(tp_name) - 1] = 0;

	btp = bpf_find_raw_tracepoint(tp_name);
	if (!btp)
		return -ENOENT;

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
	if (!raw_tp)
		return -ENOMEM;
	raw_tp->btp = btp;

	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
				 BPF_PROG_TYPE_RAW_TRACEPOINT);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_free_tp;
	}

	err = bpf_probe_register(raw_tp->btp, prog);
	if (err)
		goto out_put_prog;

	raw_tp->prog = prog;
	tp_fd = anon_inode_getfd("bpf-raw-tracepoint", &bpf_raw_tp_fops, raw_tp,
				 O_CLOEXEC);
	if (tp_fd < 0) {
		bpf_probe_unregister(raw_tp->btp, prog);
		err = tp_fd;
		goto out_put_prog;
	}
	return tp_fd;

out_put_prog:
	bpf_prog_put(prog);
out_free_tp:
	kfree(raw_tp);
	return err;
}

#ifdef CONFIG_CGROUP_BPF

static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	default:
		return 0;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int sockmap_get_from_fd(const union bpf_attr *attr,
			       int type, bool attach)
{
	struct bpf_prog *prog = NULL;
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (attach) {
		prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
		if (IS_ERR(prog)) {
			fdput(f);
			return PTR_ERR(prog);
		}
	}

	err = sock_map_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	fdput(f);
	return 0;
}
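
/* Cgroup attach flags, roughly: with no flags a cgroup holds at most one
 * program per attach type and descendant cgroups cannot install their own;
 * BPF_F_ALLOW_OVERRIDE lets a descendant replace the inherited program, and
 * BPF_F_ALLOW_MULTI allows several programs to be attached and run in
 * sequence.  Anything outside BPF_F_ATTACH_MASK is rejected by
 * bpf_prog_attach() below.
 */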

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_MSG_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
	default:
		return -EINVAL;
	}

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);
	cgroup_put(cgrp);
	return ret;
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
		break;
	default:
		return -EINVAL;
	}
	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	ret = cgroup_bpf_query(cgrp, attr, uattr);
	cgroup_put(cgrp);
	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}
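
/* The *_GET_NEXT_ID and *_GET_FD_BY_ID commands below let a privileged user
 * walk all loaded objects.  Illustrative (untested) iteration from user
 * space:
 *
 *	attr.start_id = 0;
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
 *		// attr.next_id now names a program; an fd for it can be
 *		// fetched via BPF_PROG_GET_FD_BY_ID
 *		attr.start_id = attr.next_id;
 *	}
 */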

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr)
{
	int i;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (prog->aux->used_maps[i] == (void *)addr)
			return prog->aux->used_maps[i];
	return NULL;
}
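
/* bpf_insn_prepare_dump() below builds the sanitized copy of the program that
 * BPF_OBJ_GET_INFO_BY_FD exposes as xlated_prog_insns: tail calls are shown
 * as the original helper call, the internal BPF_CALL_ARGS form is rewritten
 * back to BPF_CALL, and BPF_LD_IMM64 loads that embed a kernel map pointer
 * are rewritten to BPF_PSEUDO_MAP_FD carrying the map ID, so raw kernel
 * addresses are not leaked unless bpf_dump_raw_ok() allows it.
 */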

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u64 imm;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok())
				insns[i].imm = 0;
			continue;
		}

		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm);
		if (map) {
			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
			insns[i].imm = map->id;
			insns[i + 1].imm = 0;
			continue;
		}

		if (!bpf_dump_raw_ok() &&
		    imm == (unsigned long)prog->aux) {
			insns[i].imm = 0;
			insns[i + 1].imm = 0;
			continue;
		}
	}

	return insns;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok()) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok()) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);
			if (copy_to_user(uinsns, prog->bpf_func, ulen))
				return -EFAULT;
		} else {
			info.jited_prog_insns = 0;
		}
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}
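
/* The single bpf(2) entry point below multiplexes all commands.  libc
 * typically provides no wrapper, so user space calls it roughly like
 * (illustrative, untested):
 *
 *	static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 * passing a zero-initialized union bpf_attr whose trailing, unknown fields
 * are verified by check_uarg_tail_zero() above.
 */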

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}