// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
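
/*
 * Illustrative sketch (not part of this file): a newer userspace may pass a
 * larger attribute struct than this kernel knows about.  The call is accepted
 * only if the unknown tail bytes are zero:
 *
 *	struct {
 *		__u32 known;	// field this kernel understands
 *		__u32 newer;	// hypothetical field added by a later kernel
 *	} attr = { .known = 1, .newer = 0 };
 *
 * With expected_size == sizeof(__u32) and actual_size == sizeof(attr), the
 * check above returns 0; setting .newer to a non-zero value makes it -E2BIG.
 */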

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please, do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see at bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}
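
/*
 * Sizing note (illustrative; the exact numbers depend on the architecture and
 * config): with 4K pages and PAGE_ALLOC_COSTLY_ORDER == 3, requests up to 32K
 * are tried first via kmalloc_node() with __GFP_NORETRY, and only larger or
 * mmap()-able requests, or kmalloc failures, fall back to the vmalloc path
 * with __GFP_RETRY_MAYFAIL.
 */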

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	map->memcg = get_mem_cgroup_from_mm(current->mm);
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	mem_cgroup_put(map->memcg);
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide a backward compatibility and display
 * a reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
	    map_value_has_timer(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
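
/*
 * Sketch of what the macro above checks (illustrative): for a command whose
 * last used field is "flags", CHECK_ATTR(BPF_MAP_LOOKUP_ELEM) scans every byte
 * of 'attr' that lies after attr->flags and evaluates to true (an error) if
 * any of them is non-zero, i.e. userspace set fields this command does not use.
 */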

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	map->timer_off = btf_find_timer(btf, value_type);
	if (map_value_has_timer(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY)
			return -EOPNOTSUPP;
	}

	if (map->ops->map_check_btf)
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);

	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ...
	 */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	map->spin_lock_off = -EINVAL;
	map->timer_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd has to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_bpfptr(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
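
/*
 * Userspace view of the element commands handled above (illustrative libbpf
 * calls, not part of this file): bpf_map_update_elem(fd, &key, &val, BPF_ANY)
 * drives map_update_elem(), bpf_map_delete_elem(fd, &key) drives
 * map_delete_elem(), and a full scan combines bpf_map_get_next_key(fd, &key,
 * &next_key) with bpf_map_lookup_elem(fd, &key, &val), starting from a NULL
 * key to obtain the first element.
 */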

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	maybe_wait_bpf_programs(map);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
	     (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_dev_bound(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
	    map_value_has_timer(map)) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
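
/*
 * Usage sketch (illustrative): once bpf_map_freeze(map_fd) succeeds, all
 * further syscall-side writes (update/delete) fail with -EPERM because
 * map_get_sys_perms() drops FMODE_CAN_WRITE for frozen maps, while BPF
 * programs may still write unless the map was created with BPF_F_RDONLY_PROG.
 */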

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD]   = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (op == BPF_AUDIT_LOAD)
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&prog_idr_lock, flags);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&prog_idr_lock, flags);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	__bpf_prog_put_noref(prog, true);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt, tmisses;

		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
		misses += tmisses;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
	stats->misses = misses;
}
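
/*
 * Note on the loop above (illustrative): u64_stats_fetch_begin_irq() and
 * u64_stats_fetch_retry_irq() form a seqcount read side, so a reader racing
 * with a per-cpu stats update simply retries the three reads instead of
 * taking a lock; the per-cpu values are then summed into a single total.
 */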

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n"
		   "verified_insns:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses,
		   prog->aux->verified_insns);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later for some of them specifying expected_attach_type
 * at load time became required so that the program could be validated properly.
 * Programs of types that are allowed to be loaded both w/ and w/o (for
 * backward compatibility) expected_attach_type, should have the default attach
 * type assigned to expected_attach_type for the latter case, so that it can be
 * validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_SK_REUSEPORT_SELECT;
		break;
	}
}

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		if (!attach_btf && !dst_prog)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (attach_btf && (!btf_id || dst_prog))
		return -EINVAL;

	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SK_LOOKUP:
		if (expected_attach_type == BPF_SK_LOOKUP)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		switch (expected_attach_type) {
		case BPF_SK_REUSEPORT_SELECT:
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2142 return 0; 2143 default: 2144 return -EINVAL; 2145 } 2146 case BPF_PROG_TYPE_SYSCALL: 2147 case BPF_PROG_TYPE_EXT: 2148 if (expected_attach_type) 2149 return -EINVAL; 2150 fallthrough; 2151 default: 2152 return 0; 2153 } 2154 } 2155 2156 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2157 { 2158 switch (prog_type) { 2159 case BPF_PROG_TYPE_SCHED_CLS: 2160 case BPF_PROG_TYPE_SCHED_ACT: 2161 case BPF_PROG_TYPE_XDP: 2162 case BPF_PROG_TYPE_LWT_IN: 2163 case BPF_PROG_TYPE_LWT_OUT: 2164 case BPF_PROG_TYPE_LWT_XMIT: 2165 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2166 case BPF_PROG_TYPE_SK_SKB: 2167 case BPF_PROG_TYPE_SK_MSG: 2168 case BPF_PROG_TYPE_LIRC_MODE2: 2169 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2170 case BPF_PROG_TYPE_CGROUP_DEVICE: 2171 case BPF_PROG_TYPE_CGROUP_SOCK: 2172 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2173 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2174 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2175 case BPF_PROG_TYPE_SOCK_OPS: 2176 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2177 return true; 2178 case BPF_PROG_TYPE_CGROUP_SKB: 2179 /* always unpriv */ 2180 case BPF_PROG_TYPE_SK_REUSEPORT: 2181 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2182 default: 2183 return false; 2184 } 2185 } 2186 2187 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2188 { 2189 switch (prog_type) { 2190 case BPF_PROG_TYPE_KPROBE: 2191 case BPF_PROG_TYPE_TRACEPOINT: 2192 case BPF_PROG_TYPE_PERF_EVENT: 2193 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2194 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2195 case BPF_PROG_TYPE_TRACING: 2196 case BPF_PROG_TYPE_LSM: 2197 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2198 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2199 return true; 2200 default: 2201 return false; 2202 } 2203 } 2204 2205 /* last field in 'union bpf_attr' used by this command */ 2206 #define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size 2207 2208 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr) 2209 { 2210 enum bpf_prog_type type = attr->prog_type; 2211 struct bpf_prog *prog, *dst_prog = NULL; 2212 struct btf *attach_btf = NULL; 2213 int err; 2214 char license[128]; 2215 bool is_gpl; 2216 2217 if (CHECK_ATTR(BPF_PROG_LOAD)) 2218 return -EINVAL; 2219 2220 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2221 BPF_F_ANY_ALIGNMENT | 2222 BPF_F_TEST_STATE_FREQ | 2223 BPF_F_SLEEPABLE | 2224 BPF_F_TEST_RND_HI32 | 2225 BPF_F_XDP_HAS_FRAGS)) 2226 return -EINVAL; 2227 2228 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2229 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2230 !bpf_capable()) 2231 return -EPERM; 2232 2233 /* copy eBPF program license from user space */ 2234 if (strncpy_from_bpfptr(license, 2235 make_bpfptr(attr->license, uattr.is_kernel), 2236 sizeof(license) - 1) < 0) 2237 return -EFAULT; 2238 license[sizeof(license) - 1] = 0; 2239 2240 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2241 is_gpl = license_is_gpl_compatible(license); 2242 2243 if (attr->insn_cnt == 0 || 2244 attr->insn_cnt > (bpf_capable() ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2245 return -E2BIG; 2246 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2247 type != BPF_PROG_TYPE_CGROUP_SKB && 2248 !bpf_capable()) 2249 return -EPERM; 2250 2251 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2252 return -EPERM; 2253 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2254 return -EPERM; 2255 2256 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2257 * or btf, we need to check which one it is 2258 */ 2259 if (attr->attach_prog_fd) { 2260 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2261 if (IS_ERR(dst_prog)) { 2262 dst_prog = NULL; 2263 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2264 if (IS_ERR(attach_btf)) 2265 return -EINVAL; 2266 if (!btf_is_kernel(attach_btf)) { 2267 /* attaching through specifying bpf_prog's BTF 2268 * objects directly might be supported eventually 2269 */ 2270 btf_put(attach_btf); 2271 return -ENOTSUPP; 2272 } 2273 } 2274 } else if (attr->attach_btf_id) { 2275 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2276 attach_btf = bpf_get_btf_vmlinux(); 2277 if (IS_ERR(attach_btf)) 2278 return PTR_ERR(attach_btf); 2279 if (!attach_btf) 2280 return -EINVAL; 2281 btf_get(attach_btf); 2282 } 2283 2284 bpf_prog_load_fixup_attach_type(attr); 2285 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2286 attach_btf, attr->attach_btf_id, 2287 dst_prog)) { 2288 if (dst_prog) 2289 bpf_prog_put(dst_prog); 2290 if (attach_btf) 2291 btf_put(attach_btf); 2292 return -EINVAL; 2293 } 2294 2295 /* plain bpf_prog allocation */ 2296 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2297 if (!prog) { 2298 if (dst_prog) 2299 bpf_prog_put(dst_prog); 2300 if (attach_btf) 2301 btf_put(attach_btf); 2302 return -ENOMEM; 2303 } 2304 2305 prog->expected_attach_type = attr->expected_attach_type; 2306 prog->aux->attach_btf = attach_btf; 2307 prog->aux->attach_btf_id = attr->attach_btf_id; 2308 prog->aux->dst_prog = dst_prog; 2309 prog->aux->offload_requested = !!attr->prog_ifindex; 2310 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2311 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2312 2313 err = security_bpf_prog_alloc(prog->aux); 2314 if (err) 2315 goto free_prog; 2316 2317 prog->aux->user = get_current_user(); 2318 prog->len = attr->insn_cnt; 2319 2320 err = -EFAULT; 2321 if (copy_from_bpfptr(prog->insns, 2322 make_bpfptr(attr->insns, uattr.is_kernel), 2323 bpf_prog_insn_size(prog)) != 0) 2324 goto free_prog_sec; 2325 2326 prog->orig_prog = NULL; 2327 prog->jited = 0; 2328 2329 atomic64_set(&prog->aux->refcnt, 1); 2330 prog->gpl_compatible = is_gpl ? 
1 : 0; 2331 2332 if (bpf_prog_is_dev_bound(prog->aux)) { 2333 err = bpf_prog_offload_init(prog, attr); 2334 if (err) 2335 goto free_prog_sec; 2336 } 2337 2338 /* find program type: socket_filter vs tracing_filter */ 2339 err = find_prog_type(type, prog); 2340 if (err < 0) 2341 goto free_prog_sec; 2342 2343 prog->aux->load_time = ktime_get_boottime_ns(); 2344 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2345 sizeof(attr->prog_name)); 2346 if (err < 0) 2347 goto free_prog_sec; 2348 2349 /* run eBPF verifier */ 2350 err = bpf_check(&prog, attr, uattr); 2351 if (err < 0) 2352 goto free_used_maps; 2353 2354 prog = bpf_prog_select_runtime(prog, &err); 2355 if (err < 0) 2356 goto free_used_maps; 2357 2358 err = bpf_prog_alloc_id(prog); 2359 if (err) 2360 goto free_used_maps; 2361 2362 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2363 * effectively publicly exposed. However, retrieving via 2364 * bpf_prog_get_fd_by_id() will take another reference, 2365 * therefore it cannot be gone underneath us. 2366 * 2367 * Only for the time /after/ successful bpf_prog_new_fd() 2368 * and before returning to userspace, we might just hold 2369 * one reference and any parallel close on that fd could 2370 * rip everything out. Hence, below notifications must 2371 * happen before bpf_prog_new_fd(). 2372 * 2373 * Also, any failure handling from this point onwards must 2374 * be using bpf_prog_put() given the program is exposed. 2375 */ 2376 bpf_prog_kallsyms_add(prog); 2377 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2378 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2379 2380 err = bpf_prog_new_fd(prog); 2381 if (err < 0) 2382 bpf_prog_put(prog); 2383 return err; 2384 2385 free_used_maps: 2386 /* In case we have subprogs, we need to wait for a grace 2387 * period before we can tear down JIT memory since symbols 2388 * are already exposed under kallsyms. 2389 */ 2390 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2391 return err; 2392 free_prog_sec: 2393 free_uid(prog->aux->user); 2394 security_bpf_prog_free(prog->aux); 2395 free_prog: 2396 if (prog->aux->attach_btf) 2397 btf_put(prog->aux->attach_btf); 2398 bpf_prog_free(prog); 2399 return err; 2400 } 2401 2402 #define BPF_OBJ_LAST_FIELD file_flags 2403 2404 static int bpf_obj_pin(const union bpf_attr *attr) 2405 { 2406 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) 2407 return -EINVAL; 2408 2409 return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); 2410 } 2411 2412 static int bpf_obj_get(const union bpf_attr *attr) 2413 { 2414 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2415 attr->file_flags & ~BPF_OBJ_FLAG_MASK) 2416 return -EINVAL; 2417 2418 return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), 2419 attr->file_flags); 2420 } 2421 2422 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2423 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2424 { 2425 atomic64_set(&link->refcnt, 1); 2426 link->type = type; 2427 link->id = 0; 2428 link->ops = ops; 2429 link->prog = prog; 2430 } 2431 2432 static void bpf_link_free_id(int id) 2433 { 2434 if (!id) 2435 return; 2436 2437 spin_lock_bh(&link_idr_lock); 2438 idr_remove(&link_idr, id); 2439 spin_unlock_bh(&link_idr_lock); 2440 } 2441 2442 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2443 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2444 * anon_inode's release() call. This helper marksbpf_link as 2445 * defunct, releases anon_inode file and puts reserved FD. 
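 * A rough sketch of the caller pattern used by the attach paths in this
 * file (the attach step is hook-specific and shown here only as a
 * placeholder; error handling is abbreviated):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) { kfree(link); return err; }
 *	err = <attach link->prog to the BPF hook>;
 *	if (err) { bpf_link_cleanup(&primer); return err; }
 *	return bpf_link_settle(&primer);
 *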
bpf_prog's refcnt 2446 * is not decremented, it's the responsibility of a calling code that failed 2447 * to complete bpf_link initialization. 2448 */ 2449 void bpf_link_cleanup(struct bpf_link_primer *primer) 2450 { 2451 primer->link->prog = NULL; 2452 bpf_link_free_id(primer->id); 2453 fput(primer->file); 2454 put_unused_fd(primer->fd); 2455 } 2456 2457 void bpf_link_inc(struct bpf_link *link) 2458 { 2459 atomic64_inc(&link->refcnt); 2460 } 2461 2462 /* bpf_link_free is guaranteed to be called from process context */ 2463 static void bpf_link_free(struct bpf_link *link) 2464 { 2465 bpf_link_free_id(link->id); 2466 if (link->prog) { 2467 /* detach BPF program, clean up used resources */ 2468 link->ops->release(link); 2469 bpf_prog_put(link->prog); 2470 } 2471 /* free bpf_link and its containing memory */ 2472 link->ops->dealloc(link); 2473 } 2474 2475 static void bpf_link_put_deferred(struct work_struct *work) 2476 { 2477 struct bpf_link *link = container_of(work, struct bpf_link, work); 2478 2479 bpf_link_free(link); 2480 } 2481 2482 /* bpf_link_put can be called from atomic context, but ensures that resources 2483 * are freed from process context 2484 */ 2485 void bpf_link_put(struct bpf_link *link) 2486 { 2487 if (!atomic64_dec_and_test(&link->refcnt)) 2488 return; 2489 2490 if (in_atomic()) { 2491 INIT_WORK(&link->work, bpf_link_put_deferred); 2492 schedule_work(&link->work); 2493 } else { 2494 bpf_link_free(link); 2495 } 2496 } 2497 EXPORT_SYMBOL(bpf_link_put); 2498 2499 static int bpf_link_release(struct inode *inode, struct file *filp) 2500 { 2501 struct bpf_link *link = filp->private_data; 2502 2503 bpf_link_put(link); 2504 return 0; 2505 } 2506 2507 #ifdef CONFIG_PROC_FS 2508 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2509 #define BPF_MAP_TYPE(_id, _ops) 2510 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2511 static const char *bpf_link_type_strs[] = { 2512 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2513 #include <linux/bpf_types.h> 2514 }; 2515 #undef BPF_PROG_TYPE 2516 #undef BPF_MAP_TYPE 2517 #undef BPF_LINK_TYPE 2518 2519 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2520 { 2521 const struct bpf_link *link = filp->private_data; 2522 const struct bpf_prog *prog = link->prog; 2523 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2524 2525 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2526 seq_printf(m, 2527 "link_type:\t%s\n" 2528 "link_id:\t%u\n" 2529 "prog_tag:\t%s\n" 2530 "prog_id:\t%u\n", 2531 bpf_link_type_strs[link->type], 2532 link->id, 2533 prog_tag, 2534 prog->aux->id); 2535 if (link->ops->show_fdinfo) 2536 link->ops->show_fdinfo(link, m); 2537 } 2538 #endif 2539 2540 static const struct file_operations bpf_link_fops = { 2541 #ifdef CONFIG_PROC_FS 2542 .show_fdinfo = bpf_link_show_fdinfo, 2543 #endif 2544 .release = bpf_link_release, 2545 .read = bpf_dummy_read, 2546 .write = bpf_dummy_write, 2547 }; 2548 2549 static int bpf_link_alloc_id(struct bpf_link *link) 2550 { 2551 int id; 2552 2553 idr_preload(GFP_KERNEL); 2554 spin_lock_bh(&link_idr_lock); 2555 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 2556 spin_unlock_bh(&link_idr_lock); 2557 idr_preload_end(); 2558 2559 return id; 2560 } 2561 2562 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 2563 * reserving unused FD and allocating ID from link_idr. This is to be paired 2564 * with bpf_link_settle() to install FD and ID and expose bpf_link to 2565 * user-space, if bpf_link is successfully attached. 
If not, bpf_link and 2566 * pre-allocated resources are to be freed with bpf_cleanup() call. All the 2567 * transient state is passed around in struct bpf_link_primer. 2568 * This is preferred way to create and initialize bpf_link, especially when 2569 * there are complicated and expensive operations in between creating bpf_link 2570 * itself and attaching it to BPF hook. By using bpf_link_prime() and 2571 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 2572 * expensive (and potentially failing) roll back operations in a rare case 2573 * that file, FD, or ID can't be allocated. 2574 */ 2575 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 2576 { 2577 struct file *file; 2578 int fd, id; 2579 2580 fd = get_unused_fd_flags(O_CLOEXEC); 2581 if (fd < 0) 2582 return fd; 2583 2584 2585 id = bpf_link_alloc_id(link); 2586 if (id < 0) { 2587 put_unused_fd(fd); 2588 return id; 2589 } 2590 2591 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 2592 if (IS_ERR(file)) { 2593 bpf_link_free_id(id); 2594 put_unused_fd(fd); 2595 return PTR_ERR(file); 2596 } 2597 2598 primer->link = link; 2599 primer->file = file; 2600 primer->fd = fd; 2601 primer->id = id; 2602 return 0; 2603 } 2604 2605 int bpf_link_settle(struct bpf_link_primer *primer) 2606 { 2607 /* make bpf_link fetchable by ID */ 2608 spin_lock_bh(&link_idr_lock); 2609 primer->link->id = primer->id; 2610 spin_unlock_bh(&link_idr_lock); 2611 /* make bpf_link fetchable by FD */ 2612 fd_install(primer->fd, primer->file); 2613 /* pass through installed FD */ 2614 return primer->fd; 2615 } 2616 2617 int bpf_link_new_fd(struct bpf_link *link) 2618 { 2619 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 2620 } 2621 2622 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 2623 { 2624 struct fd f = fdget(ufd); 2625 struct bpf_link *link; 2626 2627 if (!f.file) 2628 return ERR_PTR(-EBADF); 2629 if (f.file->f_op != &bpf_link_fops) { 2630 fdput(f); 2631 return ERR_PTR(-EINVAL); 2632 } 2633 2634 link = f.file->private_data; 2635 bpf_link_inc(link); 2636 fdput(f); 2637 2638 return link; 2639 } 2640 EXPORT_SYMBOL(bpf_link_get_from_fd); 2641 2642 struct bpf_tracing_link { 2643 struct bpf_link link; 2644 enum bpf_attach_type attach_type; 2645 struct bpf_trampoline *trampoline; 2646 struct bpf_prog *tgt_prog; 2647 }; 2648 2649 static void bpf_tracing_link_release(struct bpf_link *link) 2650 { 2651 struct bpf_tracing_link *tr_link = 2652 container_of(link, struct bpf_tracing_link, link); 2653 2654 WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog, 2655 tr_link->trampoline)); 2656 2657 bpf_trampoline_put(tr_link->trampoline); 2658 2659 /* tgt_prog is NULL if target is a kernel function */ 2660 if (tr_link->tgt_prog) 2661 bpf_prog_put(tr_link->tgt_prog); 2662 } 2663 2664 static void bpf_tracing_link_dealloc(struct bpf_link *link) 2665 { 2666 struct bpf_tracing_link *tr_link = 2667 container_of(link, struct bpf_tracing_link, link); 2668 2669 kfree(tr_link); 2670 } 2671 2672 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 2673 struct seq_file *seq) 2674 { 2675 struct bpf_tracing_link *tr_link = 2676 container_of(link, struct bpf_tracing_link, link); 2677 2678 seq_printf(seq, 2679 "attach_type:\t%d\n", 2680 tr_link->attach_type); 2681 } 2682 2683 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 2684 struct bpf_link_info *info) 2685 { 2686 struct bpf_tracing_link *tr_link = 2687 container_of(link, struct bpf_tracing_link, link); 2688 2689 
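	/* Report the attach type and decode the trampoline key back into the
	 * target BTF object id and type id for user-space introspection.
	 */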
info->tracing.attach_type = tr_link->attach_type; 2690 bpf_trampoline_unpack_key(tr_link->trampoline->key, 2691 &info->tracing.target_obj_id, 2692 &info->tracing.target_btf_id); 2693 2694 return 0; 2695 } 2696 2697 static const struct bpf_link_ops bpf_tracing_link_lops = { 2698 .release = bpf_tracing_link_release, 2699 .dealloc = bpf_tracing_link_dealloc, 2700 .show_fdinfo = bpf_tracing_link_show_fdinfo, 2701 .fill_link_info = bpf_tracing_link_fill_link_info, 2702 }; 2703 2704 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 2705 int tgt_prog_fd, 2706 u32 btf_id) 2707 { 2708 struct bpf_link_primer link_primer; 2709 struct bpf_prog *tgt_prog = NULL; 2710 struct bpf_trampoline *tr = NULL; 2711 struct bpf_tracing_link *link; 2712 u64 key = 0; 2713 int err; 2714 2715 switch (prog->type) { 2716 case BPF_PROG_TYPE_TRACING: 2717 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 2718 prog->expected_attach_type != BPF_TRACE_FEXIT && 2719 prog->expected_attach_type != BPF_MODIFY_RETURN) { 2720 err = -EINVAL; 2721 goto out_put_prog; 2722 } 2723 break; 2724 case BPF_PROG_TYPE_EXT: 2725 if (prog->expected_attach_type != 0) { 2726 err = -EINVAL; 2727 goto out_put_prog; 2728 } 2729 break; 2730 case BPF_PROG_TYPE_LSM: 2731 if (prog->expected_attach_type != BPF_LSM_MAC) { 2732 err = -EINVAL; 2733 goto out_put_prog; 2734 } 2735 break; 2736 default: 2737 err = -EINVAL; 2738 goto out_put_prog; 2739 } 2740 2741 if (!!tgt_prog_fd != !!btf_id) { 2742 err = -EINVAL; 2743 goto out_put_prog; 2744 } 2745 2746 if (tgt_prog_fd) { 2747 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 2748 if (prog->type != BPF_PROG_TYPE_EXT) { 2749 err = -EINVAL; 2750 goto out_put_prog; 2751 } 2752 2753 tgt_prog = bpf_prog_get(tgt_prog_fd); 2754 if (IS_ERR(tgt_prog)) { 2755 err = PTR_ERR(tgt_prog); 2756 tgt_prog = NULL; 2757 goto out_put_prog; 2758 } 2759 2760 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 2761 } 2762 2763 link = kzalloc(sizeof(*link), GFP_USER); 2764 if (!link) { 2765 err = -ENOMEM; 2766 goto out_put_prog; 2767 } 2768 bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING, 2769 &bpf_tracing_link_lops, prog); 2770 link->attach_type = prog->expected_attach_type; 2771 2772 mutex_lock(&prog->aux->dst_mutex); 2773 2774 /* There are a few possible cases here: 2775 * 2776 * - if prog->aux->dst_trampoline is set, the program was just loaded 2777 * and not yet attached to anything, so we can use the values stored 2778 * in prog->aux 2779 * 2780 * - if prog->aux->dst_trampoline is NULL, the program has already been 2781 * attached to a target and its initial target was cleared (below) 2782 * 2783 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 2784 * target_btf_id using the link_create API. 2785 * 2786 * - if tgt_prog == NULL when this function was called using the old 2787 * raw_tracepoint_open API, and we need a target from prog->aux 2788 * 2789 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 2790 * was detached and is going for re-attachment. 2791 */ 2792 if (!prog->aux->dst_trampoline && !tgt_prog) { 2793 /* 2794 * Allow re-attach for TRACING and LSM programs. If it's 2795 * currently linked, bpf_trampoline_link_prog will fail. 2796 * EXT programs need to specify tgt_prog_fd, so they 2797 * re-attach in separate code path. 
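		 * In that case the target recorded at load time
		 * (prog->aux->attach_btf plus attach_btf_id) is reused below
		 * to recompute the trampoline key.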
2798 */ 2799 if (prog->type != BPF_PROG_TYPE_TRACING && 2800 prog->type != BPF_PROG_TYPE_LSM) { 2801 err = -EINVAL; 2802 goto out_unlock; 2803 } 2804 btf_id = prog->aux->attach_btf_id; 2805 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 2806 } 2807 2808 if (!prog->aux->dst_trampoline || 2809 (key && key != prog->aux->dst_trampoline->key)) { 2810 /* If there is no saved target, or the specified target is 2811 * different from the destination specified at load time, we 2812 * need a new trampoline and a check for compatibility 2813 */ 2814 struct bpf_attach_target_info tgt_info = {}; 2815 2816 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 2817 &tgt_info); 2818 if (err) 2819 goto out_unlock; 2820 2821 tr = bpf_trampoline_get(key, &tgt_info); 2822 if (!tr) { 2823 err = -ENOMEM; 2824 goto out_unlock; 2825 } 2826 } else { 2827 /* The caller didn't specify a target, or the target was the 2828 * same as the destination supplied during program load. This 2829 * means we can reuse the trampoline and reference from program 2830 * load time, and there is no need to allocate a new one. This 2831 * can only happen once for any program, as the saved values in 2832 * prog->aux are cleared below. 2833 */ 2834 tr = prog->aux->dst_trampoline; 2835 tgt_prog = prog->aux->dst_prog; 2836 } 2837 2838 err = bpf_link_prime(&link->link, &link_primer); 2839 if (err) 2840 goto out_unlock; 2841 2842 err = bpf_trampoline_link_prog(prog, tr); 2843 if (err) { 2844 bpf_link_cleanup(&link_primer); 2845 link = NULL; 2846 goto out_unlock; 2847 } 2848 2849 link->tgt_prog = tgt_prog; 2850 link->trampoline = tr; 2851 2852 /* Always clear the trampoline and target prog from prog->aux to make 2853 * sure the original attach destination is not kept alive after a 2854 * program is (re-)attached to another target. 
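	 * The load-time dst_prog / dst_trampoline references are put below
	 * unless they were handed over to the link above; the pointers are
	 * then cleared.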
2855 */ 2856 if (prog->aux->dst_prog && 2857 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 2858 /* got extra prog ref from syscall, or attaching to different prog */ 2859 bpf_prog_put(prog->aux->dst_prog); 2860 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 2861 /* we allocated a new trampoline, so free the old one */ 2862 bpf_trampoline_put(prog->aux->dst_trampoline); 2863 2864 prog->aux->dst_prog = NULL; 2865 prog->aux->dst_trampoline = NULL; 2866 mutex_unlock(&prog->aux->dst_mutex); 2867 2868 return bpf_link_settle(&link_primer); 2869 out_unlock: 2870 if (tr && tr != prog->aux->dst_trampoline) 2871 bpf_trampoline_put(tr); 2872 mutex_unlock(&prog->aux->dst_mutex); 2873 kfree(link); 2874 out_put_prog: 2875 if (tgt_prog_fd && tgt_prog) 2876 bpf_prog_put(tgt_prog); 2877 return err; 2878 } 2879 2880 struct bpf_raw_tp_link { 2881 struct bpf_link link; 2882 struct bpf_raw_event_map *btp; 2883 }; 2884 2885 static void bpf_raw_tp_link_release(struct bpf_link *link) 2886 { 2887 struct bpf_raw_tp_link *raw_tp = 2888 container_of(link, struct bpf_raw_tp_link, link); 2889 2890 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 2891 bpf_put_raw_tracepoint(raw_tp->btp); 2892 } 2893 2894 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 2895 { 2896 struct bpf_raw_tp_link *raw_tp = 2897 container_of(link, struct bpf_raw_tp_link, link); 2898 2899 kfree(raw_tp); 2900 } 2901 2902 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 2903 struct seq_file *seq) 2904 { 2905 struct bpf_raw_tp_link *raw_tp_link = 2906 container_of(link, struct bpf_raw_tp_link, link); 2907 2908 seq_printf(seq, 2909 "tp_name:\t%s\n", 2910 raw_tp_link->btp->tp->name); 2911 } 2912 2913 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 2914 struct bpf_link_info *info) 2915 { 2916 struct bpf_raw_tp_link *raw_tp_link = 2917 container_of(link, struct bpf_raw_tp_link, link); 2918 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 2919 const char *tp_name = raw_tp_link->btp->tp->name; 2920 u32 ulen = info->raw_tracepoint.tp_name_len; 2921 size_t tp_len = strlen(tp_name); 2922 2923 if (!ulen ^ !ubuf) 2924 return -EINVAL; 2925 2926 info->raw_tracepoint.tp_name_len = tp_len + 1; 2927 2928 if (!ubuf) 2929 return 0; 2930 2931 if (ulen >= tp_len + 1) { 2932 if (copy_to_user(ubuf, tp_name, tp_len + 1)) 2933 return -EFAULT; 2934 } else { 2935 char zero = '\0'; 2936 2937 if (copy_to_user(ubuf, tp_name, ulen - 1)) 2938 return -EFAULT; 2939 if (put_user(zero, ubuf + ulen - 1)) 2940 return -EFAULT; 2941 return -ENOSPC; 2942 } 2943 2944 return 0; 2945 } 2946 2947 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 2948 .release = bpf_raw_tp_link_release, 2949 .dealloc = bpf_raw_tp_link_dealloc, 2950 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 2951 .fill_link_info = bpf_raw_tp_link_fill_link_info, 2952 }; 2953 2954 #ifdef CONFIG_PERF_EVENTS 2955 struct bpf_perf_link { 2956 struct bpf_link link; 2957 struct file *perf_file; 2958 }; 2959 2960 static void bpf_perf_link_release(struct bpf_link *link) 2961 { 2962 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 2963 struct perf_event *event = perf_link->perf_file->private_data; 2964 2965 perf_event_free_bpf_prog(event); 2966 fput(perf_link->perf_file); 2967 } 2968 2969 static void bpf_perf_link_dealloc(struct bpf_link *link) 2970 { 2971 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 2972 2973 kfree(perf_link); 2974 } 2975 2976 static const struct 
bpf_link_ops bpf_perf_link_lops = { 2977 .release = bpf_perf_link_release, 2978 .dealloc = bpf_perf_link_dealloc, 2979 }; 2980 2981 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 2982 { 2983 struct bpf_link_primer link_primer; 2984 struct bpf_perf_link *link; 2985 struct perf_event *event; 2986 struct file *perf_file; 2987 int err; 2988 2989 if (attr->link_create.flags) 2990 return -EINVAL; 2991 2992 perf_file = perf_event_get(attr->link_create.target_fd); 2993 if (IS_ERR(perf_file)) 2994 return PTR_ERR(perf_file); 2995 2996 link = kzalloc(sizeof(*link), GFP_USER); 2997 if (!link) { 2998 err = -ENOMEM; 2999 goto out_put_file; 3000 } 3001 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3002 link->perf_file = perf_file; 3003 3004 err = bpf_link_prime(&link->link, &link_primer); 3005 if (err) { 3006 kfree(link); 3007 goto out_put_file; 3008 } 3009 3010 event = perf_file->private_data; 3011 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3012 if (err) { 3013 bpf_link_cleanup(&link_primer); 3014 goto out_put_file; 3015 } 3016 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3017 bpf_prog_inc(prog); 3018 3019 return bpf_link_settle(&link_primer); 3020 3021 out_put_file: 3022 fput(perf_file); 3023 return err; 3024 } 3025 #endif /* CONFIG_PERF_EVENTS */ 3026 3027 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3028 3029 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3030 { 3031 struct bpf_link_primer link_primer; 3032 struct bpf_raw_tp_link *link; 3033 struct bpf_raw_event_map *btp; 3034 struct bpf_prog *prog; 3035 const char *tp_name; 3036 char buf[128]; 3037 int err; 3038 3039 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3040 return -EINVAL; 3041 3042 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3043 if (IS_ERR(prog)) 3044 return PTR_ERR(prog); 3045 3046 switch (prog->type) { 3047 case BPF_PROG_TYPE_TRACING: 3048 case BPF_PROG_TYPE_EXT: 3049 case BPF_PROG_TYPE_LSM: 3050 if (attr->raw_tracepoint.name) { 3051 /* The attach point for this category of programs 3052 * should be specified via btf_id during program load. 
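			 * A user-supplied tracepoint name here would be
			 * redundant at best and contradictory at worst, so it
			 * is rejected with -EINVAL.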
3053 */ 3054 err = -EINVAL; 3055 goto out_put_prog; 3056 } 3057 if (prog->type == BPF_PROG_TYPE_TRACING && 3058 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3059 tp_name = prog->aux->attach_func_name; 3060 break; 3061 } 3062 err = bpf_tracing_prog_attach(prog, 0, 0); 3063 if (err >= 0) 3064 return err; 3065 goto out_put_prog; 3066 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3067 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3068 if (strncpy_from_user(buf, 3069 u64_to_user_ptr(attr->raw_tracepoint.name), 3070 sizeof(buf) - 1) < 0) { 3071 err = -EFAULT; 3072 goto out_put_prog; 3073 } 3074 buf[sizeof(buf) - 1] = 0; 3075 tp_name = buf; 3076 break; 3077 default: 3078 err = -EINVAL; 3079 goto out_put_prog; 3080 } 3081 3082 btp = bpf_get_raw_tracepoint(tp_name); 3083 if (!btp) { 3084 err = -ENOENT; 3085 goto out_put_prog; 3086 } 3087 3088 link = kzalloc(sizeof(*link), GFP_USER); 3089 if (!link) { 3090 err = -ENOMEM; 3091 goto out_put_btp; 3092 } 3093 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3094 &bpf_raw_tp_link_lops, prog); 3095 link->btp = btp; 3096 3097 err = bpf_link_prime(&link->link, &link_primer); 3098 if (err) { 3099 kfree(link); 3100 goto out_put_btp; 3101 } 3102 3103 err = bpf_probe_register(link->btp, prog); 3104 if (err) { 3105 bpf_link_cleanup(&link_primer); 3106 goto out_put_btp; 3107 } 3108 3109 return bpf_link_settle(&link_primer); 3110 3111 out_put_btp: 3112 bpf_put_raw_tracepoint(btp); 3113 out_put_prog: 3114 bpf_prog_put(prog); 3115 return err; 3116 } 3117 3118 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3119 enum bpf_attach_type attach_type) 3120 { 3121 switch (prog->type) { 3122 case BPF_PROG_TYPE_CGROUP_SOCK: 3123 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3124 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3125 case BPF_PROG_TYPE_SK_LOOKUP: 3126 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3127 case BPF_PROG_TYPE_CGROUP_SKB: 3128 if (!capable(CAP_NET_ADMIN)) 3129 /* cg-skb progs can be loaded by unpriv user. 3130 * check permissions at attach time. 3131 */ 3132 return -EPERM; 3133 return prog->enforce_expected_attach_type && 3134 prog->expected_attach_type != attach_type ? 
3135 -EINVAL : 0; 3136 default: 3137 return 0; 3138 } 3139 } 3140 3141 static enum bpf_prog_type 3142 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3143 { 3144 switch (attach_type) { 3145 case BPF_CGROUP_INET_INGRESS: 3146 case BPF_CGROUP_INET_EGRESS: 3147 return BPF_PROG_TYPE_CGROUP_SKB; 3148 case BPF_CGROUP_INET_SOCK_CREATE: 3149 case BPF_CGROUP_INET_SOCK_RELEASE: 3150 case BPF_CGROUP_INET4_POST_BIND: 3151 case BPF_CGROUP_INET6_POST_BIND: 3152 return BPF_PROG_TYPE_CGROUP_SOCK; 3153 case BPF_CGROUP_INET4_BIND: 3154 case BPF_CGROUP_INET6_BIND: 3155 case BPF_CGROUP_INET4_CONNECT: 3156 case BPF_CGROUP_INET6_CONNECT: 3157 case BPF_CGROUP_INET4_GETPEERNAME: 3158 case BPF_CGROUP_INET6_GETPEERNAME: 3159 case BPF_CGROUP_INET4_GETSOCKNAME: 3160 case BPF_CGROUP_INET6_GETSOCKNAME: 3161 case BPF_CGROUP_UDP4_SENDMSG: 3162 case BPF_CGROUP_UDP6_SENDMSG: 3163 case BPF_CGROUP_UDP4_RECVMSG: 3164 case BPF_CGROUP_UDP6_RECVMSG: 3165 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3166 case BPF_CGROUP_SOCK_OPS: 3167 return BPF_PROG_TYPE_SOCK_OPS; 3168 case BPF_CGROUP_DEVICE: 3169 return BPF_PROG_TYPE_CGROUP_DEVICE; 3170 case BPF_SK_MSG_VERDICT: 3171 return BPF_PROG_TYPE_SK_MSG; 3172 case BPF_SK_SKB_STREAM_PARSER: 3173 case BPF_SK_SKB_STREAM_VERDICT: 3174 case BPF_SK_SKB_VERDICT: 3175 return BPF_PROG_TYPE_SK_SKB; 3176 case BPF_LIRC_MODE2: 3177 return BPF_PROG_TYPE_LIRC_MODE2; 3178 case BPF_FLOW_DISSECTOR: 3179 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3180 case BPF_CGROUP_SYSCTL: 3181 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3182 case BPF_CGROUP_GETSOCKOPT: 3183 case BPF_CGROUP_SETSOCKOPT: 3184 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3185 case BPF_TRACE_ITER: 3186 return BPF_PROG_TYPE_TRACING; 3187 case BPF_SK_LOOKUP: 3188 return BPF_PROG_TYPE_SK_LOOKUP; 3189 case BPF_XDP: 3190 return BPF_PROG_TYPE_XDP; 3191 default: 3192 return BPF_PROG_TYPE_UNSPEC; 3193 } 3194 } 3195 3196 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd 3197 3198 #define BPF_F_ATTACH_MASK \ 3199 (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE) 3200 3201 static int bpf_prog_attach(const union bpf_attr *attr) 3202 { 3203 enum bpf_prog_type ptype; 3204 struct bpf_prog *prog; 3205 int ret; 3206 3207 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3208 return -EINVAL; 3209 3210 if (attr->attach_flags & ~BPF_F_ATTACH_MASK) 3211 return -EINVAL; 3212 3213 ptype = attach_type_to_prog_type(attr->attach_type); 3214 if (ptype == BPF_PROG_TYPE_UNSPEC) 3215 return -EINVAL; 3216 3217 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3218 if (IS_ERR(prog)) 3219 return PTR_ERR(prog); 3220 3221 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3222 bpf_prog_put(prog); 3223 return -EINVAL; 3224 } 3225 3226 switch (ptype) { 3227 case BPF_PROG_TYPE_SK_SKB: 3228 case BPF_PROG_TYPE_SK_MSG: 3229 ret = sock_map_get_from_fd(attr, prog); 3230 break; 3231 case BPF_PROG_TYPE_LIRC_MODE2: 3232 ret = lirc_prog_attach(attr, prog); 3233 break; 3234 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3235 ret = netns_bpf_prog_attach(attr, prog); 3236 break; 3237 case BPF_PROG_TYPE_CGROUP_DEVICE: 3238 case BPF_PROG_TYPE_CGROUP_SKB: 3239 case BPF_PROG_TYPE_CGROUP_SOCK: 3240 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3241 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3242 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3243 case BPF_PROG_TYPE_SOCK_OPS: 3244 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3245 break; 3246 default: 3247 ret = -EINVAL; 3248 } 3249 3250 if (ret) 3251 bpf_prog_put(prog); 3252 return ret; 3253 } 3254 3255 #define BPF_PROG_DETACH_LAST_FIELD attach_type 3256 3257 static int 
bpf_prog_detach(const union bpf_attr *attr) 3258 { 3259 enum bpf_prog_type ptype; 3260 3261 if (CHECK_ATTR(BPF_PROG_DETACH)) 3262 return -EINVAL; 3263 3264 ptype = attach_type_to_prog_type(attr->attach_type); 3265 3266 switch (ptype) { 3267 case BPF_PROG_TYPE_SK_MSG: 3268 case BPF_PROG_TYPE_SK_SKB: 3269 return sock_map_prog_detach(attr, ptype); 3270 case BPF_PROG_TYPE_LIRC_MODE2: 3271 return lirc_prog_detach(attr); 3272 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3273 return netns_bpf_prog_detach(attr, ptype); 3274 case BPF_PROG_TYPE_CGROUP_DEVICE: 3275 case BPF_PROG_TYPE_CGROUP_SKB: 3276 case BPF_PROG_TYPE_CGROUP_SOCK: 3277 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3278 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3279 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3280 case BPF_PROG_TYPE_SOCK_OPS: 3281 return cgroup_bpf_prog_detach(attr, ptype); 3282 default: 3283 return -EINVAL; 3284 } 3285 } 3286 3287 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt 3288 3289 static int bpf_prog_query(const union bpf_attr *attr, 3290 union bpf_attr __user *uattr) 3291 { 3292 if (!capable(CAP_NET_ADMIN)) 3293 return -EPERM; 3294 if (CHECK_ATTR(BPF_PROG_QUERY)) 3295 return -EINVAL; 3296 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 3297 return -EINVAL; 3298 3299 switch (attr->query.attach_type) { 3300 case BPF_CGROUP_INET_INGRESS: 3301 case BPF_CGROUP_INET_EGRESS: 3302 case BPF_CGROUP_INET_SOCK_CREATE: 3303 case BPF_CGROUP_INET_SOCK_RELEASE: 3304 case BPF_CGROUP_INET4_BIND: 3305 case BPF_CGROUP_INET6_BIND: 3306 case BPF_CGROUP_INET4_POST_BIND: 3307 case BPF_CGROUP_INET6_POST_BIND: 3308 case BPF_CGROUP_INET4_CONNECT: 3309 case BPF_CGROUP_INET6_CONNECT: 3310 case BPF_CGROUP_INET4_GETPEERNAME: 3311 case BPF_CGROUP_INET6_GETPEERNAME: 3312 case BPF_CGROUP_INET4_GETSOCKNAME: 3313 case BPF_CGROUP_INET6_GETSOCKNAME: 3314 case BPF_CGROUP_UDP4_SENDMSG: 3315 case BPF_CGROUP_UDP6_SENDMSG: 3316 case BPF_CGROUP_UDP4_RECVMSG: 3317 case BPF_CGROUP_UDP6_RECVMSG: 3318 case BPF_CGROUP_SOCK_OPS: 3319 case BPF_CGROUP_DEVICE: 3320 case BPF_CGROUP_SYSCTL: 3321 case BPF_CGROUP_GETSOCKOPT: 3322 case BPF_CGROUP_SETSOCKOPT: 3323 return cgroup_bpf_prog_query(attr, uattr); 3324 case BPF_LIRC_MODE2: 3325 return lirc_prog_query(attr, uattr); 3326 case BPF_FLOW_DISSECTOR: 3327 case BPF_SK_LOOKUP: 3328 return netns_bpf_prog_query(attr, uattr); 3329 case BPF_SK_SKB_STREAM_PARSER: 3330 case BPF_SK_SKB_STREAM_VERDICT: 3331 case BPF_SK_MSG_VERDICT: 3332 case BPF_SK_SKB_VERDICT: 3333 return sock_map_bpf_prog_query(attr, uattr); 3334 default: 3335 return -EINVAL; 3336 } 3337 } 3338 3339 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu 3340 3341 static int bpf_prog_test_run(const union bpf_attr *attr, 3342 union bpf_attr __user *uattr) 3343 { 3344 struct bpf_prog *prog; 3345 int ret = -ENOTSUPP; 3346 3347 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 3348 return -EINVAL; 3349 3350 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 3351 (!attr->test.ctx_size_in && attr->test.ctx_in)) 3352 return -EINVAL; 3353 3354 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 3355 (!attr->test.ctx_size_out && attr->test.ctx_out)) 3356 return -EINVAL; 3357 3358 prog = bpf_prog_get(attr->test.prog_fd); 3359 if (IS_ERR(prog)) 3360 return PTR_ERR(prog); 3361 3362 if (prog->aux->ops->test_run) 3363 ret = prog->aux->ops->test_run(prog, attr, uattr); 3364 3365 bpf_prog_put(prog); 3366 return ret; 3367 } 3368 3369 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 3370 3371 static int bpf_obj_get_next_id(const union bpf_attr *attr, 3372 union bpf_attr __user *uattr, 3373 struct idr *idr, 3374 
spinlock_t *lock) 3375 { 3376 u32 next_id = attr->start_id; 3377 int err = 0; 3378 3379 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 3380 return -EINVAL; 3381 3382 if (!capable(CAP_SYS_ADMIN)) 3383 return -EPERM; 3384 3385 next_id++; 3386 spin_lock_bh(lock); 3387 if (!idr_get_next(idr, &next_id)) 3388 err = -ENOENT; 3389 spin_unlock_bh(lock); 3390 3391 if (!err) 3392 err = put_user(next_id, &uattr->next_id); 3393 3394 return err; 3395 } 3396 3397 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 3398 { 3399 struct bpf_map *map; 3400 3401 spin_lock_bh(&map_idr_lock); 3402 again: 3403 map = idr_get_next(&map_idr, id); 3404 if (map) { 3405 map = __bpf_map_inc_not_zero(map, false); 3406 if (IS_ERR(map)) { 3407 (*id)++; 3408 goto again; 3409 } 3410 } 3411 spin_unlock_bh(&map_idr_lock); 3412 3413 return map; 3414 } 3415 3416 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 3417 { 3418 struct bpf_prog *prog; 3419 3420 spin_lock_bh(&prog_idr_lock); 3421 again: 3422 prog = idr_get_next(&prog_idr, id); 3423 if (prog) { 3424 prog = bpf_prog_inc_not_zero(prog); 3425 if (IS_ERR(prog)) { 3426 (*id)++; 3427 goto again; 3428 } 3429 } 3430 spin_unlock_bh(&prog_idr_lock); 3431 3432 return prog; 3433 } 3434 3435 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 3436 3437 struct bpf_prog *bpf_prog_by_id(u32 id) 3438 { 3439 struct bpf_prog *prog; 3440 3441 if (!id) 3442 return ERR_PTR(-ENOENT); 3443 3444 spin_lock_bh(&prog_idr_lock); 3445 prog = idr_find(&prog_idr, id); 3446 if (prog) 3447 prog = bpf_prog_inc_not_zero(prog); 3448 else 3449 prog = ERR_PTR(-ENOENT); 3450 spin_unlock_bh(&prog_idr_lock); 3451 return prog; 3452 } 3453 3454 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 3455 { 3456 struct bpf_prog *prog; 3457 u32 id = attr->prog_id; 3458 int fd; 3459 3460 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 3461 return -EINVAL; 3462 3463 if (!capable(CAP_SYS_ADMIN)) 3464 return -EPERM; 3465 3466 prog = bpf_prog_by_id(id); 3467 if (IS_ERR(prog)) 3468 return PTR_ERR(prog); 3469 3470 fd = bpf_prog_new_fd(prog); 3471 if (fd < 0) 3472 bpf_prog_put(prog); 3473 3474 return fd; 3475 } 3476 3477 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 3478 3479 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 3480 { 3481 struct bpf_map *map; 3482 u32 id = attr->map_id; 3483 int f_flags; 3484 int fd; 3485 3486 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 3487 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 3488 return -EINVAL; 3489 3490 if (!capable(CAP_SYS_ADMIN)) 3491 return -EPERM; 3492 3493 f_flags = bpf_get_file_flag(attr->open_flags); 3494 if (f_flags < 0) 3495 return f_flags; 3496 3497 spin_lock_bh(&map_idr_lock); 3498 map = idr_find(&map_idr, id); 3499 if (map) 3500 map = __bpf_map_inc_not_zero(map, true); 3501 else 3502 map = ERR_PTR(-ENOENT); 3503 spin_unlock_bh(&map_idr_lock); 3504 3505 if (IS_ERR(map)) 3506 return PTR_ERR(map); 3507 3508 fd = bpf_map_new_fd(map, f_flags); 3509 if (fd < 0) 3510 bpf_map_put_with_uref(map); 3511 3512 return fd; 3513 } 3514 3515 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 3516 unsigned long addr, u32 *off, 3517 u32 *type) 3518 { 3519 const struct bpf_map *map; 3520 int i; 3521 3522 mutex_lock(&prog->aux->used_maps_mutex); 3523 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 3524 map = prog->aux->used_maps[i]; 3525 if (map == (void *)addr) { 3526 *type = BPF_PSEUDO_MAP_FD; 3527 goto out; 3528 } 3529 if (!map->ops->map_direct_value_meta) 3530 continue; 3531 if (!map->ops->map_direct_value_meta(map, addr, off)) { 3532 *type 
= BPF_PSEUDO_MAP_VALUE; 3533 goto out; 3534 } 3535 } 3536 map = NULL; 3537 3538 out: 3539 mutex_unlock(&prog->aux->used_maps_mutex); 3540 return map; 3541 } 3542 3543 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 3544 const struct cred *f_cred) 3545 { 3546 const struct bpf_map *map; 3547 struct bpf_insn *insns; 3548 u32 off, type; 3549 u64 imm; 3550 u8 code; 3551 int i; 3552 3553 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 3554 GFP_USER); 3555 if (!insns) 3556 return insns; 3557 3558 for (i = 0; i < prog->len; i++) { 3559 code = insns[i].code; 3560 3561 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 3562 insns[i].code = BPF_JMP | BPF_CALL; 3563 insns[i].imm = BPF_FUNC_tail_call; 3564 /* fall-through */ 3565 } 3566 if (code == (BPF_JMP | BPF_CALL) || 3567 code == (BPF_JMP | BPF_CALL_ARGS)) { 3568 if (code == (BPF_JMP | BPF_CALL_ARGS)) 3569 insns[i].code = BPF_JMP | BPF_CALL; 3570 if (!bpf_dump_raw_ok(f_cred)) 3571 insns[i].imm = 0; 3572 continue; 3573 } 3574 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 3575 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 3576 continue; 3577 } 3578 3579 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 3580 continue; 3581 3582 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 3583 map = bpf_map_from_imm(prog, imm, &off, &type); 3584 if (map) { 3585 insns[i].src_reg = type; 3586 insns[i].imm = map->id; 3587 insns[i + 1].imm = off; 3588 continue; 3589 } 3590 } 3591 3592 return insns; 3593 } 3594 3595 static int set_info_rec_size(struct bpf_prog_info *info) 3596 { 3597 /* 3598 * Ensure info.*_rec_size is the same as kernel expected size 3599 * 3600 * or 3601 * 3602 * Only allow zero *_rec_size if both _rec_size and _cnt are 3603 * zero. In this case, the kernel will set the expected 3604 * _rec_size back to the info. 
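	 *
	 * E.g. a caller passing nr_func_info == 0 and func_info_rec_size == 0
	 * gets func_info_rec_size filled in with sizeof(struct bpf_func_info),
	 * while any non-zero combination must already match that size exactly.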
3605 */ 3606 3607 if ((info->nr_func_info || info->func_info_rec_size) && 3608 info->func_info_rec_size != sizeof(struct bpf_func_info)) 3609 return -EINVAL; 3610 3611 if ((info->nr_line_info || info->line_info_rec_size) && 3612 info->line_info_rec_size != sizeof(struct bpf_line_info)) 3613 return -EINVAL; 3614 3615 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 3616 info->jited_line_info_rec_size != sizeof(__u64)) 3617 return -EINVAL; 3618 3619 info->func_info_rec_size = sizeof(struct bpf_func_info); 3620 info->line_info_rec_size = sizeof(struct bpf_line_info); 3621 info->jited_line_info_rec_size = sizeof(__u64); 3622 3623 return 0; 3624 } 3625 3626 static int bpf_prog_get_info_by_fd(struct file *file, 3627 struct bpf_prog *prog, 3628 const union bpf_attr *attr, 3629 union bpf_attr __user *uattr) 3630 { 3631 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3632 struct bpf_prog_info info; 3633 u32 info_len = attr->info.info_len; 3634 struct bpf_prog_kstats stats; 3635 char __user *uinsns; 3636 u32 ulen; 3637 int err; 3638 3639 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 3640 if (err) 3641 return err; 3642 info_len = min_t(u32, sizeof(info), info_len); 3643 3644 memset(&info, 0, sizeof(info)); 3645 if (copy_from_user(&info, uinfo, info_len)) 3646 return -EFAULT; 3647 3648 info.type = prog->type; 3649 info.id = prog->aux->id; 3650 info.load_time = prog->aux->load_time; 3651 info.created_by_uid = from_kuid_munged(current_user_ns(), 3652 prog->aux->user->uid); 3653 info.gpl_compatible = prog->gpl_compatible; 3654 3655 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 3656 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 3657 3658 mutex_lock(&prog->aux->used_maps_mutex); 3659 ulen = info.nr_map_ids; 3660 info.nr_map_ids = prog->aux->used_map_cnt; 3661 ulen = min_t(u32, info.nr_map_ids, ulen); 3662 if (ulen) { 3663 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 3664 u32 i; 3665 3666 for (i = 0; i < ulen; i++) 3667 if (put_user(prog->aux->used_maps[i]->id, 3668 &user_map_ids[i])) { 3669 mutex_unlock(&prog->aux->used_maps_mutex); 3670 return -EFAULT; 3671 } 3672 } 3673 mutex_unlock(&prog->aux->used_maps_mutex); 3674 3675 err = set_info_rec_size(&info); 3676 if (err) 3677 return err; 3678 3679 bpf_prog_get_stats(prog, &stats); 3680 info.run_time_ns = stats.nsecs; 3681 info.run_cnt = stats.cnt; 3682 info.recursion_misses = stats.misses; 3683 3684 info.verified_insns = prog->aux->verified_insns; 3685 3686 if (!bpf_capable()) { 3687 info.jited_prog_len = 0; 3688 info.xlated_prog_len = 0; 3689 info.nr_jited_ksyms = 0; 3690 info.nr_jited_func_lens = 0; 3691 info.nr_func_info = 0; 3692 info.nr_line_info = 0; 3693 info.nr_jited_line_info = 0; 3694 goto done; 3695 } 3696 3697 ulen = info.xlated_prog_len; 3698 info.xlated_prog_len = bpf_prog_insn_size(prog); 3699 if (info.xlated_prog_len && ulen) { 3700 struct bpf_insn *insns_sanitized; 3701 bool fault; 3702 3703 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 3704 info.xlated_prog_insns = 0; 3705 goto done; 3706 } 3707 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 3708 if (!insns_sanitized) 3709 return -ENOMEM; 3710 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 3711 ulen = min_t(u32, info.xlated_prog_len, ulen); 3712 fault = copy_to_user(uinsns, insns_sanitized, ulen); 3713 kfree(insns_sanitized); 3714 if (fault) 3715 return -EFAULT; 3716 } 3717 3718 if (bpf_prog_is_dev_bound(prog->aux)) { 3719 err = bpf_prog_offload_info_fill(&info, 
prog); 3720 if (err) 3721 return err; 3722 goto done; 3723 } 3724 3725 /* NOTE: the following code is supposed to be skipped for offload. 3726 * bpf_prog_offload_info_fill() is the place to fill similar fields 3727 * for offload. 3728 */ 3729 ulen = info.jited_prog_len; 3730 if (prog->aux->func_cnt) { 3731 u32 i; 3732 3733 info.jited_prog_len = 0; 3734 for (i = 0; i < prog->aux->func_cnt; i++) 3735 info.jited_prog_len += prog->aux->func[i]->jited_len; 3736 } else { 3737 info.jited_prog_len = prog->jited_len; 3738 } 3739 3740 if (info.jited_prog_len && ulen) { 3741 if (bpf_dump_raw_ok(file->f_cred)) { 3742 uinsns = u64_to_user_ptr(info.jited_prog_insns); 3743 ulen = min_t(u32, info.jited_prog_len, ulen); 3744 3745 /* for multi-function programs, copy the JITed 3746 * instructions for all the functions 3747 */ 3748 if (prog->aux->func_cnt) { 3749 u32 len, free, i; 3750 u8 *img; 3751 3752 free = ulen; 3753 for (i = 0; i < prog->aux->func_cnt; i++) { 3754 len = prog->aux->func[i]->jited_len; 3755 len = min_t(u32, len, free); 3756 img = (u8 *) prog->aux->func[i]->bpf_func; 3757 if (copy_to_user(uinsns, img, len)) 3758 return -EFAULT; 3759 uinsns += len; 3760 free -= len; 3761 if (!free) 3762 break; 3763 } 3764 } else { 3765 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 3766 return -EFAULT; 3767 } 3768 } else { 3769 info.jited_prog_insns = 0; 3770 } 3771 } 3772 3773 ulen = info.nr_jited_ksyms; 3774 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 3775 if (ulen) { 3776 if (bpf_dump_raw_ok(file->f_cred)) { 3777 unsigned long ksym_addr; 3778 u64 __user *user_ksyms; 3779 u32 i; 3780 3781 /* copy the address of the kernel symbol 3782 * corresponding to each function 3783 */ 3784 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 3785 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 3786 if (prog->aux->func_cnt) { 3787 for (i = 0; i < ulen; i++) { 3788 ksym_addr = (unsigned long) 3789 prog->aux->func[i]->bpf_func; 3790 if (put_user((u64) ksym_addr, 3791 &user_ksyms[i])) 3792 return -EFAULT; 3793 } 3794 } else { 3795 ksym_addr = (unsigned long) prog->bpf_func; 3796 if (put_user((u64) ksym_addr, &user_ksyms[0])) 3797 return -EFAULT; 3798 } 3799 } else { 3800 info.jited_ksyms = 0; 3801 } 3802 } 3803 3804 ulen = info.nr_jited_func_lens; 3805 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 3806 if (ulen) { 3807 if (bpf_dump_raw_ok(file->f_cred)) { 3808 u32 __user *user_lens; 3809 u32 func_len, i; 3810 3811 /* copy the JITed image lengths for each function */ 3812 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 3813 user_lens = u64_to_user_ptr(info.jited_func_lens); 3814 if (prog->aux->func_cnt) { 3815 for (i = 0; i < ulen; i++) { 3816 func_len = 3817 prog->aux->func[i]->jited_len; 3818 if (put_user(func_len, &user_lens[i])) 3819 return -EFAULT; 3820 } 3821 } else { 3822 func_len = prog->jited_len; 3823 if (put_user(func_len, &user_lens[0])) 3824 return -EFAULT; 3825 } 3826 } else { 3827 info.jited_func_lens = 0; 3828 } 3829 } 3830 3831 if (prog->aux->btf) 3832 info.btf_id = btf_obj_id(prog->aux->btf); 3833 3834 ulen = info.nr_func_info; 3835 info.nr_func_info = prog->aux->func_info_cnt; 3836 if (info.nr_func_info && ulen) { 3837 char __user *user_finfo; 3838 3839 user_finfo = u64_to_user_ptr(info.func_info); 3840 ulen = min_t(u32, info.nr_func_info, ulen); 3841 if (copy_to_user(user_finfo, prog->aux->func_info, 3842 info.func_info_rec_size * ulen)) 3843 return -EFAULT; 3844 } 3845 3846 ulen = info.nr_line_info; 3847 info.nr_line_info = prog->aux->nr_linfo; 3848 if (info.nr_line_info && ulen) { 3849 __u8 __user *user_linfo; 3850 3851 user_linfo = u64_to_user_ptr(info.line_info); 3852 ulen = min_t(u32, info.nr_line_info, ulen); 3853 if (copy_to_user(user_linfo, prog->aux->linfo, 3854 info.line_info_rec_size * ulen)) 3855 return -EFAULT; 3856 } 3857 3858 ulen = info.nr_jited_line_info; 3859 if (prog->aux->jited_linfo) 3860 info.nr_jited_line_info = prog->aux->nr_linfo; 3861 else 3862 info.nr_jited_line_info = 0; 3863 if (info.nr_jited_line_info && ulen) { 3864 if (bpf_dump_raw_ok(file->f_cred)) { 3865 __u64 __user *user_linfo; 3866 u32 i; 3867 3868 user_linfo = u64_to_user_ptr(info.jited_line_info); 3869 ulen = min_t(u32, info.nr_jited_line_info, ulen); 3870 for (i = 0; i < ulen; i++) { 3871 if (put_user((__u64)(long)prog->aux->jited_linfo[i], 3872 &user_linfo[i])) 3873 return -EFAULT; 3874 } 3875 } else { 3876 info.jited_line_info = 0; 3877 } 3878 } 3879 3880 ulen = info.nr_prog_tags; 3881 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 3882 if (ulen) { 3883 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 3884 u32 i; 3885 3886 user_prog_tags = u64_to_user_ptr(info.prog_tags); 3887 ulen = min_t(u32, info.nr_prog_tags, ulen); 3888 if (prog->aux->func_cnt) { 3889 for (i = 0; i < ulen; i++) { 3890 if (copy_to_user(user_prog_tags[i], 3891 prog->aux->func[i]->tag, 3892 BPF_TAG_SIZE)) 3893 return -EFAULT; 3894 } 3895 } else { 3896 if (copy_to_user(user_prog_tags[0], 3897 prog->tag, BPF_TAG_SIZE)) 3898 return -EFAULT; 3899 } 3900 } 3901 3902 done: 3903 if (copy_to_user(uinfo, &info, info_len) || 3904 put_user(info_len, &uattr->info.info_len)) 3905 return -EFAULT; 3906 3907 return 0; 3908 } 3909 3910 static int bpf_map_get_info_by_fd(struct file *file, 3911 struct bpf_map *map, 3912 const union bpf_attr *attr, 3913 union bpf_attr __user *uattr) 3914 { 3915 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3916 struct bpf_map_info info; 3917 u32 info_len = attr->info.info_len; 3918 int err; 3919 3920 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 3921 if (err) 3922 return err; 3923 info_len = min_t(u32, sizeof(info), info_len); 3924 3925 memset(&info, 0, sizeof(info)); 3926 info.type = map->map_type; 3927 info.id = map->id; 3928 info.key_size = map->key_size; 3929 info.value_size = map->value_size; 3930 info.max_entries = map->max_entries; 3931 info.map_flags = map->map_flags; 3932 info.map_extra = map->map_extra; 3933 memcpy(info.name, map->name, sizeof(map->name)); 3934 3935 if (map->btf) { 3936 info.btf_id = btf_obj_id(map->btf); 3937 info.btf_key_type_id = map->btf_key_type_id; 3938 info.btf_value_type_id = map->btf_value_type_id; 3939 } 3940 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 3941 3942 if (bpf_map_is_dev_bound(map)) { 3943 err = bpf_map_offload_info_fill(&info, map); 3944 if (err) 3945 return err; 3946 } 3947 3948 if (copy_to_user(uinfo, &info, info_len) || 3949 put_user(info_len, &uattr->info.info_len)) 3950 return -EFAULT; 3951 3952 return 0; 3953 } 3954 3955 static int bpf_btf_get_info_by_fd(struct file *file, 3956 struct btf *btf, 3957 const union bpf_attr *attr, 3958 union bpf_attr __user *uattr) 3959 { 3960 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3961 u32 info_len = attr->info.info_len; 3962 int err; 3963 3964 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 3965 if (err) 3966 return err; 3967 3968 return btf_get_info_by_fd(btf, attr, uattr); 3969 } 3970 3971 static int bpf_link_get_info_by_fd(struct file *file, 3972 struct bpf_link *link, 3973 const union bpf_attr *attr, 3974 union bpf_attr __user *uattr) 3975 { 3976 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3977 struct bpf_link_info info; 3978 u32 info_len = attr->info.info_len; 3979 int err; 3980 3981 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 3982 if (err) 3983 return err; 3984 info_len = min_t(u32, sizeof(info), info_len); 3985 3986 memset(&info, 0, sizeof(info)); 3987 if (copy_from_user(&info, uinfo, info_len)) 3988 return -EFAULT; 3989 3990 info.type = link->type; 3991 info.id = link->id; 3992 info.prog_id = link->prog->aux->id; 3993 3994 if (link->ops->fill_link_info) { 3995 err = link->ops->fill_link_info(link, &info); 3996 if (err) 3997 return err; 3998 } 3999 4000 if (copy_to_user(uinfo, &info, info_len) || 4001 put_user(info_len, &uattr->info.info_len)) 4002 return -EFAULT; 4003 4004 return 0; 4005 } 4006 4007 4008 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD 
info.info 4009 4010 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 4011 union bpf_attr __user *uattr) 4012 { 4013 int ufd = attr->info.bpf_fd; 4014 struct fd f; 4015 int err; 4016 4017 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 4018 return -EINVAL; 4019 4020 f = fdget(ufd); 4021 if (!f.file) 4022 return -EBADFD; 4023 4024 if (f.file->f_op == &bpf_prog_fops) 4025 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 4026 uattr); 4027 else if (f.file->f_op == &bpf_map_fops) 4028 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 4029 uattr); 4030 else if (f.file->f_op == &btf_fops) 4031 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 4032 else if (f.file->f_op == &bpf_link_fops) 4033 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 4034 attr, uattr); 4035 else 4036 err = -EINVAL; 4037 4038 fdput(f); 4039 return err; 4040 } 4041 4042 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level 4043 4044 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr) 4045 { 4046 if (CHECK_ATTR(BPF_BTF_LOAD)) 4047 return -EINVAL; 4048 4049 if (!bpf_capable()) 4050 return -EPERM; 4051 4052 return btf_new_fd(attr, uattr); 4053 } 4054 4055 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 4056 4057 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 4058 { 4059 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 4060 return -EINVAL; 4061 4062 if (!capable(CAP_SYS_ADMIN)) 4063 return -EPERM; 4064 4065 return btf_get_fd_by_id(attr->btf_id); 4066 } 4067 4068 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 4069 union bpf_attr __user *uattr, 4070 u32 prog_id, u32 fd_type, 4071 const char *buf, u64 probe_offset, 4072 u64 probe_addr) 4073 { 4074 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 4075 u32 len = buf ? strlen(buf) : 0, input_len; 4076 int err = 0; 4077 4078 if (put_user(len, &uattr->task_fd_query.buf_len)) 4079 return -EFAULT; 4080 input_len = attr->task_fd_query.buf_len; 4081 if (input_len && ubuf) { 4082 if (!len) { 4083 /* nothing to copy, just make ubuf NULL terminated */ 4084 char zero = '\0'; 4085 4086 if (put_user(zero, ubuf)) 4087 return -EFAULT; 4088 } else if (input_len >= len + 1) { 4089 /* ubuf can hold the string with NULL terminator */ 4090 if (copy_to_user(ubuf, buf, len + 1)) 4091 return -EFAULT; 4092 } else { 4093 /* ubuf cannot hold the string with NULL terminator, 4094 * do a partial copy with NULL terminator. 
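			 * E.g. buf = "sched_switch" (len 12) with
			 * buf_len == 4 copies "sch", NUL-terminates at
			 * ubuf[3] and returns -ENOSPC.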
4095 */ 4096 char zero = '\0'; 4097 4098 err = -ENOSPC; 4099 if (copy_to_user(ubuf, buf, input_len - 1)) 4100 return -EFAULT; 4101 if (put_user(zero, ubuf + input_len - 1)) 4102 return -EFAULT; 4103 } 4104 } 4105 4106 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 4107 put_user(fd_type, &uattr->task_fd_query.fd_type) || 4108 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 4109 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 4110 return -EFAULT; 4111 4112 return err; 4113 } 4114 4115 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 4116 4117 static int bpf_task_fd_query(const union bpf_attr *attr, 4118 union bpf_attr __user *uattr) 4119 { 4120 pid_t pid = attr->task_fd_query.pid; 4121 u32 fd = attr->task_fd_query.fd; 4122 const struct perf_event *event; 4123 struct task_struct *task; 4124 struct file *file; 4125 int err; 4126 4127 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 4128 return -EINVAL; 4129 4130 if (!capable(CAP_SYS_ADMIN)) 4131 return -EPERM; 4132 4133 if (attr->task_fd_query.flags != 0) 4134 return -EINVAL; 4135 4136 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 4137 if (!task) 4138 return -ENOENT; 4139 4140 err = 0; 4141 file = fget_task(task, fd); 4142 put_task_struct(task); 4143 if (!file) 4144 return -EBADF; 4145 4146 if (file->f_op == &bpf_link_fops) { 4147 struct bpf_link *link = file->private_data; 4148 4149 if (link->ops == &bpf_raw_tp_link_lops) { 4150 struct bpf_raw_tp_link *raw_tp = 4151 container_of(link, struct bpf_raw_tp_link, link); 4152 struct bpf_raw_event_map *btp = raw_tp->btp; 4153 4154 err = bpf_task_fd_query_copy(attr, uattr, 4155 raw_tp->link.prog->aux->id, 4156 BPF_FD_TYPE_RAW_TRACEPOINT, 4157 btp->tp->name, 0, 0); 4158 goto put_file; 4159 } 4160 goto out_not_supp; 4161 } 4162 4163 event = perf_get_event(file); 4164 if (!IS_ERR(event)) { 4165 u64 probe_offset, probe_addr; 4166 u32 prog_id, fd_type; 4167 const char *buf; 4168 4169 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 4170 &buf, &probe_offset, 4171 &probe_addr); 4172 if (!err) 4173 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 4174 fd_type, buf, 4175 probe_offset, 4176 probe_addr); 4177 goto put_file; 4178 } 4179 4180 out_not_supp: 4181 err = -ENOTSUPP; 4182 put_file: 4183 fput(file); 4184 return err; 4185 } 4186 4187 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 4188 4189 #define BPF_DO_BATCH(fn) \ 4190 do { \ 4191 if (!fn) { \ 4192 err = -ENOTSUPP; \ 4193 goto err_put; \ 4194 } \ 4195 err = fn(map, attr, uattr); \ 4196 } while (0) 4197 4198 static int bpf_map_do_batch(const union bpf_attr *attr, 4199 union bpf_attr __user *uattr, 4200 int cmd) 4201 { 4202 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 4203 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 4204 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 4205 struct bpf_map *map; 4206 int err, ufd; 4207 struct fd f; 4208 4209 if (CHECK_ATTR(BPF_MAP_BATCH)) 4210 return -EINVAL; 4211 4212 ufd = attr->batch.map_fd; 4213 f = fdget(ufd); 4214 map = __bpf_map_get(f); 4215 if (IS_ERR(map)) 4216 return PTR_ERR(map); 4217 if (has_write) 4218 bpf_map_write_active_inc(map); 4219 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 4220 err = -EPERM; 4221 goto err_put; 4222 } 4223 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 4224 err = -EPERM; 4225 goto err_put; 4226 } 4227 4228 if (cmd == BPF_MAP_LOOKUP_BATCH) 4229 BPF_DO_BATCH(map->ops->map_lookup_batch); 4230 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 4231 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch); 4232 else if 
#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(map, attr, uattr);	\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
			cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch);
err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
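/*
 * Example (user-space sketch, not part of this file): one BPF_MAP_LOOKUP_BATCH
 * call as dispatched by bpf_map_do_batch() above. The map is assumed to have
 * 4-byte keys and values; the kernel updates "count" to the number of elements
 * copied, the returned out_batch is passed back as in_batch on the next call
 * (NULL in_batch starts from the beginning), and -ENOENT indicates the map has
 * been fully traversed. Other error handling is omitted.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int lookup_batch(int map_fd, __u32 *in_batch, __u32 *out_batch,
 *				__u32 *keys, __u32 *values, __u32 *count)
 *	{
 *		union bpf_attr attr;
 *		int err;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.batch.map_fd = map_fd;
 *		attr.batch.in_batch = (__u64)(unsigned long)in_batch;
 *		attr.batch.out_batch = (__u64)(unsigned long)out_batch;
 *		attr.batch.keys = (__u64)(unsigned long)keys;
 *		attr.batch.values = (__u64)(unsigned long)values;
 *		attr.batch.count = *count;
 *
 *		err = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
 *		*count = attr.batch.count;
 *		return err;
 *	}
 */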
static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
				   struct bpf_prog *prog)
{
	if (attr->link_create.attach_type != prog->expected_attach_type)
		return -EINVAL;

	if (prog->expected_attach_type == BPF_TRACE_ITER)
		return bpf_iter_link_attach(attr, uattr, prog);
	else if (prog->type == BPF_PROG_TYPE_EXT)
		return bpf_tracing_prog_attach(prog,
					       attr->link_create.target_fd,
					       attr->link_create.target_btf_id);
	return -EINVAL;
}

#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_EXT:
		ret = tracing_bpf_link_attach(attr, uattr, prog);
		goto out;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
			ret = -EINVAL;
			goto out;
		}
		ptype = prog->type;
		break;
	default:
		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_TRACING:
		ret = tracing_bpf_link_attach(attr, uattr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
#endif
#ifdef CONFIG_PERF_EVENTS
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_KPROBE:
		ret = bpf_perf_link_attach(attr, prog);
		break;
#endif
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put(link);
	return ret;
}
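/*
 * Example (user-space sketch, not part of this file): the BPF_LINK_UPDATE path
 * above, atomically replacing the program behind an existing link. With
 * BPF_F_REPLACE the link's update_prog callback additionally checks that the
 * link currently runs "old_prog_fd". The fds are assumed to be valid; error
 * handling is omitted.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int replace_link_prog(int link_fd, int new_prog_fd, int old_prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.link_update.link_fd = link_fd;
 *		attr.link_update.new_prog_fd = new_prog_fd;
 *		attr.link_update.old_prog_fd = old_prog_fd;
 *		attr.link_update.flags = BPF_F_REPLACE;
 *
 *		return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *	}
 */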
#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put(link);
	return ret;
}

static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{

	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
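/*
 * Example (user-space sketch, not part of this file): BPF_PROG_BIND_MAP as
 * implemented above. Binding ties the map's lifetime to the program even
 * though the program text never references it (commonly used for metadata
 * maps). "prog_fd" and "map_fd" are assumed to be valid.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int bind_map_to_prog(int prog_fd, int map_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_bind_map.prog_fd = prog_fd;
 *		attr.prog_bind_map.map_fd = map_fd;
 *		attr.prog_bind_map.flags = 0;
 *
 *		return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *	}
 */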
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}
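/*
 * Example (user-space sketch, not part of this file): glibc provides no bpf()
 * wrapper, so user space reaches the SYSCALL_DEFINE3() above through
 * syscall(2), mirroring the sys_bpf() helper that libbpf uses internally.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 *				  unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 */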
BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	struct bpf_prog * __maybe_unused prog;

	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		if (!__bpf_prog_enter_sleepable(prog)) {
			/* recursion detected */
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
EXPORT_SYMBOL(bpf_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When bpf program calls this helper there should not be
	 * an fdget() without matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto		= syscall_prog_func_proto,
	.is_valid_access	= syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run	= bpf_prog_test_run_syscall,
};
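/*
 * Example (BPF-side sketch, not part of this file): the verifier/prog ops
 * above back BPF_PROG_TYPE_SYSCALL programs, which issue bpf() commands from
 * BPF itself via the bpf_sys_bpf() helper. A minimal program, assuming
 * libbpf's helper definitions:
 *
 *	// SPDX-License-Identifier: GPL-2.0
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("syscall")
 *	int create_array_map(void *ctx)
 *	{
 *		union bpf_attr attr = {
 *			.map_type    = BPF_MAP_TYPE_ARRAY,
 *			.key_size    = 4,
 *			.value_size  = 8,
 *			.max_entries = 1,
 *		};
 *
 *		// returns a map fd (>= 0) on success, as with bpf(2)
 *		return bpf_sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *
 * Such a program is executed with BPF_PROG_TEST_RUN (bpf_prog_test_run_syscall
 * in bpf_syscall_prog_ops above); commands it issues go through bpf_sys_bpf()
 * and on to __sys_bpf() with a kernel bpfptr_t.
 */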