// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
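
/* Usage sketch (not part of this file): a handler for an extensible command
 * typically validates the tail before copying the attribute in, e.g.:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 *
 * i.e. unknown trailing bytes must be zero, and only the known prefix of the
 * struct is copied.
 */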

const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	u32 type = attr->map_type;
	struct bpf_map *map;
	int err;

	if (type >= ARRAY_SIZE(bpf_map_types))
		return ERR_PTR(-EINVAL);
	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[type];
	if (!ops)
		return ERR_PTR(-EINVAL);

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return ERR_PTR(err);
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = ops;
	map->map_type = type;
	return map;
}

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}
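
/* Syscall-side map update: pick the map-type specific handler. Types that
 * need a sleepable context (offloaded maps, cpumap, struct_ops, sockmap and
 * sockhash, fd-based prog arrays) are dispatched before instrumentation is
 * disabled; the remaining types run with instrumentation off, mostly under
 * rcu_read_lock().
 */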
static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
				void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_dev_bound(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, f.file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
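		/* keyless types: peek returns the next element for queue and
		 * stack; for bloom filters it tests "value" for membership
		 */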
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}

/* Please do not use this function outside of the map creation path
 * (e.g. in map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO | __GFP_ACCOUNT;
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}
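
/* E.g. the same map can be held via one fd opened with BPF_F_RDONLY and
 * another opened with BPF_F_WRONLY; both flags describe the fd, not the map,
 * so neither survives into map->map_flags.
 */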

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static int bpf_map_kptr_off_cmp(const void *a, const void *b)
{
	const struct bpf_map_value_off_desc *off_desc1 = a, *off_desc2 = b;

	if (off_desc1->offset < off_desc2->offset)
		return -1;
	else if (off_desc1->offset > off_desc2->offset)
		return 1;
	return 0;
}

struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset)
{
	/* Since members are iterated in btf_find_field in increasing order,
	 * offsets appended to kptr_off_tab are in increasing order, so we can
	 * do bsearch to find exact match.
	 */
	struct bpf_map_value_off *tab;

	if (!map_value_has_kptrs(map))
		return NULL;
	tab = map->kptr_off_tab;
	return bsearch(&offset, tab->off, tab->nr_off, sizeof(tab->off[0]), bpf_map_kptr_off_cmp);
}
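
/* Drop the references held by the kptr_off_tab (a module reference and a BTF
 * reference per kptr field) and free the table itself.
 */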
void bpf_map_free_kptr_off_tab(struct bpf_map *map)
{
	struct bpf_map_value_off *tab = map->kptr_off_tab;
	int i;

	if (!map_value_has_kptrs(map))
		return;
	for (i = 0; i < tab->nr_off; i++) {
		if (tab->off[i].kptr.module)
			module_put(tab->off[i].kptr.module);
		btf_put(tab->off[i].kptr.btf);
	}
	kfree(tab);
	map->kptr_off_tab = NULL;
}

struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map)
{
	struct bpf_map_value_off *tab = map->kptr_off_tab, *new_tab;
	int size, i;

	if (!map_value_has_kptrs(map))
		return ERR_PTR(-ENOENT);
	size = offsetof(struct bpf_map_value_off, off[tab->nr_off]);
	new_tab = kmemdup(tab, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_tab)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the kptr_off_tab */
	for (i = 0; i < tab->nr_off; i++) {
		btf_get(tab->off[i].kptr.btf);
		if (tab->off[i].kptr.module && !try_module_get(tab->off[i].kptr.module)) {
			while (i--) {
				if (tab->off[i].kptr.module)
					module_put(tab->off[i].kptr.module);
				btf_put(tab->off[i].kptr.btf);
			}
			kfree(new_tab);
			return ERR_PTR(-ENXIO);
		}
	}
	return new_tab;
}

bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b)
{
	struct bpf_map_value_off *tab_a = map_a->kptr_off_tab, *tab_b = map_b->kptr_off_tab;
	bool a_has_kptr = map_value_has_kptrs(map_a), b_has_kptr = map_value_has_kptrs(map_b);
	int size;

	if (!a_has_kptr && !b_has_kptr)
		return true;
	if (a_has_kptr != b_has_kptr)
		return false;
	if (tab_a->nr_off != tab_b->nr_off)
		return false;
	size = offsetof(struct bpf_map_value_off, off[tab_a->nr_off]);
	return !memcmp(tab_a, tab_b, size);
}

/* Caller must ensure map_value_has_kptrs is true. Note that this function can
 * be called on a map value while the map_value is visible to BPF programs, as
 * it ensures the correct synchronization, and we already enforce the same using
 * the bpf_kptr_xchg helper on the BPF program side for referenced kptrs.
 */
void bpf_map_free_kptrs(struct bpf_map *map, void *map_value)
{
	struct bpf_map_value_off *tab = map->kptr_off_tab;
	unsigned long *btf_id_ptr;
	int i;

	for (i = 0; i < tab->nr_off; i++) {
		struct bpf_map_value_off_desc *off_desc = &tab->off[i];
		unsigned long old_ptr;

		btf_id_ptr = map_value + off_desc->offset;
		if (off_desc->type == BPF_KPTR_UNREF) {
			u64 *p = (u64 *)btf_id_ptr;

			/* zero the field itself, not the local pointer */
			WRITE_ONCE(*p, 0);
			continue;
		}
		old_ptr = xchg(btf_id_ptr, 0);
		off_desc->kptr.dtor((void *)old_ptr);
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	kfree(map->off_arr);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing, map_free callback also does
	 * bpf_map_free_kptr_off_tab, if needed.
	 */
	map->ops->map_free(map);
}
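
/* usercnt counts the references user space holds on the map (fds, pinned
 * bpffs entries); once it drops to zero, map_release_uref lets the map type
 * release state that only matters while user space can still reach the map.
 */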
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		btf_put(map->btf);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		/* Avoid spawning kworkers, since they all might contend
		 * for the same mutex like slab_mutex.
		 */
		queue_work(system_unbound_wq, &map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Provides an approximation of the map's memory footprint.
 * Used only to provide backward compatibility and display
 * a reasonable "memlock" info.
 */
static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
{
	unsigned long size;

	size = round_up(map->key_size + bpf_map_value_size(map), 8);

	return round_up(map->max_entries * size, PAGE_SIZE);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%lu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_footprint(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || map_value_has_spin_lock(map) ||
	    map_value_has_timer(map) || map_value_has_kptrs(map))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference and allows user-space to still modify it after
		 * freezing, while verifier will assume contents do not change
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vma->vm_flags &= ~VM_MAYEXEC;
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vma->vm_flags &= ~VM_MAYWRITE;

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
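
/* For instance, with BPF_MAP_GET_NEXT_KEY_LAST_FIELD defined as next_key
 * below, CHECK_ATTR(BPF_MAP_GET_NEXT_KEY) evaluates true (i.e. fails the
 * check) whenever any byte of 'union bpf_attr' past the next_key field is
 * non-zero.
 */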

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_off_arr_cmp(const void *_a, const void *_b, const void *priv)
{
	const u32 a = *(const u32 *)_a;
	const u32 b = *(const u32 *)_b;

	if (a < b)
		return -1;
	else if (a > b)
		return 1;
	return 0;
}

static void map_off_arr_swap(void *_a, void *_b, int size, const void *priv)
{
	struct bpf_map *map = (struct bpf_map *)priv;
	u32 *off_base = map->off_arr->field_off;
	u32 *a = _a, *b = _b;
	u8 *sz_a, *sz_b;

	sz_a = map->off_arr->field_sz + (a - off_base);
	sz_b = map->off_arr->field_sz + (b - off_base);

	swap(*a, *b);
	swap(*sz_a, *sz_b);
}

static int bpf_map_alloc_off_arr(struct bpf_map *map)
{
	bool has_spin_lock = map_value_has_spin_lock(map);
	bool has_timer = map_value_has_timer(map);
	bool has_kptrs = map_value_has_kptrs(map);
	struct bpf_map_off_arr *off_arr;
	u32 i;

	if (!has_spin_lock && !has_timer && !has_kptrs) {
		map->off_arr = NULL;
		return 0;
	}

	off_arr = kmalloc(sizeof(*map->off_arr), GFP_KERNEL | __GFP_NOWARN);
	if (!off_arr)
		return -ENOMEM;
	map->off_arr = off_arr;

	off_arr->cnt = 0;
	if (has_spin_lock) {
		i = off_arr->cnt;

		off_arr->field_off[i] = map->spin_lock_off;
		off_arr->field_sz[i] = sizeof(struct bpf_spin_lock);
		off_arr->cnt++;
	}
	if (has_timer) {
		i = off_arr->cnt;

		off_arr->field_off[i] = map->timer_off;
		off_arr->field_sz[i] = sizeof(struct bpf_timer);
		off_arr->cnt++;
	}
	if (has_kptrs) {
		struct bpf_map_value_off *tab = map->kptr_off_tab;
		u32 *off = &off_arr->field_off[off_arr->cnt];
		u8 *sz = &off_arr->field_sz[off_arr->cnt];

		for (i = 0; i < tab->nr_off; i++) {
			*off++ = tab->off[i].offset;
			*sz++ = sizeof(u64);
		}
		off_arr->cnt += tab->nr_off;
	}

	if (off_arr->cnt == 1)
		return 0;
	sort_r(off_arr->field_off, off_arr->cnt, sizeof(off_arr->field_off[0]),
	       map_off_arr_cmp, map_off_arr_swap, map);
	return 0;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->spin_lock_off = btf_find_spin_lock(btf, value_type);

	if (map_value_has_spin_lock(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			return -ENOTSUPP;
		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
		    map->value_size) {
			WARN_ONCE(1,
				  "verifier bug spin_lock_off %d value_size %d\n",
				  map->spin_lock_off, map->value_size);
			return -EFAULT;
		}
	}

	map->timer_off = btf_find_timer(btf, value_type);
	if (map_value_has_timer(map)) {
		if (map->map_flags & BPF_F_RDONLY_PROG)
			return -EACCES;
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY)
			return -EOPNOTSUPP;
	}

	map->kptr_off_tab = btf_parse_kptrs(btf, value_type);
	if (map_value_has_kptrs(map)) {
		if (!bpf_capable()) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		if (map->map_type != BPF_MAP_TYPE_HASH &&
		    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		    map->map_type != BPF_MAP_TYPE_ARRAY) {
			ret = -EOPNOTSUPP;
			goto free_map_tab;
		}
	}

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_kptr_off_tab(map);
	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	map->spin_lock_off = -EINVAL;
	map->timer_off = -EINVAL;
	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct, the bpf_prog.o
	     * must have BTF to begin with to figure out the corresponding
	     * kernel counterpart. Thus, attr->btf_fd has to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = bpf_map_alloc_off_arr(map);
	if (err)
		goto free_map;

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_off_arr;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map_off_arr:
	kfree(map->off_arr);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}
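
/* A map fd handed out by map_create() or BPF_MAP_GET_FD_BY_ID holds one
 * refcnt and one usercnt; the helpers below translate such fds back into
 * 'struct bpf_map *' pointers for the rest of the syscall layer.
 */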

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

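	/* The bloom filter "lookup" is a membership test, so the candidate
	 * value is copied in from user space instead of out.
	 */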
	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_bpfptr(value, uvalue, value_size) != 0)
		goto free_value;

	err = bpf_map_update_value(map, f, key, value, attr->flags);

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
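
/* User-space iteration sketch (using the libbpf wrappers, not this file):
 *
 *	err = bpf_map_get_next_key(fd, NULL, &key);	// NULL: first key
 *	while (!err) {
 *		bpf_map_lookup_elem(fd, &key, &value);
 *		err = bpf_map_get_next_key(fd, &key, &key);
 *	}
 *
 * Deleting elements while iterating may restart or skip parts of the walk.
 */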

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_dev_bound(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	maybe_wait_bpf_programs(map);
	return err;
}

int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	int ufd = attr->batch.map_fd;
	void *key, *value;
	struct fd f;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, f, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	fdput(f);
	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !map_value_has_spin_lock(map)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_dev_bound(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS ||
	    map_value_has_timer(map) || map_value_has_kptrs(map)) {
		fdput(f);
		return -ENOTSUPP;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}
	if (!bpf_capable()) {
		err = -EPERM;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD]   = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (op == BPF_AUDIT_LOAD)
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	unsigned long flags;

	/* cBPF to eBPF migrations are currently not in the idr store.
	 * Offloaded programs are removed from the store when their device
	 * disappears - even if someone grabs an fd to them they are unusable,
	 * simply waiting for refcnt to drop to be freed.
	 */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_irqsave(&prog_idr_lock, flags);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);
	prog->aux->id = 0;

	if (do_idr_lock)
		spin_unlock_irqrestore(&prog_idr_lock, flags);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
	free_uid(aux->user);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
{
	bpf_prog_kallsyms_del_all(prog);
	btf_put(prog->aux->btf);
	kvfree(prog->aux->jited_linfo);
	kvfree(prog->aux->linfo);
	kfree(prog->aux->kfunc_tab);
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);

	if (deferred) {
		if (prog->aux->sleepable)
			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
		else
			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	} else {
		__bpf_prog_put_rcu(&prog->aux->rcu);
	}
}

static void bpf_prog_put_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	struct bpf_prog *prog;

	aux = container_of(work, struct bpf_prog_aux, work);
	prog = aux->prog;
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
	__bpf_prog_put_noref(prog, true);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	struct bpf_prog_aux *aux = prog->aux;

	if (atomic64_dec_and_test(&aux->refcnt)) {
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);

		if (in_irq() || irqs_disabled()) {
			INIT_WORK(&aux->work, bpf_prog_put_deferred);
			schedule_work(&aux->work);
		} else {
			bpf_prog_put_deferred(&aux->work);
		}
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

struct bpf_prog_kstats {
	u64 nsecs;
	u64 cnt;
	u64 misses;
};

static void bpf_prog_get_stats(const struct bpf_prog *prog,
			       struct bpf_prog_kstats *stats)
{
	u64 nsecs = 0, cnt = 0, misses = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct bpf_prog_stats *st;
		unsigned int start;
		u64 tnsecs, tcnt, tmisses;

		st = per_cpu_ptr(prog->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&st->syncp);
			tnsecs = u64_stats_read(&st->nsecs);
			tcnt = u64_stats_read(&st->cnt);
			tmisses = u64_stats_read(&st->misses);
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
		nsecs += tnsecs;
		cnt += tcnt;
		misses += tmisses;
	}
	stats->nsecs = nsecs;
	stats->cnt = cnt;
	stats->misses = misses;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
	struct bpf_prog_kstats stats;

	bpf_prog_get_stats(prog, &stats);
	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n"
		   "prog_id:\t%u\n"
		   "run_time_ns:\t%llu\n"
		   "run_cnt:\t%llu\n"
		   "recursion_misses:\t%llu\n"
		   "verified_insns:\t%u\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT,
		   prog->aux->id,
		   stats.nsecs,
		   stats.cnt,
		   stats.misses,
		   prog->aux->verified_insns);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_prog_add(struct bpf_prog *prog, int i)
{
	atomic64_add(i, &prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

void bpf_prog_inc(struct bpf_prog *prog)
{
	atomic64_inc(&prog->aux->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

bool bpf_prog_get_ok(struct bpf_prog *prog,
		     enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	return __bpf_prog_get(ufd, &type, attach_drv);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
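
/* Minimal usage sketch for the getters above (illustrative, not a call site
 * in this file): every successful bpf_prog_get*() bumps the refcount and must
 * be paired with bpf_prog_put() once the caller is done with the program.
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_get_type_dev(ufd, BPF_PROG_TYPE_XDP, true);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	// ... use prog ...
 *	bpf_prog_put(prog);
 */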
/* Initially all BPF programs could be loaded w/o specifying
 * expected_attach_type. Later, for some of them, specifying
 * expected_attach_type at load time became required so that the program can
 * be validated properly. Program types that are allowed to be loaded both
 * w/ and w/o (for backward compatibility) expected_attach_type should have
 * the default attach type assigned to expected_attach_type for the latter
 * case, so that it can be validated later at attach time.
 *
 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
 * prog type requires it but has some attach types that have to be backward
 * compatible.
 */
static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
{
	switch (attr->prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
		 * exist so checking for non-zero is the way to go here.
		 */
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_CGROUP_INET_SOCK_CREATE;
		break;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		if (!attr->expected_attach_type)
			attr->expected_attach_type =
				BPF_SK_REUSEPORT_SELECT;
		break;
	}
}

static int
bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
			   enum bpf_attach_type expected_attach_type,
			   struct btf *attach_btf, u32 btf_id,
			   struct bpf_prog *dst_prog)
{
	if (btf_id) {
		if (btf_id > BTF_MAX_TYPE)
			return -EINVAL;

		if (!attach_btf && !dst_prog)
			return -EINVAL;

		switch (prog_type) {
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
		case BPF_PROG_TYPE_STRUCT_OPS:
		case BPF_PROG_TYPE_EXT:
			break;
		default:
			return -EINVAL;
		}
	}

	if (attach_btf && (!btf_id || dst_prog))
		return -EINVAL;

	if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING &&
	    prog_type != BPF_PROG_TYPE_EXT)
		return -EINVAL;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SKB:
		switch (expected_attach_type) {
		case BPF_CGROUP_INET_INGRESS:
		case BPF_CGROUP_INET_EGRESS:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		switch (expected_attach_type) {
		case BPF_CGROUP_SETSOCKOPT:
		case BPF_CGROUP_GETSOCKOPT:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SK_LOOKUP:
		if (expected_attach_type == BPF_SK_LOOKUP)
			return 0;
		return -EINVAL;
	case BPF_PROG_TYPE_SK_REUSEPORT:
		switch (expected_attach_type) {
		case BPF_SK_REUSEPORT_SELECT:
		case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE:
			return 0;
		default:
			return -EINVAL;
		}
	case BPF_PROG_TYPE_SYSCALL:
	case BPF_PROG_TYPE_EXT:
		if (expected_attach_type)
			return -EINVAL;
		fallthrough;
	default:
		return 0;
	}
}

static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	case BPF_PROG_TYPE_CGROUP_SKB:
		/* always unpriv */
	case BPF_PROG_TYPE_SK_REUSEPORT:
		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
	default:
		return false;
	}
}

static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
	case BPF_PROG_TYPE_EXT: /* extends any prog */
		return true;
	default:
		return false;
	}
}

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size

static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog, *dst_prog = NULL;
	struct btf *attach_btf = NULL;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
				 BPF_F_ANY_ALIGNMENT |
				 BPF_F_TEST_STATE_FREQ |
				 BPF_F_SLEEPABLE |
				 BPF_F_TEST_RND_HI32 |
				 BPF_F_XDP_HAS_FRAGS))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
	    !bpf_capable())
		return -EPERM;

	/* copy eBPF program license from user space */
	if (strncpy_from_bpfptr(license,
				make_bpfptr(attr->license, uattr.is_kernel),
				sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);
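
	/* Summary of the privilege checks below: the instruction count is
	 * capped for callers without bpf_capable(), only SOCKET_FILTER and
	 * CGROUP_SKB may be loaded without CAP_BPF, networking program types
	 * additionally require CAP_NET_ADMIN (or CAP_SYS_ADMIN), and
	 * tracing-style program types require CAP_PERFMON.
	 */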
	if (attr->insn_cnt == 0 ||
	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
		return -E2BIG;
	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !bpf_capable())
		return -EPERM;

	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (is_perfmon_prog_type(type) && !perfmon_capable())
		return -EPERM;

	/* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog
	 * or btf, we need to check which one it is
	 */
	if (attr->attach_prog_fd) {
		dst_prog = bpf_prog_get(attr->attach_prog_fd);
		if (IS_ERR(dst_prog)) {
			dst_prog = NULL;
			attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd);
			if (IS_ERR(attach_btf))
				return -EINVAL;
			if (!btf_is_kernel(attach_btf)) {
				/* attaching through specifying bpf_prog's BTF
				 * objects directly might be supported eventually
				 */
				btf_put(attach_btf);
				return -ENOTSUPP;
			}
		}
	} else if (attr->attach_btf_id) {
		/* fall back to vmlinux BTF, if BTF type ID is specified */
		attach_btf = bpf_get_btf_vmlinux();
		if (IS_ERR(attach_btf))
			return PTR_ERR(attach_btf);
		if (!attach_btf)
			return -EINVAL;
		btf_get(attach_btf);
	}

	bpf_prog_load_fixup_attach_type(attr);
	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
				       attach_btf, attr->attach_btf_id,
				       dst_prog)) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		return -EINVAL;
	}

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog) {
		if (dst_prog)
			bpf_prog_put(dst_prog);
		if (attach_btf)
			btf_put(attach_btf);
		return -ENOMEM;
	}

	prog->expected_attach_type = attr->expected_attach_type;
	prog->aux->attach_btf = attach_btf;
	prog->aux->attach_btf_id = attr->attach_btf_id;
	prog->aux->dst_prog = dst_prog;
	prog->aux->offload_requested = !!attr->prog_ifindex;
	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
	prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog;

	prog->aux->user = get_current_user();
	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_bpfptr(prog->insns,
			     make_bpfptr(attr->insns, uattr.is_kernel),
			     bpf_prog_insn_size(prog)) != 0)
		goto free_prog_sec;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic64_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog_sec;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog_sec;

	prog->aux->load_time = ktime_get_boottime_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
			       sizeof(attr->prog_name));
	if (err < 0)
		goto free_prog_sec;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr, uattr);
	if (err < 0)
		goto free_used_maps;

	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
	 * effectively publicly exposed. However, retrieving via
	 * bpf_prog_get_fd_by_id() will take another reference,
	 * therefore it cannot be gone underneath us.
	 *
	 * Only for the time /after/ successful bpf_prog_new_fd()
	 * and before returning to userspace, we might just hold
	 * one reference and any parallel close on that fd could
	 * rip everything out. Hence, below notifications must
	 * happen before bpf_prog_new_fd().
	 *
	 * Also, any failure handling from this point onwards must
	 * be using bpf_prog_put() given the program is exposed.
	 */
	bpf_prog_kallsyms_add(prog);
	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
	bpf_audit_prog(prog, BPF_AUDIT_LOAD);

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		bpf_prog_put(prog);
	return err;

free_used_maps:
	/* In case we have subprogs, we need to wait for a grace
	 * period before we can tear down JIT memory since symbols
	 * are already exposed under kallsyms.
	 */
	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
	return err;
free_prog_sec:
	free_uid(prog->aux->user);
	security_bpf_prog_free(prog->aux);
free_prog:
	if (prog->aux->attach_btf)
		btf_put(prog->aux->attach_btf);
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
	atomic64_set(&link->refcnt, 1);
	link->type = type;
	link->id = 0;
	link->ops = ops;
	link->prog = prog;
}

static void bpf_link_free_id(int id)
{
	if (!id)
		return;

	spin_lock_bh(&link_idr_lock);
	idr_remove(&link_idr, id);
	spin_unlock_bh(&link_idr_lock);
}
/* Clean up bpf_link and corresponding anon_inode file and FD. After
 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
 * anon_inode's release() call. This helper marks bpf_link as defunct,
 * releases anon_inode file and puts reserved FD. bpf_prog's refcnt is not
 * decremented, that is the responsibility of the calling code that failed
 * to complete bpf_link initialization.
 */
void bpf_link_cleanup(struct bpf_link_primer *primer)
{
	primer->link->prog = NULL;
	bpf_link_free_id(primer->id);
	fput(primer->file);
	put_unused_fd(primer->fd);
}

void bpf_link_inc(struct bpf_link *link)
{
	atomic64_inc(&link->refcnt);
}

/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
	bpf_link_free_id(link->id);
	if (link->prog) {
		/* detach BPF program, clean up used resources */
		link->ops->release(link);
		bpf_prog_put(link->prog);
	}
	/* free bpf_link and its containing memory */
	link->ops->dealloc(link);
}

static void bpf_link_put_deferred(struct work_struct *work)
{
	struct bpf_link *link = container_of(work, struct bpf_link, work);

	bpf_link_free(link);
}

/* bpf_link_put can be called from atomic context, but ensures that resources
 * are freed from process context
 */
void bpf_link_put(struct bpf_link *link)
{
	if (!atomic64_dec_and_test(&link->refcnt))
		return;

	if (in_atomic()) {
		INIT_WORK(&link->work, bpf_link_put_deferred);
		schedule_work(&link->work);
	} else {
		bpf_link_free(link);
	}
}
EXPORT_SYMBOL(bpf_link_put);

static int bpf_link_release(struct inode *inode, struct file *filp)
{
	struct bpf_link *link = filp->private_data;

	bpf_link_put(link);
	return 0;
}

#ifdef CONFIG_PROC_FS
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
static const char *bpf_link_type_strs[] = {
	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_link *link = filp->private_data;
	const struct bpf_prog *prog = link->prog;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "link_type:\t%s\n"
		   "link_id:\t%u\n"
		   "prog_tag:\t%s\n"
		   "prog_id:\t%u\n",
		   bpf_link_type_strs[link->type],
		   link->id,
		   prog_tag,
		   prog->aux->id);
	if (link->ops->show_fdinfo)
		link->ops->show_fdinfo(link, m);
}
#endif

static const struct file_operations bpf_link_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_link_show_fdinfo,
#endif
	.release	= bpf_link_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

static int bpf_link_alloc_id(struct bpf_link *link)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&link_idr_lock);
	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&link_idr_lock);
	idr_preload_end();

	return id;
}
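
/* Illustrative attach-path sketch for the primer API below; foo_link,
 * foo_link_lops and foo_attach() are hypothetical, not code from this file:
 *
 *	struct bpf_link_primer primer;
 *	struct foo_link *link;
 *	int err;
 *
 *	link = kzalloc(sizeof(*link), GFP_USER);
 *	if (!link)
 *		return -ENOMEM;
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC, &foo_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);		// not primed yet, plain kfree is fine
 *		return err;
 *	}
 *	err = foo_attach(link);		// expensive step that may fail
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// marks link defunct
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// installs FD, publishes ID
 */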
/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
 * reserving unused FD and allocating ID from link_idr. This is to be paired
 * with bpf_link_settle() to install FD and ID and expose bpf_link to
 * user-space, if bpf_link is successfully attached. If not, bpf_link and
 * pre-allocated resources are to be freed with a bpf_link_cleanup() call.
 * All the transient state is passed around in struct bpf_link_primer.
 * This is the preferred way to create and initialize bpf_link, especially
 * when there are complicated and expensive operations in between creating
 * bpf_link itself and attaching it to a BPF hook. By using bpf_link_prime()
 * and bpf_link_settle(), kernel code using bpf_link doesn't have to perform
 * expensive (and potentially failing) roll back operations in the rare case
 * that the file, FD, or ID can't be allocated.
 */
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{
	struct file *file;
	int fd, id;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	id = bpf_link_alloc_id(link);
	if (id < 0) {
		put_unused_fd(fd);
		return id;
	}

	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
	if (IS_ERR(file)) {
		bpf_link_free_id(id);
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	primer->link = link;
	primer->file = file;
	primer->fd = fd;
	primer->id = id;
	return 0;
}

int bpf_link_settle(struct bpf_link_primer *primer)
{
	/* make bpf_link fetchable by ID */
	spin_lock_bh(&link_idr_lock);
	primer->link->id = primer->id;
	spin_unlock_bh(&link_idr_lock);
	/* make bpf_link fetchable by FD */
	fd_install(primer->fd, primer->file);
	/* pass through installed FD */
	return primer->fd;
}

int bpf_link_new_fd(struct bpf_link *link)
{
	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_link *link;

	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_link_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	link = f.file->private_data;
	bpf_link_inc(link);
	fdput(f);

	return link;
}
EXPORT_SYMBOL(bpf_link_get_from_fd);

static void bpf_tracing_link_release(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
						tr_link->trampoline));

	bpf_trampoline_put(tr_link->trampoline);

	/* tgt_prog is NULL if target is a kernel function */
	if (tr_link->tgt_prog)
		bpf_prog_put(tr_link->tgt_prog);
}

static void bpf_tracing_link_dealloc(struct bpf_link *link)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	kfree(tr_link);
}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
					 struct seq_file *seq)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	seq_printf(seq,
		   "attach_type:\t%d\n",
		   tr_link->attach_type);
}
static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
					   struct bpf_link_info *info)
{
	struct bpf_tracing_link *tr_link =
		container_of(link, struct bpf_tracing_link, link.link);

	info->tracing.attach_type = tr_link->attach_type;
	bpf_trampoline_unpack_key(tr_link->trampoline->key,
				  &info->tracing.target_obj_id,
				  &info->tracing.target_btf_id);

	return 0;
}

static const struct bpf_link_ops bpf_tracing_link_lops = {
	.release = bpf_tracing_link_release,
	.dealloc = bpf_tracing_link_dealloc,
	.show_fdinfo = bpf_tracing_link_show_fdinfo,
	.fill_link_info = bpf_tracing_link_fill_link_info,
};

static int bpf_tracing_prog_attach(struct bpf_prog *prog,
				   int tgt_prog_fd,
				   u32 btf_id,
				   u64 bpf_cookie)
{
	struct bpf_link_primer link_primer;
	struct bpf_prog *tgt_prog = NULL;
	struct bpf_trampoline *tr = NULL;
	struct bpf_tracing_link *link;
	u64 key = 0;
	int err;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_EXT:
		if (prog->expected_attach_type != 0) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	case BPF_PROG_TYPE_LSM:
		if (prog->expected_attach_type != BPF_LSM_MAC) {
			err = -EINVAL;
			goto out_put_prog;
		}
		break;
	default:
		err = -EINVAL;
		goto out_put_prog;
	}

	if (!!tgt_prog_fd != !!btf_id) {
		err = -EINVAL;
		goto out_put_prog;
	}

	if (tgt_prog_fd) {
		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
		if (prog->type != BPF_PROG_TYPE_EXT) {
			err = -EINVAL;
			goto out_put_prog;
		}

		tgt_prog = bpf_prog_get(tgt_prog_fd);
		if (IS_ERR(tgt_prog)) {
			err = PTR_ERR(tgt_prog);
			tgt_prog = NULL;
			goto out_put_prog;
		}

		key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id);
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_prog;
	}
	bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING,
		      &bpf_tracing_link_lops, prog);
	link->attach_type = prog->expected_attach_type;
	link->link.cookie = bpf_cookie;

	mutex_lock(&prog->aux->dst_mutex);

	/* There are a few possible cases here:
	 *
	 * - if prog->aux->dst_trampoline is set, the program was just loaded
	 *   and not yet attached to anything, so we can use the values stored
	 *   in prog->aux
	 *
	 * - if prog->aux->dst_trampoline is NULL, the program has already been
	 *   attached to a target and its initial target was cleared (below)
	 *
	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
	 *   target_btf_id using the link_create API.
	 *
	 * - if tgt_prog == NULL, this function was called using the old
	 *   raw_tracepoint_open API, and we need a target from prog->aux
	 *
	 * - if both prog->aux->dst_trampoline and tgt_prog are NULL, the
	 *   program was detached and is going for re-attachment.
	 */
	if (!prog->aux->dst_trampoline && !tgt_prog) {
		/*
		 * Allow re-attach for TRACING and LSM programs. If it's
		 * currently linked, bpf_trampoline_link_prog will fail.
		 * EXT programs need to specify tgt_prog_fd, so they
		 * re-attach in a separate code path.
		 */
		if (prog->type != BPF_PROG_TYPE_TRACING &&
		    prog->type != BPF_PROG_TYPE_LSM) {
			err = -EINVAL;
			goto out_unlock;
		}
		btf_id = prog->aux->attach_btf_id;
		key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
	}

	if (!prog->aux->dst_trampoline ||
	    (key && key != prog->aux->dst_trampoline->key)) {
		/* If there is no saved target, or the specified target is
		 * different from the destination specified at load time, we
		 * need a new trampoline and a check for compatibility
		 */
		struct bpf_attach_target_info tgt_info = {};

		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
					      &tgt_info);
		if (err)
			goto out_unlock;

		tr = bpf_trampoline_get(key, &tgt_info);
		if (!tr) {
			err = -ENOMEM;
			goto out_unlock;
		}
	} else {
		/* The caller didn't specify a target, or the target was the
		 * same as the destination supplied during program load. This
		 * means we can reuse the trampoline and reference from program
		 * load time, and there is no need to allocate a new one. This
		 * can only happen once for any program, as the saved values in
		 * prog->aux are cleared below.
		 */
		tr = prog->aux->dst_trampoline;
		tgt_prog = prog->aux->dst_prog;
	}

	err = bpf_link_prime(&link->link.link, &link_primer);
	if (err)
		goto out_unlock;

	err = bpf_trampoline_link_prog(&link->link, tr);
	if (err) {
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto out_unlock;
	}

	link->tgt_prog = tgt_prog;
	link->trampoline = tr;

	/* Always clear the trampoline and target prog from prog->aux to make
	 * sure the original attach destination is not kept alive after a
	 * program is (re-)attached to another target.
	 */
	if (prog->aux->dst_prog &&
	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
		/* got extra prog ref from syscall, or attaching to different prog */
		bpf_prog_put(prog->aux->dst_prog);
	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
		/* we allocated a new trampoline, so free the old one */
		bpf_trampoline_put(prog->aux->dst_trampoline);

	prog->aux->dst_prog = NULL;
	prog->aux->dst_trampoline = NULL;
	mutex_unlock(&prog->aux->dst_mutex);

	return bpf_link_settle(&link_primer);
out_unlock:
	if (tr && tr != prog->aux->dst_trampoline)
		bpf_trampoline_put(tr);
	mutex_unlock(&prog->aux->dst_mutex);
	kfree(link);
out_put_prog:
	if (tgt_prog_fd && tgt_prog)
		bpf_prog_put(tgt_prog);
	return err;
}

struct bpf_raw_tp_link {
	struct bpf_link link;
	struct bpf_raw_event_map *btp;
};

static void bpf_raw_tp_link_release(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
	bpf_put_raw_tracepoint(raw_tp->btp);
}

static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
{
	struct bpf_raw_tp_link *raw_tp =
		container_of(link, struct bpf_raw_tp_link, link);

	kfree(raw_tp);
}

static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);

	seq_printf(seq,
		   "tp_name:\t%s\n",
		   raw_tp_link->btp->tp->name);
}

static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_raw_tp_link *raw_tp_link =
		container_of(link, struct bpf_raw_tp_link, link);
	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
	const char *tp_name = raw_tp_link->btp->tp->name;
	u32 ulen = info->raw_tracepoint.tp_name_len;
	size_t tp_len = strlen(tp_name);

	if (!ulen ^ !ubuf)
		return -EINVAL;

	info->raw_tracepoint.tp_name_len = tp_len + 1;

	if (!ubuf)
		return 0;

	if (ulen >= tp_len + 1) {
		if (copy_to_user(ubuf, tp_name, tp_len + 1))
			return -EFAULT;
	} else {
		char zero = '\0';

		if (copy_to_user(ubuf, tp_name, ulen - 1))
			return -EFAULT;
		if (put_user(zero, ubuf + ulen - 1))
			return -EFAULT;
		return -ENOSPC;
	}

	return 0;
}

static const struct bpf_link_ops bpf_raw_tp_link_lops = {
	.release = bpf_raw_tp_link_release,
	.dealloc = bpf_raw_tp_link_dealloc,
	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
	.fill_link_info = bpf_raw_tp_link_fill_link_info,
};
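
/* bpf_perf_link pins the perf_event file the program is attached to: the
 * link holds a reference on that file for its whole lifetime, so the
 * underlying event can't go away while the link exists.
 */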
#ifdef CONFIG_PERF_EVENTS
struct bpf_perf_link {
	struct bpf_link link;
	struct file *perf_file;
};

static void bpf_perf_link_release(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);
	struct perf_event *event = perf_link->perf_file->private_data;

	perf_event_free_bpf_prog(event);
	fput(perf_link->perf_file);
}

static void bpf_perf_link_dealloc(struct bpf_link *link)
{
	struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link);

	kfree(perf_link);
}

static const struct bpf_link_ops bpf_perf_link_lops = {
	.release = bpf_perf_link_release,
	.dealloc = bpf_perf_link_dealloc,
};

static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_perf_link *link;
	struct perf_event *event;
	struct file *perf_file;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	perf_file = perf_event_get(attr->link_create.target_fd);
	if (IS_ERR(perf_file))
		return PTR_ERR(perf_file);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_file;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog);
	link->perf_file = perf_file;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_file;
	}

	event = perf_file->private_data;
	err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_file;
	}
	/* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */
	bpf_prog_inc(prog);

	return bpf_link_settle(&link_primer);

out_put_file:
	fput(perf_file);
	return err;
}
#else
static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_PERF_EVENTS */
static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
				  const char __user *user_tp_name)
{
	struct bpf_link_primer link_primer;
	struct bpf_raw_tp_link *link;
	struct bpf_raw_event_map *btp;
	const char *tp_name;
	char buf[128];
	int err;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
		if (user_tp_name)
			/* The attach point for this category of programs
			 * should be specified via btf_id during program load.
			 */
			return -EINVAL;
		if (prog->type == BPF_PROG_TYPE_TRACING &&
		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
			tp_name = prog->aux->attach_func_name;
			break;
		}
		return bpf_tracing_prog_attach(prog, 0, 0, 0);
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
		if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0)
			return -EFAULT;
		buf[sizeof(buf) - 1] = 0;
		tp_name = buf;
		break;
	default:
		return -EINVAL;
	}

	btp = bpf_get_raw_tracepoint(tp_name);
	if (!btp)
		return -ENOENT;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_btp;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
		      &bpf_raw_tp_link_lops, prog);
	link->btp = btp;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_btp;
	}

	err = bpf_probe_register(link->btp, prog);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_btp;
	}

	return bpf_link_settle(&link_primer);

out_put_btp:
	bpf_put_raw_tracepoint(btp);
	return err;
}

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	int fd;

	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
		return -EINVAL;

	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
	if (fd < 0)
		bpf_prog_put(prog);
	return fd;
}
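
/* Attach-time counterpart of bpf_prog_load_check_attach(): for program types
 * whose expected_attach_type is fixed at load time, re-validate that the
 * requested attach_type matches before the program is actually attached.
 */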
static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
					     enum bpf_attach_type attach_type)
{
	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_SK_LOOKUP:
		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (!capable(CAP_NET_ADMIN))
			/* cg-skb progs can be loaded by unpriv user.
			 * check permissions at attach time.
			 */
			return -EPERM;
		return prog->enforce_expected_attach_type &&
		       prog->expected_attach_type != attach_type ?
		       -EINVAL : 0;
	default:
		return 0;
	}
}

static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		return BPF_PROG_TYPE_CGROUP_SKB;
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
		return BPF_PROG_TYPE_CGROUP_SOCK;
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	case BPF_CGROUP_SOCK_OPS:
		return BPF_PROG_TYPE_SOCK_OPS;
	case BPF_CGROUP_DEVICE:
		return BPF_PROG_TYPE_CGROUP_DEVICE;
	case BPF_SK_MSG_VERDICT:
		return BPF_PROG_TYPE_SK_MSG;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return BPF_PROG_TYPE_SK_SKB;
	case BPF_LIRC_MODE2:
		return BPF_PROG_TYPE_LIRC_MODE2;
	case BPF_FLOW_DISSECTOR:
		return BPF_PROG_TYPE_FLOW_DISSECTOR;
	case BPF_CGROUP_SYSCTL:
		return BPF_PROG_TYPE_CGROUP_SYSCTL;
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return BPF_PROG_TYPE_TRACING;
	case BPF_LSM_MAC:
		return BPF_PROG_TYPE_LSM;
	case BPF_SK_LOOKUP:
		return BPF_PROG_TYPE_SK_LOOKUP;
	case BPF_XDP:
		return BPF_PROG_TYPE_XDP;
	case BPF_LSM_CGROUP:
		return BPF_PROG_TYPE_LSM;
	default:
		return BPF_PROG_TYPE_UNSPEC;
	}
}

#define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);
	if (ptype == BPF_PROG_TYPE_UNSPEC)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	switch (ptype) {
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		ret = sock_map_get_from_fd(attr, prog);
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		ret = lirc_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		ret = netns_bpf_prog_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		if (ptype == BPF_PROG_TYPE_LSM &&
		    prog->expected_attach_type != BPF_LSM_CGROUP) {
			/* take the common error path so that the reference
			 * acquired by bpf_prog_get_type() above is dropped
			 * instead of being leaked
			 */
			ret = -EINVAL;
			break;
		}

		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	ptype = attach_type_to_prog_type(attr->attach_type);

	switch (ptype) {
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_SK_SKB:
		return sock_map_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_LIRC_MODE2:
		return lirc_prog_detach(attr);
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
		return netns_bpf_prog_detach(attr, ptype);
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_LSM:
		return cgroup_bpf_prog_detach(attr, ptype);
	default:
		return -EINVAL;
	}
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_attach_flags

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_INET_SOCK_RELEASE:
	case BPF_CGROUP_INET4_BIND:
	case BPF_CGROUP_INET6_BIND:
	case BPF_CGROUP_INET4_POST_BIND:
	case BPF_CGROUP_INET6_POST_BIND:
	case BPF_CGROUP_INET4_CONNECT:
	case BPF_CGROUP_INET6_CONNECT:
	case BPF_CGROUP_INET4_GETPEERNAME:
	case BPF_CGROUP_INET6_GETPEERNAME:
	case BPF_CGROUP_INET4_GETSOCKNAME:
	case BPF_CGROUP_INET6_GETSOCKNAME:
	case BPF_CGROUP_UDP4_SENDMSG:
	case BPF_CGROUP_UDP6_SENDMSG:
	case BPF_CGROUP_UDP4_RECVMSG:
	case BPF_CGROUP_UDP6_RECVMSG:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
	case BPF_CGROUP_SYSCTL:
	case BPF_CGROUP_GETSOCKOPT:
	case BPF_CGROUP_SETSOCKOPT:
	case BPF_LSM_CGROUP:
		return cgroup_bpf_prog_query(attr, uattr);
	case BPF_LIRC_MODE2:
		return lirc_prog_query(attr, uattr);
	case BPF_FLOW_DISSECTOR:
	case BPF_SK_LOOKUP:
		return netns_bpf_prog_query(attr, uattr);
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
	case BPF_SK_MSG_VERDICT:
	case BPF_SK_SKB_VERDICT:
		return sock_map_bpf_prog_query(attr, uattr);
	default:
		return -EINVAL;
	}
}
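
/* Illustrative user-space sketch of BPF_PROG_TEST_RUN (not kernel code;
 * assumes a loaded prog_fd and uses the raw syscall for brevity):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success, attr.test.retval holds the program's return value
 */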
#define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
	    (!attr->test.ctx_size_in && attr->test.ctx_in))
		return -EINVAL;

	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
	    (!attr->test.ctx_size_out && attr->test.ctx_out))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
{
	struct bpf_map *map;

	spin_lock_bh(&map_idr_lock);
again:
	map = idr_get_next(&map_idr, id);
	if (map) {
		map = __bpf_map_inc_not_zero(map, false);
		if (IS_ERR(map)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&map_idr_lock);

	return map;
}

struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
{
	struct bpf_prog *prog;

	spin_lock_bh(&prog_idr_lock);
again:
	prog = idr_get_next(&prog_idr, id);
	if (prog) {
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&prog_idr_lock);

	return prog;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

struct bpf_prog *bpf_prog_by_id(u32 id)
{
	struct bpf_prog *prog;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);
	return prog;
}

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	prog = bpf_prog_by_id(id);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = __bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put_with_uref(map);

	return fd;
}
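
/* The xlated dump path below does not expose raw kernel pointers: map
 * addresses embedded in BPF_LD_IMM64 are translated back to map IDs via
 * bpf_map_from_imm(), and helper call addresses are cleared for callers
 * that fail the bpf_dump_raw_ok() check.
 */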
static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
					      unsigned long addr, u32 *off,
					      u32 *type)
{
	const struct bpf_map *map;
	int i;

	mutex_lock(&prog->aux->used_maps_mutex);
	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (map == (void *)addr) {
			*type = BPF_PSEUDO_MAP_FD;
			goto out;
		}
		if (!map->ops->map_direct_value_meta)
			continue;
		if (!map->ops->map_direct_value_meta(map, addr, off)) {
			*type = BPF_PSEUDO_MAP_VALUE;
			goto out;
		}
	}
	map = NULL;

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return map;
}

static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
					      const struct cred *f_cred)
{
	const struct bpf_map *map;
	struct bpf_insn *insns;
	u32 off, type;
	u64 imm;
	u8 code;
	int i;

	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
			GFP_USER);
	if (!insns)
		return insns;

	for (i = 0; i < prog->len; i++) {
		code = insns[i].code;

		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
			insns[i].code = BPF_JMP | BPF_CALL;
			insns[i].imm = BPF_FUNC_tail_call;
			/* fall-through */
		}
		if (code == (BPF_JMP | BPF_CALL) ||
		    code == (BPF_JMP | BPF_CALL_ARGS)) {
			if (code == (BPF_JMP | BPF_CALL_ARGS))
				insns[i].code = BPF_JMP | BPF_CALL;
			if (!bpf_dump_raw_ok(f_cred))
				insns[i].imm = 0;
			continue;
		}
		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
			continue;
		}

		if (code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;

		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
		map = bpf_map_from_imm(prog, imm, &off, &type);
		if (map) {
			insns[i].src_reg = type;
			insns[i].imm = map->id;
			insns[i + 1].imm = off;
			continue;
		}
	}

	return insns;
}

static int set_info_rec_size(struct bpf_prog_info *info)
{
	/*
	 * Ensure info.*_rec_size is the same as kernel expected size
	 *
	 * or
	 *
	 * Only allow zero *_rec_size if both _rec_size and _cnt are
	 * zero. In this case, the kernel will set the expected
	 * _rec_size back to the info.
	 */

	if ((info->nr_func_info || info->func_info_rec_size) &&
	    info->func_info_rec_size != sizeof(struct bpf_func_info))
		return -EINVAL;

	if ((info->nr_line_info || info->line_info_rec_size) &&
	    info->line_info_rec_size != sizeof(struct bpf_line_info))
		return -EINVAL;

	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
	    info->jited_line_info_rec_size != sizeof(__u64))
		return -EINVAL;

	info->func_info_rec_size = sizeof(struct bpf_func_info);
	info->line_info_rec_size = sizeof(struct bpf_line_info);
	info->jited_line_info_rec_size = sizeof(__u64);

	return 0;
}

static int bpf_prog_get_info_by_fd(struct file *file,
				   struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct btf *attach_btf = bpf_prog_get_target_btf(prog);
	struct bpf_prog_info info;
	u32 info_len = attr->info.info_len;
	struct bpf_prog_kstats stats;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);
	info.gpl_compatible = prog->gpl_compatible;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	mutex_lock(&prog->aux->used_maps_mutex);
	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i])) {
				mutex_unlock(&prog->aux->used_maps_mutex);
				return -EFAULT;
			}
	}
	mutex_unlock(&prog->aux->used_maps_mutex);

	err = set_info_rec_size(&info);
	if (err)
		return err;

	bpf_prog_get_stats(prog, &stats);
	info.run_time_ns = stats.nsecs;
	info.run_cnt = stats.cnt;
	info.recursion_misses = stats.misses;

	info.verified_insns = prog->aux->verified_insns;

	if (!bpf_capable()) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		info.nr_jited_ksyms = 0;
		info.nr_jited_func_lens = 0;
		info.nr_func_info = 0;
		info.nr_line_info = 0;
		info.nr_jited_line_info = 0;
		goto done;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		struct bpf_insn *insns_sanitized;
		bool fault;

		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
			info.xlated_prog_insns = 0;
			goto done;
		}
		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
		if (!insns_sanitized)
			return -ENOMEM;
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		fault = copy_to_user(uinsns, insns_sanitized, ulen);
		kfree(insns_sanitized);
		if (fault)
			return -EFAULT;
	}
	if (bpf_prog_is_dev_bound(prog->aux)) {
		err = bpf_prog_offload_info_fill(&info, prog);
		if (err)
			return err;
		goto done;
	}

	/* NOTE: the following code is supposed to be skipped for offload.
	 * bpf_prog_offload_info_fill() is the place to fill similar fields
	 * for offload.
	 */
	ulen = info.jited_prog_len;
	if (prog->aux->func_cnt) {
		u32 i;

		info.jited_prog_len = 0;
		for (i = 0; i < prog->aux->func_cnt; i++)
			info.jited_prog_len += prog->aux->func[i]->jited_len;
	} else {
		info.jited_prog_len = prog->jited_len;
	}

	if (info.jited_prog_len && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			uinsns = u64_to_user_ptr(info.jited_prog_insns);
			ulen = min_t(u32, info.jited_prog_len, ulen);

			/* for multi-function programs, copy the JITed
			 * instructions for all the functions
			 */
			if (prog->aux->func_cnt) {
				u32 len, free, i;
				u8 *img;

				free = ulen;
				for (i = 0; i < prog->aux->func_cnt; i++) {
					len = prog->aux->func[i]->jited_len;
					len = min_t(u32, len, free);
					img = (u8 *) prog->aux->func[i]->bpf_func;
					if (copy_to_user(uinsns, img, len))
						return -EFAULT;
					uinsns += len;
					free -= len;
					if (!free)
						break;
				}
			} else {
				if (copy_to_user(uinsns, prog->bpf_func, ulen))
					return -EFAULT;
			}
		} else {
			info.jited_prog_insns = 0;
		}
	}

	ulen = info.nr_jited_ksyms;
	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long ksym_addr;
			u64 __user *user_ksyms;
			u32 i;

			/* copy the address of the kernel symbol
			 * corresponding to each function
			 */
			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					ksym_addr = (unsigned long)
						prog->aux->func[i]->bpf_func;
					if (put_user((u64) ksym_addr,
						     &user_ksyms[i]))
						return -EFAULT;
				}
			} else {
				ksym_addr = (unsigned long) prog->bpf_func;
				if (put_user((u64) ksym_addr, &user_ksyms[0]))
					return -EFAULT;
			}
		} else {
			info.jited_ksyms = 0;
		}
	}

	ulen = info.nr_jited_func_lens;
	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
	if (ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			u32 __user *user_lens;
			u32 func_len, i;

			/* copy the JITed image lengths for each function */
			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
			user_lens = u64_to_user_ptr(info.jited_func_lens);
			if (prog->aux->func_cnt) {
				for (i = 0; i < ulen; i++) {
					func_len =
						prog->aux->func[i]->jited_len;
					if (put_user(func_len, &user_lens[i]))
						return -EFAULT;
				}
			} else {
				func_len = prog->jited_len;
				if (put_user(func_len, &user_lens[0]))
					return -EFAULT;
			}
		} else {
			info.jited_func_lens = 0;
		}
	}

	if (prog->aux->btf)
		info.btf_id = btf_obj_id(prog->aux->btf);
	info.attach_btf_id = prog->aux->attach_btf_id;
	if (attach_btf)
		info.attach_btf_obj_id = btf_obj_id(attach_btf);

	ulen = info.nr_func_info;
	info.nr_func_info = prog->aux->func_info_cnt;
	if (info.nr_func_info && ulen) {
		char __user *user_finfo;

		user_finfo = u64_to_user_ptr(info.func_info);
		ulen = min_t(u32, info.nr_func_info, ulen);
		if (copy_to_user(user_finfo, prog->aux->func_info,
				 info.func_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_line_info;
	info.nr_line_info = prog->aux->nr_linfo;
	if (info.nr_line_info && ulen) {
		__u8 __user *user_linfo;

		user_linfo = u64_to_user_ptr(info.line_info);
		ulen = min_t(u32, info.nr_line_info, ulen);
		if (copy_to_user(user_linfo, prog->aux->linfo,
				 info.line_info_rec_size * ulen))
			return -EFAULT;
	}

	ulen = info.nr_jited_line_info;
	if (prog->aux->jited_linfo)
		info.nr_jited_line_info = prog->aux->nr_linfo;
	else
		info.nr_jited_line_info = 0;
	if (info.nr_jited_line_info && ulen) {
		if (bpf_dump_raw_ok(file->f_cred)) {
			unsigned long line_addr;
			__u64 __user *user_linfo;
			u32 i;

			user_linfo = u64_to_user_ptr(info.jited_line_info);
			ulen = min_t(u32, info.nr_jited_line_info, ulen);
			for (i = 0; i < ulen; i++) {
				line_addr = (unsigned long)prog->aux->jited_linfo[i];
				if (put_user((__u64)line_addr, &user_linfo[i]))
					return -EFAULT;
			}
		} else {
			info.jited_line_info = 0;
		}
	}

	ulen = info.nr_prog_tags;
	info.nr_prog_tags = prog->aux->func_cnt ? : 1;

static int bpf_map_get_info_by_fd(struct file *file,
				  struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	info.map_extra = map->map_extra;
	memcpy(info.name, map->name, sizeof(map->name));

	if (map->btf) {
		info.btf_id = btf_obj_id(map->btf);
		info.btf_key_type_id = map->btf_key_type_id;
		info.btf_value_type_id = map->btf_value_type_id;
	}
	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;

	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_info_fill(&info, map);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_btf_get_info_by_fd(struct file *file,
				  struct btf *btf,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
	if (err)
		return err;

	return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	info.prog_id = link->prog->aux->id;

	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
					     uattr);
	else if (f.file->f_op == &btf_fops)
		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
	else if (f.file->f_op == &bpf_link_fops)
		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
					      attr, uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}
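
/* Illustrative user-space sketch (compiled out): the same command works on
 * any BPF object fd; the kernel dispatches on f_op as above. Here a map fd
 * (hypothetical map_fd) is queried for its basic geometry.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int print_map_info(int map_fd)
{
	struct bpf_map_info info;
	union bpf_attr attr;

	memset(&info, 0, sizeof(info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = map_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;
	printf("map %u: key=%u value=%u max_entries=%u\n",
	       info.id, info.key_size, info.value_size, info.max_entries);
	return 0;
}
#endif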

#define BPF_BTF_LOAD_LAST_FIELD btf_log_level

static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
{
	if (CHECK_ATTR(BPF_BTF_LOAD))
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	return btf_new_fd(attr, uattr);
}

#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id

static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return btf_get_fd_by_id(attr->btf_id);
}

static int bpf_task_fd_query_copy(const union bpf_attr *attr,
				  union bpf_attr __user *uattr,
				  u32 prog_id, u32 fd_type,
				  const char *buf, u64 probe_offset,
				  u64 probe_addr)
{
	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
	u32 len = buf ? strlen(buf) : 0, input_len;
	int err = 0;

	if (put_user(len, &uattr->task_fd_query.buf_len))
		return -EFAULT;
	input_len = attr->task_fd_query.buf_len;
	if (input_len && ubuf) {
		if (!len) {
			/* nothing to copy, just make ubuf NUL-terminated */
			char zero = '\0';

			if (put_user(zero, ubuf))
				return -EFAULT;
		} else if (input_len >= len + 1) {
			/* ubuf can hold the string with NUL terminator */
			if (copy_to_user(ubuf, buf, len + 1))
				return -EFAULT;
		} else {
			/* ubuf cannot hold the string with NUL terminator,
			 * do a partial copy with NUL terminator.
			 */
			char zero = '\0';

			err = -ENOSPC;
			if (copy_to_user(ubuf, buf, input_len - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + input_len - 1))
				return -EFAULT;
		}
	}

	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
		return -EFAULT;

	return err;
}

#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr

static int bpf_task_fd_query(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	pid_t pid = attr->task_fd_query.pid;
	u32 fd = attr->task_fd_query.fd;
	const struct perf_event *event;
	struct task_struct *task;
	struct file *file;
	int err;

	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->task_fd_query.flags != 0)
		return -EINVAL;

	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
	if (!task)
		return -ENOENT;

	err = 0;
	file = fget_task(task, fd);
	put_task_struct(task);
	if (!file)
		return -EBADF;

	if (file->f_op == &bpf_link_fops) {
		struct bpf_link *link = file->private_data;

		if (link->ops == &bpf_raw_tp_link_lops) {
			struct bpf_raw_tp_link *raw_tp =
				container_of(link, struct bpf_raw_tp_link, link);
			struct bpf_raw_event_map *btp = raw_tp->btp;

			err = bpf_task_fd_query_copy(attr, uattr,
						     raw_tp->link.prog->aux->id,
						     BPF_FD_TYPE_RAW_TRACEPOINT,
						     btp->tp->name, 0, 0);
			goto put_file;
		}
		goto out_not_supp;
	}

	event = perf_get_event(file);
	if (!IS_ERR(event)) {
		u64 probe_offset, probe_addr;
		u32 prog_id, fd_type;
		const char *buf;

		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
					      &buf, &probe_offset,
					      &probe_addr);
		if (!err)
			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
						     fd_type, buf,
						     probe_offset,
						     probe_addr);
		goto put_file;
	}

out_not_supp:
	err = -ENOTSUPP;
put_file:
	fput(file);
	return err;
}
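
/* Illustrative user-space sketch (compiled out): asking the kernel what a
 * given (pid, fd) pair is attached to. perf_fd is hypothetical; on success
 * the kernel writes prog_id, fd_type, probe_offset and probe_addr back into
 * the caller's attr, and the probe name (if any) into buf.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int query_task_fd(pid_t pid, int perf_fd)
{
	char buf[256];
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = perf_fd;
	attr.task_fd_query.buf_len = sizeof(buf);
	attr.task_fd_query.buf = (__u64)(unsigned long)buf;

	/* requires CAP_SYS_ADMIN, as enforced in bpf_task_fd_query() */
	return syscall(__NR_bpf, BPF_TASK_FD_QUERY, &attr, sizeof(attr));
}
#endif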

#define BPF_MAP_BATCH_LAST_FIELD batch.flags

#define BPF_DO_BATCH(fn)			\
	do {					\
		if (!fn) {			\
			err = -ENOTSUPP;	\
			goto err_put;		\
		}				\
		err = fn(map, attr, uattr);	\
	} while (0)

static int bpf_map_do_batch(const union bpf_attr *attr,
			    union bpf_attr __user *uattr,
			    int cmd)
{
	bool has_read = cmd == BPF_MAP_LOOKUP_BATCH ||
			cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
	struct bpf_map *map;
	int err, ufd;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_BATCH))
		return -EINVAL;

	ufd = attr->batch.map_fd;
	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (has_write)
		bpf_map_write_active_inc(map);
	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (cmd == BPF_MAP_LOOKUP_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_batch);
	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
	else if (cmd == BPF_MAP_UPDATE_BATCH)
		BPF_DO_BATCH(map->ops->map_update_batch);
	else
		BPF_DO_BATCH(map->ops->map_delete_batch);
err_put:
	if (has_write)
		bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
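
/* Illustrative user-space sketch (compiled out): draining a hash map with
 * BPF_MAP_LOOKUP_BATCH. The kernel writes an opaque cursor to out_batch;
 * feeding it back as in_batch resumes iteration, and -ENOENT marks the end.
 * The 4-byte key / 8-byte value layout and map_fd are assumptions.
 */
#if 0
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int drain_map(int map_fd)
{
	__u32 keys[64], in_batch = 0, out_batch = 0;
	__u64 vals[64];
	int first = 1, ret;
	union bpf_attr attr;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.batch.map_fd = map_fd;
		/* NULL in_batch means "start from the beginning" */
		attr.batch.in_batch = first ? 0 : (__u64)(unsigned long)&in_batch;
		attr.batch.out_batch = (__u64)(unsigned long)&out_batch;
		attr.batch.keys = (__u64)(unsigned long)keys;
		attr.batch.values = (__u64)(unsigned long)vals;
		attr.batch.count = 64;	/* in: capacity, out: entries copied */

		ret = syscall(__NR_bpf, BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
		/* ... consume attr.batch.count entries from keys/vals here ... */
		if (ret && errno == ENOENT)
			return 0;	/* iteration complete */
		if (ret)
			return -1;
		in_batch = out_batch;
		first = 0;
	}
}
#endif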

#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	int ret;

	if (CHECK_ATTR(BPF_LINK_CREATE))
		return -EINVAL;

	prog = bpf_prog_get(attr->link_create.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	ret = bpf_prog_attach_check_attach_type(prog,
						attr->link_create.attach_type);
	if (ret)
		goto out;

	switch (prog->type) {
	case BPF_PROG_TYPE_EXT:
		break;
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		if (attr->link_create.attach_type != BPF_PERF_EVENT) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type != BPF_PERF_EVENT &&
		    attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
			ret = -EINVAL;
			goto out;
		}
		break;
	default:
		ptype = attach_type_to_prog_type(attr->link_create.attach_type);
		if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		ret = cgroup_bpf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_EXT:
		ret = bpf_tracing_prog_attach(prog,
					      attr->link_create.target_fd,
					      attr->link_create.target_btf_id,
					      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_TRACING:
		if (attr->link_create.attach_type != prog->expected_attach_type) {
			ret = -EINVAL;
			goto out;
		}
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
			ret = bpf_raw_tp_link_attach(prog, NULL);
		else if (prog->expected_attach_type == BPF_TRACE_ITER)
			ret = bpf_iter_link_attach(attr, uattr, prog);
		else if (prog->expected_attach_type == BPF_LSM_CGROUP)
			ret = cgroup_bpf_link_attach(attr, prog);
		else
			ret = bpf_tracing_prog_attach(prog,
						      attr->link_create.target_fd,
						      attr->link_create.target_btf_id,
						      attr->link_create.tracing.cookie);
		break;
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_SK_LOOKUP:
		ret = netns_bpf_link_create(attr, prog);
		break;
#ifdef CONFIG_NET
	case BPF_PROG_TYPE_XDP:
		ret = bpf_xdp_link_attach(attr, prog);
		break;
#endif
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_TRACEPOINT:
		ret = bpf_perf_link_attach(attr, prog);
		break;
	case BPF_PROG_TYPE_KPROBE:
		if (attr->link_create.attach_type == BPF_PERF_EVENT)
			ret = bpf_perf_link_attach(attr, prog);
		else
			ret = bpf_kprobe_multi_link_attach(attr, prog);
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret < 0)
		bpf_prog_put(prog);
	return ret;
}

#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd

static int link_update(union bpf_attr *attr)
{
	struct bpf_prog *old_prog = NULL, *new_prog;
	struct bpf_link *link;
	u32 flags;
	int ret;

	if (CHECK_ATTR(BPF_LINK_UPDATE))
		return -EINVAL;

	flags = attr->link_update.flags;
	if (flags & ~BPF_F_REPLACE)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_update.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
	if (IS_ERR(new_prog)) {
		ret = PTR_ERR(new_prog);
		goto out_put_link;
	}

	if (flags & BPF_F_REPLACE) {
		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
		if (IS_ERR(old_prog)) {
			ret = PTR_ERR(old_prog);
			old_prog = NULL;
			goto out_put_progs;
		}
	} else if (attr->link_update.old_prog_fd) {
		ret = -EINVAL;
		goto out_put_progs;
	}

	if (link->ops->update_prog)
		ret = link->ops->update_prog(link, new_prog, old_prog);
	else
		ret = -EINVAL;

out_put_progs:
	if (old_prog)
		bpf_prog_put(old_prog);
	if (ret)
		bpf_prog_put(new_prog);
out_put_link:
	bpf_link_put(link);
	return ret;
}
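
/* Illustrative user-space sketch (compiled out): atomically swapping the
 * program behind a link. With BPF_F_REPLACE the swap only succeeds if the
 * link still runs old_prog_fd, guarding against racing updates. All fds
 * are hypothetical.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int swap_link_prog(int link_fd, int old_prog_fd, int new_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.old_prog_fd = old_prog_fd;
	attr.link_update.flags = BPF_F_REPLACE;

	return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}
#endif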

#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd

static int link_detach(union bpf_attr *attr)
{
	struct bpf_link *link;
	int ret;

	if (CHECK_ATTR(BPF_LINK_DETACH))
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	if (link->ops->detach)
		ret = link->ops->detach(link);
	else
		ret = -EOPNOTSUPP;

	bpf_link_put(link);
	return ret;
}

static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ?
		link : ERR_PTR(-ENOENT);
}

struct bpf_link *bpf_link_by_id(u32 id)
{
	struct bpf_link *link;

	if (!id)
		return ERR_PTR(-ENOENT);

	spin_lock_bh(&link_idr_lock);
	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
	link = idr_find(&link_idr, id);
	if (link) {
		if (link->id)
			link = bpf_link_inc_not_zero(link);
		else
			link = ERR_PTR(-EAGAIN);
	} else {
		link = ERR_PTR(-ENOENT);
	}
	spin_unlock_bh(&link_idr_lock);
	return link;
}

struct bpf_link *bpf_link_get_curr_or_next(u32 *id)
{
	struct bpf_link *link;

	spin_lock_bh(&link_idr_lock);
again:
	link = idr_get_next(&link_idr, id);
	if (link) {
		link = bpf_link_inc_not_zero(link);
		if (IS_ERR(link)) {
			(*id)++;
			goto again;
		}
	}
	spin_unlock_bh(&link_idr_lock);

	return link;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_link *link;
	u32 id = attr->link_id;
	int fd;

	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	link = bpf_link_by_id(id);
	if (IS_ERR(link))
		return PTR_ERR(link);

	fd = bpf_link_new_fd(link);
	if (fd < 0)
		bpf_link_put(link);

	return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
	mutex_lock(&bpf_stats_enabled_mutex);
	static_key_slow_dec(&bpf_stats_enabled_key.key);
	mutex_unlock(&bpf_stats_enabled_mutex);
	return 0;
}

static const struct file_operations bpf_stats_fops = {
	.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
	int fd;

	mutex_lock(&bpf_stats_enabled_mutex);

	/* Set a very high limit to avoid overflow */
	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
		mutex_unlock(&bpf_stats_enabled_mutex);
		return -EBUSY;
	}

	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
	if (fd >= 0)
		static_key_slow_inc(&bpf_stats_enabled_key.key);

	mutex_unlock(&bpf_stats_enabled_mutex);
	return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_ENABLE_STATS))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (attr->enable_stats.type) {
	case BPF_STATS_RUN_TIME:
		return bpf_enable_runtime_stats();
	default:
		break;
	}
	return -EINVAL;
}
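
/* Illustrative user-space sketch (compiled out): run-time stats stay enabled
 * for as long as the returned anon fd is held; closing it decrements the
 * static key via bpf_stats_release() above. The counters then surface in
 * bpf_prog_info.run_time_ns / run_cnt.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int enable_run_time_stats(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = BPF_STATS_RUN_TIME;

	/* returns an fd; keep it open while profiling, close() to disable */
	return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
}
#endif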

#define BPF_ITER_CREATE_LAST_FIELD iter_create.flags

static int bpf_iter_create(union bpf_attr *attr)
{
	struct bpf_link *link;
	int err;

	if (CHECK_ATTR(BPF_ITER_CREATE))
		return -EINVAL;

	if (attr->iter_create.flags)
		return -EINVAL;

	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
	if (IS_ERR(link))
		return PTR_ERR(link);

	err = bpf_iter_new_fd(link);
	bpf_link_put(link);

	return err;
}

#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags

static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			bpf_map_put(map);
			goto out_unlock;
		}

	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
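
/* Illustrative user-space sketch (compiled out): tying a map's lifetime to a
 * program that does not reference it in its instructions (e.g. a map holding
 * only metadata), so the map survives for as long as the program does.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int bind_map_to_prog(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;

	return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}
#endif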

static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
	union bpf_attr attr;
	bool capable;
	int err;

	capable = bpf_capable() || !sysctl_unprivileged_bpf_disabled;

	/* Intent here is for unprivileged_bpf_disabled to block key object
	 * creation commands for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Capabilities are later verified for
	 * operations such as load and map create, so even with unprivileged
	 * BPF disabled, capability checks are still carried out for these
	 * and other operations.
	 */
	if (!capable &&
	    (cmd == BPF_MAP_CREATE || cmd == BPF_PROG_LOAD))
		return -EPERM;

	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_bpfptr(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr, uattr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr, uattr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_MAP_FREEZE:
		err = map_freeze(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr, uattr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr.user);
		break;
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr.user);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_BTF_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &btf_idr, &btf_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr.user);
		break;
	case BPF_RAW_TRACEPOINT_OPEN:
		err = bpf_raw_tracepoint_open(&attr);
		break;
	case BPF_BTF_LOAD:
		err = bpf_btf_load(&attr, uattr);
		break;
	case BPF_BTF_GET_FD_BY_ID:
		err = bpf_btf_get_fd_by_id(&attr);
		break;
	case BPF_TASK_FD_QUERY:
		err = bpf_task_fd_query(&attr, uattr.user);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
		err = map_lookup_and_delete_elem(&attr);
		break;
	case BPF_MAP_LOOKUP_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
		break;
	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user,
				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
		break;
	case BPF_MAP_UPDATE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
		break;
	case BPF_MAP_DELETE_BATCH:
		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
		break;
	case BPF_LINK_CREATE:
		err = link_create(&attr, uattr);
		break;
	case BPF_LINK_UPDATE:
		err = link_update(&attr);
		break;
	case BPF_LINK_GET_FD_BY_ID:
		err = bpf_link_get_fd_by_id(&attr);
		break;
	case BPF_LINK_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr.user,
					  &link_idr, &link_idr_lock);
		break;
	case BPF_ENABLE_STATS:
		err = bpf_enable_stats(&attr);
		break;
	case BPF_ITER_CREATE:
		err = bpf_iter_create(&attr);
		break;
	case BPF_LINK_DETACH:
		err = link_detach(&attr);
		break;
	case BPF_PROG_BIND_MAP:
		err = bpf_prog_bind_map(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}
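
/* Illustrative user-space sketch (compiled out): libc provides no bpf()
 * wrapper, so user space conventionally goes through syscall(2), which is
 * exactly what the SYSCALL_DEFINE3 above receives. This is the wrapper the
 * sketches earlier in this file have been open-coding.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	/* the unread tail of *attr must be zero, see bpf_check_uarg_tail_zero() */
	return syscall(__NR_bpf, cmd, attr, size);
}
#endif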

static bool syscall_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= U16_MAX)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size)
{
	switch (cmd) {
	case BPF_MAP_CREATE:
	case BPF_MAP_DELETE_ELEM:
	case BPF_MAP_UPDATE_ELEM:
	case BPF_MAP_FREEZE:
	case BPF_MAP_GET_FD_BY_ID:
	case BPF_PROG_LOAD:
	case BPF_BTF_LOAD:
	case BPF_LINK_CREATE:
	case BPF_RAW_TRACEPOINT_OPEN:
		break;
	default:
		return -EINVAL;
	}
	return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}

/* To shut up -Wmissing-prototypes.
 * This function is used by the kernel light skeleton
 * to load bpf programs when modules are loaded or during kernel boot.
 * See tools/lib/bpf/skel_internal.h
 */
int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);

int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_prog * __maybe_unused prog;
	struct bpf_tramp_run_ctx __maybe_unused run_ctx;

	switch (cmd) {
#ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */
	case BPF_PROG_TEST_RUN:
		if (attr->test.data_in || attr->test.data_out ||
		    attr->test.ctx_out || attr->test.duration ||
		    attr->test.repeat || attr->test.flags)
			return -EINVAL;

		prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL);
		if (IS_ERR(prog))
			return PTR_ERR(prog);

		if (attr->test.ctx_size_in < prog->aux->max_ctx_offset ||
		    attr->test.ctx_size_in > U16_MAX) {
			bpf_prog_put(prog);
			return -EINVAL;
		}

		run_ctx.bpf_cookie = 0;
		run_ctx.saved_run_ctx = NULL;
		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
			/* recursion detected */
			bpf_prog_put(prog);
			return -EBUSY;
		}
		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */,
					  &run_ctx);
		bpf_prog_put(prog);
		return 0;
#endif
	default:
		return ____bpf_sys_bpf(cmd, attr, size);
	}
}
EXPORT_SYMBOL(kern_sys_bpf);

static const struct bpf_func_proto bpf_sys_bpf_proto = {
	.func		= bpf_sys_bpf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};
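
/* Illustrative BPF-side sketch (compiled out): roughly what a
 * BPF_PROG_TYPE_SYSCALL program built with libbpf might look like. The
 * verifier hands it bpf_sys_bpf() via syscall_prog_func_proto() below.
 * struct args and its fields are hypothetical, in the style of a light
 * skeleton's loader program.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct args {
	union bpf_attr map_attr;	/* filled in by user space */
	int map_fd;			/* result written back to user space */
};

SEC("syscall")
int create_map(struct args *ctx)
{
	/* runs sleepable, driven by bpf_prog_test_run() on a syscall prog */
	ctx->map_fd = bpf_sys_bpf(BPF_MAP_CREATE, &ctx->map_attr,
				  sizeof(ctx->map_attr));
	return 0;
}

char _license[] SEC("license") = "GPL";
#endif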

const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

BPF_CALL_1(bpf_sys_close, u32, fd)
{
	/* When a BPF program calls this helper there must be no outstanding
	 * fdget() without a matching completed fdput().
	 * This helper is allowed in the following callchain only:
	 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
	 */
	return close_fd(fd);
}

static const struct bpf_func_proto bpf_sys_close_proto = {
	.func		= bpf_sys_close,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
{
	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	if (!bpf_dump_raw_ok(current_cred()))
		return -EPERM;

	*res = kallsyms_lookup_name(name);
	return *res ? 0 : -ENOENT;
}

static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
	.func		= bpf_kallsyms_lookup_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sys_bpf:
		return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
	case BPF_FUNC_btf_find_by_name_kind:
		return &bpf_btf_find_by_name_kind_proto;
	case BPF_FUNC_sys_close:
		return &bpf_sys_close_proto;
	case BPF_FUNC_kallsyms_lookup_name:
		return &bpf_kallsyms_lookup_name_proto;
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
	.get_func_proto		= syscall_prog_func_proto,
	.is_valid_access	= syscall_prog_is_valid_access,
};

const struct bpf_prog_ops bpf_syscall_prog_ops = {
	.test_run	= bpf_prog_test_run_syscall,
};

#ifdef CONFIG_SYSCTL
static int bpf_stats_handler(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct static_key *key = (struct static_key *)table->data;
	static int saved_val;
	int val, ret;
	struct ctl_table tmp = {
		.data	= &val,
		.maxlen	= sizeof(val),
		.mode	= table->mode,
		.extra1	= SYSCTL_ZERO,
		.extra2	= SYSCTL_ONE,
	};

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&bpf_stats_enabled_mutex);
	val = saved_val;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret && val != saved_val) {
		if (val)
			static_key_slow_inc(key);
		else
			static_key_slow_dec(key);
		saved_val = val;
	}
	mutex_unlock(&bpf_stats_enabled_mutex);
	return ret;
}

void __weak unpriv_ebpf_notify(int new_state)
{
}

static int bpf_unpriv_handler(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, unpriv_enable = *(int *)table->data;
	bool locked_state = unpriv_enable == 1;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &unpriv_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (locked_state && unpriv_enable != 1)
			return -EPERM;
		*(int *)table->data = unpriv_enable;
	}

	unpriv_ebpf_notify(unpriv_enable);

	return ret;
}
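
/* Semantics of the two sysctls registered below, for reference (values as
 * enforced by the handlers above):
 *
 *   kernel.unprivileged_bpf_disabled
 *     0 - unprivileged bpf() syscall usage is allowed
 *     1 - disabled, and the setting is locked: bpf_unpriv_handler() rejects
 *         any other value until reboot
 *     2 - disabled, but an admin may still switch back to 0 or 1 (the
 *         default when CONFIG_BPF_UNPRIV_DEFAULT_OFF is set; see the
 *         initializer of sysctl_unprivileged_bpf_disabled)
 *
 *   kernel.bpf_stats_enabled
 *     0/1 - toggles the same static key that BPF_ENABLE_STATS pins via an
 *           fd, e.g. "sysctl -w kernel.bpf_stats_enabled=1" while profiling.
 */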

static struct ctl_table bpf_syscall_table[] = {
	{
		.procname	= "unprivileged_bpf_disabled",
		.data		= &sysctl_unprivileged_bpf_disabled,
		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
		.mode		= 0644,
		.proc_handler	= bpf_unpriv_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "bpf_stats_enabled",
		.data		= &bpf_stats_enabled_key.key,
		.maxlen		= sizeof(bpf_stats_enabled_key),
		.mode		= 0644,
		.proc_handler	= bpf_stats_handler,
	},
	{ }
};

static int __init bpf_syscall_sysctl_init(void)
{
	register_sysctl_init("kernel", bpf_syscall_table);
	return 0;
}
late_initcall(bpf_syscall_sysctl_init);
#endif /* CONFIG_SYSCTL */