1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 3 */ 4 #include <linux/bpf.h> 5 #include <linux/bpf-cgroup.h> 6 #include <linux/bpf_trace.h> 7 #include <linux/bpf_lirc.h> 8 #include <linux/bpf_verifier.h> 9 #include <linux/bsearch.h> 10 #include <linux/btf.h> 11 #include <linux/syscalls.h> 12 #include <linux/slab.h> 13 #include <linux/sched/signal.h> 14 #include <linux/vmalloc.h> 15 #include <linux/mmzone.h> 16 #include <linux/anon_inodes.h> 17 #include <linux/fdtable.h> 18 #include <linux/file.h> 19 #include <linux/fs.h> 20 #include <linux/license.h> 21 #include <linux/filter.h> 22 #include <linux/kernel.h> 23 #include <linux/idr.h> 24 #include <linux/cred.h> 25 #include <linux/timekeeping.h> 26 #include <linux/ctype.h> 27 #include <linux/nospec.h> 28 #include <linux/audit.h> 29 #include <uapi/linux/btf.h> 30 #include <linux/pgtable.h> 31 #include <linux/bpf_lsm.h> 32 #include <linux/poll.h> 33 #include <linux/sort.h> 34 #include <linux/bpf-netns.h> 35 #include <linux/rcupdate_trace.h> 36 #include <linux/memcontrol.h> 37 #include <linux/trace_events.h> 38 #include <net/netfilter/nf_bpf_link.h> 39 40 #include <net/tcx.h> 41 42 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ 43 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ 44 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 45 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) 46 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) 47 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ 48 IS_FD_HASH(map)) 49 50 #define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) 51 52 DEFINE_PER_CPU(int, bpf_prog_active); 53 static DEFINE_IDR(prog_idr); 54 static DEFINE_SPINLOCK(prog_idr_lock); 55 static DEFINE_IDR(map_idr); 56 static DEFINE_SPINLOCK(map_idr_lock); 57 static DEFINE_IDR(link_idr); 58 static DEFINE_SPINLOCK(link_idr_lock); 59 60 int sysctl_unprivileged_bpf_disabled __read_mostly = 61 IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0; 62 63 static const struct bpf_map_ops * const bpf_map_types[] = { 64 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 65 #define BPF_MAP_TYPE(_id, _ops) \ 66 [_id] = &_ops, 67 #define BPF_LINK_TYPE(_id, _name) 68 #include <linux/bpf_types.h> 69 #undef BPF_PROG_TYPE 70 #undef BPF_MAP_TYPE 71 #undef BPF_LINK_TYPE 72 }; 73 74 /* 75 * If we're handed a bigger struct than we know of, ensure all the unknown bits 76 * are 0 - i.e. new user-space does not rely on any kernel feature extensions 77 * we don't know about yet. 78 * 79 * There is a ToCToU between this function call and the following 80 * copy_from_user() call. However, this is not a concern since this function is 81 * meant to be a future-proofing of bits. 82 */ 83 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, 84 size_t expected_size, 85 size_t actual_size) 86 { 87 int res; 88 89 if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ 90 return -E2BIG; 91 92 if (actual_size <= expected_size) 93 return 0; 94 95 if (uaddr.is_kernel) 96 res = memchr_inv(uaddr.kernel + expected_size, 0, 97 actual_size - expected_size) == NULL; 98 else 99 res = check_zeroed_user(uaddr.user + expected_size, 100 actual_size - expected_size); 101 if (res < 0) 102 return res; 103 return res ? 
0 : -E2BIG; 104 } 105 106 const struct bpf_map_ops bpf_map_offload_ops = { 107 .map_meta_equal = bpf_map_meta_equal, 108 .map_alloc = bpf_map_offload_map_alloc, 109 .map_free = bpf_map_offload_map_free, 110 .map_check_btf = map_check_no_btf, 111 .map_mem_usage = bpf_map_offload_map_mem_usage, 112 }; 113 114 static void bpf_map_write_active_inc(struct bpf_map *map) 115 { 116 atomic64_inc(&map->writecnt); 117 } 118 119 static void bpf_map_write_active_dec(struct bpf_map *map) 120 { 121 atomic64_dec(&map->writecnt); 122 } 123 124 bool bpf_map_write_active(const struct bpf_map *map) 125 { 126 return atomic64_read(&map->writecnt) != 0; 127 } 128 129 static u32 bpf_map_value_size(const struct bpf_map *map) 130 { 131 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 132 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || 133 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || 134 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 135 return round_up(map->value_size, 8) * num_possible_cpus(); 136 else if (IS_FD_MAP(map)) 137 return sizeof(u32); 138 else 139 return map->value_size; 140 } 141 142 static void maybe_wait_bpf_programs(struct bpf_map *map) 143 { 144 /* Wait for any running BPF programs to complete so that 145 * userspace, when we return to it, knows that all programs 146 * that could be running use the new map value. 147 */ 148 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || 149 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) 150 synchronize_rcu(); 151 } 152 153 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, 154 void *key, void *value, __u64 flags) 155 { 156 int err; 157 158 /* Need to create a kthread, thus must support schedule */ 159 if (bpf_map_is_offloaded(map)) { 160 return bpf_map_offload_update_elem(map, key, value, flags); 161 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || 162 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 163 return map->ops->map_update_elem(map, key, value, flags); 164 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || 165 map->map_type == BPF_MAP_TYPE_SOCKMAP) { 166 return sock_map_update_elem_sys(map, key, value, flags); 167 } else if (IS_FD_PROG_ARRAY(map)) { 168 return bpf_fd_array_map_update_elem(map, map_file, key, value, 169 flags); 170 } 171 172 bpf_disable_instrumentation(); 173 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 174 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 175 err = bpf_percpu_hash_update(map, key, value, flags); 176 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 177 err = bpf_percpu_array_update(map, key, value, flags); 178 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 179 err = bpf_percpu_cgroup_storage_update(map, key, value, 180 flags); 181 } else if (IS_FD_ARRAY(map)) { 182 rcu_read_lock(); 183 err = bpf_fd_array_map_update_elem(map, map_file, key, value, 184 flags); 185 rcu_read_unlock(); 186 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { 187 rcu_read_lock(); 188 err = bpf_fd_htab_map_update_elem(map, map_file, key, value, 189 flags); 190 rcu_read_unlock(); 191 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 192 /* rcu_read_lock() is not needed */ 193 err = bpf_fd_reuseport_array_update_elem(map, key, value, 194 flags); 195 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 196 map->map_type == BPF_MAP_TYPE_STACK || 197 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 198 err = map->ops->map_push_elem(map, value, flags); 199 } else { 200 rcu_read_lock(); 201 err = map->ops->map_update_elem(map, key, value, flags); 202 rcu_read_unlock(); 203 } 204 
bpf_enable_instrumentation(); 205 maybe_wait_bpf_programs(map); 206 207 return err; 208 } 209 210 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, 211 __u64 flags) 212 { 213 void *ptr; 214 int err; 215 216 if (bpf_map_is_offloaded(map)) 217 return bpf_map_offload_lookup_elem(map, key, value); 218 219 bpf_disable_instrumentation(); 220 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 221 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 222 err = bpf_percpu_hash_copy(map, key, value); 223 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 224 err = bpf_percpu_array_copy(map, key, value); 225 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { 226 err = bpf_percpu_cgroup_storage_copy(map, key, value); 227 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { 228 err = bpf_stackmap_copy(map, key, value); 229 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { 230 err = bpf_fd_array_map_lookup_elem(map, key, value); 231 } else if (IS_FD_HASH(map)) { 232 err = bpf_fd_htab_map_lookup_elem(map, key, value); 233 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { 234 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); 235 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || 236 map->map_type == BPF_MAP_TYPE_STACK || 237 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 238 err = map->ops->map_peek_elem(map, value); 239 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 240 /* struct_ops map requires directly updating "value" */ 241 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); 242 } else { 243 rcu_read_lock(); 244 if (map->ops->map_lookup_elem_sys_only) 245 ptr = map->ops->map_lookup_elem_sys_only(map, key); 246 else 247 ptr = map->ops->map_lookup_elem(map, key); 248 if (IS_ERR(ptr)) { 249 err = PTR_ERR(ptr); 250 } else if (!ptr) { 251 err = -ENOENT; 252 } else { 253 err = 0; 254 if (flags & BPF_F_LOCK) 255 /* lock 'ptr' and copy everything but lock */ 256 copy_map_value_locked(map, value, ptr, true); 257 else 258 copy_map_value(map, value, ptr); 259 /* mask lock and timer, since value wasn't zero inited */ 260 check_and_init_map_value(map, value); 261 } 262 rcu_read_unlock(); 263 } 264 265 bpf_enable_instrumentation(); 266 maybe_wait_bpf_programs(map); 267 268 return err; 269 } 270 271 /* Please, do not use this function outside from the map creation path 272 * (e.g. in map update path) without taking care of setting the active 273 * memory cgroup (see at bpf_map_kmalloc_node() for example). 274 */ 275 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) 276 { 277 /* We really just want to fail instead of triggering OOM killer 278 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 279 * which is used for lower order allocation requests. 280 * 281 * It has been observed that higher order allocation requests done by 282 * vmalloc with __GFP_NORETRY being set might fail due to not trying 283 * to reclaim memory from the page cache, thus we set 284 * __GFP_RETRY_MAYFAIL to avoid such situations. 
285 */ 286 287 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO); 288 unsigned int flags = 0; 289 unsigned long align = 1; 290 void *area; 291 292 if (size >= SIZE_MAX) 293 return NULL; 294 295 /* kmalloc()'ed memory can't be mmap()'ed */ 296 if (mmapable) { 297 BUG_ON(!PAGE_ALIGNED(size)); 298 align = SHMLBA; 299 flags = VM_USERMAP; 300 } else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 301 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, 302 numa_node); 303 if (area != NULL) 304 return area; 305 } 306 307 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 308 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL, 309 flags, numa_node, __builtin_return_address(0)); 310 } 311 312 void *bpf_map_area_alloc(u64 size, int numa_node) 313 { 314 return __bpf_map_area_alloc(size, numa_node, false); 315 } 316 317 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) 318 { 319 return __bpf_map_area_alloc(size, numa_node, true); 320 } 321 322 void bpf_map_area_free(void *area) 323 { 324 kvfree(area); 325 } 326 327 static u32 bpf_map_flags_retain_permanent(u32 flags) 328 { 329 /* Some map creation flags are not tied to the map object but 330 * rather to the map fd instead, so they have no meaning upon 331 * map object inspection since multiple file descriptors with 332 * different (access) properties can exist here. Thus, given 333 * this has zero meaning for the map itself, lets clear these 334 * from here. 335 */ 336 return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); 337 } 338 339 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) 340 { 341 map->map_type = attr->map_type; 342 map->key_size = attr->key_size; 343 map->value_size = attr->value_size; 344 map->max_entries = attr->max_entries; 345 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); 346 map->numa_node = bpf_map_attr_numa_node(attr); 347 map->map_extra = attr->map_extra; 348 } 349 350 static int bpf_map_alloc_id(struct bpf_map *map) 351 { 352 int id; 353 354 idr_preload(GFP_KERNEL); 355 spin_lock_bh(&map_idr_lock); 356 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); 357 if (id > 0) 358 map->id = id; 359 spin_unlock_bh(&map_idr_lock); 360 idr_preload_end(); 361 362 if (WARN_ON_ONCE(!id)) 363 return -ENOSPC; 364 365 return id > 0 ? 0 : id; 366 } 367 368 void bpf_map_free_id(struct bpf_map *map) 369 { 370 unsigned long flags; 371 372 /* Offloaded maps are removed from the IDR store when their device 373 * disappears - even if someone holds an fd to them they are unusable, 374 * the memory is gone, all ops will fail; they are simply waiting for 375 * refcnt to drop to be freed. 376 */ 377 if (!map->id) 378 return; 379 380 spin_lock_irqsave(&map_idr_lock, flags); 381 382 idr_remove(&map_idr, map->id); 383 map->id = 0; 384 385 spin_unlock_irqrestore(&map_idr_lock, flags); 386 } 387 388 #ifdef CONFIG_MEMCG_KMEM 389 static void bpf_map_save_memcg(struct bpf_map *map) 390 { 391 /* Currently if a map is created by a process belonging to the root 392 * memory cgroup, get_obj_cgroup_from_current() will return NULL. 393 * So we have to check map->objcg for being NULL each time it's 394 * being used. 
395 */ 396 if (memcg_bpf_enabled()) 397 map->objcg = get_obj_cgroup_from_current(); 398 } 399 400 static void bpf_map_release_memcg(struct bpf_map *map) 401 { 402 if (map->objcg) 403 obj_cgroup_put(map->objcg); 404 } 405 406 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map) 407 { 408 if (map->objcg) 409 return get_mem_cgroup_from_objcg(map->objcg); 410 411 return root_mem_cgroup; 412 } 413 414 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 415 int node) 416 { 417 struct mem_cgroup *memcg, *old_memcg; 418 void *ptr; 419 420 memcg = bpf_map_get_memcg(map); 421 old_memcg = set_active_memcg(memcg); 422 ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node); 423 set_active_memcg(old_memcg); 424 mem_cgroup_put(memcg); 425 426 return ptr; 427 } 428 429 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 430 { 431 struct mem_cgroup *memcg, *old_memcg; 432 void *ptr; 433 434 memcg = bpf_map_get_memcg(map); 435 old_memcg = set_active_memcg(memcg); 436 ptr = kzalloc(size, flags | __GFP_ACCOUNT); 437 set_active_memcg(old_memcg); 438 mem_cgroup_put(memcg); 439 440 return ptr; 441 } 442 443 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, 444 gfp_t flags) 445 { 446 struct mem_cgroup *memcg, *old_memcg; 447 void *ptr; 448 449 memcg = bpf_map_get_memcg(map); 450 old_memcg = set_active_memcg(memcg); 451 ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT); 452 set_active_memcg(old_memcg); 453 mem_cgroup_put(memcg); 454 455 return ptr; 456 } 457 458 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 459 size_t align, gfp_t flags) 460 { 461 struct mem_cgroup *memcg, *old_memcg; 462 void __percpu *ptr; 463 464 memcg = bpf_map_get_memcg(map); 465 old_memcg = set_active_memcg(memcg); 466 ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT); 467 set_active_memcg(old_memcg); 468 mem_cgroup_put(memcg); 469 470 return ptr; 471 } 472 473 #else 474 static void bpf_map_save_memcg(struct bpf_map *map) 475 { 476 } 477 478 static void bpf_map_release_memcg(struct bpf_map *map) 479 { 480 } 481 #endif 482 483 static int btf_field_cmp(const void *a, const void *b) 484 { 485 const struct btf_field *f1 = a, *f2 = b; 486 487 if (f1->offset < f2->offset) 488 return -1; 489 else if (f1->offset > f2->offset) 490 return 1; 491 return 0; 492 } 493 494 struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset, 495 u32 field_mask) 496 { 497 struct btf_field *field; 498 499 if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask)) 500 return NULL; 501 field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp); 502 if (!field || !(field->type & field_mask)) 503 return NULL; 504 return field; 505 } 506 507 void btf_record_free(struct btf_record *rec) 508 { 509 int i; 510 511 if (IS_ERR_OR_NULL(rec)) 512 return; 513 for (i = 0; i < rec->cnt; i++) { 514 switch (rec->fields[i].type) { 515 case BPF_KPTR_UNREF: 516 case BPF_KPTR_REF: 517 if (rec->fields[i].kptr.module) 518 module_put(rec->fields[i].kptr.module); 519 btf_put(rec->fields[i].kptr.btf); 520 break; 521 case BPF_LIST_HEAD: 522 case BPF_LIST_NODE: 523 case BPF_RB_ROOT: 524 case BPF_RB_NODE: 525 case BPF_SPIN_LOCK: 526 case BPF_TIMER: 527 case BPF_REFCOUNT: 528 /* Nothing to release */ 529 break; 530 default: 531 WARN_ON_ONCE(1); 532 continue; 533 } 534 } 535 kfree(rec); 536 } 537 538 void bpf_map_free_record(struct bpf_map *map) 539 { 540 btf_record_free(map->record); 541 map->record = NULL; 542 } 543 544 struct 
btf_record *btf_record_dup(const struct btf_record *rec) 545 { 546 const struct btf_field *fields; 547 struct btf_record *new_rec; 548 int ret, size, i; 549 550 if (IS_ERR_OR_NULL(rec)) 551 return NULL; 552 size = offsetof(struct btf_record, fields[rec->cnt]); 553 new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN); 554 if (!new_rec) 555 return ERR_PTR(-ENOMEM); 556 /* Do a deep copy of the btf_record */ 557 fields = rec->fields; 558 new_rec->cnt = 0; 559 for (i = 0; i < rec->cnt; i++) { 560 switch (fields[i].type) { 561 case BPF_KPTR_UNREF: 562 case BPF_KPTR_REF: 563 btf_get(fields[i].kptr.btf); 564 if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) { 565 ret = -ENXIO; 566 goto free; 567 } 568 break; 569 case BPF_LIST_HEAD: 570 case BPF_LIST_NODE: 571 case BPF_RB_ROOT: 572 case BPF_RB_NODE: 573 case BPF_SPIN_LOCK: 574 case BPF_TIMER: 575 case BPF_REFCOUNT: 576 /* Nothing to acquire */ 577 break; 578 default: 579 ret = -EFAULT; 580 WARN_ON_ONCE(1); 581 goto free; 582 } 583 new_rec->cnt++; 584 } 585 return new_rec; 586 free: 587 btf_record_free(new_rec); 588 return ERR_PTR(ret); 589 } 590 591 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b) 592 { 593 bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b); 594 int size; 595 596 if (!a_has_fields && !b_has_fields) 597 return true; 598 if (a_has_fields != b_has_fields) 599 return false; 600 if (rec_a->cnt != rec_b->cnt) 601 return false; 602 size = offsetof(struct btf_record, fields[rec_a->cnt]); 603 /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused 604 * members are zeroed out. So memcmp is safe to do without worrying 605 * about padding/unused fields. 606 * 607 * While spin_lock, timer, and kptr have no relation to map BTF, 608 * list_head metadata is specific to map BTF, the btf and value_rec 609 * members in particular. btf is the map BTF, while value_rec points to 610 * btf_record in that map BTF. 611 * 612 * So while by default, we don't rely on the map BTF (which the records 613 * were parsed from) matching for both records, which is not backwards 614 * compatible, in case list_head is part of it, we implicitly rely on 615 * that by way of depending on memcmp succeeding for it. 
616 */ 617 return !memcmp(rec_a, rec_b, size); 618 } 619 620 void bpf_obj_free_timer(const struct btf_record *rec, void *obj) 621 { 622 if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER))) 623 return; 624 bpf_timer_cancel_and_free(obj + rec->timer_off); 625 } 626 627 extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); 628 629 void bpf_obj_free_fields(const struct btf_record *rec, void *obj) 630 { 631 const struct btf_field *fields; 632 int i; 633 634 if (IS_ERR_OR_NULL(rec)) 635 return; 636 fields = rec->fields; 637 for (i = 0; i < rec->cnt; i++) { 638 struct btf_struct_meta *pointee_struct_meta; 639 const struct btf_field *field = &fields[i]; 640 void *field_ptr = obj + field->offset; 641 void *xchgd_field; 642 643 switch (fields[i].type) { 644 case BPF_SPIN_LOCK: 645 break; 646 case BPF_TIMER: 647 bpf_timer_cancel_and_free(field_ptr); 648 break; 649 case BPF_KPTR_UNREF: 650 WRITE_ONCE(*(u64 *)field_ptr, 0); 651 break; 652 case BPF_KPTR_REF: 653 xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0); 654 if (!xchgd_field) 655 break; 656 657 if (!btf_is_kernel(field->kptr.btf)) { 658 pointee_struct_meta = btf_find_struct_meta(field->kptr.btf, 659 field->kptr.btf_id); 660 migrate_disable(); 661 __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ? 662 pointee_struct_meta->record : 663 NULL); 664 migrate_enable(); 665 } else { 666 field->kptr.dtor(xchgd_field); 667 } 668 break; 669 case BPF_LIST_HEAD: 670 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 671 continue; 672 bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off); 673 break; 674 case BPF_RB_ROOT: 675 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 676 continue; 677 bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off); 678 break; 679 case BPF_LIST_NODE: 680 case BPF_RB_NODE: 681 case BPF_REFCOUNT: 682 break; 683 default: 684 WARN_ON_ONCE(1); 685 continue; 686 } 687 } 688 } 689 690 /* called from workqueue */ 691 static void bpf_map_free_deferred(struct work_struct *work) 692 { 693 struct bpf_map *map = container_of(work, struct bpf_map, work); 694 struct btf_record *rec = map->record; 695 struct btf *btf = map->btf; 696 697 security_bpf_map_free(map); 698 bpf_map_release_memcg(map); 699 /* implementation dependent freeing */ 700 map->ops->map_free(map); 701 /* Delay freeing of btf_record for maps, as map_free 702 * callback usually needs access to them. It is better to do it here 703 * than require each callback to do the free itself manually. 704 * 705 * Note that the btf_record stashed in map->inner_map_meta->record was 706 * already freed using the map_free callback for map in map case which 707 * eventually calls bpf_map_free_meta, since inner_map_meta is only a 708 * template bpf_map struct used during verification. 709 */ 710 btf_record_free(rec); 711 /* Delay freeing of btf for maps, as map_free callback may need 712 * struct_meta info which will be freed with btf_put(). 713 */ 714 btf_put(btf); 715 } 716 717 static void bpf_map_put_uref(struct bpf_map *map) 718 { 719 if (atomic64_dec_and_test(&map->usercnt)) { 720 if (map->ops->map_release_uref) 721 map->ops->map_release_uref(map); 722 } 723 } 724 725 static void bpf_map_free_in_work(struct bpf_map *map) 726 { 727 INIT_WORK(&map->work, bpf_map_free_deferred); 728 /* Avoid spawning kworkers, since they all might contend 729 * for the same mutex like slab_mutex. 
730 */ 731 queue_work(system_unbound_wq, &map->work); 732 } 733 734 static void bpf_map_free_rcu_gp(struct rcu_head *rcu) 735 { 736 bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu)); 737 } 738 739 static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu) 740 { 741 if (rcu_trace_implies_rcu_gp()) 742 bpf_map_free_rcu_gp(rcu); 743 else 744 call_rcu(rcu, bpf_map_free_rcu_gp); 745 } 746 747 /* decrement map refcnt and schedule it for freeing via workqueue 748 * (underlying map implementation ops->map_free() might sleep) 749 */ 750 void bpf_map_put(struct bpf_map *map) 751 { 752 if (atomic64_dec_and_test(&map->refcnt)) { 753 /* bpf_map_free_id() must be called first */ 754 bpf_map_free_id(map); 755 756 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); 757 if (READ_ONCE(map->free_after_mult_rcu_gp)) 758 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); 759 else if (READ_ONCE(map->free_after_rcu_gp)) 760 call_rcu(&map->rcu, bpf_map_free_rcu_gp); 761 else 762 bpf_map_free_in_work(map); 763 } 764 } 765 EXPORT_SYMBOL_GPL(bpf_map_put); 766 767 void bpf_map_put_with_uref(struct bpf_map *map) 768 { 769 bpf_map_put_uref(map); 770 bpf_map_put(map); 771 } 772 773 static int bpf_map_release(struct inode *inode, struct file *filp) 774 { 775 struct bpf_map *map = filp->private_data; 776 777 if (map->ops->map_release) 778 map->ops->map_release(map, filp); 779 780 bpf_map_put_with_uref(map); 781 return 0; 782 } 783 784 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) 785 { 786 fmode_t mode = f.file->f_mode; 787 788 /* Our file permissions may have been overridden by global 789 * map permissions facing syscall side. 790 */ 791 if (READ_ONCE(map->frozen)) 792 mode &= ~FMODE_CAN_WRITE; 793 return mode; 794 } 795 796 #ifdef CONFIG_PROC_FS 797 /* Show the memory usage of a bpf map */ 798 static u64 bpf_map_memory_usage(const struct bpf_map *map) 799 { 800 return map->ops->map_mem_usage(map); 801 } 802 803 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) 804 { 805 struct bpf_map *map = filp->private_data; 806 u32 type = 0, jited = 0; 807 808 if (map_type_contains_progs(map)) { 809 spin_lock(&map->owner.lock); 810 type = map->owner.type; 811 jited = map->owner.jited; 812 spin_unlock(&map->owner.lock); 813 } 814 815 seq_printf(m, 816 "map_type:\t%u\n" 817 "key_size:\t%u\n" 818 "value_size:\t%u\n" 819 "max_entries:\t%u\n" 820 "map_flags:\t%#x\n" 821 "map_extra:\t%#llx\n" 822 "memlock:\t%llu\n" 823 "map_id:\t%u\n" 824 "frozen:\t%u\n", 825 map->map_type, 826 map->key_size, 827 map->value_size, 828 map->max_entries, 829 map->map_flags, 830 (unsigned long long)map->map_extra, 831 bpf_map_memory_usage(map), 832 map->id, 833 READ_ONCE(map->frozen)); 834 if (type) { 835 seq_printf(m, "owner_prog_type:\t%u\n", type); 836 seq_printf(m, "owner_jited:\t%u\n", jited); 837 } 838 } 839 #endif 840 841 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, 842 loff_t *ppos) 843 { 844 /* We need this handler such that alloc_file() enables 845 * f_mode with FMODE_CAN_READ. 846 */ 847 return -EINVAL; 848 } 849 850 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, 851 size_t siz, loff_t *ppos) 852 { 853 /* We need this handler such that alloc_file() enables 854 * f_mode with FMODE_CAN_WRITE. 
855 */ 856 return -EINVAL; 857 } 858 859 /* called for any extra memory-mapped regions (except initial) */ 860 static void bpf_map_mmap_open(struct vm_area_struct *vma) 861 { 862 struct bpf_map *map = vma->vm_file->private_data; 863 864 if (vma->vm_flags & VM_MAYWRITE) 865 bpf_map_write_active_inc(map); 866 } 867 868 /* called for all unmapped memory region (including initial) */ 869 static void bpf_map_mmap_close(struct vm_area_struct *vma) 870 { 871 struct bpf_map *map = vma->vm_file->private_data; 872 873 if (vma->vm_flags & VM_MAYWRITE) 874 bpf_map_write_active_dec(map); 875 } 876 877 static const struct vm_operations_struct bpf_map_default_vmops = { 878 .open = bpf_map_mmap_open, 879 .close = bpf_map_mmap_close, 880 }; 881 882 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) 883 { 884 struct bpf_map *map = filp->private_data; 885 int err = 0; 886 887 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) 888 return -ENOTSUPP; 889 890 if (!(vma->vm_flags & VM_SHARED)) 891 return -EINVAL; 892 893 mutex_lock(&map->freeze_mutex); 894 895 if (vma->vm_flags & VM_WRITE) { 896 if (map->frozen) { 897 err = -EPERM; 898 goto out; 899 } 900 /* map is meant to be read-only, so do not allow mapping as 901 * writable, because it's possible to leak a writable page 902 * reference and allows user-space to still modify it after 903 * freezing, while verifier will assume contents do not change 904 */ 905 if (map->map_flags & BPF_F_RDONLY_PROG) { 906 err = -EACCES; 907 goto out; 908 } 909 bpf_map_write_active_inc(map); 910 } 911 out: 912 mutex_unlock(&map->freeze_mutex); 913 if (err) 914 return err; 915 916 /* set default open/close callbacks */ 917 vma->vm_ops = &bpf_map_default_vmops; 918 vma->vm_private_data = map; 919 vm_flags_clear(vma, VM_MAYEXEC); 920 /* If mapping is read-only, then disallow potentially re-mapping with 921 * PROT_WRITE by dropping VM_MAYWRITE flag. 
This VM_MAYWRITE clearing 922 * means that as far as BPF map's memory-mapped VMAs are concerned, 923 * VM_WRITE and VM_MAYWRITE and equivalent, if one of them is set, 924 * both should be set, so we can forget about VM_MAYWRITE and always 925 * check just VM_WRITE 926 */ 927 if (!(vma->vm_flags & VM_WRITE)) 928 vm_flags_clear(vma, VM_MAYWRITE); 929 930 err = map->ops->map_mmap(map, vma); 931 if (err) { 932 if (vma->vm_flags & VM_WRITE) 933 bpf_map_write_active_dec(map); 934 } 935 936 return err; 937 } 938 939 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts) 940 { 941 struct bpf_map *map = filp->private_data; 942 943 if (map->ops->map_poll) 944 return map->ops->map_poll(map, filp, pts); 945 946 return EPOLLERR; 947 } 948 949 const struct file_operations bpf_map_fops = { 950 #ifdef CONFIG_PROC_FS 951 .show_fdinfo = bpf_map_show_fdinfo, 952 #endif 953 .release = bpf_map_release, 954 .read = bpf_dummy_read, 955 .write = bpf_dummy_write, 956 .mmap = bpf_map_mmap, 957 .poll = bpf_map_poll, 958 }; 959 960 int bpf_map_new_fd(struct bpf_map *map, int flags) 961 { 962 int ret; 963 964 ret = security_bpf_map(map, OPEN_FMODE(flags)); 965 if (ret < 0) 966 return ret; 967 968 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, 969 flags | O_CLOEXEC); 970 } 971 972 int bpf_get_file_flag(int flags) 973 { 974 if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) 975 return -EINVAL; 976 if (flags & BPF_F_RDONLY) 977 return O_RDONLY; 978 if (flags & BPF_F_WRONLY) 979 return O_WRONLY; 980 return O_RDWR; 981 } 982 983 /* helper macro to check that unused fields 'union bpf_attr' are zero */ 984 #define CHECK_ATTR(CMD) \ 985 memchr_inv((void *) &attr->CMD##_LAST_FIELD + \ 986 sizeof(attr->CMD##_LAST_FIELD), 0, \ 987 sizeof(*attr) - \ 988 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 989 sizeof(attr->CMD##_LAST_FIELD)) != NULL 990 991 /* dst and src must have at least "size" number of bytes. 992 * Return strlen on success and < 0 on error. 993 */ 994 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 995 { 996 const char *end = src + size; 997 const char *orig_src = src; 998 999 memset(dst, 0, size); 1000 /* Copy all isalnum(), '_' and '.' chars. */ 1001 while (src < end && *src) { 1002 if (!isalnum(*src) && 1003 *src != '_' && *src != '.') 1004 return -EINVAL; 1005 *dst++ = *src++; 1006 } 1007 1008 /* No '\0' found in "size" number of bytes */ 1009 if (src == end) 1010 return -EINVAL; 1011 1012 return src - orig_src; 1013 } 1014 1015 int map_check_no_btf(const struct bpf_map *map, 1016 const struct btf *btf, 1017 const struct btf_type *key_type, 1018 const struct btf_type *value_type) 1019 { 1020 return -ENOTSUPP; 1021 } 1022 1023 static int map_check_btf(struct bpf_map *map, const struct btf *btf, 1024 u32 btf_key_id, u32 btf_value_id) 1025 { 1026 const struct btf_type *key_type, *value_type; 1027 u32 key_size, value_size; 1028 int ret = 0; 1029 1030 /* Some maps allow key to be unspecified. 
*/ 1031 if (btf_key_id) { 1032 key_type = btf_type_id_size(btf, &btf_key_id, &key_size); 1033 if (!key_type || key_size != map->key_size) 1034 return -EINVAL; 1035 } else { 1036 key_type = btf_type_by_id(btf, 0); 1037 if (!map->ops->map_check_btf) 1038 return -EINVAL; 1039 } 1040 1041 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); 1042 if (!value_type || value_size != map->value_size) 1043 return -EINVAL; 1044 1045 map->record = btf_parse_fields(btf, value_type, 1046 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | 1047 BPF_RB_ROOT | BPF_REFCOUNT, 1048 map->value_size); 1049 if (!IS_ERR_OR_NULL(map->record)) { 1050 int i; 1051 1052 if (!bpf_capable()) { 1053 ret = -EPERM; 1054 goto free_map_tab; 1055 } 1056 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { 1057 ret = -EACCES; 1058 goto free_map_tab; 1059 } 1060 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { 1061 switch (map->record->field_mask & (1 << i)) { 1062 case 0: 1063 continue; 1064 case BPF_SPIN_LOCK: 1065 if (map->map_type != BPF_MAP_TYPE_HASH && 1066 map->map_type != BPF_MAP_TYPE_ARRAY && 1067 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 1068 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1069 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1070 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1071 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1072 ret = -EOPNOTSUPP; 1073 goto free_map_tab; 1074 } 1075 break; 1076 case BPF_TIMER: 1077 if (map->map_type != BPF_MAP_TYPE_HASH && 1078 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1079 map->map_type != BPF_MAP_TYPE_ARRAY) { 1080 ret = -EOPNOTSUPP; 1081 goto free_map_tab; 1082 } 1083 break; 1084 case BPF_KPTR_UNREF: 1085 case BPF_KPTR_REF: 1086 case BPF_REFCOUNT: 1087 if (map->map_type != BPF_MAP_TYPE_HASH && 1088 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 1089 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1090 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && 1091 map->map_type != BPF_MAP_TYPE_ARRAY && 1092 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 1093 map->map_type != BPF_MAP_TYPE_SK_STORAGE && 1094 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1095 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1096 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1097 ret = -EOPNOTSUPP; 1098 goto free_map_tab; 1099 } 1100 break; 1101 case BPF_LIST_HEAD: 1102 case BPF_RB_ROOT: 1103 if (map->map_type != BPF_MAP_TYPE_HASH && 1104 map->map_type != BPF_MAP_TYPE_LRU_HASH && 1105 map->map_type != BPF_MAP_TYPE_ARRAY) { 1106 ret = -EOPNOTSUPP; 1107 goto free_map_tab; 1108 } 1109 break; 1110 default: 1111 /* Fail if map_type checks are missing for a field type */ 1112 ret = -EOPNOTSUPP; 1113 goto free_map_tab; 1114 } 1115 } 1116 } 1117 1118 ret = btf_check_and_fixup_fields(btf, map->record); 1119 if (ret < 0) 1120 goto free_map_tab; 1121 1122 if (map->ops->map_check_btf) { 1123 ret = map->ops->map_check_btf(map, btf, key_type, value_type); 1124 if (ret < 0) 1125 goto free_map_tab; 1126 } 1127 1128 return ret; 1129 free_map_tab: 1130 bpf_map_free_record(map); 1131 return ret; 1132 } 1133 1134 #define BPF_MAP_CREATE_LAST_FIELD map_extra 1135 /* called via syscall */ 1136 static int map_create(union bpf_attr *attr) 1137 { 1138 const struct bpf_map_ops *ops; 1139 int numa_node = bpf_map_attr_numa_node(attr); 1140 u32 map_type = attr->map_type; 1141 struct bpf_map *map; 1142 int f_flags; 1143 int err; 1144 1145 err = CHECK_ATTR(BPF_MAP_CREATE); 1146 if (err) 1147 return -EINVAL; 1148 1149 if (attr->btf_vmlinux_value_type_id) { 1150 if (attr->map_type != 
BPF_MAP_TYPE_STRUCT_OPS || 1151 attr->btf_key_type_id || attr->btf_value_type_id) 1152 return -EINVAL; 1153 } else if (attr->btf_key_type_id && !attr->btf_value_type_id) { 1154 return -EINVAL; 1155 } 1156 1157 if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER && 1158 attr->map_extra != 0) 1159 return -EINVAL; 1160 1161 f_flags = bpf_get_file_flag(attr->map_flags); 1162 if (f_flags < 0) 1163 return f_flags; 1164 1165 if (numa_node != NUMA_NO_NODE && 1166 ((unsigned int)numa_node >= nr_node_ids || 1167 !node_online(numa_node))) 1168 return -EINVAL; 1169 1170 /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ 1171 map_type = attr->map_type; 1172 if (map_type >= ARRAY_SIZE(bpf_map_types)) 1173 return -EINVAL; 1174 map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types)); 1175 ops = bpf_map_types[map_type]; 1176 if (!ops) 1177 return -EINVAL; 1178 1179 if (ops->map_alloc_check) { 1180 err = ops->map_alloc_check(attr); 1181 if (err) 1182 return err; 1183 } 1184 if (attr->map_ifindex) 1185 ops = &bpf_map_offload_ops; 1186 if (!ops->map_mem_usage) 1187 return -EINVAL; 1188 1189 /* Intent here is for unprivileged_bpf_disabled to block BPF map 1190 * creation for unprivileged users; other actions depend 1191 * on fd availability and access to bpffs, so are dependent on 1192 * object creation success. Even with unprivileged BPF disabled, 1193 * capability checks are still carried out. 1194 */ 1195 if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) 1196 return -EPERM; 1197 1198 /* check privileged map type permissions */ 1199 switch (map_type) { 1200 case BPF_MAP_TYPE_ARRAY: 1201 case BPF_MAP_TYPE_PERCPU_ARRAY: 1202 case BPF_MAP_TYPE_PROG_ARRAY: 1203 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 1204 case BPF_MAP_TYPE_CGROUP_ARRAY: 1205 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 1206 case BPF_MAP_TYPE_HASH: 1207 case BPF_MAP_TYPE_PERCPU_HASH: 1208 case BPF_MAP_TYPE_HASH_OF_MAPS: 1209 case BPF_MAP_TYPE_RINGBUF: 1210 case BPF_MAP_TYPE_USER_RINGBUF: 1211 case BPF_MAP_TYPE_CGROUP_STORAGE: 1212 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 1213 /* unprivileged */ 1214 break; 1215 case BPF_MAP_TYPE_SK_STORAGE: 1216 case BPF_MAP_TYPE_INODE_STORAGE: 1217 case BPF_MAP_TYPE_TASK_STORAGE: 1218 case BPF_MAP_TYPE_CGRP_STORAGE: 1219 case BPF_MAP_TYPE_BLOOM_FILTER: 1220 case BPF_MAP_TYPE_LPM_TRIE: 1221 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 1222 case BPF_MAP_TYPE_STACK_TRACE: 1223 case BPF_MAP_TYPE_QUEUE: 1224 case BPF_MAP_TYPE_STACK: 1225 case BPF_MAP_TYPE_LRU_HASH: 1226 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 1227 case BPF_MAP_TYPE_STRUCT_OPS: 1228 case BPF_MAP_TYPE_CPUMAP: 1229 if (!bpf_capable()) 1230 return -EPERM; 1231 break; 1232 case BPF_MAP_TYPE_SOCKMAP: 1233 case BPF_MAP_TYPE_SOCKHASH: 1234 case BPF_MAP_TYPE_DEVMAP: 1235 case BPF_MAP_TYPE_DEVMAP_HASH: 1236 case BPF_MAP_TYPE_XSKMAP: 1237 if (!capable(CAP_NET_ADMIN)) 1238 return -EPERM; 1239 break; 1240 default: 1241 WARN(1, "unsupported map type %d", map_type); 1242 return -EPERM; 1243 } 1244 1245 map = ops->map_alloc(attr); 1246 if (IS_ERR(map)) 1247 return PTR_ERR(map); 1248 map->ops = ops; 1249 map->map_type = map_type; 1250 1251 err = bpf_obj_name_cpy(map->name, attr->map_name, 1252 sizeof(attr->map_name)); 1253 if (err < 0) 1254 goto free_map; 1255 1256 atomic64_set(&map->refcnt, 1); 1257 atomic64_set(&map->usercnt, 1); 1258 mutex_init(&map->freeze_mutex); 1259 spin_lock_init(&map->owner.lock); 1260 1261 if (attr->btf_key_type_id || attr->btf_value_type_id || 1262 /* Even the map's value is a kernel's struct, 1263 * the bpf_prog.o must have BTF to 
begin with 1264 * to figure out the corresponding kernel's 1265 * counter part. Thus, attr->btf_fd has 1266 * to be valid also. 1267 */ 1268 attr->btf_vmlinux_value_type_id) { 1269 struct btf *btf; 1270 1271 btf = btf_get_by_fd(attr->btf_fd); 1272 if (IS_ERR(btf)) { 1273 err = PTR_ERR(btf); 1274 goto free_map; 1275 } 1276 if (btf_is_kernel(btf)) { 1277 btf_put(btf); 1278 err = -EACCES; 1279 goto free_map; 1280 } 1281 map->btf = btf; 1282 1283 if (attr->btf_value_type_id) { 1284 err = map_check_btf(map, btf, attr->btf_key_type_id, 1285 attr->btf_value_type_id); 1286 if (err) 1287 goto free_map; 1288 } 1289 1290 map->btf_key_type_id = attr->btf_key_type_id; 1291 map->btf_value_type_id = attr->btf_value_type_id; 1292 map->btf_vmlinux_value_type_id = 1293 attr->btf_vmlinux_value_type_id; 1294 } 1295 1296 err = security_bpf_map_alloc(map); 1297 if (err) 1298 goto free_map; 1299 1300 err = bpf_map_alloc_id(map); 1301 if (err) 1302 goto free_map_sec; 1303 1304 bpf_map_save_memcg(map); 1305 1306 err = bpf_map_new_fd(map, f_flags); 1307 if (err < 0) { 1308 /* failed to allocate fd. 1309 * bpf_map_put_with_uref() is needed because the above 1310 * bpf_map_alloc_id() has published the map 1311 * to the userspace and the userspace may 1312 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 1313 */ 1314 bpf_map_put_with_uref(map); 1315 return err; 1316 } 1317 1318 return err; 1319 1320 free_map_sec: 1321 security_bpf_map_free(map); 1322 free_map: 1323 btf_put(map->btf); 1324 map->ops->map_free(map); 1325 return err; 1326 } 1327 1328 /* if error is returned, fd is released. 1329 * On success caller should complete fd access with matching fdput() 1330 */ 1331 struct bpf_map *__bpf_map_get(struct fd f) 1332 { 1333 if (!f.file) 1334 return ERR_PTR(-EBADF); 1335 if (f.file->f_op != &bpf_map_fops) { 1336 fdput(f); 1337 return ERR_PTR(-EINVAL); 1338 } 1339 1340 return f.file->private_data; 1341 } 1342 1343 void bpf_map_inc(struct bpf_map *map) 1344 { 1345 atomic64_inc(&map->refcnt); 1346 } 1347 EXPORT_SYMBOL_GPL(bpf_map_inc); 1348 1349 void bpf_map_inc_with_uref(struct bpf_map *map) 1350 { 1351 atomic64_inc(&map->refcnt); 1352 atomic64_inc(&map->usercnt); 1353 } 1354 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); 1355 1356 struct bpf_map *bpf_map_get(u32 ufd) 1357 { 1358 struct fd f = fdget(ufd); 1359 struct bpf_map *map; 1360 1361 map = __bpf_map_get(f); 1362 if (IS_ERR(map)) 1363 return map; 1364 1365 bpf_map_inc(map); 1366 fdput(f); 1367 1368 return map; 1369 } 1370 EXPORT_SYMBOL(bpf_map_get); 1371 1372 struct bpf_map *bpf_map_get_with_uref(u32 ufd) 1373 { 1374 struct fd f = fdget(ufd); 1375 struct bpf_map *map; 1376 1377 map = __bpf_map_get(f); 1378 if (IS_ERR(map)) 1379 return map; 1380 1381 bpf_map_inc_with_uref(map); 1382 fdput(f); 1383 1384 return map; 1385 } 1386 1387 /* map_idr_lock should have been held or the map should have been 1388 * protected by rcu read lock. 
1389 */ 1390 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 1391 { 1392 int refold; 1393 1394 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); 1395 if (!refold) 1396 return ERR_PTR(-ENOENT); 1397 if (uref) 1398 atomic64_inc(&map->usercnt); 1399 1400 return map; 1401 } 1402 1403 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) 1404 { 1405 spin_lock_bh(&map_idr_lock); 1406 map = __bpf_map_inc_not_zero(map, false); 1407 spin_unlock_bh(&map_idr_lock); 1408 1409 return map; 1410 } 1411 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 1412 1413 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 1414 { 1415 return -ENOTSUPP; 1416 } 1417 1418 static void *__bpf_copy_key(void __user *ukey, u64 key_size) 1419 { 1420 if (key_size) 1421 return vmemdup_user(ukey, key_size); 1422 1423 if (ukey) 1424 return ERR_PTR(-EINVAL); 1425 1426 return NULL; 1427 } 1428 1429 static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size) 1430 { 1431 if (key_size) 1432 return kvmemdup_bpfptr(ukey, key_size); 1433 1434 if (!bpfptr_is_null(ukey)) 1435 return ERR_PTR(-EINVAL); 1436 1437 return NULL; 1438 } 1439 1440 /* last field in 'union bpf_attr' used by this command */ 1441 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags 1442 1443 static int map_lookup_elem(union bpf_attr *attr) 1444 { 1445 void __user *ukey = u64_to_user_ptr(attr->key); 1446 void __user *uvalue = u64_to_user_ptr(attr->value); 1447 int ufd = attr->map_fd; 1448 struct bpf_map *map; 1449 void *key, *value; 1450 u32 value_size; 1451 struct fd f; 1452 int err; 1453 1454 if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) 1455 return -EINVAL; 1456 1457 if (attr->flags & ~BPF_F_LOCK) 1458 return -EINVAL; 1459 1460 f = fdget(ufd); 1461 map = __bpf_map_get(f); 1462 if (IS_ERR(map)) 1463 return PTR_ERR(map); 1464 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 1465 err = -EPERM; 1466 goto err_put; 1467 } 1468 1469 if ((attr->flags & BPF_F_LOCK) && 1470 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1471 err = -EINVAL; 1472 goto err_put; 1473 } 1474 1475 key = __bpf_copy_key(ukey, map->key_size); 1476 if (IS_ERR(key)) { 1477 err = PTR_ERR(key); 1478 goto err_put; 1479 } 1480 1481 value_size = bpf_map_value_size(map); 1482 1483 err = -ENOMEM; 1484 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1485 if (!value) 1486 goto free_key; 1487 1488 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 1489 if (copy_from_user(value, uvalue, value_size)) 1490 err = -EFAULT; 1491 else 1492 err = bpf_map_copy_value(map, key, value, attr->flags); 1493 goto free_value; 1494 } 1495 1496 err = bpf_map_copy_value(map, key, value, attr->flags); 1497 if (err) 1498 goto free_value; 1499 1500 err = -EFAULT; 1501 if (copy_to_user(uvalue, value, value_size) != 0) 1502 goto free_value; 1503 1504 err = 0; 1505 1506 free_value: 1507 kvfree(value); 1508 free_key: 1509 kvfree(key); 1510 err_put: 1511 fdput(f); 1512 return err; 1513 } 1514 1515 1516 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags 1517 1518 static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr) 1519 { 1520 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1521 bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel); 1522 int ufd = attr->map_fd; 1523 struct bpf_map *map; 1524 void *key, *value; 1525 u32 value_size; 1526 struct fd f; 1527 int err; 1528 1529 if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM)) 1530 return -EINVAL; 1531 1532 f = fdget(ufd); 1533 map = __bpf_map_get(f); 1534 if (IS_ERR(map)) 1535 return PTR_ERR(map); 1536 bpf_map_write_active_inc(map); 1537 if 
(!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1538 err = -EPERM; 1539 goto err_put; 1540 } 1541 1542 if ((attr->flags & BPF_F_LOCK) && 1543 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1544 err = -EINVAL; 1545 goto err_put; 1546 } 1547 1548 key = ___bpf_copy_key(ukey, map->key_size); 1549 if (IS_ERR(key)) { 1550 err = PTR_ERR(key); 1551 goto err_put; 1552 } 1553 1554 value_size = bpf_map_value_size(map); 1555 value = kvmemdup_bpfptr(uvalue, value_size); 1556 if (IS_ERR(value)) { 1557 err = PTR_ERR(value); 1558 goto free_key; 1559 } 1560 1561 err = bpf_map_update_value(map, f.file, key, value, attr->flags); 1562 1563 kvfree(value); 1564 free_key: 1565 kvfree(key); 1566 err_put: 1567 bpf_map_write_active_dec(map); 1568 fdput(f); 1569 return err; 1570 } 1571 1572 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key 1573 1574 static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) 1575 { 1576 bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); 1577 int ufd = attr->map_fd; 1578 struct bpf_map *map; 1579 struct fd f; 1580 void *key; 1581 int err; 1582 1583 if (CHECK_ATTR(BPF_MAP_DELETE_ELEM)) 1584 return -EINVAL; 1585 1586 f = fdget(ufd); 1587 map = __bpf_map_get(f); 1588 if (IS_ERR(map)) 1589 return PTR_ERR(map); 1590 bpf_map_write_active_inc(map); 1591 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1592 err = -EPERM; 1593 goto err_put; 1594 } 1595 1596 key = ___bpf_copy_key(ukey, map->key_size); 1597 if (IS_ERR(key)) { 1598 err = PTR_ERR(key); 1599 goto err_put; 1600 } 1601 1602 if (bpf_map_is_offloaded(map)) { 1603 err = bpf_map_offload_delete_elem(map, key); 1604 goto out; 1605 } else if (IS_FD_PROG_ARRAY(map) || 1606 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 1607 /* These maps require sleepable context */ 1608 err = map->ops->map_delete_elem(map, key); 1609 goto out; 1610 } 1611 1612 bpf_disable_instrumentation(); 1613 rcu_read_lock(); 1614 err = map->ops->map_delete_elem(map, key); 1615 rcu_read_unlock(); 1616 bpf_enable_instrumentation(); 1617 maybe_wait_bpf_programs(map); 1618 out: 1619 kvfree(key); 1620 err_put: 1621 bpf_map_write_active_dec(map); 1622 fdput(f); 1623 return err; 1624 } 1625 1626 /* last field in 'union bpf_attr' used by this command */ 1627 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key 1628 1629 static int map_get_next_key(union bpf_attr *attr) 1630 { 1631 void __user *ukey = u64_to_user_ptr(attr->key); 1632 void __user *unext_key = u64_to_user_ptr(attr->next_key); 1633 int ufd = attr->map_fd; 1634 struct bpf_map *map; 1635 void *key, *next_key; 1636 struct fd f; 1637 int err; 1638 1639 if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY)) 1640 return -EINVAL; 1641 1642 f = fdget(ufd); 1643 map = __bpf_map_get(f); 1644 if (IS_ERR(map)) 1645 return PTR_ERR(map); 1646 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 1647 err = -EPERM; 1648 goto err_put; 1649 } 1650 1651 if (ukey) { 1652 key = __bpf_copy_key(ukey, map->key_size); 1653 if (IS_ERR(key)) { 1654 err = PTR_ERR(key); 1655 goto err_put; 1656 } 1657 } else { 1658 key = NULL; 1659 } 1660 1661 err = -ENOMEM; 1662 next_key = kvmalloc(map->key_size, GFP_USER); 1663 if (!next_key) 1664 goto free_key; 1665 1666 if (bpf_map_is_offloaded(map)) { 1667 err = bpf_map_offload_get_next_key(map, key, next_key); 1668 goto out; 1669 } 1670 1671 rcu_read_lock(); 1672 err = map->ops->map_get_next_key(map, key, next_key); 1673 rcu_read_unlock(); 1674 out: 1675 if (err) 1676 goto free_next_key; 1677 1678 err = -EFAULT; 1679 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 1680 goto free_next_key; 1681 1682 
err = 0; 1683 1684 free_next_key: 1685 kvfree(next_key); 1686 free_key: 1687 kvfree(key); 1688 err_put: 1689 fdput(f); 1690 return err; 1691 } 1692 1693 int generic_map_delete_batch(struct bpf_map *map, 1694 const union bpf_attr *attr, 1695 union bpf_attr __user *uattr) 1696 { 1697 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1698 u32 cp, max_count; 1699 int err = 0; 1700 void *key; 1701 1702 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1703 return -EINVAL; 1704 1705 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1706 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1707 return -EINVAL; 1708 } 1709 1710 max_count = attr->batch.count; 1711 if (!max_count) 1712 return 0; 1713 1714 if (put_user(0, &uattr->batch.count)) 1715 return -EFAULT; 1716 1717 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1718 if (!key) 1719 return -ENOMEM; 1720 1721 for (cp = 0; cp < max_count; cp++) { 1722 err = -EFAULT; 1723 if (copy_from_user(key, keys + cp * map->key_size, 1724 map->key_size)) 1725 break; 1726 1727 if (bpf_map_is_offloaded(map)) { 1728 err = bpf_map_offload_delete_elem(map, key); 1729 break; 1730 } 1731 1732 bpf_disable_instrumentation(); 1733 rcu_read_lock(); 1734 err = map->ops->map_delete_elem(map, key); 1735 rcu_read_unlock(); 1736 bpf_enable_instrumentation(); 1737 if (err) 1738 break; 1739 cond_resched(); 1740 } 1741 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1742 err = -EFAULT; 1743 1744 kvfree(key); 1745 1746 maybe_wait_bpf_programs(map); 1747 return err; 1748 } 1749 1750 int generic_map_update_batch(struct bpf_map *map, struct file *map_file, 1751 const union bpf_attr *attr, 1752 union bpf_attr __user *uattr) 1753 { 1754 void __user *values = u64_to_user_ptr(attr->batch.values); 1755 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1756 u32 value_size, cp, max_count; 1757 void *key, *value; 1758 int err = 0; 1759 1760 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1761 return -EINVAL; 1762 1763 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1764 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1765 return -EINVAL; 1766 } 1767 1768 value_size = bpf_map_value_size(map); 1769 1770 max_count = attr->batch.count; 1771 if (!max_count) 1772 return 0; 1773 1774 if (put_user(0, &uattr->batch.count)) 1775 return -EFAULT; 1776 1777 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1778 if (!key) 1779 return -ENOMEM; 1780 1781 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1782 if (!value) { 1783 kvfree(key); 1784 return -ENOMEM; 1785 } 1786 1787 for (cp = 0; cp < max_count; cp++) { 1788 err = -EFAULT; 1789 if (copy_from_user(key, keys + cp * map->key_size, 1790 map->key_size) || 1791 copy_from_user(value, values + cp * value_size, value_size)) 1792 break; 1793 1794 err = bpf_map_update_value(map, map_file, key, value, 1795 attr->batch.elem_flags); 1796 1797 if (err) 1798 break; 1799 cond_resched(); 1800 } 1801 1802 if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) 1803 err = -EFAULT; 1804 1805 kvfree(value); 1806 kvfree(key); 1807 return err; 1808 } 1809 1810 int generic_map_lookup_batch(struct bpf_map *map, 1811 const union bpf_attr *attr, 1812 union bpf_attr __user *uattr) 1813 { 1814 void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch); 1815 void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); 1816 void __user *values = u64_to_user_ptr(attr->batch.values); 1817 void __user *keys = u64_to_user_ptr(attr->batch.keys); 1818 void *buf, *buf_prevkey, *prev_key, *key, *value; 1819 u32 value_size, cp, max_count; 1820 int err; 1821 
1822 if (attr->batch.elem_flags & ~BPF_F_LOCK) 1823 return -EINVAL; 1824 1825 if ((attr->batch.elem_flags & BPF_F_LOCK) && 1826 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) 1827 return -EINVAL; 1828 1829 value_size = bpf_map_value_size(map); 1830 1831 max_count = attr->batch.count; 1832 if (!max_count) 1833 return 0; 1834 1835 if (put_user(0, &uattr->batch.count)) 1836 return -EFAULT; 1837 1838 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); 1839 if (!buf_prevkey) 1840 return -ENOMEM; 1841 1842 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); 1843 if (!buf) { 1844 kvfree(buf_prevkey); 1845 return -ENOMEM; 1846 } 1847 1848 err = -EFAULT; 1849 prev_key = NULL; 1850 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) 1851 goto free_buf; 1852 key = buf; 1853 value = key + map->key_size; 1854 if (ubatch) 1855 prev_key = buf_prevkey; 1856 1857 for (cp = 0; cp < max_count;) { 1858 rcu_read_lock(); 1859 err = map->ops->map_get_next_key(map, prev_key, key); 1860 rcu_read_unlock(); 1861 if (err) 1862 break; 1863 err = bpf_map_copy_value(map, key, value, 1864 attr->batch.elem_flags); 1865 1866 if (err == -ENOENT) 1867 goto next_key; 1868 1869 if (err) 1870 goto free_buf; 1871 1872 if (copy_to_user(keys + cp * map->key_size, key, 1873 map->key_size)) { 1874 err = -EFAULT; 1875 goto free_buf; 1876 } 1877 if (copy_to_user(values + cp * value_size, value, value_size)) { 1878 err = -EFAULT; 1879 goto free_buf; 1880 } 1881 1882 cp++; 1883 next_key: 1884 if (!prev_key) 1885 prev_key = buf_prevkey; 1886 1887 swap(prev_key, key); 1888 cond_resched(); 1889 } 1890 1891 if (err == -EFAULT) 1892 goto free_buf; 1893 1894 if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) || 1895 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) 1896 err = -EFAULT; 1897 1898 free_buf: 1899 kvfree(buf_prevkey); 1900 kvfree(buf); 1901 return err; 1902 } 1903 1904 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags 1905 1906 static int map_lookup_and_delete_elem(union bpf_attr *attr) 1907 { 1908 void __user *ukey = u64_to_user_ptr(attr->key); 1909 void __user *uvalue = u64_to_user_ptr(attr->value); 1910 int ufd = attr->map_fd; 1911 struct bpf_map *map; 1912 void *key, *value; 1913 u32 value_size; 1914 struct fd f; 1915 int err; 1916 1917 if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) 1918 return -EINVAL; 1919 1920 if (attr->flags & ~BPF_F_LOCK) 1921 return -EINVAL; 1922 1923 f = fdget(ufd); 1924 map = __bpf_map_get(f); 1925 if (IS_ERR(map)) 1926 return PTR_ERR(map); 1927 bpf_map_write_active_inc(map); 1928 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || 1929 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 1930 err = -EPERM; 1931 goto err_put; 1932 } 1933 1934 if (attr->flags && 1935 (map->map_type == BPF_MAP_TYPE_QUEUE || 1936 map->map_type == BPF_MAP_TYPE_STACK)) { 1937 err = -EINVAL; 1938 goto err_put; 1939 } 1940 1941 if ((attr->flags & BPF_F_LOCK) && 1942 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 1943 err = -EINVAL; 1944 goto err_put; 1945 } 1946 1947 key = __bpf_copy_key(ukey, map->key_size); 1948 if (IS_ERR(key)) { 1949 err = PTR_ERR(key); 1950 goto err_put; 1951 } 1952 1953 value_size = bpf_map_value_size(map); 1954 1955 err = -ENOMEM; 1956 value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN); 1957 if (!value) 1958 goto free_key; 1959 1960 err = -ENOTSUPP; 1961 if (map->map_type == BPF_MAP_TYPE_QUEUE || 1962 map->map_type == BPF_MAP_TYPE_STACK) { 1963 err = map->ops->map_pop_elem(map, value); 1964 } else if (map->map_type == 
BPF_MAP_TYPE_HASH || 1965 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 1966 map->map_type == BPF_MAP_TYPE_LRU_HASH || 1967 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 1968 if (!bpf_map_is_offloaded(map)) { 1969 bpf_disable_instrumentation(); 1970 rcu_read_lock(); 1971 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 1972 rcu_read_unlock(); 1973 bpf_enable_instrumentation(); 1974 } 1975 } 1976 1977 if (err) 1978 goto free_value; 1979 1980 if (copy_to_user(uvalue, value, value_size) != 0) { 1981 err = -EFAULT; 1982 goto free_value; 1983 } 1984 1985 err = 0; 1986 1987 free_value: 1988 kvfree(value); 1989 free_key: 1990 kvfree(key); 1991 err_put: 1992 bpf_map_write_active_dec(map); 1993 fdput(f); 1994 return err; 1995 } 1996 1997 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 1998 1999 static int map_freeze(const union bpf_attr *attr) 2000 { 2001 int err = 0, ufd = attr->map_fd; 2002 struct bpf_map *map; 2003 struct fd f; 2004 2005 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2006 return -EINVAL; 2007 2008 f = fdget(ufd); 2009 map = __bpf_map_get(f); 2010 if (IS_ERR(map)) 2011 return PTR_ERR(map); 2012 2013 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) { 2014 fdput(f); 2015 return -ENOTSUPP; 2016 } 2017 2018 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 2019 fdput(f); 2020 return -EPERM; 2021 } 2022 2023 mutex_lock(&map->freeze_mutex); 2024 if (bpf_map_write_active(map)) { 2025 err = -EBUSY; 2026 goto err_put; 2027 } 2028 if (READ_ONCE(map->frozen)) { 2029 err = -EBUSY; 2030 goto err_put; 2031 } 2032 2033 WRITE_ONCE(map->frozen, true); 2034 err_put: 2035 mutex_unlock(&map->freeze_mutex); 2036 fdput(f); 2037 return err; 2038 } 2039 2040 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2041 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2042 [_id] = & _name ## _prog_ops, 2043 #define BPF_MAP_TYPE(_id, _ops) 2044 #define BPF_LINK_TYPE(_id, _name) 2045 #include <linux/bpf_types.h> 2046 #undef BPF_PROG_TYPE 2047 #undef BPF_MAP_TYPE 2048 #undef BPF_LINK_TYPE 2049 }; 2050 2051 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2052 { 2053 const struct bpf_prog_ops *ops; 2054 2055 if (type >= ARRAY_SIZE(bpf_prog_types)) 2056 return -EINVAL; 2057 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2058 ops = bpf_prog_types[type]; 2059 if (!ops) 2060 return -EINVAL; 2061 2062 if (!bpf_prog_is_offloaded(prog->aux)) 2063 prog->aux->ops = ops; 2064 else 2065 prog->aux->ops = &bpf_offload_prog_ops; 2066 prog->type = type; 2067 return 0; 2068 } 2069 2070 enum bpf_audit { 2071 BPF_AUDIT_LOAD, 2072 BPF_AUDIT_UNLOAD, 2073 BPF_AUDIT_MAX, 2074 }; 2075 2076 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2077 [BPF_AUDIT_LOAD] = "LOAD", 2078 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2079 }; 2080 2081 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2082 { 2083 struct audit_context *ctx = NULL; 2084 struct audit_buffer *ab; 2085 2086 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2087 return; 2088 if (audit_enabled == AUDIT_OFF) 2089 return; 2090 if (!in_irq() && !irqs_disabled()) 2091 ctx = audit_context(); 2092 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2093 if (unlikely(!ab)) 2094 return; 2095 audit_log_format(ab, "prog-id=%u op=%s", 2096 prog->aux->id, bpf_audit_str[op]); 2097 audit_log_end(ab); 2098 } 2099 2100 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2101 { 2102 int id; 2103 2104 idr_preload(GFP_KERNEL); 2105 spin_lock_bh(&prog_idr_lock); 2106 id = 
idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2107 if (id > 0) 2108 prog->aux->id = id; 2109 spin_unlock_bh(&prog_idr_lock); 2110 idr_preload_end(); 2111 2112 /* id is in [1, INT_MAX) */ 2113 if (WARN_ON_ONCE(!id)) 2114 return -ENOSPC; 2115 2116 return id > 0 ? 0 : id; 2117 } 2118 2119 void bpf_prog_free_id(struct bpf_prog *prog) 2120 { 2121 unsigned long flags; 2122 2123 /* cBPF to eBPF migrations are currently not in the idr store. 2124 * Offloaded programs are removed from the store when their device 2125 * disappears - even if someone grabs an fd to them they are unusable, 2126 * simply waiting for refcnt to drop to be freed. 2127 */ 2128 if (!prog->aux->id) 2129 return; 2130 2131 spin_lock_irqsave(&prog_idr_lock, flags); 2132 idr_remove(&prog_idr, prog->aux->id); 2133 prog->aux->id = 0; 2134 spin_unlock_irqrestore(&prog_idr_lock, flags); 2135 } 2136 2137 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2138 { 2139 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2140 2141 kvfree(aux->func_info); 2142 kfree(aux->func_info_aux); 2143 free_uid(aux->user); 2144 security_bpf_prog_free(aux); 2145 bpf_prog_free(aux->prog); 2146 } 2147 2148 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2149 { 2150 bpf_prog_kallsyms_del_all(prog); 2151 btf_put(prog->aux->btf); 2152 module_put(prog->aux->mod); 2153 kvfree(prog->aux->jited_linfo); 2154 kvfree(prog->aux->linfo); 2155 kfree(prog->aux->kfunc_tab); 2156 if (prog->aux->attach_btf) 2157 btf_put(prog->aux->attach_btf); 2158 2159 if (deferred) { 2160 if (prog->aux->sleepable) 2161 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2162 else 2163 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2164 } else { 2165 __bpf_prog_put_rcu(&prog->aux->rcu); 2166 } 2167 } 2168 2169 static void bpf_prog_put_deferred(struct work_struct *work) 2170 { 2171 struct bpf_prog_aux *aux; 2172 struct bpf_prog *prog; 2173 2174 aux = container_of(work, struct bpf_prog_aux, work); 2175 prog = aux->prog; 2176 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2177 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2178 bpf_prog_free_id(prog); 2179 __bpf_prog_put_noref(prog, true); 2180 } 2181 2182 static void __bpf_prog_put(struct bpf_prog *prog) 2183 { 2184 struct bpf_prog_aux *aux = prog->aux; 2185 2186 if (atomic64_dec_and_test(&aux->refcnt)) { 2187 if (in_irq() || irqs_disabled()) { 2188 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2189 schedule_work(&aux->work); 2190 } else { 2191 bpf_prog_put_deferred(&aux->work); 2192 } 2193 } 2194 } 2195 2196 void bpf_prog_put(struct bpf_prog *prog) 2197 { 2198 __bpf_prog_put(prog); 2199 } 2200 EXPORT_SYMBOL_GPL(bpf_prog_put); 2201 2202 static int bpf_prog_release(struct inode *inode, struct file *filp) 2203 { 2204 struct bpf_prog *prog = filp->private_data; 2205 2206 bpf_prog_put(prog); 2207 return 0; 2208 } 2209 2210 struct bpf_prog_kstats { 2211 u64 nsecs; 2212 u64 cnt; 2213 u64 misses; 2214 }; 2215 2216 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2217 { 2218 struct bpf_prog_stats *stats; 2219 unsigned int flags; 2220 2221 stats = this_cpu_ptr(prog->stats); 2222 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2223 u64_stats_inc(&stats->misses); 2224 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2225 } 2226 2227 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2228 struct bpf_prog_kstats *stats) 2229 { 2230 u64 nsecs = 0, cnt = 0, misses = 0; 2231 int cpu; 2232 2233 for_each_possible_cpu(cpu) { 2234 const struct 
bpf_prog_stats *st; 2235 unsigned int start; 2236 u64 tnsecs, tcnt, tmisses; 2237 2238 st = per_cpu_ptr(prog->stats, cpu); 2239 do { 2240 start = u64_stats_fetch_begin(&st->syncp); 2241 tnsecs = u64_stats_read(&st->nsecs); 2242 tcnt = u64_stats_read(&st->cnt); 2243 tmisses = u64_stats_read(&st->misses); 2244 } while (u64_stats_fetch_retry(&st->syncp, start)); 2245 nsecs += tnsecs; 2246 cnt += tcnt; 2247 misses += tmisses; 2248 } 2249 stats->nsecs = nsecs; 2250 stats->cnt = cnt; 2251 stats->misses = misses; 2252 } 2253 2254 #ifdef CONFIG_PROC_FS 2255 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2256 { 2257 const struct bpf_prog *prog = filp->private_data; 2258 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2259 struct bpf_prog_kstats stats; 2260 2261 bpf_prog_get_stats(prog, &stats); 2262 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2263 seq_printf(m, 2264 "prog_type:\t%u\n" 2265 "prog_jited:\t%u\n" 2266 "prog_tag:\t%s\n" 2267 "memlock:\t%llu\n" 2268 "prog_id:\t%u\n" 2269 "run_time_ns:\t%llu\n" 2270 "run_cnt:\t%llu\n" 2271 "recursion_misses:\t%llu\n" 2272 "verified_insns:\t%u\n", 2273 prog->type, 2274 prog->jited, 2275 prog_tag, 2276 prog->pages * 1ULL << PAGE_SHIFT, 2277 prog->aux->id, 2278 stats.nsecs, 2279 stats.cnt, 2280 stats.misses, 2281 prog->aux->verified_insns); 2282 } 2283 #endif 2284 2285 const struct file_operations bpf_prog_fops = { 2286 #ifdef CONFIG_PROC_FS 2287 .show_fdinfo = bpf_prog_show_fdinfo, 2288 #endif 2289 .release = bpf_prog_release, 2290 .read = bpf_dummy_read, 2291 .write = bpf_dummy_write, 2292 }; 2293 2294 int bpf_prog_new_fd(struct bpf_prog *prog) 2295 { 2296 int ret; 2297 2298 ret = security_bpf_prog(prog); 2299 if (ret < 0) 2300 return ret; 2301 2302 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2303 O_RDWR | O_CLOEXEC); 2304 } 2305 2306 static struct bpf_prog *____bpf_prog_get(struct fd f) 2307 { 2308 if (!f.file) 2309 return ERR_PTR(-EBADF); 2310 if (f.file->f_op != &bpf_prog_fops) { 2311 fdput(f); 2312 return ERR_PTR(-EINVAL); 2313 } 2314 2315 return f.file->private_data; 2316 } 2317 2318 void bpf_prog_add(struct bpf_prog *prog, int i) 2319 { 2320 atomic64_add(i, &prog->aux->refcnt); 2321 } 2322 EXPORT_SYMBOL_GPL(bpf_prog_add); 2323 2324 void bpf_prog_sub(struct bpf_prog *prog, int i) 2325 { 2326 /* Only to be used for undoing previous bpf_prog_add() in some 2327 * error path. We still know that another entity in our call 2328 * path holds a reference to the program, thus atomic_sub() can 2329 * be safely used in such cases! 
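 * The WARN_ON() below flags the case where this subtraction would drop the
 * refcount all the way to zero, which must not happen while such a reference is still held.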
2330 */ 2331 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2332 } 2333 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2334 2335 void bpf_prog_inc(struct bpf_prog *prog) 2336 { 2337 atomic64_inc(&prog->aux->refcnt); 2338 } 2339 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2340 2341 /* prog_idr_lock should have been held */ 2342 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2343 { 2344 int refold; 2345 2346 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2347 2348 if (!refold) 2349 return ERR_PTR(-ENOENT); 2350 2351 return prog; 2352 } 2353 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2354 2355 bool bpf_prog_get_ok(struct bpf_prog *prog, 2356 enum bpf_prog_type *attach_type, bool attach_drv) 2357 { 2358 /* not an attachment, just a refcount inc, always allow */ 2359 if (!attach_type) 2360 return true; 2361 2362 if (prog->type != *attach_type) 2363 return false; 2364 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2365 return false; 2366 2367 return true; 2368 } 2369 2370 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2371 bool attach_drv) 2372 { 2373 struct fd f = fdget(ufd); 2374 struct bpf_prog *prog; 2375 2376 prog = ____bpf_prog_get(f); 2377 if (IS_ERR(prog)) 2378 return prog; 2379 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2380 prog = ERR_PTR(-EINVAL); 2381 goto out; 2382 } 2383 2384 bpf_prog_inc(prog); 2385 out: 2386 fdput(f); 2387 return prog; 2388 } 2389 2390 struct bpf_prog *bpf_prog_get(u32 ufd) 2391 { 2392 return __bpf_prog_get(ufd, NULL, false); 2393 } 2394 2395 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2396 bool attach_drv) 2397 { 2398 return __bpf_prog_get(ufd, &type, attach_drv); 2399 } 2400 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2401 2402 /* Initially all BPF programs could be loaded w/o specifying 2403 * expected_attach_type. Later for some of them specifying expected_attach_type 2404 * at load time became required so that program could be validated properly. 2405 * Programs of types that are allowed to be loaded both w/ and w/o (for 2406 * backward compatibility) expected_attach_type, should have the default attach 2407 * type assigned to expected_attach_type for the latter case, so that it can be 2408 * validated later at attach time. 2409 * 2410 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2411 * prog type requires it but has some attach types that have to be backward 2412 * compatible. 2413 */ 2414 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2415 { 2416 switch (attr->prog_type) { 2417 case BPF_PROG_TYPE_CGROUP_SOCK: 2418 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2419 * exist so checking for non-zero is the way to go here. 
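 * A zero expected_attach_type therefore means the type was left unspecified,
 * and the backward-compatible default below is filled in for such programs.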
2420 */ 2421 if (!attr->expected_attach_type) 2422 attr->expected_attach_type = 2423 BPF_CGROUP_INET_SOCK_CREATE; 2424 break; 2425 case BPF_PROG_TYPE_SK_REUSEPORT: 2426 if (!attr->expected_attach_type) 2427 attr->expected_attach_type = 2428 BPF_SK_REUSEPORT_SELECT; 2429 break; 2430 } 2431 } 2432 2433 static int 2434 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2435 enum bpf_attach_type expected_attach_type, 2436 struct btf *attach_btf, u32 btf_id, 2437 struct bpf_prog *dst_prog) 2438 { 2439 if (btf_id) { 2440 if (btf_id > BTF_MAX_TYPE) 2441 return -EINVAL; 2442 2443 if (!attach_btf && !dst_prog) 2444 return -EINVAL; 2445 2446 switch (prog_type) { 2447 case BPF_PROG_TYPE_TRACING: 2448 case BPF_PROG_TYPE_LSM: 2449 case BPF_PROG_TYPE_STRUCT_OPS: 2450 case BPF_PROG_TYPE_EXT: 2451 break; 2452 default: 2453 return -EINVAL; 2454 } 2455 } 2456 2457 if (attach_btf && (!btf_id || dst_prog)) 2458 return -EINVAL; 2459 2460 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2461 prog_type != BPF_PROG_TYPE_EXT) 2462 return -EINVAL; 2463 2464 switch (prog_type) { 2465 case BPF_PROG_TYPE_CGROUP_SOCK: 2466 switch (expected_attach_type) { 2467 case BPF_CGROUP_INET_SOCK_CREATE: 2468 case BPF_CGROUP_INET_SOCK_RELEASE: 2469 case BPF_CGROUP_INET4_POST_BIND: 2470 case BPF_CGROUP_INET6_POST_BIND: 2471 return 0; 2472 default: 2473 return -EINVAL; 2474 } 2475 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2476 switch (expected_attach_type) { 2477 case BPF_CGROUP_INET4_BIND: 2478 case BPF_CGROUP_INET6_BIND: 2479 case BPF_CGROUP_INET4_CONNECT: 2480 case BPF_CGROUP_INET6_CONNECT: 2481 case BPF_CGROUP_INET4_GETPEERNAME: 2482 case BPF_CGROUP_INET6_GETPEERNAME: 2483 case BPF_CGROUP_INET4_GETSOCKNAME: 2484 case BPF_CGROUP_INET6_GETSOCKNAME: 2485 case BPF_CGROUP_UDP4_SENDMSG: 2486 case BPF_CGROUP_UDP6_SENDMSG: 2487 case BPF_CGROUP_UDP4_RECVMSG: 2488 case BPF_CGROUP_UDP6_RECVMSG: 2489 return 0; 2490 default: 2491 return -EINVAL; 2492 } 2493 case BPF_PROG_TYPE_CGROUP_SKB: 2494 switch (expected_attach_type) { 2495 case BPF_CGROUP_INET_INGRESS: 2496 case BPF_CGROUP_INET_EGRESS: 2497 return 0; 2498 default: 2499 return -EINVAL; 2500 } 2501 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2502 switch (expected_attach_type) { 2503 case BPF_CGROUP_SETSOCKOPT: 2504 case BPF_CGROUP_GETSOCKOPT: 2505 return 0; 2506 default: 2507 return -EINVAL; 2508 } 2509 case BPF_PROG_TYPE_SK_LOOKUP: 2510 if (expected_attach_type == BPF_SK_LOOKUP) 2511 return 0; 2512 return -EINVAL; 2513 case BPF_PROG_TYPE_SK_REUSEPORT: 2514 switch (expected_attach_type) { 2515 case BPF_SK_REUSEPORT_SELECT: 2516 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2517 return 0; 2518 default: 2519 return -EINVAL; 2520 } 2521 case BPF_PROG_TYPE_NETFILTER: 2522 if (expected_attach_type == BPF_NETFILTER) 2523 return 0; 2524 return -EINVAL; 2525 case BPF_PROG_TYPE_SYSCALL: 2526 case BPF_PROG_TYPE_EXT: 2527 if (expected_attach_type) 2528 return -EINVAL; 2529 fallthrough; 2530 default: 2531 return 0; 2532 } 2533 } 2534 2535 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2536 { 2537 switch (prog_type) { 2538 case BPF_PROG_TYPE_SCHED_CLS: 2539 case BPF_PROG_TYPE_SCHED_ACT: 2540 case BPF_PROG_TYPE_XDP: 2541 case BPF_PROG_TYPE_LWT_IN: 2542 case BPF_PROG_TYPE_LWT_OUT: 2543 case BPF_PROG_TYPE_LWT_XMIT: 2544 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2545 case BPF_PROG_TYPE_SK_SKB: 2546 case BPF_PROG_TYPE_SK_MSG: 2547 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2548 case BPF_PROG_TYPE_CGROUP_DEVICE: 2549 case BPF_PROG_TYPE_CGROUP_SOCK: 2550 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2551 case 
BPF_PROG_TYPE_CGROUP_SOCKOPT: 2552 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2553 case BPF_PROG_TYPE_SOCK_OPS: 2554 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2555 case BPF_PROG_TYPE_NETFILTER: 2556 return true; 2557 case BPF_PROG_TYPE_CGROUP_SKB: 2558 /* always unpriv */ 2559 case BPF_PROG_TYPE_SK_REUSEPORT: 2560 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2561 default: 2562 return false; 2563 } 2564 } 2565 2566 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2567 { 2568 switch (prog_type) { 2569 case BPF_PROG_TYPE_KPROBE: 2570 case BPF_PROG_TYPE_TRACEPOINT: 2571 case BPF_PROG_TYPE_PERF_EVENT: 2572 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2573 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2574 case BPF_PROG_TYPE_TRACING: 2575 case BPF_PROG_TYPE_LSM: 2576 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2577 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2578 return true; 2579 default: 2580 return false; 2581 } 2582 } 2583 2584 /* last field in 'union bpf_attr' used by this command */ 2585 #define BPF_PROG_LOAD_LAST_FIELD log_true_size 2586 2587 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2588 { 2589 enum bpf_prog_type type = attr->prog_type; 2590 struct bpf_prog *prog, *dst_prog = NULL; 2591 struct btf *attach_btf = NULL; 2592 int err; 2593 char license[128]; 2594 2595 if (CHECK_ATTR(BPF_PROG_LOAD)) 2596 return -EINVAL; 2597 2598 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2599 BPF_F_ANY_ALIGNMENT | 2600 BPF_F_TEST_STATE_FREQ | 2601 BPF_F_SLEEPABLE | 2602 BPF_F_TEST_RND_HI32 | 2603 BPF_F_XDP_HAS_FRAGS | 2604 BPF_F_XDP_DEV_BOUND_ONLY)) 2605 return -EINVAL; 2606 2607 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2608 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2609 !bpf_capable()) 2610 return -EPERM; 2611 2612 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2613 * creation for unprivileged users; other actions depend 2614 * on fd availability and access to bpffs, so are dependent on 2615 * object creation success. Even with unprivileged BPF disabled, 2616 * capability checks are still carried out for these 2617 * and other operations. 2618 */ 2619 if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) 2620 return -EPERM; 2621 2622 if (attr->insn_cnt == 0 || 2623 attr->insn_cnt > (bpf_capable() ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2624 return -E2BIG; 2625 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2626 type != BPF_PROG_TYPE_CGROUP_SKB && 2627 !bpf_capable()) 2628 return -EPERM; 2629 2630 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2631 return -EPERM; 2632 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2633 return -EPERM; 2634 2635 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2636 * or btf, we need to check which one it is 2637 */ 2638 if (attr->attach_prog_fd) { 2639 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2640 if (IS_ERR(dst_prog)) { 2641 dst_prog = NULL; 2642 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2643 if (IS_ERR(attach_btf)) 2644 return -EINVAL; 2645 if (!btf_is_kernel(attach_btf)) { 2646 /* attaching through specifying bpf_prog's BTF 2647 * objects directly might be supported eventually 2648 */ 2649 btf_put(attach_btf); 2650 return -ENOTSUPP; 2651 } 2652 } 2653 } else if (attr->attach_btf_id) { 2654 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2655 attach_btf = bpf_get_btf_vmlinux(); 2656 if (IS_ERR(attach_btf)) 2657 return PTR_ERR(attach_btf); 2658 if (!attach_btf) 2659 return -EINVAL; 2660 btf_get(attach_btf); 2661 } 2662 2663 bpf_prog_load_fixup_attach_type(attr); 2664 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2665 attach_btf, attr->attach_btf_id, 2666 dst_prog)) { 2667 if (dst_prog) 2668 bpf_prog_put(dst_prog); 2669 if (attach_btf) 2670 btf_put(attach_btf); 2671 return -EINVAL; 2672 } 2673 2674 /* plain bpf_prog allocation */ 2675 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2676 if (!prog) { 2677 if (dst_prog) 2678 bpf_prog_put(dst_prog); 2679 if (attach_btf) 2680 btf_put(attach_btf); 2681 return -ENOMEM; 2682 } 2683 2684 prog->expected_attach_type = attr->expected_attach_type; 2685 prog->aux->attach_btf = attach_btf; 2686 prog->aux->attach_btf_id = attr->attach_btf_id; 2687 prog->aux->dst_prog = dst_prog; 2688 prog->aux->dev_bound = !!attr->prog_ifindex; 2689 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2690 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2691 2692 err = security_bpf_prog_alloc(prog->aux); 2693 if (err) 2694 goto free_prog; 2695 2696 prog->aux->user = get_current_user(); 2697 prog->len = attr->insn_cnt; 2698 2699 err = -EFAULT; 2700 if (copy_from_bpfptr(prog->insns, 2701 make_bpfptr(attr->insns, uattr.is_kernel), 2702 bpf_prog_insn_size(prog)) != 0) 2703 goto free_prog_sec; 2704 /* copy eBPF program license from user space */ 2705 if (strncpy_from_bpfptr(license, 2706 make_bpfptr(attr->license, uattr.is_kernel), 2707 sizeof(license) - 1) < 0) 2708 goto free_prog_sec; 2709 license[sizeof(license) - 1] = 0; 2710 2711 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2712 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2713 2714 prog->orig_prog = NULL; 2715 prog->jited = 0; 2716 2717 atomic64_set(&prog->aux->refcnt, 1); 2718 2719 if (bpf_prog_is_dev_bound(prog->aux)) { 2720 err = bpf_prog_dev_bound_init(prog, attr); 2721 if (err) 2722 goto free_prog_sec; 2723 } 2724 2725 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2726 bpf_prog_is_dev_bound(dst_prog->aux)) { 2727 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2728 if (err) 2729 goto free_prog_sec; 2730 } 2731 2732 /* find program type: socket_filter vs tracing_filter */ 2733 err = find_prog_type(type, prog); 2734 if (err < 0) 2735 goto free_prog_sec; 2736 2737 prog->aux->load_time = ktime_get_boottime_ns(); 2738 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2739 sizeof(attr->prog_name)); 2740 if (err < 0) 2741 goto free_prog_sec; 2742 2743 /* run eBPF verifier */ 2744 err = bpf_check(&prog, attr, uattr, uattr_size); 2745 if (err < 0) 2746 goto free_used_maps; 2747 2748 prog = bpf_prog_select_runtime(prog, &err); 2749 if (err < 0) 2750 goto free_used_maps; 2751 2752 err = bpf_prog_alloc_id(prog); 2753 if (err) 2754 goto free_used_maps; 2755 2756 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2757 * effectively publicly exposed. However, retrieving via 2758 * bpf_prog_get_fd_by_id() will take another reference, 2759 * therefore it cannot be gone underneath us. 2760 * 2761 * Only for the time /after/ successful bpf_prog_new_fd() 2762 * and before returning to userspace, we might just hold 2763 * one reference and any parallel close on that fd could 2764 * rip everything out. Hence, below notifications must 2765 * happen before bpf_prog_new_fd(). 2766 * 2767 * Also, any failure handling from this point onwards must 2768 * be using bpf_prog_put() given the program is exposed. 2769 */ 2770 bpf_prog_kallsyms_add(prog); 2771 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2772 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2773 2774 err = bpf_prog_new_fd(prog); 2775 if (err < 0) 2776 bpf_prog_put(prog); 2777 return err; 2778 2779 free_used_maps: 2780 /* In case we have subprogs, we need to wait for a grace 2781 * period before we can tear down JIT memory since symbols 2782 * are already exposed under kallsyms. 2783 */ 2784 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2785 return err; 2786 free_prog_sec: 2787 free_uid(prog->aux->user); 2788 security_bpf_prog_free(prog->aux); 2789 free_prog: 2790 if (prog->aux->attach_btf) 2791 btf_put(prog->aux->attach_btf); 2792 bpf_prog_free(prog); 2793 return err; 2794 } 2795 2796 #define BPF_OBJ_LAST_FIELD path_fd 2797 2798 static int bpf_obj_pin(const union bpf_attr *attr) 2799 { 2800 int path_fd; 2801 2802 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 2803 return -EINVAL; 2804 2805 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2806 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2807 return -EINVAL; 2808 2809 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2810 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 2811 u64_to_user_ptr(attr->pathname)); 2812 } 2813 2814 static int bpf_obj_get(const union bpf_attr *attr) 2815 { 2816 int path_fd; 2817 2818 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2819 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 2820 return -EINVAL; 2821 2822 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2823 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2824 return -EINVAL; 2825 2826 path_fd = attr->file_flags & BPF_F_PATH_FD ? 
attr->path_fd : AT_FDCWD; 2827 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 2828 attr->file_flags); 2829 } 2830 2831 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2832 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2833 { 2834 WARN_ON(ops->dealloc && ops->dealloc_deferred); 2835 atomic64_set(&link->refcnt, 1); 2836 link->type = type; 2837 link->id = 0; 2838 link->ops = ops; 2839 link->prog = prog; 2840 } 2841 2842 static void bpf_link_free_id(int id) 2843 { 2844 if (!id) 2845 return; 2846 2847 spin_lock_bh(&link_idr_lock); 2848 idr_remove(&link_idr, id); 2849 spin_unlock_bh(&link_idr_lock); 2850 } 2851 2852 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2853 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2854 * anon_inode's release() call. This helper marks bpf_link as 2855 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 2856 * is not decremented, it's the responsibility of a calling code that failed 2857 * to complete bpf_link initialization. 2858 * This helper eventually calls link's dealloc callback, but does not call 2859 * link's release callback. 2860 */ 2861 void bpf_link_cleanup(struct bpf_link_primer *primer) 2862 { 2863 primer->link->prog = NULL; 2864 bpf_link_free_id(primer->id); 2865 fput(primer->file); 2866 put_unused_fd(primer->fd); 2867 } 2868 2869 void bpf_link_inc(struct bpf_link *link) 2870 { 2871 atomic64_inc(&link->refcnt); 2872 } 2873 2874 static void bpf_link_dealloc(struct bpf_link *link) 2875 { 2876 /* now that we know that bpf_link itself can't be reached, put underlying BPF program */ 2877 if (link->prog) 2878 bpf_prog_put(link->prog); 2879 2880 /* free bpf_link and its containing memory */ 2881 if (link->ops->dealloc_deferred) 2882 link->ops->dealloc_deferred(link); 2883 else 2884 link->ops->dealloc(link); 2885 } 2886 2887 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 2888 { 2889 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 2890 2891 bpf_link_dealloc(link); 2892 } 2893 2894 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 2895 { 2896 if (rcu_trace_implies_rcu_gp()) 2897 bpf_link_defer_dealloc_rcu_gp(rcu); 2898 else 2899 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 2900 } 2901 2902 /* bpf_link_free is guaranteed to be called from process context */ 2903 static void bpf_link_free(struct bpf_link *link) 2904 { 2905 const struct bpf_link_ops *ops = link->ops; 2906 bool sleepable = false; 2907 2908 bpf_link_free_id(link->id); 2909 if (link->prog) { 2910 sleepable = link->prog->aux->sleepable; 2911 /* detach BPF program, clean up used resources */ 2912 ops->release(link); 2913 } 2914 if (ops->dealloc_deferred) { 2915 /* schedule BPF link deallocation; if underlying BPF program 2916 * is sleepable, we need to first wait for RCU tasks trace 2917 * sync, then go through "classic" RCU grace period 2918 */ 2919 if (sleepable) 2920 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 2921 else 2922 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 2923 } else if (ops->dealloc) { 2924 bpf_link_dealloc(link); 2925 } 2926 } 2927 2928 static void bpf_link_put_deferred(struct work_struct *work) 2929 { 2930 struct bpf_link *link = container_of(work, struct bpf_link, work); 2931 2932 bpf_link_free(link); 2933 } 2934 2935 /* bpf_link_put might be called from atomic context. It needs to be called 2936 * from sleepable context in order to acquire sleeping locks during the process. 
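 * The actual freeing is therefore pushed to a workqueue here, while
 * bpf_link_put_direct() below is used on paths known to run in process context.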
2937 */ 2938 void bpf_link_put(struct bpf_link *link) 2939 { 2940 if (!atomic64_dec_and_test(&link->refcnt)) 2941 return; 2942 2943 INIT_WORK(&link->work, bpf_link_put_deferred); 2944 schedule_work(&link->work); 2945 } 2946 EXPORT_SYMBOL(bpf_link_put); 2947 2948 static void bpf_link_put_direct(struct bpf_link *link) 2949 { 2950 if (!atomic64_dec_and_test(&link->refcnt)) 2951 return; 2952 bpf_link_free(link); 2953 } 2954 2955 static int bpf_link_release(struct inode *inode, struct file *filp) 2956 { 2957 struct bpf_link *link = filp->private_data; 2958 2959 bpf_link_put_direct(link); 2960 return 0; 2961 } 2962 2963 #ifdef CONFIG_PROC_FS 2964 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2965 #define BPF_MAP_TYPE(_id, _ops) 2966 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2967 static const char *bpf_link_type_strs[] = { 2968 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2969 #include <linux/bpf_types.h> 2970 }; 2971 #undef BPF_PROG_TYPE 2972 #undef BPF_MAP_TYPE 2973 #undef BPF_LINK_TYPE 2974 2975 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2976 { 2977 const struct bpf_link *link = filp->private_data; 2978 const struct bpf_prog *prog = link->prog; 2979 enum bpf_link_type type = link->type; 2980 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2981 2982 if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { 2983 seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); 2984 } else { 2985 WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type); 2986 seq_printf(m, "link_type:\t<%u>\n", type); 2987 } 2988 seq_printf(m, "link_id:\t%u\n", link->id); 2989 2990 if (prog) { 2991 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2992 seq_printf(m, 2993 "prog_tag:\t%s\n" 2994 "prog_id:\t%u\n", 2995 prog_tag, 2996 prog->aux->id); 2997 } 2998 if (link->ops->show_fdinfo) 2999 link->ops->show_fdinfo(link, m); 3000 } 3001 #endif 3002 3003 static const struct file_operations bpf_link_fops = { 3004 #ifdef CONFIG_PROC_FS 3005 .show_fdinfo = bpf_link_show_fdinfo, 3006 #endif 3007 .release = bpf_link_release, 3008 .read = bpf_dummy_read, 3009 .write = bpf_dummy_write, 3010 }; 3011 3012 static int bpf_link_alloc_id(struct bpf_link *link) 3013 { 3014 int id; 3015 3016 idr_preload(GFP_KERNEL); 3017 spin_lock_bh(&link_idr_lock); 3018 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3019 spin_unlock_bh(&link_idr_lock); 3020 idr_preload_end(); 3021 3022 return id; 3023 } 3024 3025 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3026 * reserving unused FD and allocating ID from link_idr. This is to be paired 3027 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3028 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3029 * pre-allocated resources are to be put with bpf_link_cleanup() call. All the 3030 * transient state is passed around in struct bpf_link_primer. 3031 * This is the preferred way to create and initialize bpf_link, especially when 3032 * there are complicated and expensive operations in between creating bpf_link 3033 * itself and attaching it to BPF hook. By using bpf_link_prime() and 3034 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3035 * expensive (and potentially failing) roll back operations in a rare case 3036 * that file, FD, or ID can't be allocated.
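 *
 * The calling pattern used in this file (see e.g. bpf_raw_tp_link_attach()
 * below) is, in sketch form, with attach_to_hook() standing in for the
 * hook-specific registration step such as bpf_probe_register():
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = attach_to_hook(link);		// may be expensive and may fail
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);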
3037 */ 3038 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3039 { 3040 struct file *file; 3041 int fd, id; 3042 3043 fd = get_unused_fd_flags(O_CLOEXEC); 3044 if (fd < 0) 3045 return fd; 3046 3047 3048 id = bpf_link_alloc_id(link); 3049 if (id < 0) { 3050 put_unused_fd(fd); 3051 return id; 3052 } 3053 3054 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 3055 if (IS_ERR(file)) { 3056 bpf_link_free_id(id); 3057 put_unused_fd(fd); 3058 return PTR_ERR(file); 3059 } 3060 3061 primer->link = link; 3062 primer->file = file; 3063 primer->fd = fd; 3064 primer->id = id; 3065 return 0; 3066 } 3067 3068 int bpf_link_settle(struct bpf_link_primer *primer) 3069 { 3070 /* make bpf_link fetchable by ID */ 3071 spin_lock_bh(&link_idr_lock); 3072 primer->link->id = primer->id; 3073 spin_unlock_bh(&link_idr_lock); 3074 /* make bpf_link fetchable by FD */ 3075 fd_install(primer->fd, primer->file); 3076 /* pass through installed FD */ 3077 return primer->fd; 3078 } 3079 3080 int bpf_link_new_fd(struct bpf_link *link) 3081 { 3082 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 3083 } 3084 3085 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3086 { 3087 struct fd f = fdget(ufd); 3088 struct bpf_link *link; 3089 3090 if (!f.file) 3091 return ERR_PTR(-EBADF); 3092 if (f.file->f_op != &bpf_link_fops) { 3093 fdput(f); 3094 return ERR_PTR(-EINVAL); 3095 } 3096 3097 link = f.file->private_data; 3098 bpf_link_inc(link); 3099 fdput(f); 3100 3101 return link; 3102 } 3103 EXPORT_SYMBOL(bpf_link_get_from_fd); 3104 3105 static void bpf_tracing_link_release(struct bpf_link *link) 3106 { 3107 struct bpf_tracing_link *tr_link = 3108 container_of(link, struct bpf_tracing_link, link.link); 3109 3110 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3111 tr_link->trampoline)); 3112 3113 bpf_trampoline_put(tr_link->trampoline); 3114 3115 /* tgt_prog is NULL if target is a kernel function */ 3116 if (tr_link->tgt_prog) 3117 bpf_prog_put(tr_link->tgt_prog); 3118 } 3119 3120 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3121 { 3122 struct bpf_tracing_link *tr_link = 3123 container_of(link, struct bpf_tracing_link, link.link); 3124 3125 kfree(tr_link); 3126 } 3127 3128 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3129 struct seq_file *seq) 3130 { 3131 struct bpf_tracing_link *tr_link = 3132 container_of(link, struct bpf_tracing_link, link.link); 3133 u32 target_btf_id, target_obj_id; 3134 3135 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3136 &target_obj_id, &target_btf_id); 3137 seq_printf(seq, 3138 "attach_type:\t%d\n" 3139 "target_obj_id:\t%u\n" 3140 "target_btf_id:\t%u\n", 3141 tr_link->attach_type, 3142 target_obj_id, 3143 target_btf_id); 3144 } 3145 3146 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3147 struct bpf_link_info *info) 3148 { 3149 struct bpf_tracing_link *tr_link = 3150 container_of(link, struct bpf_tracing_link, link.link); 3151 3152 info->tracing.attach_type = tr_link->attach_type; 3153 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3154 &info->tracing.target_obj_id, 3155 &info->tracing.target_btf_id); 3156 3157 return 0; 3158 } 3159 3160 static const struct bpf_link_ops bpf_tracing_link_lops = { 3161 .release = bpf_tracing_link_release, 3162 .dealloc = bpf_tracing_link_dealloc, 3163 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3164 .fill_link_info = bpf_tracing_link_fill_link_info, 3165 }; 3166 3167 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 
3168 int tgt_prog_fd, 3169 u32 btf_id, 3170 u64 bpf_cookie) 3171 { 3172 struct bpf_link_primer link_primer; 3173 struct bpf_prog *tgt_prog = NULL; 3174 struct bpf_trampoline *tr = NULL; 3175 struct bpf_tracing_link *link; 3176 u64 key = 0; 3177 int err; 3178 3179 switch (prog->type) { 3180 case BPF_PROG_TYPE_TRACING: 3181 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3182 prog->expected_attach_type != BPF_TRACE_FEXIT && 3183 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3184 err = -EINVAL; 3185 goto out_put_prog; 3186 } 3187 break; 3188 case BPF_PROG_TYPE_EXT: 3189 if (prog->expected_attach_type != 0) { 3190 err = -EINVAL; 3191 goto out_put_prog; 3192 } 3193 break; 3194 case BPF_PROG_TYPE_LSM: 3195 if (prog->expected_attach_type != BPF_LSM_MAC) { 3196 err = -EINVAL; 3197 goto out_put_prog; 3198 } 3199 break; 3200 default: 3201 err = -EINVAL; 3202 goto out_put_prog; 3203 } 3204 3205 if (!!tgt_prog_fd != !!btf_id) { 3206 err = -EINVAL; 3207 goto out_put_prog; 3208 } 3209 3210 if (tgt_prog_fd) { 3211 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 3212 if (prog->type != BPF_PROG_TYPE_EXT) { 3213 err = -EINVAL; 3214 goto out_put_prog; 3215 } 3216 3217 tgt_prog = bpf_prog_get(tgt_prog_fd); 3218 if (IS_ERR(tgt_prog)) { 3219 err = PTR_ERR(tgt_prog); 3220 tgt_prog = NULL; 3221 goto out_put_prog; 3222 } 3223 3224 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3225 } 3226 3227 link = kzalloc(sizeof(*link), GFP_USER); 3228 if (!link) { 3229 err = -ENOMEM; 3230 goto out_put_prog; 3231 } 3232 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3233 &bpf_tracing_link_lops, prog); 3234 link->attach_type = prog->expected_attach_type; 3235 link->link.cookie = bpf_cookie; 3236 3237 mutex_lock(&prog->aux->dst_mutex); 3238 3239 /* There are a few possible cases here: 3240 * 3241 * - if prog->aux->dst_trampoline is set, the program was just loaded 3242 * and not yet attached to anything, so we can use the values stored 3243 * in prog->aux 3244 * 3245 * - if prog->aux->dst_trampoline is NULL, the program has already been 3246 * attached to a target and its initial target was cleared (below) 3247 * 3248 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3249 * target_btf_id using the link_create API. 3250 * 3251 * - if tgt_prog == NULL when this function was called using the old 3252 * raw_tracepoint_open API, and we need a target from prog->aux 3253 * 3254 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3255 * was detached and is going for re-attachment. 3256 * 3257 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3258 * are NULL, then program was already attached and user did not provide 3259 * tgt_prog_fd so we have no way to find out or create trampoline 3260 */ 3261 if (!prog->aux->dst_trampoline && !tgt_prog) { 3262 /* 3263 * Allow re-attach for TRACING and LSM programs. If it's 3264 * currently linked, bpf_trampoline_link_prog will fail. 3265 * EXT programs need to specify tgt_prog_fd, so they 3266 * re-attach in separate code path. 3267 */ 3268 if (prog->type != BPF_PROG_TYPE_TRACING && 3269 prog->type != BPF_PROG_TYPE_LSM) { 3270 err = -EINVAL; 3271 goto out_unlock; 3272 } 3273 /* We can allow re-attach only if we have valid attach_btf. 
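 * The saved attach_btf and attach_btf_id are then used just below to
 * recompute the trampoline key of the original target.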
*/ 3274 if (!prog->aux->attach_btf) { 3275 err = -EINVAL; 3276 goto out_unlock; 3277 } 3278 btf_id = prog->aux->attach_btf_id; 3279 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3280 } 3281 3282 if (!prog->aux->dst_trampoline || 3283 (key && key != prog->aux->dst_trampoline->key)) { 3284 /* If there is no saved target, or the specified target is 3285 * different from the destination specified at load time, we 3286 * need a new trampoline and a check for compatibility 3287 */ 3288 struct bpf_attach_target_info tgt_info = {}; 3289 3290 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3291 &tgt_info); 3292 if (err) 3293 goto out_unlock; 3294 3295 if (tgt_info.tgt_mod) { 3296 module_put(prog->aux->mod); 3297 prog->aux->mod = tgt_info.tgt_mod; 3298 } 3299 3300 tr = bpf_trampoline_get(key, &tgt_info); 3301 if (!tr) { 3302 err = -ENOMEM; 3303 goto out_unlock; 3304 } 3305 } else { 3306 /* The caller didn't specify a target, or the target was the 3307 * same as the destination supplied during program load. This 3308 * means we can reuse the trampoline and reference from program 3309 * load time, and there is no need to allocate a new one. This 3310 * can only happen once for any program, as the saved values in 3311 * prog->aux are cleared below. 3312 */ 3313 tr = prog->aux->dst_trampoline; 3314 tgt_prog = prog->aux->dst_prog; 3315 } 3316 3317 err = bpf_link_prime(&link->link.link, &link_primer); 3318 if (err) 3319 goto out_unlock; 3320 3321 err = bpf_trampoline_link_prog(&link->link, tr); 3322 if (err) { 3323 bpf_link_cleanup(&link_primer); 3324 link = NULL; 3325 goto out_unlock; 3326 } 3327 3328 link->tgt_prog = tgt_prog; 3329 link->trampoline = tr; 3330 3331 /* Always clear the trampoline and target prog from prog->aux to make 3332 * sure the original attach destination is not kept alive after a 3333 * program is (re-)attached to another target. 
3334 */ 3335 if (prog->aux->dst_prog && 3336 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3337 /* got extra prog ref from syscall, or attaching to different prog */ 3338 bpf_prog_put(prog->aux->dst_prog); 3339 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3340 /* we allocated a new trampoline, so free the old one */ 3341 bpf_trampoline_put(prog->aux->dst_trampoline); 3342 3343 prog->aux->dst_prog = NULL; 3344 prog->aux->dst_trampoline = NULL; 3345 mutex_unlock(&prog->aux->dst_mutex); 3346 3347 return bpf_link_settle(&link_primer); 3348 out_unlock: 3349 if (tr && tr != prog->aux->dst_trampoline) 3350 bpf_trampoline_put(tr); 3351 mutex_unlock(&prog->aux->dst_mutex); 3352 kfree(link); 3353 out_put_prog: 3354 if (tgt_prog_fd && tgt_prog) 3355 bpf_prog_put(tgt_prog); 3356 return err; 3357 } 3358 3359 struct bpf_raw_tp_link { 3360 struct bpf_link link; 3361 struct bpf_raw_event_map *btp; 3362 }; 3363 3364 static void bpf_raw_tp_link_release(struct bpf_link *link) 3365 { 3366 struct bpf_raw_tp_link *raw_tp = 3367 container_of(link, struct bpf_raw_tp_link, link); 3368 3369 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 3370 bpf_put_raw_tracepoint(raw_tp->btp); 3371 } 3372 3373 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3374 { 3375 struct bpf_raw_tp_link *raw_tp = 3376 container_of(link, struct bpf_raw_tp_link, link); 3377 3378 kfree(raw_tp); 3379 } 3380 3381 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3382 struct seq_file *seq) 3383 { 3384 struct bpf_raw_tp_link *raw_tp_link = 3385 container_of(link, struct bpf_raw_tp_link, link); 3386 3387 seq_printf(seq, 3388 "tp_name:\t%s\n", 3389 raw_tp_link->btp->tp->name); 3390 } 3391 3392 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3393 u32 len) 3394 { 3395 if (ulen >= len + 1) { 3396 if (copy_to_user(ubuf, buf, len + 1)) 3397 return -EFAULT; 3398 } else { 3399 char zero = '\0'; 3400 3401 if (copy_to_user(ubuf, buf, ulen - 1)) 3402 return -EFAULT; 3403 if (put_user(zero, ubuf + ulen - 1)) 3404 return -EFAULT; 3405 return -ENOSPC; 3406 } 3407 3408 return 0; 3409 } 3410 3411 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3412 struct bpf_link_info *info) 3413 { 3414 struct bpf_raw_tp_link *raw_tp_link = 3415 container_of(link, struct bpf_raw_tp_link, link); 3416 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3417 const char *tp_name = raw_tp_link->btp->tp->name; 3418 u32 ulen = info->raw_tracepoint.tp_name_len; 3419 size_t tp_len = strlen(tp_name); 3420 3421 if (!ulen ^ !ubuf) 3422 return -EINVAL; 3423 3424 info->raw_tracepoint.tp_name_len = tp_len + 1; 3425 3426 if (!ubuf) 3427 return 0; 3428 3429 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3430 } 3431 3432 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3433 .release = bpf_raw_tp_link_release, 3434 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3435 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3436 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3437 }; 3438 3439 #ifdef CONFIG_PERF_EVENTS 3440 struct bpf_perf_link { 3441 struct bpf_link link; 3442 struct file *perf_file; 3443 }; 3444 3445 static void bpf_perf_link_release(struct bpf_link *link) 3446 { 3447 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3448 struct perf_event *event = perf_link->perf_file->private_data; 3449 3450 perf_event_free_bpf_prog(event); 3451 fput(perf_link->perf_file); 3452 } 3453 3454 static void bpf_perf_link_dealloc(struct bpf_link 
*link) 3455 { 3456 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3457 3458 kfree(perf_link); 3459 } 3460 3461 static int bpf_perf_link_fill_common(const struct perf_event *event, 3462 char __user *uname, u32 *ulenp, 3463 u64 *probe_offset, u64 *probe_addr, 3464 u32 *fd_type, unsigned long *missed) 3465 { 3466 const char *buf; 3467 u32 prog_id, ulen; 3468 size_t len; 3469 int err; 3470 3471 ulen = *ulenp; 3472 if (!ulen ^ !uname) 3473 return -EINVAL; 3474 3475 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3476 probe_offset, probe_addr, missed); 3477 if (err) 3478 return err; 3479 3480 if (buf) { 3481 len = strlen(buf); 3482 *ulenp = len + 1; 3483 } else { 3484 *ulenp = 1; 3485 } 3486 if (!uname) 3487 return 0; 3488 3489 if (buf) { 3490 err = bpf_copy_to_user(uname, buf, ulen, len); 3491 if (err) 3492 return err; 3493 } else { 3494 char zero = '\0'; 3495 3496 if (put_user(zero, uname)) 3497 return -EFAULT; 3498 } 3499 return 0; 3500 } 3501 3502 #ifdef CONFIG_KPROBE_EVENTS 3503 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3504 struct bpf_link_info *info) 3505 { 3506 unsigned long missed; 3507 char __user *uname; 3508 u64 addr, offset; 3509 u32 ulen, type; 3510 int err; 3511 3512 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3513 ulen = info->perf_event.kprobe.name_len; 3514 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3515 &type, &missed); 3516 if (err) 3517 return err; 3518 if (type == BPF_FD_TYPE_KRETPROBE) 3519 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3520 else 3521 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3522 info->perf_event.kprobe.name_len = ulen; 3523 info->perf_event.kprobe.offset = offset; 3524 info->perf_event.kprobe.missed = missed; 3525 if (!kallsyms_show_value(current_cred())) 3526 addr = 0; 3527 info->perf_event.kprobe.addr = addr; 3528 info->perf_event.kprobe.cookie = event->bpf_cookie; 3529 return 0; 3530 } 3531 #endif 3532 3533 #ifdef CONFIG_UPROBE_EVENTS 3534 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3535 struct bpf_link_info *info) 3536 { 3537 char __user *uname; 3538 u64 addr, offset; 3539 u32 ulen, type; 3540 int err; 3541 3542 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3543 ulen = info->perf_event.uprobe.name_len; 3544 err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3545 &type, NULL); 3546 if (err) 3547 return err; 3548 3549 if (type == BPF_FD_TYPE_URETPROBE) 3550 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3551 else 3552 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3553 info->perf_event.uprobe.name_len = ulen; 3554 info->perf_event.uprobe.offset = offset; 3555 info->perf_event.uprobe.cookie = event->bpf_cookie; 3556 return 0; 3557 } 3558 #endif 3559 3560 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3561 struct bpf_link_info *info) 3562 { 3563 #ifdef CONFIG_KPROBE_EVENTS 3564 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3565 return bpf_perf_link_fill_kprobe(event, info); 3566 #endif 3567 #ifdef CONFIG_UPROBE_EVENTS 3568 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3569 return bpf_perf_link_fill_uprobe(event, info); 3570 #endif 3571 return -EOPNOTSUPP; 3572 } 3573 3574 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3575 struct bpf_link_info *info) 3576 { 3577 char __user *uname; 3578 u32 ulen; 3579 int err; 3580 3581 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3582 ulen = 
info->perf_event.tracepoint.name_len; 3583 err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3584 if (err) 3585 return err; 3586 3587 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3588 info->perf_event.tracepoint.name_len = ulen; 3589 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3590 return 0; 3591 } 3592 3593 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3594 struct bpf_link_info *info) 3595 { 3596 info->perf_event.event.type = event->attr.type; 3597 info->perf_event.event.config = event->attr.config; 3598 info->perf_event.event.cookie = event->bpf_cookie; 3599 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3600 return 0; 3601 } 3602 3603 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3604 struct bpf_link_info *info) 3605 { 3606 struct bpf_perf_link *perf_link; 3607 const struct perf_event *event; 3608 3609 perf_link = container_of(link, struct bpf_perf_link, link); 3610 event = perf_get_event(perf_link->perf_file); 3611 if (IS_ERR(event)) 3612 return PTR_ERR(event); 3613 3614 switch (event->prog->type) { 3615 case BPF_PROG_TYPE_PERF_EVENT: 3616 return bpf_perf_link_fill_perf_event(event, info); 3617 case BPF_PROG_TYPE_TRACEPOINT: 3618 return bpf_perf_link_fill_tracepoint(event, info); 3619 case BPF_PROG_TYPE_KPROBE: 3620 return bpf_perf_link_fill_probe(event, info); 3621 default: 3622 return -EOPNOTSUPP; 3623 } 3624 } 3625 3626 static const struct bpf_link_ops bpf_perf_link_lops = { 3627 .release = bpf_perf_link_release, 3628 .dealloc = bpf_perf_link_dealloc, 3629 .fill_link_info = bpf_perf_link_fill_link_info, 3630 }; 3631 3632 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3633 { 3634 struct bpf_link_primer link_primer; 3635 struct bpf_perf_link *link; 3636 struct perf_event *event; 3637 struct file *perf_file; 3638 int err; 3639 3640 if (attr->link_create.flags) 3641 return -EINVAL; 3642 3643 perf_file = perf_event_get(attr->link_create.target_fd); 3644 if (IS_ERR(perf_file)) 3645 return PTR_ERR(perf_file); 3646 3647 link = kzalloc(sizeof(*link), GFP_USER); 3648 if (!link) { 3649 err = -ENOMEM; 3650 goto out_put_file; 3651 } 3652 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3653 link->perf_file = perf_file; 3654 3655 err = bpf_link_prime(&link->link, &link_primer); 3656 if (err) { 3657 kfree(link); 3658 goto out_put_file; 3659 } 3660 3661 event = perf_file->private_data; 3662 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3663 if (err) { 3664 bpf_link_cleanup(&link_primer); 3665 goto out_put_file; 3666 } 3667 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3668 bpf_prog_inc(prog); 3669 3670 return bpf_link_settle(&link_primer); 3671 3672 out_put_file: 3673 fput(perf_file); 3674 return err; 3675 } 3676 #else 3677 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3678 { 3679 return -EOPNOTSUPP; 3680 } 3681 #endif /* CONFIG_PERF_EVENTS */ 3682 3683 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3684 const char __user *user_tp_name) 3685 { 3686 struct bpf_link_primer link_primer; 3687 struct bpf_raw_tp_link *link; 3688 struct bpf_raw_event_map *btp; 3689 const char *tp_name; 3690 char buf[128]; 3691 int err; 3692 3693 switch (prog->type) { 3694 case BPF_PROG_TYPE_TRACING: 3695 case BPF_PROG_TYPE_EXT: 3696 case BPF_PROG_TYPE_LSM: 3697 if (user_tp_name) 3698 /* The attach point for this category of programs 3699 * should be 
specified via btf_id during program load. 3700 */ 3701 return -EINVAL; 3702 if (prog->type == BPF_PROG_TYPE_TRACING && 3703 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3704 tp_name = prog->aux->attach_func_name; 3705 break; 3706 } 3707 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3708 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3709 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3710 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3711 return -EFAULT; 3712 buf[sizeof(buf) - 1] = 0; 3713 tp_name = buf; 3714 break; 3715 default: 3716 return -EINVAL; 3717 } 3718 3719 btp = bpf_get_raw_tracepoint(tp_name); 3720 if (!btp) 3721 return -ENOENT; 3722 3723 link = kzalloc(sizeof(*link), GFP_USER); 3724 if (!link) { 3725 err = -ENOMEM; 3726 goto out_put_btp; 3727 } 3728 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3729 &bpf_raw_tp_link_lops, prog); 3730 link->btp = btp; 3731 3732 err = bpf_link_prime(&link->link, &link_primer); 3733 if (err) { 3734 kfree(link); 3735 goto out_put_btp; 3736 } 3737 3738 err = bpf_probe_register(link->btp, prog); 3739 if (err) { 3740 bpf_link_cleanup(&link_primer); 3741 goto out_put_btp; 3742 } 3743 3744 return bpf_link_settle(&link_primer); 3745 3746 out_put_btp: 3747 bpf_put_raw_tracepoint(btp); 3748 return err; 3749 } 3750 3751 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3752 3753 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3754 { 3755 struct bpf_prog *prog; 3756 int fd; 3757 3758 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3759 return -EINVAL; 3760 3761 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3762 if (IS_ERR(prog)) 3763 return PTR_ERR(prog); 3764 3765 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); 3766 if (fd < 0) 3767 bpf_prog_put(prog); 3768 return fd; 3769 } 3770 3771 static enum bpf_prog_type 3772 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3773 { 3774 switch (attach_type) { 3775 case BPF_CGROUP_INET_INGRESS: 3776 case BPF_CGROUP_INET_EGRESS: 3777 return BPF_PROG_TYPE_CGROUP_SKB; 3778 case BPF_CGROUP_INET_SOCK_CREATE: 3779 case BPF_CGROUP_INET_SOCK_RELEASE: 3780 case BPF_CGROUP_INET4_POST_BIND: 3781 case BPF_CGROUP_INET6_POST_BIND: 3782 return BPF_PROG_TYPE_CGROUP_SOCK; 3783 case BPF_CGROUP_INET4_BIND: 3784 case BPF_CGROUP_INET6_BIND: 3785 case BPF_CGROUP_INET4_CONNECT: 3786 case BPF_CGROUP_INET6_CONNECT: 3787 case BPF_CGROUP_INET4_GETPEERNAME: 3788 case BPF_CGROUP_INET6_GETPEERNAME: 3789 case BPF_CGROUP_INET4_GETSOCKNAME: 3790 case BPF_CGROUP_INET6_GETSOCKNAME: 3791 case BPF_CGROUP_UDP4_SENDMSG: 3792 case BPF_CGROUP_UDP6_SENDMSG: 3793 case BPF_CGROUP_UDP4_RECVMSG: 3794 case BPF_CGROUP_UDP6_RECVMSG: 3795 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3796 case BPF_CGROUP_SOCK_OPS: 3797 return BPF_PROG_TYPE_SOCK_OPS; 3798 case BPF_CGROUP_DEVICE: 3799 return BPF_PROG_TYPE_CGROUP_DEVICE; 3800 case BPF_SK_MSG_VERDICT: 3801 return BPF_PROG_TYPE_SK_MSG; 3802 case BPF_SK_SKB_STREAM_PARSER: 3803 case BPF_SK_SKB_STREAM_VERDICT: 3804 case BPF_SK_SKB_VERDICT: 3805 return BPF_PROG_TYPE_SK_SKB; 3806 case BPF_LIRC_MODE2: 3807 return BPF_PROG_TYPE_LIRC_MODE2; 3808 case BPF_FLOW_DISSECTOR: 3809 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3810 case BPF_CGROUP_SYSCTL: 3811 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3812 case BPF_CGROUP_GETSOCKOPT: 3813 case BPF_CGROUP_SETSOCKOPT: 3814 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3815 case BPF_TRACE_ITER: 3816 case BPF_TRACE_RAW_TP: 3817 case BPF_TRACE_FENTRY: 3818 case BPF_TRACE_FEXIT: 3819 case BPF_MODIFY_RETURN: 3820 return 
BPF_PROG_TYPE_TRACING; 3821 case BPF_LSM_MAC: 3822 return BPF_PROG_TYPE_LSM; 3823 case BPF_SK_LOOKUP: 3824 return BPF_PROG_TYPE_SK_LOOKUP; 3825 case BPF_XDP: 3826 return BPF_PROG_TYPE_XDP; 3827 case BPF_LSM_CGROUP: 3828 return BPF_PROG_TYPE_LSM; 3829 case BPF_TCX_INGRESS: 3830 case BPF_TCX_EGRESS: 3831 return BPF_PROG_TYPE_SCHED_CLS; 3832 default: 3833 return BPF_PROG_TYPE_UNSPEC; 3834 } 3835 } 3836 3837 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3838 enum bpf_attach_type attach_type) 3839 { 3840 enum bpf_prog_type ptype; 3841 3842 switch (prog->type) { 3843 case BPF_PROG_TYPE_CGROUP_SOCK: 3844 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3845 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3846 case BPF_PROG_TYPE_SK_LOOKUP: 3847 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3848 case BPF_PROG_TYPE_CGROUP_SKB: 3849 if (!capable(CAP_NET_ADMIN)) 3850 /* cg-skb progs can be loaded by unpriv user. 3851 * check permissions at attach time. 3852 */ 3853 return -EPERM; 3854 3855 ptype = attach_type_to_prog_type(attach_type); 3856 if (prog->type != ptype) 3857 return -EINVAL; 3858 3859 return prog->enforce_expected_attach_type && 3860 prog->expected_attach_type != attach_type ? 3861 -EINVAL : 0; 3862 case BPF_PROG_TYPE_EXT: 3863 return 0; 3864 case BPF_PROG_TYPE_NETFILTER: 3865 if (attach_type != BPF_NETFILTER) 3866 return -EINVAL; 3867 return 0; 3868 case BPF_PROG_TYPE_PERF_EVENT: 3869 case BPF_PROG_TYPE_TRACEPOINT: 3870 if (attach_type != BPF_PERF_EVENT) 3871 return -EINVAL; 3872 return 0; 3873 case BPF_PROG_TYPE_KPROBE: 3874 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 3875 attach_type != BPF_TRACE_KPROBE_MULTI) 3876 return -EINVAL; 3877 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 3878 attach_type != BPF_TRACE_UPROBE_MULTI) 3879 return -EINVAL; 3880 if (attach_type != BPF_PERF_EVENT && 3881 attach_type != BPF_TRACE_KPROBE_MULTI && 3882 attach_type != BPF_TRACE_UPROBE_MULTI) 3883 return -EINVAL; 3884 return 0; 3885 case BPF_PROG_TYPE_SCHED_CLS: 3886 if (attach_type != BPF_TCX_INGRESS && 3887 attach_type != BPF_TCX_EGRESS) 3888 return -EINVAL; 3889 return 0; 3890 default: 3891 ptype = attach_type_to_prog_type(attach_type); 3892 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 3893 return -EINVAL; 3894 return 0; 3895 } 3896 } 3897 3898 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 3899 3900 #define BPF_F_ATTACH_MASK_BASE \ 3901 (BPF_F_ALLOW_OVERRIDE | \ 3902 BPF_F_ALLOW_MULTI | \ 3903 BPF_F_REPLACE) 3904 3905 #define BPF_F_ATTACH_MASK_MPROG \ 3906 (BPF_F_REPLACE | \ 3907 BPF_F_BEFORE | \ 3908 BPF_F_AFTER | \ 3909 BPF_F_ID | \ 3910 BPF_F_LINK) 3911 3912 static int bpf_prog_attach(const union bpf_attr *attr) 3913 { 3914 enum bpf_prog_type ptype; 3915 struct bpf_prog *prog; 3916 int ret; 3917 3918 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3919 return -EINVAL; 3920 3921 ptype = attach_type_to_prog_type(attr->attach_type); 3922 if (ptype == BPF_PROG_TYPE_UNSPEC) 3923 return -EINVAL; 3924 if (bpf_mprog_supported(ptype)) { 3925 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 3926 return -EINVAL; 3927 } else { 3928 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 3929 return -EINVAL; 3930 if (attr->relative_fd || 3931 attr->expected_revision) 3932 return -EINVAL; 3933 } 3934 3935 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3936 if (IS_ERR(prog)) 3937 return PTR_ERR(prog); 3938 3939 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3940 bpf_prog_put(prog); 3941 return -EINVAL; 3942 } 3943 3944 switch (ptype) { 
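	/* Dispatch to whichever subsystem owns this attach point: sock_map, lirc, netns (flow dissector), cgroup, or tcx. */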
3945 case BPF_PROG_TYPE_SK_SKB: 3946 case BPF_PROG_TYPE_SK_MSG: 3947 ret = sock_map_get_from_fd(attr, prog); 3948 break; 3949 case BPF_PROG_TYPE_LIRC_MODE2: 3950 ret = lirc_prog_attach(attr, prog); 3951 break; 3952 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3953 ret = netns_bpf_prog_attach(attr, prog); 3954 break; 3955 case BPF_PROG_TYPE_CGROUP_DEVICE: 3956 case BPF_PROG_TYPE_CGROUP_SKB: 3957 case BPF_PROG_TYPE_CGROUP_SOCK: 3958 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3959 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3960 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3961 case BPF_PROG_TYPE_SOCK_OPS: 3962 case BPF_PROG_TYPE_LSM: 3963 if (ptype == BPF_PROG_TYPE_LSM && 3964 prog->expected_attach_type != BPF_LSM_CGROUP) 3965 ret = -EINVAL; 3966 else 3967 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3968 break; 3969 case BPF_PROG_TYPE_SCHED_CLS: 3970 ret = tcx_prog_attach(attr, prog); 3971 break; 3972 default: 3973 ret = -EINVAL; 3974 } 3975 3976 if (ret) 3977 bpf_prog_put(prog); 3978 return ret; 3979 } 3980 3981 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 3982 3983 static int bpf_prog_detach(const union bpf_attr *attr) 3984 { 3985 struct bpf_prog *prog = NULL; 3986 enum bpf_prog_type ptype; 3987 int ret; 3988 3989 if (CHECK_ATTR(BPF_PROG_DETACH)) 3990 return -EINVAL; 3991 3992 ptype = attach_type_to_prog_type(attr->attach_type); 3993 if (bpf_mprog_supported(ptype)) { 3994 if (ptype == BPF_PROG_TYPE_UNSPEC) 3995 return -EINVAL; 3996 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 3997 return -EINVAL; 3998 if (attr->attach_bpf_fd) { 3999 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4000 if (IS_ERR(prog)) 4001 return PTR_ERR(prog); 4002 } 4003 } else if (attr->attach_flags || 4004 attr->relative_fd || 4005 attr->expected_revision) { 4006 return -EINVAL; 4007 } 4008 4009 switch (ptype) { 4010 case BPF_PROG_TYPE_SK_MSG: 4011 case BPF_PROG_TYPE_SK_SKB: 4012 ret = sock_map_prog_detach(attr, ptype); 4013 break; 4014 case BPF_PROG_TYPE_LIRC_MODE2: 4015 ret = lirc_prog_detach(attr); 4016 break; 4017 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4018 ret = netns_bpf_prog_detach(attr, ptype); 4019 break; 4020 case BPF_PROG_TYPE_CGROUP_DEVICE: 4021 case BPF_PROG_TYPE_CGROUP_SKB: 4022 case BPF_PROG_TYPE_CGROUP_SOCK: 4023 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4024 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4025 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4026 case BPF_PROG_TYPE_SOCK_OPS: 4027 case BPF_PROG_TYPE_LSM: 4028 ret = cgroup_bpf_prog_detach(attr, ptype); 4029 break; 4030 case BPF_PROG_TYPE_SCHED_CLS: 4031 ret = tcx_prog_detach(attr, prog); 4032 break; 4033 default: 4034 ret = -EINVAL; 4035 } 4036 4037 if (prog) 4038 bpf_prog_put(prog); 4039 return ret; 4040 } 4041 4042 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4043 4044 static int bpf_prog_query(const union bpf_attr *attr, 4045 union bpf_attr __user *uattr) 4046 { 4047 if (!capable(CAP_NET_ADMIN)) 4048 return -EPERM; 4049 if (CHECK_ATTR(BPF_PROG_QUERY)) 4050 return -EINVAL; 4051 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4052 return -EINVAL; 4053 4054 switch (attr->query.attach_type) { 4055 case BPF_CGROUP_INET_INGRESS: 4056 case BPF_CGROUP_INET_EGRESS: 4057 case BPF_CGROUP_INET_SOCK_CREATE: 4058 case BPF_CGROUP_INET_SOCK_RELEASE: 4059 case BPF_CGROUP_INET4_BIND: 4060 case BPF_CGROUP_INET6_BIND: 4061 case BPF_CGROUP_INET4_POST_BIND: 4062 case BPF_CGROUP_INET6_POST_BIND: 4063 case BPF_CGROUP_INET4_CONNECT: 4064 case BPF_CGROUP_INET6_CONNECT: 4065 case BPF_CGROUP_INET4_GETPEERNAME: 4066 case BPF_CGROUP_INET6_GETPEERNAME: 4067 case BPF_CGROUP_INET4_GETSOCKNAME: 4068 case 
BPF_CGROUP_INET6_GETSOCKNAME: 4069 case BPF_CGROUP_UDP4_SENDMSG: 4070 case BPF_CGROUP_UDP6_SENDMSG: 4071 case BPF_CGROUP_UDP4_RECVMSG: 4072 case BPF_CGROUP_UDP6_RECVMSG: 4073 case BPF_CGROUP_SOCK_OPS: 4074 case BPF_CGROUP_DEVICE: 4075 case BPF_CGROUP_SYSCTL: 4076 case BPF_CGROUP_GETSOCKOPT: 4077 case BPF_CGROUP_SETSOCKOPT: 4078 case BPF_LSM_CGROUP: 4079 return cgroup_bpf_prog_query(attr, uattr); 4080 case BPF_LIRC_MODE2: 4081 return lirc_prog_query(attr, uattr); 4082 case BPF_FLOW_DISSECTOR: 4083 case BPF_SK_LOOKUP: 4084 return netns_bpf_prog_query(attr, uattr); 4085 case BPF_SK_SKB_STREAM_PARSER: 4086 case BPF_SK_SKB_STREAM_VERDICT: 4087 case BPF_SK_MSG_VERDICT: 4088 case BPF_SK_SKB_VERDICT: 4089 return sock_map_bpf_prog_query(attr, uattr); 4090 case BPF_TCX_INGRESS: 4091 case BPF_TCX_EGRESS: 4092 return tcx_prog_query(attr, uattr); 4093 default: 4094 return -EINVAL; 4095 } 4096 } 4097 4098 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4099 4100 static int bpf_prog_test_run(const union bpf_attr *attr, 4101 union bpf_attr __user *uattr) 4102 { 4103 struct bpf_prog *prog; 4104 int ret = -ENOTSUPP; 4105 4106 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4107 return -EINVAL; 4108 4109 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4110 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4111 return -EINVAL; 4112 4113 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4114 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4115 return -EINVAL; 4116 4117 prog = bpf_prog_get(attr->test.prog_fd); 4118 if (IS_ERR(prog)) 4119 return PTR_ERR(prog); 4120 4121 if (prog->aux->ops->test_run) 4122 ret = prog->aux->ops->test_run(prog, attr, uattr); 4123 4124 bpf_prog_put(prog); 4125 return ret; 4126 } 4127 4128 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4129 4130 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4131 union bpf_attr __user *uattr, 4132 struct idr *idr, 4133 spinlock_t *lock) 4134 { 4135 u32 next_id = attr->start_id; 4136 int err = 0; 4137 4138 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4139 return -EINVAL; 4140 4141 if (!capable(CAP_SYS_ADMIN)) 4142 return -EPERM; 4143 4144 next_id++; 4145 spin_lock_bh(lock); 4146 if (!idr_get_next(idr, &next_id)) 4147 err = -ENOENT; 4148 spin_unlock_bh(lock); 4149 4150 if (!err) 4151 err = put_user(next_id, &uattr->next_id); 4152 4153 return err; 4154 } 4155 4156 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4157 { 4158 struct bpf_map *map; 4159 4160 spin_lock_bh(&map_idr_lock); 4161 again: 4162 map = idr_get_next(&map_idr, id); 4163 if (map) { 4164 map = __bpf_map_inc_not_zero(map, false); 4165 if (IS_ERR(map)) { 4166 (*id)++; 4167 goto again; 4168 } 4169 } 4170 spin_unlock_bh(&map_idr_lock); 4171 4172 return map; 4173 } 4174 4175 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4176 { 4177 struct bpf_prog *prog; 4178 4179 spin_lock_bh(&prog_idr_lock); 4180 again: 4181 prog = idr_get_next(&prog_idr, id); 4182 if (prog) { 4183 prog = bpf_prog_inc_not_zero(prog); 4184 if (IS_ERR(prog)) { 4185 (*id)++; 4186 goto again; 4187 } 4188 } 4189 spin_unlock_bh(&prog_idr_lock); 4190 4191 return prog; 4192 } 4193 4194 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4195 4196 struct bpf_prog *bpf_prog_by_id(u32 id) 4197 { 4198 struct bpf_prog *prog; 4199 4200 if (!id) 4201 return ERR_PTR(-ENOENT); 4202 4203 spin_lock_bh(&prog_idr_lock); 4204 prog = idr_find(&prog_idr, id); 4205 if (prog) 4206 prog = bpf_prog_inc_not_zero(prog); 4207 else 4208 prog = ERR_PTR(-ENOENT); 4209 spin_unlock_bh(&prog_idr_lock); 4210 return prog; 4211 } 
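/* Illustrative sketch (not part of the kernel sources): from user space,
 * BPF_PROG_GET_FD_BY_ID is driven through the bpf(2) syscall with only
 * attr->prog_id filled in, and requires CAP_SYS_ADMIN.  Roughly:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_id = id;	// id obtained e.g. via BPF_PROG_GET_NEXT_ID
 *	fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *	// fd < 0 on error; otherwise a new fd holding a prog reference
 *
 * The returned fd owns the reference taken by bpf_prog_by_id() above; if
 * installing the new fd fails, bpf_prog_put() drops that reference again.
 */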
4212 4213 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4214 { 4215 struct bpf_prog *prog; 4216 u32 id = attr->prog_id; 4217 int fd; 4218 4219 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4220 return -EINVAL; 4221 4222 if (!capable(CAP_SYS_ADMIN)) 4223 return -EPERM; 4224 4225 prog = bpf_prog_by_id(id); 4226 if (IS_ERR(prog)) 4227 return PTR_ERR(prog); 4228 4229 fd = bpf_prog_new_fd(prog); 4230 if (fd < 0) 4231 bpf_prog_put(prog); 4232 4233 return fd; 4234 } 4235 4236 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4237 4238 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4239 { 4240 struct bpf_map *map; 4241 u32 id = attr->map_id; 4242 int f_flags; 4243 int fd; 4244 4245 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4246 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4247 return -EINVAL; 4248 4249 if (!capable(CAP_SYS_ADMIN)) 4250 return -EPERM; 4251 4252 f_flags = bpf_get_file_flag(attr->open_flags); 4253 if (f_flags < 0) 4254 return f_flags; 4255 4256 spin_lock_bh(&map_idr_lock); 4257 map = idr_find(&map_idr, id); 4258 if (map) 4259 map = __bpf_map_inc_not_zero(map, true); 4260 else 4261 map = ERR_PTR(-ENOENT); 4262 spin_unlock_bh(&map_idr_lock); 4263 4264 if (IS_ERR(map)) 4265 return PTR_ERR(map); 4266 4267 fd = bpf_map_new_fd(map, f_flags); 4268 if (fd < 0) 4269 bpf_map_put_with_uref(map); 4270 4271 return fd; 4272 } 4273 4274 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4275 unsigned long addr, u32 *off, 4276 u32 *type) 4277 { 4278 const struct bpf_map *map; 4279 int i; 4280 4281 mutex_lock(&prog->aux->used_maps_mutex); 4282 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4283 map = prog->aux->used_maps[i]; 4284 if (map == (void *)addr) { 4285 *type = BPF_PSEUDO_MAP_FD; 4286 goto out; 4287 } 4288 if (!map->ops->map_direct_value_meta) 4289 continue; 4290 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4291 *type = BPF_PSEUDO_MAP_VALUE; 4292 goto out; 4293 } 4294 } 4295 map = NULL; 4296 4297 out: 4298 mutex_unlock(&prog->aux->used_maps_mutex); 4299 return map; 4300 } 4301 4302 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4303 const struct cred *f_cred) 4304 { 4305 const struct bpf_map *map; 4306 struct bpf_insn *insns; 4307 u32 off, type; 4308 u64 imm; 4309 u8 code; 4310 int i; 4311 4312 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4313 GFP_USER); 4314 if (!insns) 4315 return insns; 4316 4317 for (i = 0; i < prog->len; i++) { 4318 code = insns[i].code; 4319 4320 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4321 insns[i].code = BPF_JMP | BPF_CALL; 4322 insns[i].imm = BPF_FUNC_tail_call; 4323 /* fall-through */ 4324 } 4325 if (code == (BPF_JMP | BPF_CALL) || 4326 code == (BPF_JMP | BPF_CALL_ARGS)) { 4327 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4328 insns[i].code = BPF_JMP | BPF_CALL; 4329 if (!bpf_dump_raw_ok(f_cred)) 4330 insns[i].imm = 0; 4331 continue; 4332 } 4333 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4334 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4335 continue; 4336 } 4337 4338 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4339 continue; 4340 4341 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4342 map = bpf_map_from_imm(prog, imm, &off, &type); 4343 if (map) { 4344 insns[i].src_reg = type; 4345 insns[i].imm = map->id; 4346 insns[i + 1].imm = off; 4347 continue; 4348 } 4349 } 4350 4351 return insns; 4352 } 4353 4354 static int set_info_rec_size(struct bpf_prog_info *info) 4355 { 4356 /* 4357 * Ensure info.*_rec_size is the same as kernel expected 
size 4358 * 4359 * or 4360 * 4361 * Only allow zero *_rec_size if both _rec_size and _cnt are 4362 * zero. In this case, the kernel will set the expected 4363 * _rec_size back to the info. 4364 */ 4365 4366 if ((info->nr_func_info || info->func_info_rec_size) && 4367 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4368 return -EINVAL; 4369 4370 if ((info->nr_line_info || info->line_info_rec_size) && 4371 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4372 return -EINVAL; 4373 4374 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4375 info->jited_line_info_rec_size != sizeof(__u64)) 4376 return -EINVAL; 4377 4378 info->func_info_rec_size = sizeof(struct bpf_func_info); 4379 info->line_info_rec_size = sizeof(struct bpf_line_info); 4380 info->jited_line_info_rec_size = sizeof(__u64); 4381 4382 return 0; 4383 } 4384 4385 static int bpf_prog_get_info_by_fd(struct file *file, 4386 struct bpf_prog *prog, 4387 const union bpf_attr *attr, 4388 union bpf_attr __user *uattr) 4389 { 4390 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4391 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4392 struct bpf_prog_info info; 4393 u32 info_len = attr->info.info_len; 4394 struct bpf_prog_kstats stats; 4395 char __user *uinsns; 4396 u32 ulen; 4397 int err; 4398 4399 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4400 if (err) 4401 return err; 4402 info_len = min_t(u32, sizeof(info), info_len); 4403 4404 memset(&info, 0, sizeof(info)); 4405 if (copy_from_user(&info, uinfo, info_len)) 4406 return -EFAULT; 4407 4408 info.type = prog->type; 4409 info.id = prog->aux->id; 4410 info.load_time = prog->aux->load_time; 4411 info.created_by_uid = from_kuid_munged(current_user_ns(), 4412 prog->aux->user->uid); 4413 info.gpl_compatible = prog->gpl_compatible; 4414 4415 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4416 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4417 4418 mutex_lock(&prog->aux->used_maps_mutex); 4419 ulen = info.nr_map_ids; 4420 info.nr_map_ids = prog->aux->used_map_cnt; 4421 ulen = min_t(u32, info.nr_map_ids, ulen); 4422 if (ulen) { 4423 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4424 u32 i; 4425 4426 for (i = 0; i < ulen; i++) 4427 if (put_user(prog->aux->used_maps[i]->id, 4428 &user_map_ids[i])) { 4429 mutex_unlock(&prog->aux->used_maps_mutex); 4430 return -EFAULT; 4431 } 4432 } 4433 mutex_unlock(&prog->aux->used_maps_mutex); 4434 4435 err = set_info_rec_size(&info); 4436 if (err) 4437 return err; 4438 4439 bpf_prog_get_stats(prog, &stats); 4440 info.run_time_ns = stats.nsecs; 4441 info.run_cnt = stats.cnt; 4442 info.recursion_misses = stats.misses; 4443 4444 info.verified_insns = prog->aux->verified_insns; 4445 4446 if (!bpf_capable()) { 4447 info.jited_prog_len = 0; 4448 info.xlated_prog_len = 0; 4449 info.nr_jited_ksyms = 0; 4450 info.nr_jited_func_lens = 0; 4451 info.nr_func_info = 0; 4452 info.nr_line_info = 0; 4453 info.nr_jited_line_info = 0; 4454 goto done; 4455 } 4456 4457 ulen = info.xlated_prog_len; 4458 info.xlated_prog_len = bpf_prog_insn_size(prog); 4459 if (info.xlated_prog_len && ulen) { 4460 struct bpf_insn *insns_sanitized; 4461 bool fault; 4462 4463 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4464 info.xlated_prog_insns = 0; 4465 goto done; 4466 } 4467 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4468 if (!insns_sanitized) 4469 return -ENOMEM; 4470 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4471 ulen = min_t(u32, 
info.xlated_prog_len, ulen); 4472 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4473 kfree(insns_sanitized); 4474 if (fault) 4475 return -EFAULT; 4476 } 4477 4478 if (bpf_prog_is_offloaded(prog->aux)) { 4479 err = bpf_prog_offload_info_fill(&info, prog); 4480 if (err) 4481 return err; 4482 goto done; 4483 } 4484 4485 /* NOTE: the following code is supposed to be skipped for offload. 4486 * bpf_prog_offload_info_fill() is the place to fill similar fields 4487 * for offload. 4488 */ 4489 ulen = info.jited_prog_len; 4490 if (prog->aux->func_cnt) { 4491 u32 i; 4492 4493 info.jited_prog_len = 0; 4494 for (i = 0; i < prog->aux->func_cnt; i++) 4495 info.jited_prog_len += prog->aux->func[i]->jited_len; 4496 } else { 4497 info.jited_prog_len = prog->jited_len; 4498 } 4499 4500 if (info.jited_prog_len && ulen) { 4501 if (bpf_dump_raw_ok(file->f_cred)) { 4502 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4503 ulen = min_t(u32, info.jited_prog_len, ulen); 4504 4505 /* for multi-function programs, copy the JITed 4506 * instructions for all the functions 4507 */ 4508 if (prog->aux->func_cnt) { 4509 u32 len, free, i; 4510 u8 *img; 4511 4512 free = ulen; 4513 for (i = 0; i < prog->aux->func_cnt; i++) { 4514 len = prog->aux->func[i]->jited_len; 4515 len = min_t(u32, len, free); 4516 img = (u8 *) prog->aux->func[i]->bpf_func; 4517 if (copy_to_user(uinsns, img, len)) 4518 return -EFAULT; 4519 uinsns += len; 4520 free -= len; 4521 if (!free) 4522 break; 4523 } 4524 } else { 4525 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4526 return -EFAULT; 4527 } 4528 } else { 4529 info.jited_prog_insns = 0; 4530 } 4531 } 4532 4533 ulen = info.nr_jited_ksyms; 4534 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4535 if (ulen) { 4536 if (bpf_dump_raw_ok(file->f_cred)) { 4537 unsigned long ksym_addr; 4538 u64 __user *user_ksyms; 4539 u32 i; 4540 4541 /* copy the address of the kernel symbol 4542 * corresponding to each function 4543 */ 4544 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4545 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4546 if (prog->aux->func_cnt) { 4547 for (i = 0; i < ulen; i++) { 4548 ksym_addr = (unsigned long) 4549 prog->aux->func[i]->bpf_func; 4550 if (put_user((u64) ksym_addr, 4551 &user_ksyms[i])) 4552 return -EFAULT; 4553 } 4554 } else { 4555 ksym_addr = (unsigned long) prog->bpf_func; 4556 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4557 return -EFAULT; 4558 } 4559 } else { 4560 info.jited_ksyms = 0; 4561 } 4562 } 4563 4564 ulen = info.nr_jited_func_lens; 4565 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4566 if (ulen) { 4567 if (bpf_dump_raw_ok(file->f_cred)) { 4568 u32 __user *user_lens; 4569 u32 func_len, i; 4570 4571 /* copy the JITed image lengths for each function */ 4572 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4573 user_lens = u64_to_user_ptr(info.jited_func_lens); 4574 if (prog->aux->func_cnt) { 4575 for (i = 0; i < ulen; i++) { 4576 func_len = 4577 prog->aux->func[i]->jited_len; 4578 if (put_user(func_len, &user_lens[i])) 4579 return -EFAULT; 4580 } 4581 } else { 4582 func_len = prog->jited_len; 4583 if (put_user(func_len, &user_lens[0])) 4584 return -EFAULT; 4585 } 4586 } else { 4587 info.jited_func_lens = 0; 4588 } 4589 } 4590 4591 if (prog->aux->btf) 4592 info.btf_id = btf_obj_id(prog->aux->btf); 4593 info.attach_btf_id = prog->aux->attach_btf_id; 4594 if (attach_btf) 4595 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4596 4597 ulen = info.nr_func_info; 4598 info.nr_func_info = prog->aux->func_info_cnt; 4599 if (info.nr_func_info && ulen) { 4600 char __user *user_finfo; 4601 4602 user_finfo = u64_to_user_ptr(info.func_info); 4603 ulen = min_t(u32, info.nr_func_info, ulen); 4604 if (copy_to_user(user_finfo, prog->aux->func_info, 4605 info.func_info_rec_size * ulen)) 4606 return -EFAULT; 4607 } 4608 4609 ulen = info.nr_line_info; 4610 info.nr_line_info = prog->aux->nr_linfo; 4611 if (info.nr_line_info && ulen) { 4612 __u8 __user *user_linfo; 4613 4614 user_linfo = u64_to_user_ptr(info.line_info); 4615 ulen = min_t(u32, info.nr_line_info, ulen); 4616 if (copy_to_user(user_linfo, prog->aux->linfo, 4617 info.line_info_rec_size * ulen)) 4618 return -EFAULT; 4619 } 4620 4621 ulen = info.nr_jited_line_info; 4622 if (prog->aux->jited_linfo) 4623 info.nr_jited_line_info = prog->aux->nr_linfo; 4624 else 4625 info.nr_jited_line_info = 0; 4626 if (info.nr_jited_line_info && ulen) { 4627 if (bpf_dump_raw_ok(file->f_cred)) { 4628 unsigned long line_addr; 4629 __u64 __user *user_linfo; 4630 u32 i; 4631 4632 user_linfo = u64_to_user_ptr(info.jited_line_info); 4633 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4634 for (i = 0; i < ulen; i++) { 4635 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4636 if (put_user((__u64)line_addr, &user_linfo[i])) 4637 return -EFAULT; 4638 } 4639 } else { 4640 info.jited_line_info = 0; 4641 } 4642 } 4643 4644 ulen = info.nr_prog_tags; 4645 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4646 if (ulen) { 4647 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4648 u32 i; 4649 4650 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4651 ulen = min_t(u32, info.nr_prog_tags, ulen); 4652 if (prog->aux->func_cnt) { 4653 for (i = 0; i < ulen; i++) { 4654 if (copy_to_user(user_prog_tags[i], 4655 prog->aux->func[i]->tag, 4656 BPF_TAG_SIZE)) 4657 return -EFAULT; 4658 } 4659 } else { 4660 if (copy_to_user(user_prog_tags[0], 4661 prog->tag, BPF_TAG_SIZE)) 4662 return -EFAULT; 4663 } 4664 } 4665 4666 done: 4667 if (copy_to_user(uinfo, &info, info_len) || 4668 put_user(info_len, &uattr->info.info_len)) 4669 return -EFAULT; 4670 4671 return 0; 4672 } 4673 4674 static int bpf_map_get_info_by_fd(struct file *file, 4675 struct bpf_map *map, 4676 const union bpf_attr *attr, 4677 union bpf_attr __user *uattr) 4678 { 4679 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4680 struct bpf_map_info info; 4681 u32 info_len = attr->info.info_len; 4682 int err; 4683 4684 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4685 if (err) 4686 return err; 4687 info_len = min_t(u32, sizeof(info), info_len); 4688 4689 memset(&info, 0, sizeof(info)); 4690 info.type = map->map_type; 4691 info.id = map->id; 4692 info.key_size = map->key_size; 4693 info.value_size = map->value_size; 4694 info.max_entries = map->max_entries; 4695 info.map_flags = map->map_flags; 4696 info.map_extra = map->map_extra; 4697 memcpy(info.name, map->name, sizeof(map->name)); 4698 4699 if (map->btf) { 4700 info.btf_id = btf_obj_id(map->btf); 4701 info.btf_key_type_id = map->btf_key_type_id; 4702 info.btf_value_type_id = map->btf_value_type_id; 4703 } 4704 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4705 4706 if (bpf_map_is_offloaded(map)) { 4707 err = bpf_map_offload_info_fill(&info, map); 4708 if (err) 4709 return err; 4710 } 4711 4712 if (copy_to_user(uinfo, &info, info_len) || 4713 put_user(info_len, &uattr->info.info_len)) 4714 return -EFAULT; 4715 4716 return 0; 4717 } 4718 4719 static int bpf_btf_get_info_by_fd(struct file *file, 4720 struct btf *btf, 4721 const union bpf_attr *attr, 4722 union bpf_attr __user *uattr) 4723 { 4724 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4725 u32 info_len = attr->info.info_len; 4726 int err; 4727 4728 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4729 if (err) 4730 return err; 4731 4732 return btf_get_info_by_fd(btf, attr, uattr); 4733 } 4734 4735 static int bpf_link_get_info_by_fd(struct file *file, 4736 struct bpf_link *link, 4737 const union bpf_attr *attr, 4738 union bpf_attr __user *uattr) 4739 { 4740 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4741 struct bpf_link_info info; 4742 u32 info_len = attr->info.info_len; 4743 int err; 4744 4745 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4746 if (err) 4747 return err; 4748 info_len = min_t(u32, sizeof(info), info_len); 4749 4750 memset(&info, 0, sizeof(info)); 4751 if (copy_from_user(&info, uinfo, info_len)) 4752 return -EFAULT; 4753 4754 info.type = link->type; 4755 info.id = link->id; 4756 if (link->prog) 4757 info.prog_id = link->prog->aux->id; 4758 4759 if (link->ops->fill_link_info) { 4760 err = link->ops->fill_link_info(link, &info); 4761 if (err) 4762 return err; 4763 } 4764 4765 if (copy_to_user(uinfo, &info, info_len) || 4766 put_user(info_len, &uattr->info.info_len)) 4767 return -EFAULT; 4768 4769 return 0; 4770 } 4771 4772 4773 #define 
BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 4774 4775 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 4776 union bpf_attr __user *uattr) 4777 { 4778 int ufd = attr->info.bpf_fd; 4779 struct fd f; 4780 int err; 4781 4782 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 4783 return -EINVAL; 4784 4785 f = fdget(ufd); 4786 if (!f.file) 4787 return -EBADFD; 4788 4789 if (f.file->f_op == &bpf_prog_fops) 4790 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 4791 uattr); 4792 else if (f.file->f_op == &bpf_map_fops) 4793 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 4794 uattr); 4795 else if (f.file->f_op == &btf_fops) 4796 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 4797 else if (f.file->f_op == &bpf_link_fops) 4798 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 4799 attr, uattr); 4800 else 4801 err = -EINVAL; 4802 4803 fdput(f); 4804 return err; 4805 } 4806 4807 #define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size 4808 4809 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 4810 { 4811 if (CHECK_ATTR(BPF_BTF_LOAD)) 4812 return -EINVAL; 4813 4814 if (!bpf_capable()) 4815 return -EPERM; 4816 4817 return btf_new_fd(attr, uattr, uattr_size); 4818 } 4819 4820 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 4821 4822 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 4823 { 4824 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 4825 return -EINVAL; 4826 4827 if (!capable(CAP_SYS_ADMIN)) 4828 return -EPERM; 4829 4830 return btf_get_fd_by_id(attr->btf_id); 4831 } 4832 4833 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 4834 union bpf_attr __user *uattr, 4835 u32 prog_id, u32 fd_type, 4836 const char *buf, u64 probe_offset, 4837 u64 probe_addr) 4838 { 4839 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 4840 u32 len = buf ? strlen(buf) : 0, input_len; 4841 int err = 0; 4842 4843 if (put_user(len, &uattr->task_fd_query.buf_len)) 4844 return -EFAULT; 4845 input_len = attr->task_fd_query.buf_len; 4846 if (input_len && ubuf) { 4847 if (!len) { 4848 /* nothing to copy, just make ubuf NULL terminated */ 4849 char zero = '\0'; 4850 4851 if (put_user(zero, ubuf)) 4852 return -EFAULT; 4853 } else if (input_len >= len + 1) { 4854 /* ubuf can hold the string with NULL terminator */ 4855 if (copy_to_user(ubuf, buf, len + 1)) 4856 return -EFAULT; 4857 } else { 4858 /* ubuf cannot hold the string with NULL terminator, 4859 * do a partial copy with NULL terminator. 
4860 */ 4861 char zero = '\0'; 4862 4863 err = -ENOSPC; 4864 if (copy_to_user(ubuf, buf, input_len - 1)) 4865 return -EFAULT; 4866 if (put_user(zero, ubuf + input_len - 1)) 4867 return -EFAULT; 4868 } 4869 } 4870 4871 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 4872 put_user(fd_type, &uattr->task_fd_query.fd_type) || 4873 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 4874 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 4875 return -EFAULT; 4876 4877 return err; 4878 } 4879 4880 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 4881 4882 static int bpf_task_fd_query(const union bpf_attr *attr, 4883 union bpf_attr __user *uattr) 4884 { 4885 pid_t pid = attr->task_fd_query.pid; 4886 u32 fd = attr->task_fd_query.fd; 4887 const struct perf_event *event; 4888 struct task_struct *task; 4889 struct file *file; 4890 int err; 4891 4892 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 4893 return -EINVAL; 4894 4895 if (!capable(CAP_SYS_ADMIN)) 4896 return -EPERM; 4897 4898 if (attr->task_fd_query.flags != 0) 4899 return -EINVAL; 4900 4901 rcu_read_lock(); 4902 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 4903 rcu_read_unlock(); 4904 if (!task) 4905 return -ENOENT; 4906 4907 err = 0; 4908 file = fget_task(task, fd); 4909 put_task_struct(task); 4910 if (!file) 4911 return -EBADF; 4912 4913 if (file->f_op == &bpf_link_fops) { 4914 struct bpf_link *link = file->private_data; 4915 4916 if (link->ops == &bpf_raw_tp_link_lops) { 4917 struct bpf_raw_tp_link *raw_tp = 4918 container_of(link, struct bpf_raw_tp_link, link); 4919 struct bpf_raw_event_map *btp = raw_tp->btp; 4920 4921 err = bpf_task_fd_query_copy(attr, uattr, 4922 raw_tp->link.prog->aux->id, 4923 BPF_FD_TYPE_RAW_TRACEPOINT, 4924 btp->tp->name, 0, 0); 4925 goto put_file; 4926 } 4927 goto out_not_supp; 4928 } 4929 4930 event = perf_get_event(file); 4931 if (!IS_ERR(event)) { 4932 u64 probe_offset, probe_addr; 4933 u32 prog_id, fd_type; 4934 const char *buf; 4935 4936 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 4937 &buf, &probe_offset, 4938 &probe_addr, NULL); 4939 if (!err) 4940 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 4941 fd_type, buf, 4942 probe_offset, 4943 probe_addr); 4944 goto put_file; 4945 } 4946 4947 out_not_supp: 4948 err = -ENOTSUPP; 4949 put_file: 4950 fput(file); 4951 return err; 4952 } 4953 4954 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 4955 4956 #define BPF_DO_BATCH(fn, ...) 
\ 4957 do { \ 4958 if (!fn) { \ 4959 err = -ENOTSUPP; \ 4960 goto err_put; \ 4961 } \ 4962 err = fn(__VA_ARGS__); \ 4963 } while (0) 4964 4965 static int bpf_map_do_batch(const union bpf_attr *attr, 4966 union bpf_attr __user *uattr, 4967 int cmd) 4968 { 4969 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 4970 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 4971 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 4972 struct bpf_map *map; 4973 int err, ufd; 4974 struct fd f; 4975 4976 if (CHECK_ATTR(BPF_MAP_BATCH)) 4977 return -EINVAL; 4978 4979 ufd = attr->batch.map_fd; 4980 f = fdget(ufd); 4981 map = __bpf_map_get(f); 4982 if (IS_ERR(map)) 4983 return PTR_ERR(map); 4984 if (has_write) 4985 bpf_map_write_active_inc(map); 4986 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 4987 err = -EPERM; 4988 goto err_put; 4989 } 4990 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 4991 err = -EPERM; 4992 goto err_put; 4993 } 4994 4995 if (cmd == BPF_MAP_LOOKUP_BATCH) 4996 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 4997 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 4998 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 4999 else if (cmd == BPF_MAP_UPDATE_BATCH) 5000 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr); 5001 else 5002 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5003 err_put: 5004 if (has_write) 5005 bpf_map_write_active_dec(map); 5006 fdput(f); 5007 return err; 5008 } 5009 5010 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5011 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5012 { 5013 struct bpf_prog *prog; 5014 int ret; 5015 5016 if (CHECK_ATTR(BPF_LINK_CREATE)) 5017 return -EINVAL; 5018 5019 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5020 return bpf_struct_ops_link_create(attr); 5021 5022 prog = bpf_prog_get(attr->link_create.prog_fd); 5023 if (IS_ERR(prog)) 5024 return PTR_ERR(prog); 5025 5026 ret = bpf_prog_attach_check_attach_type(prog, 5027 attr->link_create.attach_type); 5028 if (ret) 5029 goto out; 5030 5031 switch (prog->type) { 5032 case BPF_PROG_TYPE_CGROUP_SKB: 5033 case BPF_PROG_TYPE_CGROUP_SOCK: 5034 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5035 case BPF_PROG_TYPE_SOCK_OPS: 5036 case BPF_PROG_TYPE_CGROUP_DEVICE: 5037 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5038 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5039 ret = cgroup_bpf_link_attach(attr, prog); 5040 break; 5041 case BPF_PROG_TYPE_EXT: 5042 ret = bpf_tracing_prog_attach(prog, 5043 attr->link_create.target_fd, 5044 attr->link_create.target_btf_id, 5045 attr->link_create.tracing.cookie); 5046 break; 5047 case BPF_PROG_TYPE_LSM: 5048 case BPF_PROG_TYPE_TRACING: 5049 if (attr->link_create.attach_type != prog->expected_attach_type) { 5050 ret = -EINVAL; 5051 goto out; 5052 } 5053 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5054 ret = bpf_raw_tp_link_attach(prog, NULL); 5055 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5056 ret = bpf_iter_link_attach(attr, uattr, prog); 5057 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5058 ret = cgroup_bpf_link_attach(attr, prog); 5059 else 5060 ret = bpf_tracing_prog_attach(prog, 5061 attr->link_create.target_fd, 5062 attr->link_create.target_btf_id, 5063 attr->link_create.tracing.cookie); 5064 break; 5065 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5066 case BPF_PROG_TYPE_SK_LOOKUP: 5067 ret = netns_bpf_link_create(attr, prog); 5068 break; 5069 #ifdef CONFIG_NET 5070 case BPF_PROG_TYPE_XDP: 5071 ret = bpf_xdp_link_attach(attr, prog); 5072 break; 5073 
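	/* Illustrative note (not part of the kernel sources): SCHED_CLS
	 * programs are attached here as tcx links.  A rough user-space
	 * sketch, assuming an already loaded prog_fd and a target netdev:
	 *
	 *	attr.link_create.prog_fd        = prog_fd;
	 *	attr.link_create.target_ifindex = ifindex;
	 *	attr.link_create.attach_type    = BPF_TCX_INGRESS; // or _EGRESS
	 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
	 */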
case BPF_PROG_TYPE_SCHED_CLS: 5074 ret = tcx_link_attach(attr, prog); 5075 break; 5076 case BPF_PROG_TYPE_NETFILTER: 5077 ret = bpf_nf_link_attach(attr, prog); 5078 break; 5079 #endif 5080 case BPF_PROG_TYPE_PERF_EVENT: 5081 case BPF_PROG_TYPE_TRACEPOINT: 5082 ret = bpf_perf_link_attach(attr, prog); 5083 break; 5084 case BPF_PROG_TYPE_KPROBE: 5085 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5086 ret = bpf_perf_link_attach(attr, prog); 5087 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) 5088 ret = bpf_kprobe_multi_link_attach(attr, prog); 5089 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) 5090 ret = bpf_uprobe_multi_link_attach(attr, prog); 5091 break; 5092 default: 5093 ret = -EINVAL; 5094 } 5095 5096 out: 5097 if (ret < 0) 5098 bpf_prog_put(prog); 5099 return ret; 5100 } 5101 5102 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5103 { 5104 struct bpf_map *new_map, *old_map = NULL; 5105 int ret; 5106 5107 new_map = bpf_map_get(attr->link_update.new_map_fd); 5108 if (IS_ERR(new_map)) 5109 return PTR_ERR(new_map); 5110 5111 if (attr->link_update.flags & BPF_F_REPLACE) { 5112 old_map = bpf_map_get(attr->link_update.old_map_fd); 5113 if (IS_ERR(old_map)) { 5114 ret = PTR_ERR(old_map); 5115 goto out_put; 5116 } 5117 } else if (attr->link_update.old_map_fd) { 5118 ret = -EINVAL; 5119 goto out_put; 5120 } 5121 5122 ret = link->ops->update_map(link, new_map, old_map); 5123 5124 if (old_map) 5125 bpf_map_put(old_map); 5126 out_put: 5127 bpf_map_put(new_map); 5128 return ret; 5129 } 5130 5131 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5132 5133 static int link_update(union bpf_attr *attr) 5134 { 5135 struct bpf_prog *old_prog = NULL, *new_prog; 5136 struct bpf_link *link; 5137 u32 flags; 5138 int ret; 5139 5140 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5141 return -EINVAL; 5142 5143 flags = attr->link_update.flags; 5144 if (flags & ~BPF_F_REPLACE) 5145 return -EINVAL; 5146 5147 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5148 if (IS_ERR(link)) 5149 return PTR_ERR(link); 5150 5151 if (link->ops->update_map) { 5152 ret = link_update_map(link, attr); 5153 goto out_put_link; 5154 } 5155 5156 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5157 if (IS_ERR(new_prog)) { 5158 ret = PTR_ERR(new_prog); 5159 goto out_put_link; 5160 } 5161 5162 if (flags & BPF_F_REPLACE) { 5163 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5164 if (IS_ERR(old_prog)) { 5165 ret = PTR_ERR(old_prog); 5166 old_prog = NULL; 5167 goto out_put_progs; 5168 } 5169 } else if (attr->link_update.old_prog_fd) { 5170 ret = -EINVAL; 5171 goto out_put_progs; 5172 } 5173 5174 if (link->ops->update_prog) 5175 ret = link->ops->update_prog(link, new_prog, old_prog); 5176 else 5177 ret = -EINVAL; 5178 5179 out_put_progs: 5180 if (old_prog) 5181 bpf_prog_put(old_prog); 5182 if (ret) 5183 bpf_prog_put(new_prog); 5184 out_put_link: 5185 bpf_link_put_direct(link); 5186 return ret; 5187 } 5188 5189 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5190 5191 static int link_detach(union bpf_attr *attr) 5192 { 5193 struct bpf_link *link; 5194 int ret; 5195 5196 if (CHECK_ATTR(BPF_LINK_DETACH)) 5197 return -EINVAL; 5198 5199 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5200 if (IS_ERR(link)) 5201 return PTR_ERR(link); 5202 5203 if (link->ops->detach) 5204 ret = link->ops->detach(link); 5205 else 5206 ret = -EOPNOTSUPP; 5207 5208 bpf_link_put_direct(link); 5209 return ret; 5210 } 5211 5212 static struct bpf_link 
*bpf_link_inc_not_zero(struct bpf_link *link) 5213 { 5214 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5215 } 5216 5217 struct bpf_link *bpf_link_by_id(u32 id) 5218 { 5219 struct bpf_link *link; 5220 5221 if (!id) 5222 return ERR_PTR(-ENOENT); 5223 5224 spin_lock_bh(&link_idr_lock); 5225 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5226 link = idr_find(&link_idr, id); 5227 if (link) { 5228 if (link->id) 5229 link = bpf_link_inc_not_zero(link); 5230 else 5231 link = ERR_PTR(-EAGAIN); 5232 } else { 5233 link = ERR_PTR(-ENOENT); 5234 } 5235 spin_unlock_bh(&link_idr_lock); 5236 return link; 5237 } 5238 5239 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5240 { 5241 struct bpf_link *link; 5242 5243 spin_lock_bh(&link_idr_lock); 5244 again: 5245 link = idr_get_next(&link_idr, id); 5246 if (link) { 5247 link = bpf_link_inc_not_zero(link); 5248 if (IS_ERR(link)) { 5249 (*id)++; 5250 goto again; 5251 } 5252 } 5253 spin_unlock_bh(&link_idr_lock); 5254 5255 return link; 5256 } 5257 5258 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5259 5260 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5261 { 5262 struct bpf_link *link; 5263 u32 id = attr->link_id; 5264 int fd; 5265 5266 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5267 return -EINVAL; 5268 5269 if (!capable(CAP_SYS_ADMIN)) 5270 return -EPERM; 5271 5272 link = bpf_link_by_id(id); 5273 if (IS_ERR(link)) 5274 return PTR_ERR(link); 5275 5276 fd = bpf_link_new_fd(link); 5277 if (fd < 0) 5278 bpf_link_put_direct(link); 5279 5280 return fd; 5281 } 5282 5283 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5284 5285 static int bpf_stats_release(struct inode *inode, struct file *file) 5286 { 5287 mutex_lock(&bpf_stats_enabled_mutex); 5288 static_key_slow_dec(&bpf_stats_enabled_key.key); 5289 mutex_unlock(&bpf_stats_enabled_mutex); 5290 return 0; 5291 } 5292 5293 static const struct file_operations bpf_stats_fops = { 5294 .release = bpf_stats_release, 5295 }; 5296 5297 static int bpf_enable_runtime_stats(void) 5298 { 5299 int fd; 5300 5301 mutex_lock(&bpf_stats_enabled_mutex); 5302 5303 /* Set a very high limit to avoid overflow */ 5304 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5305 mutex_unlock(&bpf_stats_enabled_mutex); 5306 return -EBUSY; 5307 } 5308 5309 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5310 if (fd >= 0) 5311 static_key_slow_inc(&bpf_stats_enabled_key.key); 5312 5313 mutex_unlock(&bpf_stats_enabled_mutex); 5314 return fd; 5315 } 5316 5317 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5318 5319 static int bpf_enable_stats(union bpf_attr *attr) 5320 { 5321 5322 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5323 return -EINVAL; 5324 5325 if (!capable(CAP_SYS_ADMIN)) 5326 return -EPERM; 5327 5328 switch (attr->enable_stats.type) { 5329 case BPF_STATS_RUN_TIME: 5330 return bpf_enable_runtime_stats(); 5331 default: 5332 break; 5333 } 5334 return -EINVAL; 5335 } 5336 5337 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5338 5339 static int bpf_iter_create(union bpf_attr *attr) 5340 { 5341 struct bpf_link *link; 5342 int err; 5343 5344 if (CHECK_ATTR(BPF_ITER_CREATE)) 5345 return -EINVAL; 5346 5347 if (attr->iter_create.flags) 5348 return -EINVAL; 5349 5350 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5351 if (IS_ERR(link)) 5352 return PTR_ERR(link); 5353 5354 err = bpf_iter_new_fd(link); 5355 bpf_link_put_direct(link); 5356 5357 return err; 5358 } 5359 5360 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5361 5362 
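/* Illustrative note (not part of the kernel sources): BPF_PROG_BIND_MAP ties
 * a map's lifetime to a program that does not otherwise reference it, e.g. a
 * metadata map that is only reachable via BTF.  A rough user-space sketch:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_bind_map.prog_fd = prog_fd;
 *	attr.prog_bind_map.map_fd  = map_fd;
 *	attr.prog_bind_map.flags   = 0;	// no flags are defined
 *	err = syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
 *
 * On success the map is added to prog->aux->used_maps and shows up in the
 * program's BPF_OBJ_GET_INFO_BY_FD map_ids; the reference taken here is only
 * dropped when the program itself is freed.
 */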
static int bpf_prog_bind_map(union bpf_attr *attr) 5363 { 5364 struct bpf_prog *prog; 5365 struct bpf_map *map; 5366 struct bpf_map **used_maps_old, **used_maps_new; 5367 int i, ret = 0; 5368 5369 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5370 return -EINVAL; 5371 5372 if (attr->prog_bind_map.flags) 5373 return -EINVAL; 5374 5375 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5376 if (IS_ERR(prog)) 5377 return PTR_ERR(prog); 5378 5379 map = bpf_map_get(attr->prog_bind_map.map_fd); 5380 if (IS_ERR(map)) { 5381 ret = PTR_ERR(map); 5382 goto out_prog_put; 5383 } 5384 5385 mutex_lock(&prog->aux->used_maps_mutex); 5386 5387 used_maps_old = prog->aux->used_maps; 5388 5389 for (i = 0; i < prog->aux->used_map_cnt; i++) 5390 if (used_maps_old[i] == map) { 5391 bpf_map_put(map); 5392 goto out_unlock; 5393 } 5394 5395 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5396 sizeof(used_maps_new[0]), 5397 GFP_KERNEL); 5398 if (!used_maps_new) { 5399 ret = -ENOMEM; 5400 goto out_unlock; 5401 } 5402 5403 /* The bpf program will not access the bpf map, but for the sake of 5404 * simplicity, increase sleepable_refcnt for sleepable program as well. 5405 */ 5406 if (prog->aux->sleepable) 5407 atomic64_inc(&map->sleepable_refcnt); 5408 memcpy(used_maps_new, used_maps_old, 5409 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5410 used_maps_new[prog->aux->used_map_cnt] = map; 5411 5412 prog->aux->used_map_cnt++; 5413 prog->aux->used_maps = used_maps_new; 5414 5415 kfree(used_maps_old); 5416 5417 out_unlock: 5418 mutex_unlock(&prog->aux->used_maps_mutex); 5419 5420 if (ret) 5421 bpf_map_put(map); 5422 out_prog_put: 5423 bpf_prog_put(prog); 5424 return ret; 5425 } 5426 5427 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) 5428 { 5429 union bpf_attr attr; 5430 int err; 5431 5432 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5433 if (err) 5434 return err; 5435 size = min_t(u32, size, sizeof(attr)); 5436 5437 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5438 memset(&attr, 0, sizeof(attr)); 5439 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5440 return -EFAULT; 5441 5442 err = security_bpf(cmd, &attr, size); 5443 if (err < 0) 5444 return err; 5445 5446 switch (cmd) { 5447 case BPF_MAP_CREATE: 5448 err = map_create(&attr); 5449 break; 5450 case BPF_MAP_LOOKUP_ELEM: 5451 err = map_lookup_elem(&attr); 5452 break; 5453 case BPF_MAP_UPDATE_ELEM: 5454 err = map_update_elem(&attr, uattr); 5455 break; 5456 case BPF_MAP_DELETE_ELEM: 5457 err = map_delete_elem(&attr, uattr); 5458 break; 5459 case BPF_MAP_GET_NEXT_KEY: 5460 err = map_get_next_key(&attr); 5461 break; 5462 case BPF_MAP_FREEZE: 5463 err = map_freeze(&attr); 5464 break; 5465 case BPF_PROG_LOAD: 5466 err = bpf_prog_load(&attr, uattr, size); 5467 break; 5468 case BPF_OBJ_PIN: 5469 err = bpf_obj_pin(&attr); 5470 break; 5471 case BPF_OBJ_GET: 5472 err = bpf_obj_get(&attr); 5473 break; 5474 case BPF_PROG_ATTACH: 5475 err = bpf_prog_attach(&attr); 5476 break; 5477 case BPF_PROG_DETACH: 5478 err = bpf_prog_detach(&attr); 5479 break; 5480 case BPF_PROG_QUERY: 5481 err = bpf_prog_query(&attr, uattr.user); 5482 break; 5483 case BPF_PROG_TEST_RUN: 5484 err = bpf_prog_test_run(&attr, uattr.user); 5485 break; 5486 case BPF_PROG_GET_NEXT_ID: 5487 err = bpf_obj_get_next_id(&attr, uattr.user, 5488 &prog_idr, &prog_idr_lock); 5489 break; 5490 case BPF_MAP_GET_NEXT_ID: 5491 err = bpf_obj_get_next_id(&attr, uattr.user, 5492 &map_idr, &map_idr_lock); 5493 break; 5494 case BPF_BTF_GET_NEXT_ID: 5495 err = 
bpf_obj_get_next_id(&attr, uattr.user, 5496 &btf_idr, &btf_idr_lock); 5497 break; 5498 case BPF_PROG_GET_FD_BY_ID: 5499 err = bpf_prog_get_fd_by_id(&attr); 5500 break; 5501 case BPF_MAP_GET_FD_BY_ID: 5502 err = bpf_map_get_fd_by_id(&attr); 5503 break; 5504 case BPF_OBJ_GET_INFO_BY_FD: 5505 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5506 break; 5507 case BPF_RAW_TRACEPOINT_OPEN: 5508 err = bpf_raw_tracepoint_open(&attr); 5509 break; 5510 case BPF_BTF_LOAD: 5511 err = bpf_btf_load(&attr, uattr, size); 5512 break; 5513 case BPF_BTF_GET_FD_BY_ID: 5514 err = bpf_btf_get_fd_by_id(&attr); 5515 break; 5516 case BPF_TASK_FD_QUERY: 5517 err = bpf_task_fd_query(&attr, uattr.user); 5518 break; 5519 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5520 err = map_lookup_and_delete_elem(&attr); 5521 break; 5522 case BPF_MAP_LOOKUP_BATCH: 5523 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5524 break; 5525 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5526 err = bpf_map_do_batch(&attr, uattr.user, 5527 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5528 break; 5529 case BPF_MAP_UPDATE_BATCH: 5530 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5531 break; 5532 case BPF_MAP_DELETE_BATCH: 5533 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5534 break; 5535 case BPF_LINK_CREATE: 5536 err = link_create(&attr, uattr); 5537 break; 5538 case BPF_LINK_UPDATE: 5539 err = link_update(&attr); 5540 break; 5541 case BPF_LINK_GET_FD_BY_ID: 5542 err = bpf_link_get_fd_by_id(&attr); 5543 break; 5544 case BPF_LINK_GET_NEXT_ID: 5545 err = bpf_obj_get_next_id(&attr, uattr.user, 5546 &link_idr, &link_idr_lock); 5547 break; 5548 case BPF_ENABLE_STATS: 5549 err = bpf_enable_stats(&attr); 5550 break; 5551 case BPF_ITER_CREATE: 5552 err = bpf_iter_create(&attr); 5553 break; 5554 case BPF_LINK_DETACH: 5555 err = link_detach(&attr); 5556 break; 5557 case BPF_PROG_BIND_MAP: 5558 err = bpf_prog_bind_map(&attr); 5559 break; 5560 default: 5561 err = -EINVAL; 5562 break; 5563 } 5564 5565 return err; 5566 } 5567 5568 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5569 { 5570 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5571 } 5572 5573 static bool syscall_prog_is_valid_access(int off, int size, 5574 enum bpf_access_type type, 5575 const struct bpf_prog *prog, 5576 struct bpf_insn_access_aux *info) 5577 { 5578 if (off < 0 || off >= U16_MAX) 5579 return false; 5580 if (off % size != 0) 5581 return false; 5582 return true; 5583 } 5584 5585 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5586 { 5587 switch (cmd) { 5588 case BPF_MAP_CREATE: 5589 case BPF_MAP_DELETE_ELEM: 5590 case BPF_MAP_UPDATE_ELEM: 5591 case BPF_MAP_FREEZE: 5592 case BPF_MAP_GET_FD_BY_ID: 5593 case BPF_PROG_LOAD: 5594 case BPF_BTF_LOAD: 5595 case BPF_LINK_CREATE: 5596 case BPF_RAW_TRACEPOINT_OPEN: 5597 break; 5598 default: 5599 return -EINVAL; 5600 } 5601 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 5602 } 5603 5604 5605 /* To shut up -Wmissing-prototypes. 5606 * This function is used by the kernel light skeleton 5607 * to load bpf programs when modules are loaded or during kernel boot. 
5608 * See tools/lib/bpf/skel_internal.h 5609 */ 5610 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5611 5612 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5613 { 5614 struct bpf_prog * __maybe_unused prog; 5615 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5616 5617 switch (cmd) { 5618 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5619 case BPF_PROG_TEST_RUN: 5620 if (attr->test.data_in || attr->test.data_out || 5621 attr->test.ctx_out || attr->test.duration || 5622 attr->test.repeat || attr->test.flags) 5623 return -EINVAL; 5624 5625 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5626 if (IS_ERR(prog)) 5627 return PTR_ERR(prog); 5628 5629 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 5630 attr->test.ctx_size_in > U16_MAX) { 5631 bpf_prog_put(prog); 5632 return -EINVAL; 5633 } 5634 5635 run_ctx.bpf_cookie = 0; 5636 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 5637 /* recursion detected */ 5638 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 5639 bpf_prog_put(prog); 5640 return -EBUSY; 5641 } 5642 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 5643 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 5644 &run_ctx); 5645 bpf_prog_put(prog); 5646 return 0; 5647 #endif 5648 default: 5649 return ____bpf_sys_bpf(cmd, attr, size); 5650 } 5651 } 5652 EXPORT_SYMBOL(kern_sys_bpf); 5653 5654 static const struct bpf_func_proto bpf_sys_bpf_proto = { 5655 .func = bpf_sys_bpf, 5656 .gpl_only = false, 5657 .ret_type = RET_INTEGER, 5658 .arg1_type = ARG_ANYTHING, 5659 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5660 .arg3_type = ARG_CONST_SIZE, 5661 }; 5662 5663 const struct bpf_func_proto * __weak 5664 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5665 { 5666 return bpf_base_func_proto(func_id); 5667 } 5668 5669 BPF_CALL_1(bpf_sys_close, u32, fd) 5670 { 5671 /* When bpf program calls this helper there should not be 5672 * an fdget() without matching completed fdput(). 5673 * This helper is allowed in the following callchain only: 5674 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 5675 */ 5676 return close_fd(fd); 5677 } 5678 5679 static const struct bpf_func_proto bpf_sys_close_proto = { 5680 .func = bpf_sys_close, 5681 .gpl_only = false, 5682 .ret_type = RET_INTEGER, 5683 .arg1_type = ARG_ANYTHING, 5684 }; 5685 5686 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 5687 { 5688 *res = 0; 5689 if (flags) 5690 return -EINVAL; 5691 5692 if (name_sz <= 1 || name[name_sz - 1]) 5693 return -EINVAL; 5694 5695 if (!bpf_dump_raw_ok(current_cred())) 5696 return -EPERM; 5697 5698 *res = kallsyms_lookup_name(name); 5699 return *res ? 0 : -ENOENT; 5700 } 5701 5702 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 5703 .func = bpf_kallsyms_lookup_name, 5704 .gpl_only = false, 5705 .ret_type = RET_INTEGER, 5706 .arg1_type = ARG_PTR_TO_MEM, 5707 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 5708 .arg3_type = ARG_ANYTHING, 5709 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, 5710 .arg4_size = sizeof(u64), 5711 }; 5712 5713 static const struct bpf_func_proto * 5714 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5715 { 5716 switch (func_id) { 5717 case BPF_FUNC_sys_bpf: 5718 return !perfmon_capable() ? 
NULL : &bpf_sys_bpf_proto; 5719 case BPF_FUNC_btf_find_by_name_kind: 5720 return &bpf_btf_find_by_name_kind_proto; 5721 case BPF_FUNC_sys_close: 5722 return &bpf_sys_close_proto; 5723 case BPF_FUNC_kallsyms_lookup_name: 5724 return &bpf_kallsyms_lookup_name_proto; 5725 default: 5726 return tracing_prog_func_proto(func_id, prog); 5727 } 5728 } 5729 5730 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 5731 .get_func_proto = syscall_prog_func_proto, 5732 .is_valid_access = syscall_prog_is_valid_access, 5733 }; 5734 5735 const struct bpf_prog_ops bpf_syscall_prog_ops = { 5736 .test_run = bpf_prog_test_run_syscall, 5737 }; 5738 5739 #ifdef CONFIG_SYSCTL 5740 static int bpf_stats_handler(struct ctl_table *table, int write, 5741 void *buffer, size_t *lenp, loff_t *ppos) 5742 { 5743 struct static_key *key = (struct static_key *)table->data; 5744 static int saved_val; 5745 int val, ret; 5746 struct ctl_table tmp = { 5747 .data = &val, 5748 .maxlen = sizeof(val), 5749 .mode = table->mode, 5750 .extra1 = SYSCTL_ZERO, 5751 .extra2 = SYSCTL_ONE, 5752 }; 5753 5754 if (write && !capable(CAP_SYS_ADMIN)) 5755 return -EPERM; 5756 5757 mutex_lock(&bpf_stats_enabled_mutex); 5758 val = saved_val; 5759 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5760 if (write && !ret && val != saved_val) { 5761 if (val) 5762 static_key_slow_inc(key); 5763 else 5764 static_key_slow_dec(key); 5765 saved_val = val; 5766 } 5767 mutex_unlock(&bpf_stats_enabled_mutex); 5768 return ret; 5769 } 5770 5771 void __weak unpriv_ebpf_notify(int new_state) 5772 { 5773 } 5774 5775 static int bpf_unpriv_handler(struct ctl_table *table, int write, 5776 void *buffer, size_t *lenp, loff_t *ppos) 5777 { 5778 int ret, unpriv_enable = *(int *)table->data; 5779 bool locked_state = unpriv_enable == 1; 5780 struct ctl_table tmp = *table; 5781 5782 if (write && !capable(CAP_SYS_ADMIN)) 5783 return -EPERM; 5784 5785 tmp.data = &unpriv_enable; 5786 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5787 if (write && !ret) { 5788 if (locked_state && unpriv_enable != 1) 5789 return -EPERM; 5790 *(int *)table->data = unpriv_enable; 5791 } 5792 5793 if (write) 5794 unpriv_ebpf_notify(unpriv_enable); 5795 5796 return ret; 5797 } 5798 5799 static struct ctl_table bpf_syscall_table[] = { 5800 { 5801 .procname = "unprivileged_bpf_disabled", 5802 .data = &sysctl_unprivileged_bpf_disabled, 5803 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 5804 .mode = 0644, 5805 .proc_handler = bpf_unpriv_handler, 5806 .extra1 = SYSCTL_ZERO, 5807 .extra2 = SYSCTL_TWO, 5808 }, 5809 { 5810 .procname = "bpf_stats_enabled", 5811 .data = &bpf_stats_enabled_key.key, 5812 .mode = 0644, 5813 .proc_handler = bpf_stats_handler, 5814 }, 5815 { } 5816 }; 5817 5818 static int __init bpf_syscall_sysctl_init(void) 5819 { 5820 register_sysctl_init("kernel", bpf_syscall_table); 5821 return 0; 5822 } 5823 late_initcall(bpf_syscall_sysctl_init); 5824 #endif /* CONFIG_SYSCTL */ 5825
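/* Illustrative note (not part of the kernel sources): the two sysctls
 * registered above live under /proc/sys/kernel/.  A rough admin sketch:
 *
 *	# toggle run-time stats accounting for all BPF programs
 *	sysctl kernel.bpf_stats_enabled=1
 *
 *	# 0 = unprivileged bpf() allowed, 1 = disabled (one-way latch),
 *	# 2 = disabled but may be re-enabled by writing 0
 *	sysctl kernel.unprivileged_bpf_disabled=2
 */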