// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>
#include <net/netfilter/nf_bpf_link.h>

#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
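/* Illustrative caller pattern (a sketch, mirroring how __sys_bpf() consumes
 * 'union bpf_attr'; the local names here are assumptions, not taken from
 * this file):
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size) != 0)
 *		return -EFAULT;
 *
 * i.e. any tail bytes beyond the kernel's known struct size must be zero
 * before the (possibly shorter) known prefix is copied in.
 */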
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running BPF programs to complete so that
	 * userspace, when we return to it, knows that all programs
	 * that could be running use the new map value.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
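/* Update flag semantics dispatched above (see uapi/linux/bpf.h): BPF_ANY
 * creates or updates, BPF_NOEXIST only creates, BPF_EXIST only updates.
 * E.g. a hash map update with BPF_NOEXIST fails with -EEXIST if the key is
 * already present, and with BPF_EXIST fails with -ENOENT if it is not.
 */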
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);

	return err;
}
/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, let's clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}
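/* Sizing note for __bpf_map_area_alloc() above (illustrative arithmetic,
 * assuming 4K pages): with PAGE_ALLOC_COSTLY_ORDER == 3, requests up to
 * PAGE_SIZE << 3 == 32K are first tried via kmalloc_node() and only fall
 * back to vmalloc; larger requests, and any mmapable area, go straight to
 * __vmalloc_node_range().
 */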
#ifdef CONFIG_MEMCG_KMEM
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}

void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}
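/* Example lookup against a btf_record (a sketch; 'off' is a hypothetical
 * byte offset into a map value that a caller wants to classify):
 *
 *	struct btf_field *field;
 *
 *	field = btf_record_find(map->record, off, BPF_KPTR);
 *	if (field)
 *		... 'off' holds a kptr, see field->kptr ...
 *
 * The bsearch() in btf_record_find() relies on btf_parse_fields() keeping
 * rec->fields sorted by offset.
 */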
struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}

bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}
void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record :
								 NULL);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback in the map-in-map case,
	 * which eventually calls bpf_map_free_meta, since inner_map_meta is
	 * only a template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}
static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions facing syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}
static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory regions (including the initial one) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open = bpf_map_mmap_open,
	.close = bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference which would let user-space modify the contents
		 * after freezing, while the verifier assumes they do not
		 * change.
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}

static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo = bpf_map_show_fdinfo,
#endif
	.release = bpf_map_release,
	.read = bpf_dummy_read,
	.write = bpf_dummy_write,
	.mmap = bpf_map_mmap,
	.poll = bpf_map_poll,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
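/* For instance, with BPF_MAP_CREATE_LAST_FIELD defined as map_extra below,
 * CHECK_ATTR(BPF_MAP_CREATE) evaluates to true iff any byte between the end
 * of attr->map_extra and the end of 'union bpf_attr' is non-zero, i.e. the
 * caller filled in fields this command does not consume.
 */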
/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}

static int map_check_btf(struct bpf_map *map, const struct btf *btf,
			 u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_capable()) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

#define BPF_MAP_CREATE_LAST_FIELD map_extra
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;
	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
		return -EPERM;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
		if (!bpf_capable())
			return -EPERM;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		return -EPERM;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a kernel struct,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel's
	     * counter part. Thus, attr->btf_fd has
	     * to be valid also.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	btf_put(map->btf);
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}
	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}


#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
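/* A minimal user-space sketch of this command (illustrative only; 'map_fd',
 * 'key' and 'value' are assumed to exist in the caller):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_ANY,
 *	};
 *
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */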
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	maybe_wait_bpf_programs(map);
	return err;
}
int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);
	return err;
}
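/* Note the batch contract implemented above: even when an update fails
 * mid-batch, the number of elements processed so far is written back to
 * uattr->batch.count, so user space can tell how far it got and resume.
 * Sketch of a caller (illustrative only):
 *
 *	attr.batch.count = n;
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM_BATCH, &attr, sizeof(attr));
 *
 * On failure, attr.batch.count holds the number of completed updates.
 */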
#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}

#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD flags

static int map_lookup_and_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if (attr->flags &&
	    (map->map_type == BPF_MAP_TYPE_QUEUE ||
	     map->map_type == BPF_MAP_TYPE_STACK)) {
		err = -EINVAL;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -ENOTSUPP;
	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
	    map->map_type == BPF_MAP_TYPE_STACK) {
		err = map->ops->map_pop_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_HASH ||
		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		if (!bpf_map_is_offloaded(map)) {
			bpf_disable_instrumentation();
			rcu_read_lock();
			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
			rcu_read_unlock();
			bpf_enable_instrumentation();
		}
	}

	if (err)
		goto free_value;

	if (copy_to_user(uvalue, value, value_size) != 0) {
		err = -EFAULT;
		goto free_value;
	}

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_FREEZE_LAST_FIELD map_fd

static int map_freeze(const union bpf_attr *attr)
{
	int err = 0, ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;

	if (CHECK_ATTR(BPF_MAP_FREEZE))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) {
		fdput(f);
		return -ENOTSUPP;
	}

	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		fdput(f);
		return -EPERM;
	}

	mutex_lock(&map->freeze_mutex);
	if (bpf_map_write_active(map)) {
		err = -EBUSY;
		goto err_put;
	}
	if (READ_ONCE(map->frozen)) {
		err = -EBUSY;
		goto err_put;
	}

	WRITE_ONCE(map->frozen, true);
err_put:
	mutex_unlock(&map->freeze_mutex);
	fdput(f);
	return err;
}
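/* Illustrative user-space sequence for BPF_MAP_FREEZE (a sketch): once a
 * map is frozen, syscall-side writes fail with -EPERM while lookups keep
 * working:
 *
 *	union bpf_attr attr = { .map_fd = map_fd };
 *
 *	err = syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
 *
 * Combined with BPF_F_RDONLY_PROG at creation time this yields a map that
 * is immutable from both sides after its initial setup.
 */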
static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	const struct bpf_prog_ops *ops;

	if (type >= ARRAY_SIZE(bpf_prog_types))
		return -EINVAL;
	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
	ops = bpf_prog_types[type];
	if (!ops)
		return -EINVAL;

	if (!bpf_prog_is_offloaded(prog->aux))
		prog->aux->ops = ops;
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

enum bpf_audit {
	BPF_AUDIT_LOAD,
	BPF_AUDIT_UNLOAD,
	BPF_AUDIT_MAX,
};

static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
	[BPF_AUDIT_LOAD] = "LOAD",
	[BPF_AUDIT_UNLOAD] = "UNLOAD",
};

static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
{
	struct audit_context *ctx = NULL;
	struct audit_buffer *ab;

	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
		return;
	if (audit_enabled == AUDIT_OFF)
		return;
	if (!in_irq() && !irqs_disabled())
		ctx = audit_context();
	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "prog-id=%u op=%s",
			 prog->aux->id, bpf_audit_str[op]);
	audit_log_end(ab);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);
	idr_preload_end();

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}
2123 */ 2124 if (!prog->aux->id) 2125 return; 2126 2127 spin_lock_irqsave(&prog_idr_lock, flags); 2128 idr_remove(&prog_idr, prog->aux->id); 2129 prog->aux->id = 0; 2130 spin_unlock_irqrestore(&prog_idr_lock, flags); 2131 } 2132 2133 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2134 { 2135 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2136 2137 kvfree(aux->func_info); 2138 kfree(aux->func_info_aux); 2139 free_uid(aux->user); 2140 security_bpf_prog_free(aux); 2141 bpf_prog_free(aux->prog); 2142 } 2143 2144 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2145 { 2146 bpf_prog_kallsyms_del_all(prog); 2147 btf_put(prog->aux->btf); 2148 module_put(prog->aux->mod); 2149 kvfree(prog->aux->jited_linfo); 2150 kvfree(prog->aux->linfo); 2151 kfree(prog->aux->kfunc_tab); 2152 if (prog->aux->attach_btf) 2153 btf_put(prog->aux->attach_btf); 2154 2155 if (deferred) { 2156 if (prog->aux->sleepable) 2157 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2158 else 2159 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2160 } else { 2161 __bpf_prog_put_rcu(&prog->aux->rcu); 2162 } 2163 } 2164 2165 static void bpf_prog_put_deferred(struct work_struct *work) 2166 { 2167 struct bpf_prog_aux *aux; 2168 struct bpf_prog *prog; 2169 2170 aux = container_of(work, struct bpf_prog_aux, work); 2171 prog = aux->prog; 2172 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2173 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2174 bpf_prog_free_id(prog); 2175 __bpf_prog_put_noref(prog, true); 2176 } 2177 2178 static void __bpf_prog_put(struct bpf_prog *prog) 2179 { 2180 struct bpf_prog_aux *aux = prog->aux; 2181 2182 if (atomic64_dec_and_test(&aux->refcnt)) { 2183 if (in_irq() || irqs_disabled()) { 2184 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2185 schedule_work(&aux->work); 2186 } else { 2187 bpf_prog_put_deferred(&aux->work); 2188 } 2189 } 2190 } 2191 2192 void bpf_prog_put(struct bpf_prog *prog) 2193 { 2194 __bpf_prog_put(prog); 2195 } 2196 EXPORT_SYMBOL_GPL(bpf_prog_put); 2197 2198 static int bpf_prog_release(struct inode *inode, struct file *filp) 2199 { 2200 struct bpf_prog *prog = filp->private_data; 2201 2202 bpf_prog_put(prog); 2203 return 0; 2204 } 2205 2206 struct bpf_prog_kstats { 2207 u64 nsecs; 2208 u64 cnt; 2209 u64 misses; 2210 }; 2211 2212 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2213 { 2214 struct bpf_prog_stats *stats; 2215 unsigned int flags; 2216 2217 stats = this_cpu_ptr(prog->stats); 2218 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2219 u64_stats_inc(&stats->misses); 2220 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2221 } 2222 2223 static void bpf_prog_get_stats(const struct bpf_prog *prog, 2224 struct bpf_prog_kstats *stats) 2225 { 2226 u64 nsecs = 0, cnt = 0, misses = 0; 2227 int cpu; 2228 2229 for_each_possible_cpu(cpu) { 2230 const struct bpf_prog_stats *st; 2231 unsigned int start; 2232 u64 tnsecs, tcnt, tmisses; 2233 2234 st = per_cpu_ptr(prog->stats, cpu); 2235 do { 2236 start = u64_stats_fetch_begin(&st->syncp); 2237 tnsecs = u64_stats_read(&st->nsecs); 2238 tcnt = u64_stats_read(&st->cnt); 2239 tmisses = u64_stats_read(&st->misses); 2240 } while (u64_stats_fetch_retry(&st->syncp, start)); 2241 nsecs += tnsecs; 2242 cnt += tcnt; 2243 misses += tmisses; 2244 } 2245 stats->nsecs = nsecs; 2246 stats->cnt = cnt; 2247 stats->misses = misses; 2248 } 2249 2250 #ifdef CONFIG_PROC_FS 2251 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2252 { 2253 const struct 
bpf_prog *prog = filp->private_data; 2254 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2255 struct bpf_prog_kstats stats; 2256 2257 bpf_prog_get_stats(prog, &stats); 2258 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2259 seq_printf(m, 2260 "prog_type:\t%u\n" 2261 "prog_jited:\t%u\n" 2262 "prog_tag:\t%s\n" 2263 "memlock:\t%llu\n" 2264 "prog_id:\t%u\n" 2265 "run_time_ns:\t%llu\n" 2266 "run_cnt:\t%llu\n" 2267 "recursion_misses:\t%llu\n" 2268 "verified_insns:\t%u\n", 2269 prog->type, 2270 prog->jited, 2271 prog_tag, 2272 prog->pages * 1ULL << PAGE_SHIFT, 2273 prog->aux->id, 2274 stats.nsecs, 2275 stats.cnt, 2276 stats.misses, 2277 prog->aux->verified_insns); 2278 } 2279 #endif 2280 2281 const struct file_operations bpf_prog_fops = { 2282 #ifdef CONFIG_PROC_FS 2283 .show_fdinfo = bpf_prog_show_fdinfo, 2284 #endif 2285 .release = bpf_prog_release, 2286 .read = bpf_dummy_read, 2287 .write = bpf_dummy_write, 2288 }; 2289 2290 int bpf_prog_new_fd(struct bpf_prog *prog) 2291 { 2292 int ret; 2293 2294 ret = security_bpf_prog(prog); 2295 if (ret < 0) 2296 return ret; 2297 2298 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2299 O_RDWR | O_CLOEXEC); 2300 } 2301 2302 static struct bpf_prog *____bpf_prog_get(struct fd f) 2303 { 2304 if (!f.file) 2305 return ERR_PTR(-EBADF); 2306 if (f.file->f_op != &bpf_prog_fops) { 2307 fdput(f); 2308 return ERR_PTR(-EINVAL); 2309 } 2310 2311 return f.file->private_data; 2312 } 2313 2314 void bpf_prog_add(struct bpf_prog *prog, int i) 2315 { 2316 atomic64_add(i, &prog->aux->refcnt); 2317 } 2318 EXPORT_SYMBOL_GPL(bpf_prog_add); 2319 2320 void bpf_prog_sub(struct bpf_prog *prog, int i) 2321 { 2322 /* Only to be used for undoing previous bpf_prog_add() in some 2323 * error path. We still know that another entity in our call 2324 * path holds a reference to the program, thus atomic_sub() can 2325 * be safely used in such cases! 
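 *
 * A minimal sketch of that pattern (editor's illustration; "n" and the
 * failure condition are hypothetical, not code from this file):
 *
 *	bpf_prog_add(prog, n);
 *	if (install_n_entries_failed)
 *		bpf_prog_sub(prog, n);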
2326 */ 2327 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2328 } 2329 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2330 2331 void bpf_prog_inc(struct bpf_prog *prog) 2332 { 2333 atomic64_inc(&prog->aux->refcnt); 2334 } 2335 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2336 2337 /* prog_idr_lock should have been held */ 2338 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2339 { 2340 int refold; 2341 2342 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2343 2344 if (!refold) 2345 return ERR_PTR(-ENOENT); 2346 2347 return prog; 2348 } 2349 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2350 2351 bool bpf_prog_get_ok(struct bpf_prog *prog, 2352 enum bpf_prog_type *attach_type, bool attach_drv) 2353 { 2354 /* not an attachment, just a refcount inc, always allow */ 2355 if (!attach_type) 2356 return true; 2357 2358 if (prog->type != *attach_type) 2359 return false; 2360 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2361 return false; 2362 2363 return true; 2364 } 2365 2366 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2367 bool attach_drv) 2368 { 2369 struct fd f = fdget(ufd); 2370 struct bpf_prog *prog; 2371 2372 prog = ____bpf_prog_get(f); 2373 if (IS_ERR(prog)) 2374 return prog; 2375 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2376 prog = ERR_PTR(-EINVAL); 2377 goto out; 2378 } 2379 2380 bpf_prog_inc(prog); 2381 out: 2382 fdput(f); 2383 return prog; 2384 } 2385 2386 struct bpf_prog *bpf_prog_get(u32 ufd) 2387 { 2388 return __bpf_prog_get(ufd, NULL, false); 2389 } 2390 2391 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2392 bool attach_drv) 2393 { 2394 return __bpf_prog_get(ufd, &type, attach_drv); 2395 } 2396 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2397 2398 /* Initially all BPF programs could be loaded w/o specifying 2399 * expected_attach_type. Later for some of them specifying expected_attach_type 2400 * at load time became required so that program could be validated properly. 2401 * Programs of types that are allowed to be loaded both w/ and w/o (for 2402 * backward compatibility) expected_attach_type, should have the default attach 2403 * type assigned to expected_attach_type for the latter case, so that it can be 2404 * validated later at attach time. 2405 * 2406 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2407 * prog type requires it but has some attach types that have to be backward 2408 * compatible. 2409 */ 2410 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2411 { 2412 switch (attr->prog_type) { 2413 case BPF_PROG_TYPE_CGROUP_SOCK: 2414 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2415 * exist so checking for non-zero is the way to go here. 
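 *
 * E.g. a BPF_PROG_TYPE_CGROUP_SOCK program loaded with
 * attr->expected_attach_type left at zero is treated below as if
 * BPF_CGROUP_INET_SOCK_CREATE had been specified.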
2416 */ 2417 if (!attr->expected_attach_type) 2418 attr->expected_attach_type = 2419 BPF_CGROUP_INET_SOCK_CREATE; 2420 break; 2421 case BPF_PROG_TYPE_SK_REUSEPORT: 2422 if (!attr->expected_attach_type) 2423 attr->expected_attach_type = 2424 BPF_SK_REUSEPORT_SELECT; 2425 break; 2426 } 2427 } 2428 2429 static int 2430 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2431 enum bpf_attach_type expected_attach_type, 2432 struct btf *attach_btf, u32 btf_id, 2433 struct bpf_prog *dst_prog) 2434 { 2435 if (btf_id) { 2436 if (btf_id > BTF_MAX_TYPE) 2437 return -EINVAL; 2438 2439 if (!attach_btf && !dst_prog) 2440 return -EINVAL; 2441 2442 switch (prog_type) { 2443 case BPF_PROG_TYPE_TRACING: 2444 case BPF_PROG_TYPE_LSM: 2445 case BPF_PROG_TYPE_STRUCT_OPS: 2446 case BPF_PROG_TYPE_EXT: 2447 break; 2448 default: 2449 return -EINVAL; 2450 } 2451 } 2452 2453 if (attach_btf && (!btf_id || dst_prog)) 2454 return -EINVAL; 2455 2456 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2457 prog_type != BPF_PROG_TYPE_EXT) 2458 return -EINVAL; 2459 2460 switch (prog_type) { 2461 case BPF_PROG_TYPE_CGROUP_SOCK: 2462 switch (expected_attach_type) { 2463 case BPF_CGROUP_INET_SOCK_CREATE: 2464 case BPF_CGROUP_INET_SOCK_RELEASE: 2465 case BPF_CGROUP_INET4_POST_BIND: 2466 case BPF_CGROUP_INET6_POST_BIND: 2467 return 0; 2468 default: 2469 return -EINVAL; 2470 } 2471 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2472 switch (expected_attach_type) { 2473 case BPF_CGROUP_INET4_BIND: 2474 case BPF_CGROUP_INET6_BIND: 2475 case BPF_CGROUP_INET4_CONNECT: 2476 case BPF_CGROUP_INET6_CONNECT: 2477 case BPF_CGROUP_INET4_GETPEERNAME: 2478 case BPF_CGROUP_INET6_GETPEERNAME: 2479 case BPF_CGROUP_INET4_GETSOCKNAME: 2480 case BPF_CGROUP_INET6_GETSOCKNAME: 2481 case BPF_CGROUP_UDP4_SENDMSG: 2482 case BPF_CGROUP_UDP6_SENDMSG: 2483 case BPF_CGROUP_UDP4_RECVMSG: 2484 case BPF_CGROUP_UDP6_RECVMSG: 2485 return 0; 2486 default: 2487 return -EINVAL; 2488 } 2489 case BPF_PROG_TYPE_CGROUP_SKB: 2490 switch (expected_attach_type) { 2491 case BPF_CGROUP_INET_INGRESS: 2492 case BPF_CGROUP_INET_EGRESS: 2493 return 0; 2494 default: 2495 return -EINVAL; 2496 } 2497 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2498 switch (expected_attach_type) { 2499 case BPF_CGROUP_SETSOCKOPT: 2500 case BPF_CGROUP_GETSOCKOPT: 2501 return 0; 2502 default: 2503 return -EINVAL; 2504 } 2505 case BPF_PROG_TYPE_SK_LOOKUP: 2506 if (expected_attach_type == BPF_SK_LOOKUP) 2507 return 0; 2508 return -EINVAL; 2509 case BPF_PROG_TYPE_SK_REUSEPORT: 2510 switch (expected_attach_type) { 2511 case BPF_SK_REUSEPORT_SELECT: 2512 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2513 return 0; 2514 default: 2515 return -EINVAL; 2516 } 2517 case BPF_PROG_TYPE_NETFILTER: 2518 if (expected_attach_type == BPF_NETFILTER) 2519 return 0; 2520 return -EINVAL; 2521 case BPF_PROG_TYPE_SYSCALL: 2522 case BPF_PROG_TYPE_EXT: 2523 if (expected_attach_type) 2524 return -EINVAL; 2525 fallthrough; 2526 default: 2527 return 0; 2528 } 2529 } 2530 2531 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2532 { 2533 switch (prog_type) { 2534 case BPF_PROG_TYPE_SCHED_CLS: 2535 case BPF_PROG_TYPE_SCHED_ACT: 2536 case BPF_PROG_TYPE_XDP: 2537 case BPF_PROG_TYPE_LWT_IN: 2538 case BPF_PROG_TYPE_LWT_OUT: 2539 case BPF_PROG_TYPE_LWT_XMIT: 2540 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2541 case BPF_PROG_TYPE_SK_SKB: 2542 case BPF_PROG_TYPE_SK_MSG: 2543 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2544 case BPF_PROG_TYPE_CGROUP_DEVICE: 2545 case BPF_PROG_TYPE_CGROUP_SOCK: 2546 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2547 case 
BPF_PROG_TYPE_CGROUP_SOCKOPT: 2548 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2549 case BPF_PROG_TYPE_SOCK_OPS: 2550 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2551 case BPF_PROG_TYPE_NETFILTER: 2552 return true; 2553 case BPF_PROG_TYPE_CGROUP_SKB: 2554 /* always unpriv */ 2555 case BPF_PROG_TYPE_SK_REUSEPORT: 2556 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2557 default: 2558 return false; 2559 } 2560 } 2561 2562 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2563 { 2564 switch (prog_type) { 2565 case BPF_PROG_TYPE_KPROBE: 2566 case BPF_PROG_TYPE_TRACEPOINT: 2567 case BPF_PROG_TYPE_PERF_EVENT: 2568 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2569 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2570 case BPF_PROG_TYPE_TRACING: 2571 case BPF_PROG_TYPE_LSM: 2572 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2573 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2574 return true; 2575 default: 2576 return false; 2577 } 2578 } 2579 2580 /* last field in 'union bpf_attr' used by this command */ 2581 #define BPF_PROG_LOAD_LAST_FIELD log_true_size 2582 2583 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2584 { 2585 enum bpf_prog_type type = attr->prog_type; 2586 struct bpf_prog *prog, *dst_prog = NULL; 2587 struct btf *attach_btf = NULL; 2588 int err; 2589 char license[128]; 2590 2591 if (CHECK_ATTR(BPF_PROG_LOAD)) 2592 return -EINVAL; 2593 2594 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2595 BPF_F_ANY_ALIGNMENT | 2596 BPF_F_TEST_STATE_FREQ | 2597 BPF_F_SLEEPABLE | 2598 BPF_F_TEST_RND_HI32 | 2599 BPF_F_XDP_HAS_FRAGS | 2600 BPF_F_XDP_DEV_BOUND_ONLY)) 2601 return -EINVAL; 2602 2603 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2604 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2605 !bpf_capable()) 2606 return -EPERM; 2607 2608 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2609 * creation for unprivileged users; other actions depend 2610 * on fd availability and access to bpffs, so are dependent on 2611 * object creation success. Even with unprivileged BPF disabled, 2612 * capability checks are still carried out for these 2613 * and other operations. 2614 */ 2615 if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) 2616 return -EPERM; 2617 2618 if (attr->insn_cnt == 0 || 2619 attr->insn_cnt > (bpf_capable() ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) 2620 return -E2BIG; 2621 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2622 type != BPF_PROG_TYPE_CGROUP_SKB && 2623 !bpf_capable()) 2624 return -EPERM; 2625 2626 if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) 2627 return -EPERM; 2628 if (is_perfmon_prog_type(type) && !perfmon_capable()) 2629 return -EPERM; 2630 2631 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2632 * or btf, we need to check which one it is 2633 */ 2634 if (attr->attach_prog_fd) { 2635 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2636 if (IS_ERR(dst_prog)) { 2637 dst_prog = NULL; 2638 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2639 if (IS_ERR(attach_btf)) 2640 return -EINVAL; 2641 if (!btf_is_kernel(attach_btf)) { 2642 /* attaching through specifying bpf_prog's BTF 2643 * objects directly might be supported eventually 2644 */ 2645 btf_put(attach_btf); 2646 return -ENOTSUPP; 2647 } 2648 } 2649 } else if (attr->attach_btf_id) { 2650 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2651 attach_btf = bpf_get_btf_vmlinux(); 2652 if (IS_ERR(attach_btf)) 2653 return PTR_ERR(attach_btf); 2654 if (!attach_btf) 2655 return -EINVAL; 2656 btf_get(attach_btf); 2657 } 2658 2659 bpf_prog_load_fixup_attach_type(attr); 2660 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2661 attach_btf, attr->attach_btf_id, 2662 dst_prog)) { 2663 if (dst_prog) 2664 bpf_prog_put(dst_prog); 2665 if (attach_btf) 2666 btf_put(attach_btf); 2667 return -EINVAL; 2668 } 2669 2670 /* plain bpf_prog allocation */ 2671 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2672 if (!prog) { 2673 if (dst_prog) 2674 bpf_prog_put(dst_prog); 2675 if (attach_btf) 2676 btf_put(attach_btf); 2677 return -ENOMEM; 2678 } 2679 2680 prog->expected_attach_type = attr->expected_attach_type; 2681 prog->aux->attach_btf = attach_btf; 2682 prog->aux->attach_btf_id = attr->attach_btf_id; 2683 prog->aux->dst_prog = dst_prog; 2684 prog->aux->dev_bound = !!attr->prog_ifindex; 2685 prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; 2686 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2687 2688 err = security_bpf_prog_alloc(prog->aux); 2689 if (err) 2690 goto free_prog; 2691 2692 prog->aux->user = get_current_user(); 2693 prog->len = attr->insn_cnt; 2694 2695 err = -EFAULT; 2696 if (copy_from_bpfptr(prog->insns, 2697 make_bpfptr(attr->insns, uattr.is_kernel), 2698 bpf_prog_insn_size(prog)) != 0) 2699 goto free_prog_sec; 2700 /* copy eBPF program license from user space */ 2701 if (strncpy_from_bpfptr(license, 2702 make_bpfptr(attr->license, uattr.is_kernel), 2703 sizeof(license) - 1) < 0) 2704 goto free_prog_sec; 2705 license[sizeof(license) - 1] = 0; 2706 2707 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2708 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2709 2710 prog->orig_prog = NULL; 2711 prog->jited = 0; 2712 2713 atomic64_set(&prog->aux->refcnt, 1); 2714 2715 if (bpf_prog_is_dev_bound(prog->aux)) { 2716 err = bpf_prog_dev_bound_init(prog, attr); 2717 if (err) 2718 goto free_prog_sec; 2719 } 2720 2721 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2722 bpf_prog_is_dev_bound(dst_prog->aux)) { 2723 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2724 if (err) 2725 goto free_prog_sec; 2726 } 2727 2728 /* find program type: socket_filter vs tracing_filter */ 2729 err = find_prog_type(type, prog); 2730 if (err < 0) 2731 goto free_prog_sec; 2732 2733 prog->aux->load_time = ktime_get_boottime_ns(); 2734 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2735 sizeof(attr->prog_name)); 2736 if (err < 0) 2737 goto free_prog_sec; 2738 2739 /* run eBPF verifier */ 2740 err = bpf_check(&prog, attr, uattr, uattr_size); 2741 if (err < 0) 2742 goto free_used_maps; 2743 2744 prog = bpf_prog_select_runtime(prog, &err); 2745 if (err < 0) 2746 goto free_used_maps; 2747 2748 err = bpf_prog_alloc_id(prog); 2749 if (err) 2750 goto free_used_maps; 2751 2752 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2753 * effectively publicly exposed. However, retrieving via 2754 * bpf_prog_get_fd_by_id() will take another reference, 2755 * therefore it cannot be gone underneath us. 2756 * 2757 * Only for the time /after/ successful bpf_prog_new_fd() 2758 * and before returning to userspace, we might just hold 2759 * one reference and any parallel close on that fd could 2760 * rip everything out. Hence, below notifications must 2761 * happen before bpf_prog_new_fd(). 2762 * 2763 * Also, any failure handling from this point onwards must 2764 * be using bpf_prog_put() given the program is exposed. 2765 */ 2766 bpf_prog_kallsyms_add(prog); 2767 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2768 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2769 2770 err = bpf_prog_new_fd(prog); 2771 if (err < 0) 2772 bpf_prog_put(prog); 2773 return err; 2774 2775 free_used_maps: 2776 /* In case we have subprogs, we need to wait for a grace 2777 * period before we can tear down JIT memory since symbols 2778 * are already exposed under kallsyms. 2779 */ 2780 __bpf_prog_put_noref(prog, prog->aux->func_cnt); 2781 return err; 2782 free_prog_sec: 2783 free_uid(prog->aux->user); 2784 security_bpf_prog_free(prog->aux); 2785 free_prog: 2786 if (prog->aux->attach_btf) 2787 btf_put(prog->aux->attach_btf); 2788 bpf_prog_free(prog); 2789 return err; 2790 } 2791 2792 #define BPF_OBJ_LAST_FIELD path_fd 2793 2794 static int bpf_obj_pin(const union bpf_attr *attr) 2795 { 2796 int path_fd; 2797 2798 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 2799 return -EINVAL; 2800 2801 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2802 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2803 return -EINVAL; 2804 2805 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2806 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 2807 u64_to_user_ptr(attr->pathname)); 2808 } 2809 2810 static int bpf_obj_get(const union bpf_attr *attr) 2811 { 2812 int path_fd; 2813 2814 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2815 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 2816 return -EINVAL; 2817 2818 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2819 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2820 return -EINVAL; 2821 2822 path_fd = attr->file_flags & BPF_F_PATH_FD ? 
attr->path_fd : AT_FDCWD; 2823 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 2824 attr->file_flags); 2825 } 2826 2827 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2828 const struct bpf_link_ops *ops, struct bpf_prog *prog) 2829 { 2830 atomic64_set(&link->refcnt, 1); 2831 link->type = type; 2832 link->id = 0; 2833 link->ops = ops; 2834 link->prog = prog; 2835 } 2836 2837 static void bpf_link_free_id(int id) 2838 { 2839 if (!id) 2840 return; 2841 2842 spin_lock_bh(&link_idr_lock); 2843 idr_remove(&link_idr, id); 2844 spin_unlock_bh(&link_idr_lock); 2845 } 2846 2847 /* Clean up bpf_link and corresponding anon_inode file and FD. After 2848 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 2849 * anon_inode's release() call. This helper marks bpf_link as 2850 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 2851 * is not decremented, it's the responsibility of a calling code that failed 2852 * to complete bpf_link initialization. 2853 * This helper eventually calls link's dealloc callback, but does not call 2854 * link's release callback. 2855 */ 2856 void bpf_link_cleanup(struct bpf_link_primer *primer) 2857 { 2858 primer->link->prog = NULL; 2859 bpf_link_free_id(primer->id); 2860 fput(primer->file); 2861 put_unused_fd(primer->fd); 2862 } 2863 2864 void bpf_link_inc(struct bpf_link *link) 2865 { 2866 atomic64_inc(&link->refcnt); 2867 } 2868 2869 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 2870 { 2871 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 2872 2873 /* free bpf_link and its containing memory */ 2874 link->ops->dealloc_deferred(link); 2875 } 2876 2877 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 2878 { 2879 if (rcu_trace_implies_rcu_gp()) 2880 bpf_link_defer_dealloc_rcu_gp(rcu); 2881 else 2882 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 2883 } 2884 2885 /* bpf_link_free is guaranteed to be called from process context */ 2886 static void bpf_link_free(struct bpf_link *link) 2887 { 2888 bool sleepable = false; 2889 2890 bpf_link_free_id(link->id); 2891 if (link->prog) { 2892 sleepable = link->prog->aux->sleepable; 2893 /* detach BPF program, clean up used resources */ 2894 link->ops->release(link); 2895 bpf_prog_put(link->prog); 2896 } 2897 if (link->ops->dealloc_deferred) { 2898 /* schedule BPF link deallocation; if underlying BPF program 2899 * is sleepable, we need to first wait for RCU tasks trace 2900 * sync, then go through "classic" RCU grace period 2901 */ 2902 if (sleepable) 2903 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 2904 else 2905 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 2906 } 2907 if (link->ops->dealloc) 2908 link->ops->dealloc(link); 2909 } 2910 2911 static void bpf_link_put_deferred(struct work_struct *work) 2912 { 2913 struct bpf_link *link = container_of(work, struct bpf_link, work); 2914 2915 bpf_link_free(link); 2916 } 2917 2918 /* bpf_link_put might be called from atomic context. It needs to be called 2919 * from sleepable context in order to acquire sleeping locks during the process. 
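 * That is why the actual freeing is punted to the system workqueue here,
 * while bpf_link_put_direct() below frees synchronously and is reserved
 * for callers known to run in sleepable context.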
2920 */ 2921 void bpf_link_put(struct bpf_link *link) 2922 { 2923 if (!atomic64_dec_and_test(&link->refcnt)) 2924 return; 2925 2926 INIT_WORK(&link->work, bpf_link_put_deferred); 2927 schedule_work(&link->work); 2928 } 2929 EXPORT_SYMBOL(bpf_link_put); 2930 2931 static void bpf_link_put_direct(struct bpf_link *link) 2932 { 2933 if (!atomic64_dec_and_test(&link->refcnt)) 2934 return; 2935 bpf_link_free(link); 2936 } 2937 2938 static int bpf_link_release(struct inode *inode, struct file *filp) 2939 { 2940 struct bpf_link *link = filp->private_data; 2941 2942 bpf_link_put_direct(link); 2943 return 0; 2944 } 2945 2946 #ifdef CONFIG_PROC_FS 2947 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 2948 #define BPF_MAP_TYPE(_id, _ops) 2949 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 2950 static const char *bpf_link_type_strs[] = { 2951 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 2952 #include <linux/bpf_types.h> 2953 }; 2954 #undef BPF_PROG_TYPE 2955 #undef BPF_MAP_TYPE 2956 #undef BPF_LINK_TYPE 2957 2958 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 2959 { 2960 const struct bpf_link *link = filp->private_data; 2961 const struct bpf_prog *prog = link->prog; 2962 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2963 2964 seq_printf(m, 2965 "link_type:\t%s\n" 2966 "link_id:\t%u\n", 2967 bpf_link_type_strs[link->type], 2968 link->id); 2969 if (prog) { 2970 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2971 seq_printf(m, 2972 "prog_tag:\t%s\n" 2973 "prog_id:\t%u\n", 2974 prog_tag, 2975 prog->aux->id); 2976 } 2977 if (link->ops->show_fdinfo) 2978 link->ops->show_fdinfo(link, m); 2979 } 2980 #endif 2981 2982 static const struct file_operations bpf_link_fops = { 2983 #ifdef CONFIG_PROC_FS 2984 .show_fdinfo = bpf_link_show_fdinfo, 2985 #endif 2986 .release = bpf_link_release, 2987 .read = bpf_dummy_read, 2988 .write = bpf_dummy_write, 2989 }; 2990 2991 static int bpf_link_alloc_id(struct bpf_link *link) 2992 { 2993 int id; 2994 2995 idr_preload(GFP_KERNEL); 2996 spin_lock_bh(&link_idr_lock); 2997 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 2998 spin_unlock_bh(&link_idr_lock); 2999 idr_preload_end(); 3000 3001 return id; 3002 } 3003 3004 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3005 * reserving unused FD and allocating ID from link_idr. This is to be paired 3006 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3007 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3008 * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the 3009 * transient state is passed around in struct bpf_link_primer. 3010 * This is the preferred way to create and initialize bpf_link, especially when 3011 * there are complicated and expensive operations in between creating bpf_link 3012 * itself and attaching it to a BPF hook. By using bpf_link_prime() and 3013 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3014 * expensive (and potentially failing) roll back operations in the rare case 3015 * that file, FD, or ID can't be allocated.
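 *
 * A minimal sketch of the pattern (editor's illustration; the attach
 * step and its helper name are hypothetical):
 *
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = attach_link_to_hook(link);
 *	if (err) {
 *		bpf_link_cleanup(&primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);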
3016 */ 3017 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3018 { 3019 struct file *file; 3020 int fd, id; 3021 3022 fd = get_unused_fd_flags(O_CLOEXEC); 3023 if (fd < 0) 3024 return fd; 3025 3026 3027 id = bpf_link_alloc_id(link); 3028 if (id < 0) { 3029 put_unused_fd(fd); 3030 return id; 3031 } 3032 3033 file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 3034 if (IS_ERR(file)) { 3035 bpf_link_free_id(id); 3036 put_unused_fd(fd); 3037 return PTR_ERR(file); 3038 } 3039 3040 primer->link = link; 3041 primer->file = file; 3042 primer->fd = fd; 3043 primer->id = id; 3044 return 0; 3045 } 3046 3047 int bpf_link_settle(struct bpf_link_primer *primer) 3048 { 3049 /* make bpf_link fetchable by ID */ 3050 spin_lock_bh(&link_idr_lock); 3051 primer->link->id = primer->id; 3052 spin_unlock_bh(&link_idr_lock); 3053 /* make bpf_link fetchable by FD */ 3054 fd_install(primer->fd, primer->file); 3055 /* pass through installed FD */ 3056 return primer->fd; 3057 } 3058 3059 int bpf_link_new_fd(struct bpf_link *link) 3060 { 3061 return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 3062 } 3063 3064 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3065 { 3066 struct fd f = fdget(ufd); 3067 struct bpf_link *link; 3068 3069 if (!f.file) 3070 return ERR_PTR(-EBADF); 3071 if (f.file->f_op != &bpf_link_fops) { 3072 fdput(f); 3073 return ERR_PTR(-EINVAL); 3074 } 3075 3076 link = f.file->private_data; 3077 bpf_link_inc(link); 3078 fdput(f); 3079 3080 return link; 3081 } 3082 EXPORT_SYMBOL(bpf_link_get_from_fd); 3083 3084 static void bpf_tracing_link_release(struct bpf_link *link) 3085 { 3086 struct bpf_tracing_link *tr_link = 3087 container_of(link, struct bpf_tracing_link, link.link); 3088 3089 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3090 tr_link->trampoline)); 3091 3092 bpf_trampoline_put(tr_link->trampoline); 3093 3094 /* tgt_prog is NULL if target is a kernel function */ 3095 if (tr_link->tgt_prog) 3096 bpf_prog_put(tr_link->tgt_prog); 3097 } 3098 3099 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3100 { 3101 struct bpf_tracing_link *tr_link = 3102 container_of(link, struct bpf_tracing_link, link.link); 3103 3104 kfree(tr_link); 3105 } 3106 3107 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3108 struct seq_file *seq) 3109 { 3110 struct bpf_tracing_link *tr_link = 3111 container_of(link, struct bpf_tracing_link, link.link); 3112 u32 target_btf_id, target_obj_id; 3113 3114 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3115 &target_obj_id, &target_btf_id); 3116 seq_printf(seq, 3117 "attach_type:\t%d\n" 3118 "target_obj_id:\t%u\n" 3119 "target_btf_id:\t%u\n", 3120 tr_link->attach_type, 3121 target_obj_id, 3122 target_btf_id); 3123 } 3124 3125 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3126 struct bpf_link_info *info) 3127 { 3128 struct bpf_tracing_link *tr_link = 3129 container_of(link, struct bpf_tracing_link, link.link); 3130 3131 info->tracing.attach_type = tr_link->attach_type; 3132 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3133 &info->tracing.target_obj_id, 3134 &info->tracing.target_btf_id); 3135 3136 return 0; 3137 } 3138 3139 static const struct bpf_link_ops bpf_tracing_link_lops = { 3140 .release = bpf_tracing_link_release, 3141 .dealloc = bpf_tracing_link_dealloc, 3142 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3143 .fill_link_info = bpf_tracing_link_fill_link_info, 3144 }; 3145 3146 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 
3147 int tgt_prog_fd, 3148 u32 btf_id, 3149 u64 bpf_cookie) 3150 { 3151 struct bpf_link_primer link_primer; 3152 struct bpf_prog *tgt_prog = NULL; 3153 struct bpf_trampoline *tr = NULL; 3154 struct bpf_tracing_link *link; 3155 u64 key = 0; 3156 int err; 3157 3158 switch (prog->type) { 3159 case BPF_PROG_TYPE_TRACING: 3160 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3161 prog->expected_attach_type != BPF_TRACE_FEXIT && 3162 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3163 err = -EINVAL; 3164 goto out_put_prog; 3165 } 3166 break; 3167 case BPF_PROG_TYPE_EXT: 3168 if (prog->expected_attach_type != 0) { 3169 err = -EINVAL; 3170 goto out_put_prog; 3171 } 3172 break; 3173 case BPF_PROG_TYPE_LSM: 3174 if (prog->expected_attach_type != BPF_LSM_MAC) { 3175 err = -EINVAL; 3176 goto out_put_prog; 3177 } 3178 break; 3179 default: 3180 err = -EINVAL; 3181 goto out_put_prog; 3182 } 3183 3184 if (!!tgt_prog_fd != !!btf_id) { 3185 err = -EINVAL; 3186 goto out_put_prog; 3187 } 3188 3189 if (tgt_prog_fd) { 3190 /* For now we only allow new targets for BPF_PROG_TYPE_EXT */ 3191 if (prog->type != BPF_PROG_TYPE_EXT) { 3192 err = -EINVAL; 3193 goto out_put_prog; 3194 } 3195 3196 tgt_prog = bpf_prog_get(tgt_prog_fd); 3197 if (IS_ERR(tgt_prog)) { 3198 err = PTR_ERR(tgt_prog); 3199 tgt_prog = NULL; 3200 goto out_put_prog; 3201 } 3202 3203 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3204 } 3205 3206 link = kzalloc(sizeof(*link), GFP_USER); 3207 if (!link) { 3208 err = -ENOMEM; 3209 goto out_put_prog; 3210 } 3211 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3212 &bpf_tracing_link_lops, prog); 3213 link->attach_type = prog->expected_attach_type; 3214 link->link.cookie = bpf_cookie; 3215 3216 mutex_lock(&prog->aux->dst_mutex); 3217 3218 /* There are a few possible cases here: 3219 * 3220 * - if prog->aux->dst_trampoline is set, the program was just loaded 3221 * and not yet attached to anything, so we can use the values stored 3222 * in prog->aux 3223 * 3224 * - if prog->aux->dst_trampoline is NULL, the program has already been 3225 * attached to a target and its initial target was cleared (below) 3226 * 3227 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3228 * target_btf_id using the link_create API. 3229 * 3230 * - if tgt_prog == NULL when this function was called using the old 3231 * raw_tracepoint_open API, and we need a target from prog->aux 3232 * 3233 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3234 * was detached and is going for re-attachment. 3235 * 3236 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3237 * are NULL, then program was already attached and user did not provide 3238 * tgt_prog_fd so we have no way to find out or create trampoline 3239 */ 3240 if (!prog->aux->dst_trampoline && !tgt_prog) { 3241 /* 3242 * Allow re-attach for TRACING and LSM programs. If it's 3243 * currently linked, bpf_trampoline_link_prog will fail. 3244 * EXT programs need to specify tgt_prog_fd, so they 3245 * re-attach in separate code path. 3246 */ 3247 if (prog->type != BPF_PROG_TYPE_TRACING && 3248 prog->type != BPF_PROG_TYPE_LSM) { 3249 err = -EINVAL; 3250 goto out_unlock; 3251 } 3252 /* We can allow re-attach only if we have valid attach_btf. 
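 * That BTF object was recorded at load time, and the trampoline key is
 * recomputed from it just below.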
*/ 3253 if (!prog->aux->attach_btf) { 3254 err = -EINVAL; 3255 goto out_unlock; 3256 } 3257 btf_id = prog->aux->attach_btf_id; 3258 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3259 } 3260 3261 if (!prog->aux->dst_trampoline || 3262 (key && key != prog->aux->dst_trampoline->key)) { 3263 /* If there is no saved target, or the specified target is 3264 * different from the destination specified at load time, we 3265 * need a new trampoline and a check for compatibility 3266 */ 3267 struct bpf_attach_target_info tgt_info = {}; 3268 3269 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3270 &tgt_info); 3271 if (err) 3272 goto out_unlock; 3273 3274 if (tgt_info.tgt_mod) { 3275 module_put(prog->aux->mod); 3276 prog->aux->mod = tgt_info.tgt_mod; 3277 } 3278 3279 tr = bpf_trampoline_get(key, &tgt_info); 3280 if (!tr) { 3281 err = -ENOMEM; 3282 goto out_unlock; 3283 } 3284 } else { 3285 /* The caller didn't specify a target, or the target was the 3286 * same as the destination supplied during program load. This 3287 * means we can reuse the trampoline and reference from program 3288 * load time, and there is no need to allocate a new one. This 3289 * can only happen once for any program, as the saved values in 3290 * prog->aux are cleared below. 3291 */ 3292 tr = prog->aux->dst_trampoline; 3293 tgt_prog = prog->aux->dst_prog; 3294 } 3295 3296 err = bpf_link_prime(&link->link.link, &link_primer); 3297 if (err) 3298 goto out_unlock; 3299 3300 err = bpf_trampoline_link_prog(&link->link, tr); 3301 if (err) { 3302 bpf_link_cleanup(&link_primer); 3303 link = NULL; 3304 goto out_unlock; 3305 } 3306 3307 link->tgt_prog = tgt_prog; 3308 link->trampoline = tr; 3309 3310 /* Always clear the trampoline and target prog from prog->aux to make 3311 * sure the original attach destination is not kept alive after a 3312 * program is (re-)attached to another target. 
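 * The references that were moved into the link (tgt_prog and the
 * trampoline) are dropped later via bpf_tracing_link_release().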
3313 */ 3314 if (prog->aux->dst_prog && 3315 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3316 /* got extra prog ref from syscall, or attaching to different prog */ 3317 bpf_prog_put(prog->aux->dst_prog); 3318 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3319 /* we allocated a new trampoline, so free the old one */ 3320 bpf_trampoline_put(prog->aux->dst_trampoline); 3321 3322 prog->aux->dst_prog = NULL; 3323 prog->aux->dst_trampoline = NULL; 3324 mutex_unlock(&prog->aux->dst_mutex); 3325 3326 return bpf_link_settle(&link_primer); 3327 out_unlock: 3328 if (tr && tr != prog->aux->dst_trampoline) 3329 bpf_trampoline_put(tr); 3330 mutex_unlock(&prog->aux->dst_mutex); 3331 kfree(link); 3332 out_put_prog: 3333 if (tgt_prog_fd && tgt_prog) 3334 bpf_prog_put(tgt_prog); 3335 return err; 3336 } 3337 3338 struct bpf_raw_tp_link { 3339 struct bpf_link link; 3340 struct bpf_raw_event_map *btp; 3341 }; 3342 3343 static void bpf_raw_tp_link_release(struct bpf_link *link) 3344 { 3345 struct bpf_raw_tp_link *raw_tp = 3346 container_of(link, struct bpf_raw_tp_link, link); 3347 3348 bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); 3349 bpf_put_raw_tracepoint(raw_tp->btp); 3350 } 3351 3352 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3353 { 3354 struct bpf_raw_tp_link *raw_tp = 3355 container_of(link, struct bpf_raw_tp_link, link); 3356 3357 kfree(raw_tp); 3358 } 3359 3360 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3361 struct seq_file *seq) 3362 { 3363 struct bpf_raw_tp_link *raw_tp_link = 3364 container_of(link, struct bpf_raw_tp_link, link); 3365 3366 seq_printf(seq, 3367 "tp_name:\t%s\n", 3368 raw_tp_link->btp->tp->name); 3369 } 3370 3371 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3372 u32 len) 3373 { 3374 if (ulen >= len + 1) { 3375 if (copy_to_user(ubuf, buf, len + 1)) 3376 return -EFAULT; 3377 } else { 3378 char zero = '\0'; 3379 3380 if (copy_to_user(ubuf, buf, ulen - 1)) 3381 return -EFAULT; 3382 if (put_user(zero, ubuf + ulen - 1)) 3383 return -EFAULT; 3384 return -ENOSPC; 3385 } 3386 3387 return 0; 3388 } 3389 3390 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3391 struct bpf_link_info *info) 3392 { 3393 struct bpf_raw_tp_link *raw_tp_link = 3394 container_of(link, struct bpf_raw_tp_link, link); 3395 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3396 const char *tp_name = raw_tp_link->btp->tp->name; 3397 u32 ulen = info->raw_tracepoint.tp_name_len; 3398 size_t tp_len = strlen(tp_name); 3399 3400 if (!ulen ^ !ubuf) 3401 return -EINVAL; 3402 3403 info->raw_tracepoint.tp_name_len = tp_len + 1; 3404 3405 if (!ubuf) 3406 return 0; 3407 3408 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3409 } 3410 3411 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3412 .release = bpf_raw_tp_link_release, 3413 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3414 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3415 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3416 }; 3417 3418 #ifdef CONFIG_PERF_EVENTS 3419 struct bpf_perf_link { 3420 struct bpf_link link; 3421 struct file *perf_file; 3422 }; 3423 3424 static void bpf_perf_link_release(struct bpf_link *link) 3425 { 3426 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3427 struct perf_event *event = perf_link->perf_file->private_data; 3428 3429 perf_event_free_bpf_prog(event); 3430 fput(perf_link->perf_file); 3431 } 3432 3433 static void bpf_perf_link_dealloc(struct bpf_link 
*link) 3434 { 3435 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3436 3437 kfree(perf_link); 3438 } 3439 3440 static int bpf_perf_link_fill_common(const struct perf_event *event, 3441 char __user *uname, u32 ulen, 3442 u64 *probe_offset, u64 *probe_addr, 3443 u32 *fd_type) 3444 { 3445 const char *buf; 3446 u32 prog_id; 3447 size_t len; 3448 int err; 3449 3450 if (!ulen ^ !uname) 3451 return -EINVAL; 3452 3453 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3454 probe_offset, probe_addr); 3455 if (err) 3456 return err; 3457 if (!uname) 3458 return 0; 3459 if (buf) { 3460 len = strlen(buf); 3461 err = bpf_copy_to_user(uname, buf, ulen, len); 3462 if (err) 3463 return err; 3464 } else { 3465 char zero = '\0'; 3466 3467 if (put_user(zero, uname)) 3468 return -EFAULT; 3469 } 3470 return 0; 3471 } 3472 3473 #ifdef CONFIG_KPROBE_EVENTS 3474 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3475 struct bpf_link_info *info) 3476 { 3477 char __user *uname; 3478 u64 addr, offset; 3479 u32 ulen, type; 3480 int err; 3481 3482 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3483 ulen = info->perf_event.kprobe.name_len; 3484 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3485 &type); 3486 if (err) 3487 return err; 3488 if (type == BPF_FD_TYPE_KRETPROBE) 3489 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3490 else 3491 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3492 3493 info->perf_event.kprobe.offset = offset; 3494 if (!kallsyms_show_value(current_cred())) 3495 addr = 0; 3496 info->perf_event.kprobe.addr = addr; 3497 return 0; 3498 } 3499 #endif 3500 3501 #ifdef CONFIG_UPROBE_EVENTS 3502 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3503 struct bpf_link_info *info) 3504 { 3505 char __user *uname; 3506 u64 addr, offset; 3507 u32 ulen, type; 3508 int err; 3509 3510 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3511 ulen = info->perf_event.uprobe.name_len; 3512 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3513 &type); 3514 if (err) 3515 return err; 3516 3517 if (type == BPF_FD_TYPE_URETPROBE) 3518 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3519 else 3520 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3521 info->perf_event.uprobe.offset = offset; 3522 return 0; 3523 } 3524 #endif 3525 3526 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3527 struct bpf_link_info *info) 3528 { 3529 #ifdef CONFIG_KPROBE_EVENTS 3530 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3531 return bpf_perf_link_fill_kprobe(event, info); 3532 #endif 3533 #ifdef CONFIG_UPROBE_EVENTS 3534 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3535 return bpf_perf_link_fill_uprobe(event, info); 3536 #endif 3537 return -EOPNOTSUPP; 3538 } 3539 3540 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3541 struct bpf_link_info *info) 3542 { 3543 char __user *uname; 3544 u32 ulen; 3545 3546 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3547 ulen = info->perf_event.tracepoint.name_len; 3548 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3549 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL); 3550 } 3551 3552 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3553 struct bpf_link_info *info) 3554 { 3555 info->perf_event.event.type = event->attr.type; 3556 info->perf_event.event.config = event->attr.config; 3557 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3558 
return 0; 3559 } 3560 3561 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3562 struct bpf_link_info *info) 3563 { 3564 struct bpf_perf_link *perf_link; 3565 const struct perf_event *event; 3566 3567 perf_link = container_of(link, struct bpf_perf_link, link); 3568 event = perf_get_event(perf_link->perf_file); 3569 if (IS_ERR(event)) 3570 return PTR_ERR(event); 3571 3572 switch (event->prog->type) { 3573 case BPF_PROG_TYPE_PERF_EVENT: 3574 return bpf_perf_link_fill_perf_event(event, info); 3575 case BPF_PROG_TYPE_TRACEPOINT: 3576 return bpf_perf_link_fill_tracepoint(event, info); 3577 case BPF_PROG_TYPE_KPROBE: 3578 return bpf_perf_link_fill_probe(event, info); 3579 default: 3580 return -EOPNOTSUPP; 3581 } 3582 } 3583 3584 static const struct bpf_link_ops bpf_perf_link_lops = { 3585 .release = bpf_perf_link_release, 3586 .dealloc = bpf_perf_link_dealloc, 3587 .fill_link_info = bpf_perf_link_fill_link_info, 3588 }; 3589 3590 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3591 { 3592 struct bpf_link_primer link_primer; 3593 struct bpf_perf_link *link; 3594 struct perf_event *event; 3595 struct file *perf_file; 3596 int err; 3597 3598 if (attr->link_create.flags) 3599 return -EINVAL; 3600 3601 perf_file = perf_event_get(attr->link_create.target_fd); 3602 if (IS_ERR(perf_file)) 3603 return PTR_ERR(perf_file); 3604 3605 link = kzalloc(sizeof(*link), GFP_USER); 3606 if (!link) { 3607 err = -ENOMEM; 3608 goto out_put_file; 3609 } 3610 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3611 link->perf_file = perf_file; 3612 3613 err = bpf_link_prime(&link->link, &link_primer); 3614 if (err) { 3615 kfree(link); 3616 goto out_put_file; 3617 } 3618 3619 event = perf_file->private_data; 3620 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3621 if (err) { 3622 bpf_link_cleanup(&link_primer); 3623 goto out_put_file; 3624 } 3625 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3626 bpf_prog_inc(prog); 3627 3628 return bpf_link_settle(&link_primer); 3629 3630 out_put_file: 3631 fput(perf_file); 3632 return err; 3633 } 3634 #else 3635 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3636 { 3637 return -EOPNOTSUPP; 3638 } 3639 #endif /* CONFIG_PERF_EVENTS */ 3640 3641 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3642 const char __user *user_tp_name) 3643 { 3644 struct bpf_link_primer link_primer; 3645 struct bpf_raw_tp_link *link; 3646 struct bpf_raw_event_map *btp; 3647 const char *tp_name; 3648 char buf[128]; 3649 int err; 3650 3651 switch (prog->type) { 3652 case BPF_PROG_TYPE_TRACING: 3653 case BPF_PROG_TYPE_EXT: 3654 case BPF_PROG_TYPE_LSM: 3655 if (user_tp_name) 3656 /* The attach point for this category of programs 3657 * should be specified via btf_id during program load. 
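 * (i.e. via attr->attach_btf_id), so passing a tracepoint name here is
 * rejected for these program types.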
3658 */ 3659 return -EINVAL; 3660 if (prog->type == BPF_PROG_TYPE_TRACING && 3661 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3662 tp_name = prog->aux->attach_func_name; 3663 break; 3664 } 3665 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3666 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3667 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3668 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3669 return -EFAULT; 3670 buf[sizeof(buf) - 1] = 0; 3671 tp_name = buf; 3672 break; 3673 default: 3674 return -EINVAL; 3675 } 3676 3677 btp = bpf_get_raw_tracepoint(tp_name); 3678 if (!btp) 3679 return -ENOENT; 3680 3681 link = kzalloc(sizeof(*link), GFP_USER); 3682 if (!link) { 3683 err = -ENOMEM; 3684 goto out_put_btp; 3685 } 3686 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3687 &bpf_raw_tp_link_lops, prog); 3688 link->btp = btp; 3689 3690 err = bpf_link_prime(&link->link, &link_primer); 3691 if (err) { 3692 kfree(link); 3693 goto out_put_btp; 3694 } 3695 3696 err = bpf_probe_register(link->btp, prog); 3697 if (err) { 3698 bpf_link_cleanup(&link_primer); 3699 goto out_put_btp; 3700 } 3701 3702 return bpf_link_settle(&link_primer); 3703 3704 out_put_btp: 3705 bpf_put_raw_tracepoint(btp); 3706 return err; 3707 } 3708 3709 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd 3710 3711 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3712 { 3713 struct bpf_prog *prog; 3714 int fd; 3715 3716 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3717 return -EINVAL; 3718 3719 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3720 if (IS_ERR(prog)) 3721 return PTR_ERR(prog); 3722 3723 fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); 3724 if (fd < 0) 3725 bpf_prog_put(prog); 3726 return fd; 3727 } 3728 3729 static enum bpf_prog_type 3730 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3731 { 3732 switch (attach_type) { 3733 case BPF_CGROUP_INET_INGRESS: 3734 case BPF_CGROUP_INET_EGRESS: 3735 return BPF_PROG_TYPE_CGROUP_SKB; 3736 case BPF_CGROUP_INET_SOCK_CREATE: 3737 case BPF_CGROUP_INET_SOCK_RELEASE: 3738 case BPF_CGROUP_INET4_POST_BIND: 3739 case BPF_CGROUP_INET6_POST_BIND: 3740 return BPF_PROG_TYPE_CGROUP_SOCK; 3741 case BPF_CGROUP_INET4_BIND: 3742 case BPF_CGROUP_INET6_BIND: 3743 case BPF_CGROUP_INET4_CONNECT: 3744 case BPF_CGROUP_INET6_CONNECT: 3745 case BPF_CGROUP_INET4_GETPEERNAME: 3746 case BPF_CGROUP_INET6_GETPEERNAME: 3747 case BPF_CGROUP_INET4_GETSOCKNAME: 3748 case BPF_CGROUP_INET6_GETSOCKNAME: 3749 case BPF_CGROUP_UDP4_SENDMSG: 3750 case BPF_CGROUP_UDP6_SENDMSG: 3751 case BPF_CGROUP_UDP4_RECVMSG: 3752 case BPF_CGROUP_UDP6_RECVMSG: 3753 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3754 case BPF_CGROUP_SOCK_OPS: 3755 return BPF_PROG_TYPE_SOCK_OPS; 3756 case BPF_CGROUP_DEVICE: 3757 return BPF_PROG_TYPE_CGROUP_DEVICE; 3758 case BPF_SK_MSG_VERDICT: 3759 return BPF_PROG_TYPE_SK_MSG; 3760 case BPF_SK_SKB_STREAM_PARSER: 3761 case BPF_SK_SKB_STREAM_VERDICT: 3762 case BPF_SK_SKB_VERDICT: 3763 return BPF_PROG_TYPE_SK_SKB; 3764 case BPF_LIRC_MODE2: 3765 return BPF_PROG_TYPE_LIRC_MODE2; 3766 case BPF_FLOW_DISSECTOR: 3767 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3768 case BPF_CGROUP_SYSCTL: 3769 return BPF_PROG_TYPE_CGROUP_SYSCTL; 3770 case BPF_CGROUP_GETSOCKOPT: 3771 case BPF_CGROUP_SETSOCKOPT: 3772 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3773 case BPF_TRACE_ITER: 3774 case BPF_TRACE_RAW_TP: 3775 case BPF_TRACE_FENTRY: 3776 case BPF_TRACE_FEXIT: 3777 case BPF_MODIFY_RETURN: 3778 return BPF_PROG_TYPE_TRACING; 3779 case BPF_LSM_MAC: 3780 return 
BPF_PROG_TYPE_LSM; 3781 case BPF_SK_LOOKUP: 3782 return BPF_PROG_TYPE_SK_LOOKUP; 3783 case BPF_XDP: 3784 return BPF_PROG_TYPE_XDP; 3785 case BPF_LSM_CGROUP: 3786 return BPF_PROG_TYPE_LSM; 3787 case BPF_TCX_INGRESS: 3788 case BPF_TCX_EGRESS: 3789 return BPF_PROG_TYPE_SCHED_CLS; 3790 default: 3791 return BPF_PROG_TYPE_UNSPEC; 3792 } 3793 } 3794 3795 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 3796 enum bpf_attach_type attach_type) 3797 { 3798 enum bpf_prog_type ptype; 3799 3800 switch (prog->type) { 3801 case BPF_PROG_TYPE_CGROUP_SOCK: 3802 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3803 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3804 case BPF_PROG_TYPE_SK_LOOKUP: 3805 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 3806 case BPF_PROG_TYPE_CGROUP_SKB: 3807 if (!capable(CAP_NET_ADMIN)) 3808 /* cg-skb progs can be loaded by unpriv user. 3809 * check permissions at attach time. 3810 */ 3811 return -EPERM; 3812 3813 ptype = attach_type_to_prog_type(attach_type); 3814 if (prog->type != ptype) 3815 return -EINVAL; 3816 3817 return prog->enforce_expected_attach_type && 3818 prog->expected_attach_type != attach_type ? 3819 -EINVAL : 0; 3820 case BPF_PROG_TYPE_EXT: 3821 return 0; 3822 case BPF_PROG_TYPE_NETFILTER: 3823 if (attach_type != BPF_NETFILTER) 3824 return -EINVAL; 3825 return 0; 3826 case BPF_PROG_TYPE_PERF_EVENT: 3827 case BPF_PROG_TYPE_TRACEPOINT: 3828 if (attach_type != BPF_PERF_EVENT) 3829 return -EINVAL; 3830 return 0; 3831 case BPF_PROG_TYPE_KPROBE: 3832 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 3833 attach_type != BPF_TRACE_KPROBE_MULTI) 3834 return -EINVAL; 3835 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 3836 attach_type != BPF_TRACE_UPROBE_MULTI) 3837 return -EINVAL; 3838 if (attach_type != BPF_PERF_EVENT && 3839 attach_type != BPF_TRACE_KPROBE_MULTI && 3840 attach_type != BPF_TRACE_UPROBE_MULTI) 3841 return -EINVAL; 3842 return 0; 3843 case BPF_PROG_TYPE_SCHED_CLS: 3844 if (attach_type != BPF_TCX_INGRESS && 3845 attach_type != BPF_TCX_EGRESS) 3846 return -EINVAL; 3847 return 0; 3848 default: 3849 ptype = attach_type_to_prog_type(attach_type); 3850 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 3851 return -EINVAL; 3852 return 0; 3853 } 3854 } 3855 3856 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 3857 3858 #define BPF_F_ATTACH_MASK_BASE \ 3859 (BPF_F_ALLOW_OVERRIDE | \ 3860 BPF_F_ALLOW_MULTI | \ 3861 BPF_F_REPLACE) 3862 3863 #define BPF_F_ATTACH_MASK_MPROG \ 3864 (BPF_F_REPLACE | \ 3865 BPF_F_BEFORE | \ 3866 BPF_F_AFTER | \ 3867 BPF_F_ID | \ 3868 BPF_F_LINK) 3869 3870 static int bpf_prog_attach(const union bpf_attr *attr) 3871 { 3872 enum bpf_prog_type ptype; 3873 struct bpf_prog *prog; 3874 int ret; 3875 3876 if (CHECK_ATTR(BPF_PROG_ATTACH)) 3877 return -EINVAL; 3878 3879 ptype = attach_type_to_prog_type(attr->attach_type); 3880 if (ptype == BPF_PROG_TYPE_UNSPEC) 3881 return -EINVAL; 3882 if (bpf_mprog_supported(ptype)) { 3883 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 3884 return -EINVAL; 3885 } else { 3886 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 3887 return -EINVAL; 3888 if (attr->relative_fd || 3889 attr->expected_revision) 3890 return -EINVAL; 3891 } 3892 3893 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3894 if (IS_ERR(prog)) 3895 return PTR_ERR(prog); 3896 3897 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 3898 bpf_prog_put(prog); 3899 return -EINVAL; 3900 } 3901 3902 switch (ptype) { 3903 case BPF_PROG_TYPE_SK_SKB: 3904 case 
BPF_PROG_TYPE_SK_MSG: 3905 ret = sock_map_get_from_fd(attr, prog); 3906 break; 3907 case BPF_PROG_TYPE_LIRC_MODE2: 3908 ret = lirc_prog_attach(attr, prog); 3909 break; 3910 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3911 ret = netns_bpf_prog_attach(attr, prog); 3912 break; 3913 case BPF_PROG_TYPE_CGROUP_DEVICE: 3914 case BPF_PROG_TYPE_CGROUP_SKB: 3915 case BPF_PROG_TYPE_CGROUP_SOCK: 3916 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3917 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3918 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3919 case BPF_PROG_TYPE_SOCK_OPS: 3920 case BPF_PROG_TYPE_LSM: 3921 if (ptype == BPF_PROG_TYPE_LSM && 3922 prog->expected_attach_type != BPF_LSM_CGROUP) 3923 ret = -EINVAL; 3924 else 3925 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 3926 break; 3927 case BPF_PROG_TYPE_SCHED_CLS: 3928 ret = tcx_prog_attach(attr, prog); 3929 break; 3930 default: 3931 ret = -EINVAL; 3932 } 3933 3934 if (ret) 3935 bpf_prog_put(prog); 3936 return ret; 3937 } 3938 3939 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 3940 3941 static int bpf_prog_detach(const union bpf_attr *attr) 3942 { 3943 struct bpf_prog *prog = NULL; 3944 enum bpf_prog_type ptype; 3945 int ret; 3946 3947 if (CHECK_ATTR(BPF_PROG_DETACH)) 3948 return -EINVAL; 3949 3950 ptype = attach_type_to_prog_type(attr->attach_type); 3951 if (bpf_mprog_supported(ptype)) { 3952 if (ptype == BPF_PROG_TYPE_UNSPEC) 3953 return -EINVAL; 3954 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 3955 return -EINVAL; 3956 if (attr->attach_bpf_fd) { 3957 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 3958 if (IS_ERR(prog)) 3959 return PTR_ERR(prog); 3960 } 3961 } else if (attr->attach_flags || 3962 attr->relative_fd || 3963 attr->expected_revision) { 3964 return -EINVAL; 3965 } 3966 3967 switch (ptype) { 3968 case BPF_PROG_TYPE_SK_MSG: 3969 case BPF_PROG_TYPE_SK_SKB: 3970 ret = sock_map_prog_detach(attr, ptype); 3971 break; 3972 case BPF_PROG_TYPE_LIRC_MODE2: 3973 ret = lirc_prog_detach(attr); 3974 break; 3975 case BPF_PROG_TYPE_FLOW_DISSECTOR: 3976 ret = netns_bpf_prog_detach(attr, ptype); 3977 break; 3978 case BPF_PROG_TYPE_CGROUP_DEVICE: 3979 case BPF_PROG_TYPE_CGROUP_SKB: 3980 case BPF_PROG_TYPE_CGROUP_SOCK: 3981 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 3982 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 3983 case BPF_PROG_TYPE_CGROUP_SYSCTL: 3984 case BPF_PROG_TYPE_SOCK_OPS: 3985 case BPF_PROG_TYPE_LSM: 3986 ret = cgroup_bpf_prog_detach(attr, ptype); 3987 break; 3988 case BPF_PROG_TYPE_SCHED_CLS: 3989 ret = tcx_prog_detach(attr, prog); 3990 break; 3991 default: 3992 ret = -EINVAL; 3993 } 3994 3995 if (prog) 3996 bpf_prog_put(prog); 3997 return ret; 3998 } 3999 4000 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4001 4002 static int bpf_prog_query(const union bpf_attr *attr, 4003 union bpf_attr __user *uattr) 4004 { 4005 if (!capable(CAP_NET_ADMIN)) 4006 return -EPERM; 4007 if (CHECK_ATTR(BPF_PROG_QUERY)) 4008 return -EINVAL; 4009 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4010 return -EINVAL; 4011 4012 switch (attr->query.attach_type) { 4013 case BPF_CGROUP_INET_INGRESS: 4014 case BPF_CGROUP_INET_EGRESS: 4015 case BPF_CGROUP_INET_SOCK_CREATE: 4016 case BPF_CGROUP_INET_SOCK_RELEASE: 4017 case BPF_CGROUP_INET4_BIND: 4018 case BPF_CGROUP_INET6_BIND: 4019 case BPF_CGROUP_INET4_POST_BIND: 4020 case BPF_CGROUP_INET6_POST_BIND: 4021 case BPF_CGROUP_INET4_CONNECT: 4022 case BPF_CGROUP_INET6_CONNECT: 4023 case BPF_CGROUP_INET4_GETPEERNAME: 4024 case BPF_CGROUP_INET6_GETPEERNAME: 4025 case BPF_CGROUP_INET4_GETSOCKNAME: 4026 case BPF_CGROUP_INET6_GETSOCKNAME: 4027 case 
BPF_CGROUP_UDP4_SENDMSG: 4028 case BPF_CGROUP_UDP6_SENDMSG: 4029 case BPF_CGROUP_UDP4_RECVMSG: 4030 case BPF_CGROUP_UDP6_RECVMSG: 4031 case BPF_CGROUP_SOCK_OPS: 4032 case BPF_CGROUP_DEVICE: 4033 case BPF_CGROUP_SYSCTL: 4034 case BPF_CGROUP_GETSOCKOPT: 4035 case BPF_CGROUP_SETSOCKOPT: 4036 case BPF_LSM_CGROUP: 4037 return cgroup_bpf_prog_query(attr, uattr); 4038 case BPF_LIRC_MODE2: 4039 return lirc_prog_query(attr, uattr); 4040 case BPF_FLOW_DISSECTOR: 4041 case BPF_SK_LOOKUP: 4042 return netns_bpf_prog_query(attr, uattr); 4043 case BPF_SK_SKB_STREAM_PARSER: 4044 case BPF_SK_SKB_STREAM_VERDICT: 4045 case BPF_SK_MSG_VERDICT: 4046 case BPF_SK_SKB_VERDICT: 4047 return sock_map_bpf_prog_query(attr, uattr); 4048 case BPF_TCX_INGRESS: 4049 case BPF_TCX_EGRESS: 4050 return tcx_prog_query(attr, uattr); 4051 default: 4052 return -EINVAL; 4053 } 4054 } 4055 4056 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4057 4058 static int bpf_prog_test_run(const union bpf_attr *attr, 4059 union bpf_attr __user *uattr) 4060 { 4061 struct bpf_prog *prog; 4062 int ret = -ENOTSUPP; 4063 4064 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4065 return -EINVAL; 4066 4067 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4068 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4069 return -EINVAL; 4070 4071 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4072 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4073 return -EINVAL; 4074 4075 prog = bpf_prog_get(attr->test.prog_fd); 4076 if (IS_ERR(prog)) 4077 return PTR_ERR(prog); 4078 4079 if (prog->aux->ops->test_run) 4080 ret = prog->aux->ops->test_run(prog, attr, uattr); 4081 4082 bpf_prog_put(prog); 4083 return ret; 4084 } 4085 4086 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4087 4088 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4089 union bpf_attr __user *uattr, 4090 struct idr *idr, 4091 spinlock_t *lock) 4092 { 4093 u32 next_id = attr->start_id; 4094 int err = 0; 4095 4096 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4097 return -EINVAL; 4098 4099 if (!capable(CAP_SYS_ADMIN)) 4100 return -EPERM; 4101 4102 next_id++; 4103 spin_lock_bh(lock); 4104 if (!idr_get_next(idr, &next_id)) 4105 err = -ENOENT; 4106 spin_unlock_bh(lock); 4107 4108 if (!err) 4109 err = put_user(next_id, &uattr->next_id); 4110 4111 return err; 4112 } 4113 4114 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4115 { 4116 struct bpf_map *map; 4117 4118 spin_lock_bh(&map_idr_lock); 4119 again: 4120 map = idr_get_next(&map_idr, id); 4121 if (map) { 4122 map = __bpf_map_inc_not_zero(map, false); 4123 if (IS_ERR(map)) { 4124 (*id)++; 4125 goto again; 4126 } 4127 } 4128 spin_unlock_bh(&map_idr_lock); 4129 4130 return map; 4131 } 4132 4133 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4134 { 4135 struct bpf_prog *prog; 4136 4137 spin_lock_bh(&prog_idr_lock); 4138 again: 4139 prog = idr_get_next(&prog_idr, id); 4140 if (prog) { 4141 prog = bpf_prog_inc_not_zero(prog); 4142 if (IS_ERR(prog)) { 4143 (*id)++; 4144 goto again; 4145 } 4146 } 4147 spin_unlock_bh(&prog_idr_lock); 4148 4149 return prog; 4150 } 4151 4152 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4153 4154 struct bpf_prog *bpf_prog_by_id(u32 id) 4155 { 4156 struct bpf_prog *prog; 4157 4158 if (!id) 4159 return ERR_PTR(-ENOENT); 4160 4161 spin_lock_bh(&prog_idr_lock); 4162 prog = idr_find(&prog_idr, id); 4163 if (prog) 4164 prog = bpf_prog_inc_not_zero(prog); 4165 else 4166 prog = ERR_PTR(-ENOENT); 4167 spin_unlock_bh(&prog_idr_lock); 4168 return prog; 4169 } 4170 4171 static int 
bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4172 { 4173 struct bpf_prog *prog; 4174 u32 id = attr->prog_id; 4175 int fd; 4176 4177 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4178 return -EINVAL; 4179 4180 if (!capable(CAP_SYS_ADMIN)) 4181 return -EPERM; 4182 4183 prog = bpf_prog_by_id(id); 4184 if (IS_ERR(prog)) 4185 return PTR_ERR(prog); 4186 4187 fd = bpf_prog_new_fd(prog); 4188 if (fd < 0) 4189 bpf_prog_put(prog); 4190 4191 return fd; 4192 } 4193 4194 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4195 4196 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4197 { 4198 struct bpf_map *map; 4199 u32 id = attr->map_id; 4200 int f_flags; 4201 int fd; 4202 4203 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4204 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4205 return -EINVAL; 4206 4207 if (!capable(CAP_SYS_ADMIN)) 4208 return -EPERM; 4209 4210 f_flags = bpf_get_file_flag(attr->open_flags); 4211 if (f_flags < 0) 4212 return f_flags; 4213 4214 spin_lock_bh(&map_idr_lock); 4215 map = idr_find(&map_idr, id); 4216 if (map) 4217 map = __bpf_map_inc_not_zero(map, true); 4218 else 4219 map = ERR_PTR(-ENOENT); 4220 spin_unlock_bh(&map_idr_lock); 4221 4222 if (IS_ERR(map)) 4223 return PTR_ERR(map); 4224 4225 fd = bpf_map_new_fd(map, f_flags); 4226 if (fd < 0) 4227 bpf_map_put_with_uref(map); 4228 4229 return fd; 4230 } 4231 4232 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4233 unsigned long addr, u32 *off, 4234 u32 *type) 4235 { 4236 const struct bpf_map *map; 4237 int i; 4238 4239 mutex_lock(&prog->aux->used_maps_mutex); 4240 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4241 map = prog->aux->used_maps[i]; 4242 if (map == (void *)addr) { 4243 *type = BPF_PSEUDO_MAP_FD; 4244 goto out; 4245 } 4246 if (!map->ops->map_direct_value_meta) 4247 continue; 4248 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4249 *type = BPF_PSEUDO_MAP_VALUE; 4250 goto out; 4251 } 4252 } 4253 map = NULL; 4254 4255 out: 4256 mutex_unlock(&prog->aux->used_maps_mutex); 4257 return map; 4258 } 4259 4260 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4261 const struct cred *f_cred) 4262 { 4263 const struct bpf_map *map; 4264 struct bpf_insn *insns; 4265 u32 off, type; 4266 u64 imm; 4267 u8 code; 4268 int i; 4269 4270 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4271 GFP_USER); 4272 if (!insns) 4273 return insns; 4274 4275 for (i = 0; i < prog->len; i++) { 4276 code = insns[i].code; 4277 4278 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4279 insns[i].code = BPF_JMP | BPF_CALL; 4280 insns[i].imm = BPF_FUNC_tail_call; 4281 /* fall-through */ 4282 } 4283 if (code == (BPF_JMP | BPF_CALL) || 4284 code == (BPF_JMP | BPF_CALL_ARGS)) { 4285 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4286 insns[i].code = BPF_JMP | BPF_CALL; 4287 if (!bpf_dump_raw_ok(f_cred)) 4288 insns[i].imm = 0; 4289 continue; 4290 } 4291 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4292 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4293 continue; 4294 } 4295 4296 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4297 continue; 4298 4299 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4300 map = bpf_map_from_imm(prog, imm, &off, &type); 4301 if (map) { 4302 insns[i].src_reg = type; 4303 insns[i].imm = map->id; 4304 insns[i + 1].imm = off; 4305 continue; 4306 } 4307 } 4308 4309 return insns; 4310 } 4311 4312 static int set_info_rec_size(struct bpf_prog_info *info) 4313 { 4314 /* 4315 * Ensure info.*_rec_size is the same as kernel expected size 4316 * 4317 * 
or 4318 * 4319 * Only allow zero *_rec_size if both _rec_size and _cnt are 4320 * zero. In this case, the kernel will set the expected 4321 * _rec_size back to the info. 4322 */ 4323 4324 if ((info->nr_func_info || info->func_info_rec_size) && 4325 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4326 return -EINVAL; 4327 4328 if ((info->nr_line_info || info->line_info_rec_size) && 4329 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4330 return -EINVAL; 4331 4332 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4333 info->jited_line_info_rec_size != sizeof(__u64)) 4334 return -EINVAL; 4335 4336 info->func_info_rec_size = sizeof(struct bpf_func_info); 4337 info->line_info_rec_size = sizeof(struct bpf_line_info); 4338 info->jited_line_info_rec_size = sizeof(__u64); 4339 4340 return 0; 4341 } 4342 4343 static int bpf_prog_get_info_by_fd(struct file *file, 4344 struct bpf_prog *prog, 4345 const union bpf_attr *attr, 4346 union bpf_attr __user *uattr) 4347 { 4348 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4349 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4350 struct bpf_prog_info info; 4351 u32 info_len = attr->info.info_len; 4352 struct bpf_prog_kstats stats; 4353 char __user *uinsns; 4354 u32 ulen; 4355 int err; 4356 4357 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4358 if (err) 4359 return err; 4360 info_len = min_t(u32, sizeof(info), info_len); 4361 4362 memset(&info, 0, sizeof(info)); 4363 if (copy_from_user(&info, uinfo, info_len)) 4364 return -EFAULT; 4365 4366 info.type = prog->type; 4367 info.id = prog->aux->id; 4368 info.load_time = prog->aux->load_time; 4369 info.created_by_uid = from_kuid_munged(current_user_ns(), 4370 prog->aux->user->uid); 4371 info.gpl_compatible = prog->gpl_compatible; 4372 4373 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4374 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4375 4376 mutex_lock(&prog->aux->used_maps_mutex); 4377 ulen = info.nr_map_ids; 4378 info.nr_map_ids = prog->aux->used_map_cnt; 4379 ulen = min_t(u32, info.nr_map_ids, ulen); 4380 if (ulen) { 4381 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4382 u32 i; 4383 4384 for (i = 0; i < ulen; i++) 4385 if (put_user(prog->aux->used_maps[i]->id, 4386 &user_map_ids[i])) { 4387 mutex_unlock(&prog->aux->used_maps_mutex); 4388 return -EFAULT; 4389 } 4390 } 4391 mutex_unlock(&prog->aux->used_maps_mutex); 4392 4393 err = set_info_rec_size(&info); 4394 if (err) 4395 return err; 4396 4397 bpf_prog_get_stats(prog, &stats); 4398 info.run_time_ns = stats.nsecs; 4399 info.run_cnt = stats.cnt; 4400 info.recursion_misses = stats.misses; 4401 4402 info.verified_insns = prog->aux->verified_insns; 4403 4404 if (!bpf_capable()) { 4405 info.jited_prog_len = 0; 4406 info.xlated_prog_len = 0; 4407 info.nr_jited_ksyms = 0; 4408 info.nr_jited_func_lens = 0; 4409 info.nr_func_info = 0; 4410 info.nr_line_info = 0; 4411 info.nr_jited_line_info = 0; 4412 goto done; 4413 } 4414 4415 ulen = info.xlated_prog_len; 4416 info.xlated_prog_len = bpf_prog_insn_size(prog); 4417 if (info.xlated_prog_len && ulen) { 4418 struct bpf_insn *insns_sanitized; 4419 bool fault; 4420 4421 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4422 info.xlated_prog_insns = 0; 4423 goto done; 4424 } 4425 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4426 if (!insns_sanitized) 4427 return -ENOMEM; 4428 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4429 ulen = min_t(u32, info.xlated_prog_len, 
ulen); 4430 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4431 kfree(insns_sanitized); 4432 if (fault) 4433 return -EFAULT; 4434 } 4435 4436 if (bpf_prog_is_offloaded(prog->aux)) { 4437 err = bpf_prog_offload_info_fill(&info, prog); 4438 if (err) 4439 return err; 4440 goto done; 4441 } 4442 4443 /* NOTE: the following code is supposed to be skipped for offload. 4444 * bpf_prog_offload_info_fill() is the place to fill similar fields 4445 * for offload. 4446 */ 4447 ulen = info.jited_prog_len; 4448 if (prog->aux->func_cnt) { 4449 u32 i; 4450 4451 info.jited_prog_len = 0; 4452 for (i = 0; i < prog->aux->func_cnt; i++) 4453 info.jited_prog_len += prog->aux->func[i]->jited_len; 4454 } else { 4455 info.jited_prog_len = prog->jited_len; 4456 } 4457 4458 if (info.jited_prog_len && ulen) { 4459 if (bpf_dump_raw_ok(file->f_cred)) { 4460 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4461 ulen = min_t(u32, info.jited_prog_len, ulen); 4462 4463 /* for multi-function programs, copy the JITed 4464 * instructions for all the functions 4465 */ 4466 if (prog->aux->func_cnt) { 4467 u32 len, free, i; 4468 u8 *img; 4469 4470 free = ulen; 4471 for (i = 0; i < prog->aux->func_cnt; i++) { 4472 len = prog->aux->func[i]->jited_len; 4473 len = min_t(u32, len, free); 4474 img = (u8 *) prog->aux->func[i]->bpf_func; 4475 if (copy_to_user(uinsns, img, len)) 4476 return -EFAULT; 4477 uinsns += len; 4478 free -= len; 4479 if (!free) 4480 break; 4481 } 4482 } else { 4483 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4484 return -EFAULT; 4485 } 4486 } else { 4487 info.jited_prog_insns = 0; 4488 } 4489 } 4490 4491 ulen = info.nr_jited_ksyms; 4492 info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; 4493 if (ulen) { 4494 if (bpf_dump_raw_ok(file->f_cred)) { 4495 unsigned long ksym_addr; 4496 u64 __user *user_ksyms; 4497 u32 i; 4498 4499 /* copy the address of the kernel symbol 4500 * corresponding to each function 4501 */ 4502 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4503 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4504 if (prog->aux->func_cnt) { 4505 for (i = 0; i < ulen; i++) { 4506 ksym_addr = (unsigned long) 4507 prog->aux->func[i]->bpf_func; 4508 if (put_user((u64) ksym_addr, 4509 &user_ksyms[i])) 4510 return -EFAULT; 4511 } 4512 } else { 4513 ksym_addr = (unsigned long) prog->bpf_func; 4514 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4515 return -EFAULT; 4516 } 4517 } else { 4518 info.jited_ksyms = 0; 4519 } 4520 } 4521 4522 ulen = info.nr_jited_func_lens; 4523 info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; 4524 if (ulen) { 4525 if (bpf_dump_raw_ok(file->f_cred)) { 4526 u32 __user *user_lens; 4527 u32 func_len, i; 4528 4529 /* copy the JITed image lengths for each function */ 4530 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4531 user_lens = u64_to_user_ptr(info.jited_func_lens); 4532 if (prog->aux->func_cnt) { 4533 for (i = 0; i < ulen; i++) { 4534 func_len = 4535 prog->aux->func[i]->jited_len; 4536 if (put_user(func_len, &user_lens[i])) 4537 return -EFAULT; 4538 } 4539 } else { 4540 func_len = prog->jited_len; 4541 if (put_user(func_len, &user_lens[0])) 4542 return -EFAULT; 4543 } 4544 } else { 4545 info.jited_func_lens = 0; 4546 } 4547 } 4548 4549 if (prog->aux->btf) 4550 info.btf_id = btf_obj_id(prog->aux->btf); 4551 info.attach_btf_id = prog->aux->attach_btf_id; 4552 if (attach_btf) 4553 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4554 4555 ulen = info.nr_func_info; 4556 info.nr_func_info = prog->aux->func_info_cnt; 4557 if (info.nr_func_info && ulen) { 4558 char __user *user_finfo; 4559 4560 user_finfo = u64_to_user_ptr(info.func_info); 4561 ulen = min_t(u32, info.nr_func_info, ulen); 4562 if (copy_to_user(user_finfo, prog->aux->func_info, 4563 info.func_info_rec_size * ulen)) 4564 return -EFAULT; 4565 } 4566 4567 ulen = info.nr_line_info; 4568 info.nr_line_info = prog->aux->nr_linfo; 4569 if (info.nr_line_info && ulen) { 4570 __u8 __user *user_linfo; 4571 4572 user_linfo = u64_to_user_ptr(info.line_info); 4573 ulen = min_t(u32, info.nr_line_info, ulen); 4574 if (copy_to_user(user_linfo, prog->aux->linfo, 4575 info.line_info_rec_size * ulen)) 4576 return -EFAULT; 4577 } 4578 4579 ulen = info.nr_jited_line_info; 4580 if (prog->aux->jited_linfo) 4581 info.nr_jited_line_info = prog->aux->nr_linfo; 4582 else 4583 info.nr_jited_line_info = 0; 4584 if (info.nr_jited_line_info && ulen) { 4585 if (bpf_dump_raw_ok(file->f_cred)) { 4586 unsigned long line_addr; 4587 __u64 __user *user_linfo; 4588 u32 i; 4589 4590 user_linfo = u64_to_user_ptr(info.jited_line_info); 4591 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4592 for (i = 0; i < ulen; i++) { 4593 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4594 if (put_user((__u64)line_addr, &user_linfo[i])) 4595 return -EFAULT; 4596 } 4597 } else { 4598 info.jited_line_info = 0; 4599 } 4600 } 4601 4602 ulen = info.nr_prog_tags; 4603 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4604 if (ulen) { 4605 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4606 u32 i; 4607 4608 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4609 ulen = min_t(u32, info.nr_prog_tags, ulen); 4610 if (prog->aux->func_cnt) { 4611 for (i = 0; i < ulen; i++) { 4612 if (copy_to_user(user_prog_tags[i], 4613 prog->aux->func[i]->tag, 4614 BPF_TAG_SIZE)) 4615 return -EFAULT; 4616 } 4617 } else { 4618 if (copy_to_user(user_prog_tags[0], 4619 prog->tag, BPF_TAG_SIZE)) 4620 return -EFAULT; 4621 } 4622 } 4623 4624 done: 4625 if (copy_to_user(uinfo, &info, info_len) || 4626 put_user(info_len, &uattr->info.info_len)) 4627 return -EFAULT; 4628 4629 return 0; 4630 } 4631 4632 static int bpf_map_get_info_by_fd(struct file *file, 4633 struct bpf_map *map, 4634 const union bpf_attr *attr, 4635 union bpf_attr __user *uattr) 4636 { 4637 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4638 struct bpf_map_info info; 4639 u32 info_len = attr->info.info_len; 4640 int err; 4641 4642 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4643 if (err) 4644 return err; 4645 info_len = min_t(u32, sizeof(info), info_len); 4646 4647 memset(&info, 0, sizeof(info)); 4648 info.type = map->map_type; 4649 info.id = map->id; 4650 info.key_size = map->key_size; 4651 info.value_size = map->value_size; 4652 info.max_entries = map->max_entries; 4653 info.map_flags = map->map_flags; 4654 info.map_extra = map->map_extra; 4655 memcpy(info.name, map->name, sizeof(map->name)); 4656 4657 if (map->btf) { 4658 info.btf_id = btf_obj_id(map->btf); 4659 info.btf_key_type_id = map->btf_key_type_id; 4660 info.btf_value_type_id = map->btf_value_type_id; 4661 } 4662 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4663 4664 if (bpf_map_is_offloaded(map)) { 4665 err = bpf_map_offload_info_fill(&info, map); 4666 if (err) 4667 return err; 4668 } 4669 4670 if (copy_to_user(uinfo, &info, info_len) || 4671 put_user(info_len, &uattr->info.info_len)) 4672 return -EFAULT; 4673 4674 return 0; 4675 } 4676 4677 static int bpf_btf_get_info_by_fd(struct file *file, 4678 struct btf *btf, 4679 const union bpf_attr *attr, 4680 union bpf_attr __user *uattr) 4681 { 4682 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4683 u32 info_len = attr->info.info_len; 4684 int err; 4685 4686 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4687 if (err) 4688 return err; 4689 4690 return btf_get_info_by_fd(btf, attr, uattr); 4691 } 4692 4693 static int bpf_link_get_info_by_fd(struct file *file, 4694 struct bpf_link *link, 4695 const union bpf_attr *attr, 4696 union bpf_attr __user *uattr) 4697 { 4698 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4699 struct bpf_link_info info; 4700 u32 info_len = attr->info.info_len; 4701 int err; 4702 4703 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4704 if (err) 4705 return err; 4706 info_len = min_t(u32, sizeof(info), info_len); 4707 4708 memset(&info, 0, sizeof(info)); 4709 if (copy_from_user(&info, uinfo, info_len)) 4710 return -EFAULT; 4711 4712 info.type = link->type; 4713 info.id = link->id; 4714 if (link->prog) 4715 info.prog_id = link->prog->aux->id; 4716 4717 if (link->ops->fill_link_info) { 4718 err = link->ops->fill_link_info(link, &info); 4719 if (err) 4720 return err; 4721 } 4722 4723 if (copy_to_user(uinfo, &info, info_len) || 4724 put_user(info_len, &uattr->info.info_len)) 4725 return -EFAULT; 4726 4727 return 0; 4728 } 4729 4730 4731 #define 
BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 4732 4733 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 4734 union bpf_attr __user *uattr) 4735 { 4736 int ufd = attr->info.bpf_fd; 4737 struct fd f; 4738 int err; 4739 4740 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 4741 return -EINVAL; 4742 4743 f = fdget(ufd); 4744 if (!f.file) 4745 return -EBADFD; 4746 4747 if (f.file->f_op == &bpf_prog_fops) 4748 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 4749 uattr); 4750 else if (f.file->f_op == &bpf_map_fops) 4751 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 4752 uattr); 4753 else if (f.file->f_op == &btf_fops) 4754 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 4755 else if (f.file->f_op == &bpf_link_fops) 4756 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 4757 attr, uattr); 4758 else 4759 err = -EINVAL; 4760 4761 fdput(f); 4762 return err; 4763 } 4764 4765 #define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size 4766 4767 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 4768 { 4769 if (CHECK_ATTR(BPF_BTF_LOAD)) 4770 return -EINVAL; 4771 4772 if (!bpf_capable()) 4773 return -EPERM; 4774 4775 return btf_new_fd(attr, uattr, uattr_size); 4776 } 4777 4778 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 4779 4780 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 4781 { 4782 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 4783 return -EINVAL; 4784 4785 if (!capable(CAP_SYS_ADMIN)) 4786 return -EPERM; 4787 4788 return btf_get_fd_by_id(attr->btf_id); 4789 } 4790 4791 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 4792 union bpf_attr __user *uattr, 4793 u32 prog_id, u32 fd_type, 4794 const char *buf, u64 probe_offset, 4795 u64 probe_addr) 4796 { 4797 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 4798 u32 len = buf ? strlen(buf) : 0, input_len; 4799 int err = 0; 4800 4801 if (put_user(len, &uattr->task_fd_query.buf_len)) 4802 return -EFAULT; 4803 input_len = attr->task_fd_query.buf_len; 4804 if (input_len && ubuf) { 4805 if (!len) { 4806 /* nothing to copy, just make ubuf NULL terminated */ 4807 char zero = '\0'; 4808 4809 if (put_user(zero, ubuf)) 4810 return -EFAULT; 4811 } else if (input_len >= len + 1) { 4812 /* ubuf can hold the string with NULL terminator */ 4813 if (copy_to_user(ubuf, buf, len + 1)) 4814 return -EFAULT; 4815 } else { 4816 /* ubuf cannot hold the string with NULL terminator, 4817 * do a partial copy with NULL terminator. 
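 * For example, if buf is "sys_enter" (len == 9) but input_len is only 4,
 * the three bytes "sys" are copied, ubuf[3] is set to '\0' and -ENOSPC
 * is returned; the full length was already reported via buf_len above.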
4818 */ 4819 char zero = '\0'; 4820 4821 err = -ENOSPC; 4822 if (copy_to_user(ubuf, buf, input_len - 1)) 4823 return -EFAULT; 4824 if (put_user(zero, ubuf + input_len - 1)) 4825 return -EFAULT; 4826 } 4827 } 4828 4829 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 4830 put_user(fd_type, &uattr->task_fd_query.fd_type) || 4831 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 4832 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 4833 return -EFAULT; 4834 4835 return err; 4836 } 4837 4838 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 4839 4840 static int bpf_task_fd_query(const union bpf_attr *attr, 4841 union bpf_attr __user *uattr) 4842 { 4843 pid_t pid = attr->task_fd_query.pid; 4844 u32 fd = attr->task_fd_query.fd; 4845 const struct perf_event *event; 4846 struct task_struct *task; 4847 struct file *file; 4848 int err; 4849 4850 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 4851 return -EINVAL; 4852 4853 if (!capable(CAP_SYS_ADMIN)) 4854 return -EPERM; 4855 4856 if (attr->task_fd_query.flags != 0) 4857 return -EINVAL; 4858 4859 rcu_read_lock(); 4860 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 4861 rcu_read_unlock(); 4862 if (!task) 4863 return -ENOENT; 4864 4865 err = 0; 4866 file = fget_task(task, fd); 4867 put_task_struct(task); 4868 if (!file) 4869 return -EBADF; 4870 4871 if (file->f_op == &bpf_link_fops) { 4872 struct bpf_link *link = file->private_data; 4873 4874 if (link->ops == &bpf_raw_tp_link_lops) { 4875 struct bpf_raw_tp_link *raw_tp = 4876 container_of(link, struct bpf_raw_tp_link, link); 4877 struct bpf_raw_event_map *btp = raw_tp->btp; 4878 4879 err = bpf_task_fd_query_copy(attr, uattr, 4880 raw_tp->link.prog->aux->id, 4881 BPF_FD_TYPE_RAW_TRACEPOINT, 4882 btp->tp->name, 0, 0); 4883 goto put_file; 4884 } 4885 goto out_not_supp; 4886 } 4887 4888 event = perf_get_event(file); 4889 if (!IS_ERR(event)) { 4890 u64 probe_offset, probe_addr; 4891 u32 prog_id, fd_type; 4892 const char *buf; 4893 4894 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 4895 &buf, &probe_offset, 4896 &probe_addr); 4897 if (!err) 4898 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 4899 fd_type, buf, 4900 probe_offset, 4901 probe_addr); 4902 goto put_file; 4903 } 4904 4905 out_not_supp: 4906 err = -ENOTSUPP; 4907 put_file: 4908 fput(file); 4909 return err; 4910 } 4911 4912 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 4913 4914 #define BPF_DO_BATCH(fn, ...) 
\ 4915 do { \ 4916 if (!fn) { \ 4917 err = -ENOTSUPP; \ 4918 goto err_put; \ 4919 } \ 4920 err = fn(__VA_ARGS__); \ 4921 } while (0) 4922 4923 static int bpf_map_do_batch(const union bpf_attr *attr, 4924 union bpf_attr __user *uattr, 4925 int cmd) 4926 { 4927 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 4928 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 4929 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 4930 struct bpf_map *map; 4931 int err, ufd; 4932 struct fd f; 4933 4934 if (CHECK_ATTR(BPF_MAP_BATCH)) 4935 return -EINVAL; 4936 4937 ufd = attr->batch.map_fd; 4938 f = fdget(ufd); 4939 map = __bpf_map_get(f); 4940 if (IS_ERR(map)) 4941 return PTR_ERR(map); 4942 if (has_write) 4943 bpf_map_write_active_inc(map); 4944 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 4945 err = -EPERM; 4946 goto err_put; 4947 } 4948 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 4949 err = -EPERM; 4950 goto err_put; 4951 } 4952 4953 if (cmd == BPF_MAP_LOOKUP_BATCH) 4954 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 4955 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 4956 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 4957 else if (cmd == BPF_MAP_UPDATE_BATCH) 4958 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr); 4959 else 4960 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 4961 err_put: 4962 if (has_write) 4963 bpf_map_write_active_dec(map); 4964 fdput(f); 4965 return err; 4966 } 4967 4968 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 4969 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 4970 { 4971 struct bpf_prog *prog; 4972 int ret; 4973 4974 if (CHECK_ATTR(BPF_LINK_CREATE)) 4975 return -EINVAL; 4976 4977 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 4978 return bpf_struct_ops_link_create(attr); 4979 4980 prog = bpf_prog_get(attr->link_create.prog_fd); 4981 if (IS_ERR(prog)) 4982 return PTR_ERR(prog); 4983 4984 ret = bpf_prog_attach_check_attach_type(prog, 4985 attr->link_create.attach_type); 4986 if (ret) 4987 goto out; 4988 4989 switch (prog->type) { 4990 case BPF_PROG_TYPE_CGROUP_SKB: 4991 case BPF_PROG_TYPE_CGROUP_SOCK: 4992 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4993 case BPF_PROG_TYPE_SOCK_OPS: 4994 case BPF_PROG_TYPE_CGROUP_DEVICE: 4995 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4996 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4997 ret = cgroup_bpf_link_attach(attr, prog); 4998 break; 4999 case BPF_PROG_TYPE_EXT: 5000 ret = bpf_tracing_prog_attach(prog, 5001 attr->link_create.target_fd, 5002 attr->link_create.target_btf_id, 5003 attr->link_create.tracing.cookie); 5004 break; 5005 case BPF_PROG_TYPE_LSM: 5006 case BPF_PROG_TYPE_TRACING: 5007 if (attr->link_create.attach_type != prog->expected_attach_type) { 5008 ret = -EINVAL; 5009 goto out; 5010 } 5011 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5012 ret = bpf_raw_tp_link_attach(prog, NULL); 5013 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5014 ret = bpf_iter_link_attach(attr, uattr, prog); 5015 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5016 ret = cgroup_bpf_link_attach(attr, prog); 5017 else 5018 ret = bpf_tracing_prog_attach(prog, 5019 attr->link_create.target_fd, 5020 attr->link_create.target_btf_id, 5021 attr->link_create.tracing.cookie); 5022 break; 5023 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5024 case BPF_PROG_TYPE_SK_LOOKUP: 5025 ret = netns_bpf_link_create(attr, prog); 5026 break; 5027 #ifdef CONFIG_NET 5028 case BPF_PROG_TYPE_XDP: 5029 ret = bpf_xdp_link_attach(attr, prog); 5030 break; 5031 
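	/* Like tcx_prog_attach() on the BPF_PROG_ATTACH path above, this
	 * goes through the tcx/mprog layer of the target device.
	 */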
case BPF_PROG_TYPE_SCHED_CLS: 5032 ret = tcx_link_attach(attr, prog); 5033 break; 5034 case BPF_PROG_TYPE_NETFILTER: 5035 ret = bpf_nf_link_attach(attr, prog); 5036 break; 5037 #endif 5038 case BPF_PROG_TYPE_PERF_EVENT: 5039 case BPF_PROG_TYPE_TRACEPOINT: 5040 ret = bpf_perf_link_attach(attr, prog); 5041 break; 5042 case BPF_PROG_TYPE_KPROBE: 5043 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5044 ret = bpf_perf_link_attach(attr, prog); 5045 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) 5046 ret = bpf_kprobe_multi_link_attach(attr, prog); 5047 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) 5048 ret = bpf_uprobe_multi_link_attach(attr, prog); 5049 break; 5050 default: 5051 ret = -EINVAL; 5052 } 5053 5054 out: 5055 if (ret < 0) 5056 bpf_prog_put(prog); 5057 return ret; 5058 } 5059 5060 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5061 { 5062 struct bpf_map *new_map, *old_map = NULL; 5063 int ret; 5064 5065 new_map = bpf_map_get(attr->link_update.new_map_fd); 5066 if (IS_ERR(new_map)) 5067 return PTR_ERR(new_map); 5068 5069 if (attr->link_update.flags & BPF_F_REPLACE) { 5070 old_map = bpf_map_get(attr->link_update.old_map_fd); 5071 if (IS_ERR(old_map)) { 5072 ret = PTR_ERR(old_map); 5073 goto out_put; 5074 } 5075 } else if (attr->link_update.old_map_fd) { 5076 ret = -EINVAL; 5077 goto out_put; 5078 } 5079 5080 ret = link->ops->update_map(link, new_map, old_map); 5081 5082 if (old_map) 5083 bpf_map_put(old_map); 5084 out_put: 5085 bpf_map_put(new_map); 5086 return ret; 5087 } 5088 5089 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5090 5091 static int link_update(union bpf_attr *attr) 5092 { 5093 struct bpf_prog *old_prog = NULL, *new_prog; 5094 struct bpf_link *link; 5095 u32 flags; 5096 int ret; 5097 5098 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5099 return -EINVAL; 5100 5101 flags = attr->link_update.flags; 5102 if (flags & ~BPF_F_REPLACE) 5103 return -EINVAL; 5104 5105 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5106 if (IS_ERR(link)) 5107 return PTR_ERR(link); 5108 5109 if (link->ops->update_map) { 5110 ret = link_update_map(link, attr); 5111 goto out_put_link; 5112 } 5113 5114 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5115 if (IS_ERR(new_prog)) { 5116 ret = PTR_ERR(new_prog); 5117 goto out_put_link; 5118 } 5119 5120 if (flags & BPF_F_REPLACE) { 5121 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5122 if (IS_ERR(old_prog)) { 5123 ret = PTR_ERR(old_prog); 5124 old_prog = NULL; 5125 goto out_put_progs; 5126 } 5127 } else if (attr->link_update.old_prog_fd) { 5128 ret = -EINVAL; 5129 goto out_put_progs; 5130 } 5131 5132 if (link->ops->update_prog) 5133 ret = link->ops->update_prog(link, new_prog, old_prog); 5134 else 5135 ret = -EINVAL; 5136 5137 out_put_progs: 5138 if (old_prog) 5139 bpf_prog_put(old_prog); 5140 if (ret) 5141 bpf_prog_put(new_prog); 5142 out_put_link: 5143 bpf_link_put_direct(link); 5144 return ret; 5145 } 5146 5147 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5148 5149 static int link_detach(union bpf_attr *attr) 5150 { 5151 struct bpf_link *link; 5152 int ret; 5153 5154 if (CHECK_ATTR(BPF_LINK_DETACH)) 5155 return -EINVAL; 5156 5157 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5158 if (IS_ERR(link)) 5159 return PTR_ERR(link); 5160 5161 if (link->ops->detach) 5162 ret = link->ops->detach(link); 5163 else 5164 ret = -EOPNOTSUPP; 5165 5166 bpf_link_put_direct(link); 5167 return ret; 5168 } 5169 5170 static struct bpf_link 
*bpf_link_inc_not_zero(struct bpf_link *link) 5171 { 5172 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5173 } 5174 5175 struct bpf_link *bpf_link_by_id(u32 id) 5176 { 5177 struct bpf_link *link; 5178 5179 if (!id) 5180 return ERR_PTR(-ENOENT); 5181 5182 spin_lock_bh(&link_idr_lock); 5183 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5184 link = idr_find(&link_idr, id); 5185 if (link) { 5186 if (link->id) 5187 link = bpf_link_inc_not_zero(link); 5188 else 5189 link = ERR_PTR(-EAGAIN); 5190 } else { 5191 link = ERR_PTR(-ENOENT); 5192 } 5193 spin_unlock_bh(&link_idr_lock); 5194 return link; 5195 } 5196 5197 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5198 { 5199 struct bpf_link *link; 5200 5201 spin_lock_bh(&link_idr_lock); 5202 again: 5203 link = idr_get_next(&link_idr, id); 5204 if (link) { 5205 link = bpf_link_inc_not_zero(link); 5206 if (IS_ERR(link)) { 5207 (*id)++; 5208 goto again; 5209 } 5210 } 5211 spin_unlock_bh(&link_idr_lock); 5212 5213 return link; 5214 } 5215 5216 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5217 5218 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5219 { 5220 struct bpf_link *link; 5221 u32 id = attr->link_id; 5222 int fd; 5223 5224 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5225 return -EINVAL; 5226 5227 if (!capable(CAP_SYS_ADMIN)) 5228 return -EPERM; 5229 5230 link = bpf_link_by_id(id); 5231 if (IS_ERR(link)) 5232 return PTR_ERR(link); 5233 5234 fd = bpf_link_new_fd(link); 5235 if (fd < 0) 5236 bpf_link_put_direct(link); 5237 5238 return fd; 5239 } 5240 5241 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5242 5243 static int bpf_stats_release(struct inode *inode, struct file *file) 5244 { 5245 mutex_lock(&bpf_stats_enabled_mutex); 5246 static_key_slow_dec(&bpf_stats_enabled_key.key); 5247 mutex_unlock(&bpf_stats_enabled_mutex); 5248 return 0; 5249 } 5250 5251 static const struct file_operations bpf_stats_fops = { 5252 .release = bpf_stats_release, 5253 }; 5254 5255 static int bpf_enable_runtime_stats(void) 5256 { 5257 int fd; 5258 5259 mutex_lock(&bpf_stats_enabled_mutex); 5260 5261 /* Set a very high limit to avoid overflow */ 5262 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5263 mutex_unlock(&bpf_stats_enabled_mutex); 5264 return -EBUSY; 5265 } 5266 5267 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5268 if (fd >= 0) 5269 static_key_slow_inc(&bpf_stats_enabled_key.key); 5270 5271 mutex_unlock(&bpf_stats_enabled_mutex); 5272 return fd; 5273 } 5274 5275 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5276 5277 static int bpf_enable_stats(union bpf_attr *attr) 5278 { 5279 5280 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5281 return -EINVAL; 5282 5283 if (!capable(CAP_SYS_ADMIN)) 5284 return -EPERM; 5285 5286 switch (attr->enable_stats.type) { 5287 case BPF_STATS_RUN_TIME: 5288 return bpf_enable_runtime_stats(); 5289 default: 5290 break; 5291 } 5292 return -EINVAL; 5293 } 5294 5295 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5296 5297 static int bpf_iter_create(union bpf_attr *attr) 5298 { 5299 struct bpf_link *link; 5300 int err; 5301 5302 if (CHECK_ATTR(BPF_ITER_CREATE)) 5303 return -EINVAL; 5304 5305 if (attr->iter_create.flags) 5306 return -EINVAL; 5307 5308 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5309 if (IS_ERR(link)) 5310 return PTR_ERR(link); 5311 5312 err = bpf_iter_new_fd(link); 5313 bpf_link_put_direct(link); 5314 5315 return err; 5316 } 5317 5318 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5319 5320 
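/* BPF_PROG_BIND_MAP (handled below) adds a map to prog->aux->used_maps so
 * that the map stays alive for at least as long as the program, even when
 * the program's instructions never reference the map directly. A minimal
 * user-space sketch, assuming only the uapi header and a raw syscall(2)
 * wrapper; prog_fd and map_fd are illustrative placeholders. Note that
 * prog_bind_map.flags must be zero, as enforced below:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int prog_bind_map(int prog_fd, int map_fd)
 *	{
 *		union bpf_attr attr = {};
 *
 *		attr.prog_bind_map.prog_fd = prog_fd;
 *		attr.prog_bind_map.map_fd = map_fd;
 *		return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr,
 *			       sizeof(attr));
 *	}
 */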
static int bpf_prog_bind_map(union bpf_attr *attr) 5321 { 5322 struct bpf_prog *prog; 5323 struct bpf_map *map; 5324 struct bpf_map **used_maps_old, **used_maps_new; 5325 int i, ret = 0; 5326 5327 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5328 return -EINVAL; 5329 5330 if (attr->prog_bind_map.flags) 5331 return -EINVAL; 5332 5333 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5334 if (IS_ERR(prog)) 5335 return PTR_ERR(prog); 5336 5337 map = bpf_map_get(attr->prog_bind_map.map_fd); 5338 if (IS_ERR(map)) { 5339 ret = PTR_ERR(map); 5340 goto out_prog_put; 5341 } 5342 5343 mutex_lock(&prog->aux->used_maps_mutex); 5344 5345 used_maps_old = prog->aux->used_maps; 5346 5347 for (i = 0; i < prog->aux->used_map_cnt; i++) 5348 if (used_maps_old[i] == map) { 5349 bpf_map_put(map); 5350 goto out_unlock; 5351 } 5352 5353 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5354 sizeof(used_maps_new[0]), 5355 GFP_KERNEL); 5356 if (!used_maps_new) { 5357 ret = -ENOMEM; 5358 goto out_unlock; 5359 } 5360 5361 memcpy(used_maps_new, used_maps_old, 5362 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5363 used_maps_new[prog->aux->used_map_cnt] = map; 5364 5365 prog->aux->used_map_cnt++; 5366 prog->aux->used_maps = used_maps_new; 5367 5368 kfree(used_maps_old); 5369 5370 out_unlock: 5371 mutex_unlock(&prog->aux->used_maps_mutex); 5372 5373 if (ret) 5374 bpf_map_put(map); 5375 out_prog_put: 5376 bpf_prog_put(prog); 5377 return ret; 5378 } 5379 5380 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) 5381 { 5382 union bpf_attr attr; 5383 int err; 5384 5385 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5386 if (err) 5387 return err; 5388 size = min_t(u32, size, sizeof(attr)); 5389 5390 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5391 memset(&attr, 0, sizeof(attr)); 5392 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5393 return -EFAULT; 5394 5395 err = security_bpf(cmd, &attr, size); 5396 if (err < 0) 5397 return err; 5398 5399 switch (cmd) { 5400 case BPF_MAP_CREATE: 5401 err = map_create(&attr); 5402 break; 5403 case BPF_MAP_LOOKUP_ELEM: 5404 err = map_lookup_elem(&attr); 5405 break; 5406 case BPF_MAP_UPDATE_ELEM: 5407 err = map_update_elem(&attr, uattr); 5408 break; 5409 case BPF_MAP_DELETE_ELEM: 5410 err = map_delete_elem(&attr, uattr); 5411 break; 5412 case BPF_MAP_GET_NEXT_KEY: 5413 err = map_get_next_key(&attr); 5414 break; 5415 case BPF_MAP_FREEZE: 5416 err = map_freeze(&attr); 5417 break; 5418 case BPF_PROG_LOAD: 5419 err = bpf_prog_load(&attr, uattr, size); 5420 break; 5421 case BPF_OBJ_PIN: 5422 err = bpf_obj_pin(&attr); 5423 break; 5424 case BPF_OBJ_GET: 5425 err = bpf_obj_get(&attr); 5426 break; 5427 case BPF_PROG_ATTACH: 5428 err = bpf_prog_attach(&attr); 5429 break; 5430 case BPF_PROG_DETACH: 5431 err = bpf_prog_detach(&attr); 5432 break; 5433 case BPF_PROG_QUERY: 5434 err = bpf_prog_query(&attr, uattr.user); 5435 break; 5436 case BPF_PROG_TEST_RUN: 5437 err = bpf_prog_test_run(&attr, uattr.user); 5438 break; 5439 case BPF_PROG_GET_NEXT_ID: 5440 err = bpf_obj_get_next_id(&attr, uattr.user, 5441 &prog_idr, &prog_idr_lock); 5442 break; 5443 case BPF_MAP_GET_NEXT_ID: 5444 err = bpf_obj_get_next_id(&attr, uattr.user, 5445 &map_idr, &map_idr_lock); 5446 break; 5447 case BPF_BTF_GET_NEXT_ID: 5448 err = bpf_obj_get_next_id(&attr, uattr.user, 5449 &btf_idr, &btf_idr_lock); 5450 break; 5451 case BPF_PROG_GET_FD_BY_ID: 5452 err = bpf_prog_get_fd_by_id(&attr); 5453 break; 5454 case BPF_MAP_GET_FD_BY_ID: 5455 err = bpf_map_get_fd_by_id(&attr); 5456 
break; 5457 case BPF_OBJ_GET_INFO_BY_FD: 5458 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5459 break; 5460 case BPF_RAW_TRACEPOINT_OPEN: 5461 err = bpf_raw_tracepoint_open(&attr); 5462 break; 5463 case BPF_BTF_LOAD: 5464 err = bpf_btf_load(&attr, uattr, size); 5465 break; 5466 case BPF_BTF_GET_FD_BY_ID: 5467 err = bpf_btf_get_fd_by_id(&attr); 5468 break; 5469 case BPF_TASK_FD_QUERY: 5470 err = bpf_task_fd_query(&attr, uattr.user); 5471 break; 5472 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5473 err = map_lookup_and_delete_elem(&attr); 5474 break; 5475 case BPF_MAP_LOOKUP_BATCH: 5476 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5477 break; 5478 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5479 err = bpf_map_do_batch(&attr, uattr.user, 5480 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5481 break; 5482 case BPF_MAP_UPDATE_BATCH: 5483 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5484 break; 5485 case BPF_MAP_DELETE_BATCH: 5486 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5487 break; 5488 case BPF_LINK_CREATE: 5489 err = link_create(&attr, uattr); 5490 break; 5491 case BPF_LINK_UPDATE: 5492 err = link_update(&attr); 5493 break; 5494 case BPF_LINK_GET_FD_BY_ID: 5495 err = bpf_link_get_fd_by_id(&attr); 5496 break; 5497 case BPF_LINK_GET_NEXT_ID: 5498 err = bpf_obj_get_next_id(&attr, uattr.user, 5499 &link_idr, &link_idr_lock); 5500 break; 5501 case BPF_ENABLE_STATS: 5502 err = bpf_enable_stats(&attr); 5503 break; 5504 case BPF_ITER_CREATE: 5505 err = bpf_iter_create(&attr); 5506 break; 5507 case BPF_LINK_DETACH: 5508 err = link_detach(&attr); 5509 break; 5510 case BPF_PROG_BIND_MAP: 5511 err = bpf_prog_bind_map(&attr); 5512 break; 5513 default: 5514 err = -EINVAL; 5515 break; 5516 } 5517 5518 return err; 5519 } 5520 5521 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5522 { 5523 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5524 } 5525 5526 static bool syscall_prog_is_valid_access(int off, int size, 5527 enum bpf_access_type type, 5528 const struct bpf_prog *prog, 5529 struct bpf_insn_access_aux *info) 5530 { 5531 if (off < 0 || off >= U16_MAX) 5532 return false; 5533 if (off % size != 0) 5534 return false; 5535 return true; 5536 } 5537 5538 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5539 { 5540 switch (cmd) { 5541 case BPF_MAP_CREATE: 5542 case BPF_MAP_DELETE_ELEM: 5543 case BPF_MAP_UPDATE_ELEM: 5544 case BPF_MAP_FREEZE: 5545 case BPF_MAP_GET_FD_BY_ID: 5546 case BPF_PROG_LOAD: 5547 case BPF_BTF_LOAD: 5548 case BPF_LINK_CREATE: 5549 case BPF_RAW_TRACEPOINT_OPEN: 5550 break; 5551 default: 5552 return -EINVAL; 5553 } 5554 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 5555 } 5556 5557 5558 /* To shut up -Wmissing-prototypes. 5559 * This function is used by the kernel light skeleton 5560 * to load bpf programs when modules are loaded or during kernel boot. 
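 * The loader program embedded in such a skeleton is of type
 * BPF_PROG_TYPE_SYSCALL: kern_sys_bpf() runs it via BPF_PROG_TEST_RUN
 * below, and the program issues further commands through the
 * bpf_sys_bpf() helper, which re-enters __sys_bpf() with a
 * KERNEL_BPFPTR so that attributes are read from kernel memory instead
 * of being copied from user space.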
5561 * See tools/lib/bpf/skel_internal.h 5562 */ 5563 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5564 5565 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5566 { 5567 struct bpf_prog * __maybe_unused prog; 5568 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5569 5570 switch (cmd) { 5571 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5572 case BPF_PROG_TEST_RUN: 5573 if (attr->test.data_in || attr->test.data_out || 5574 attr->test.ctx_out || attr->test.duration || 5575 attr->test.repeat || attr->test.flags) 5576 return -EINVAL; 5577 5578 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5579 if (IS_ERR(prog)) 5580 return PTR_ERR(prog); 5581 5582 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 5583 attr->test.ctx_size_in > U16_MAX) { 5584 bpf_prog_put(prog); 5585 return -EINVAL; 5586 } 5587 5588 run_ctx.bpf_cookie = 0; 5589 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 5590 /* recursion detected */ 5591 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 5592 bpf_prog_put(prog); 5593 return -EBUSY; 5594 } 5595 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 5596 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 5597 &run_ctx); 5598 bpf_prog_put(prog); 5599 return 0; 5600 #endif 5601 default: 5602 return ____bpf_sys_bpf(cmd, attr, size); 5603 } 5604 } 5605 EXPORT_SYMBOL(kern_sys_bpf); 5606 5607 static const struct bpf_func_proto bpf_sys_bpf_proto = { 5608 .func = bpf_sys_bpf, 5609 .gpl_only = false, 5610 .ret_type = RET_INTEGER, 5611 .arg1_type = ARG_ANYTHING, 5612 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5613 .arg3_type = ARG_CONST_SIZE, 5614 }; 5615 5616 const struct bpf_func_proto * __weak 5617 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5618 { 5619 return bpf_base_func_proto(func_id); 5620 } 5621 5622 BPF_CALL_1(bpf_sys_close, u32, fd) 5623 { 5624 /* When bpf program calls this helper there should not be 5625 * an fdget() without matching completed fdput(). 5626 * This helper is allowed in the following callchain only: 5627 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 5628 */ 5629 return close_fd(fd); 5630 } 5631 5632 static const struct bpf_func_proto bpf_sys_close_proto = { 5633 .func = bpf_sys_close, 5634 .gpl_only = false, 5635 .ret_type = RET_INTEGER, 5636 .arg1_type = ARG_ANYTHING, 5637 }; 5638 5639 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 5640 { 5641 if (flags) 5642 return -EINVAL; 5643 5644 if (name_sz <= 1 || name[name_sz - 1]) 5645 return -EINVAL; 5646 5647 if (!bpf_dump_raw_ok(current_cred())) 5648 return -EPERM; 5649 5650 *res = kallsyms_lookup_name(name); 5651 return *res ? 0 : -ENOENT; 5652 } 5653 5654 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 5655 .func = bpf_kallsyms_lookup_name, 5656 .gpl_only = false, 5657 .ret_type = RET_INTEGER, 5658 .arg1_type = ARG_PTR_TO_MEM, 5659 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 5660 .arg3_type = ARG_ANYTHING, 5661 .arg4_type = ARG_PTR_TO_LONG, 5662 }; 5663 5664 static const struct bpf_func_proto * 5665 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5666 { 5667 switch (func_id) { 5668 case BPF_FUNC_sys_bpf: 5669 return !perfmon_capable() ? 
NULL : &bpf_sys_bpf_proto; 5670 case BPF_FUNC_btf_find_by_name_kind: 5671 return &bpf_btf_find_by_name_kind_proto; 5672 case BPF_FUNC_sys_close: 5673 return &bpf_sys_close_proto; 5674 case BPF_FUNC_kallsyms_lookup_name: 5675 return &bpf_kallsyms_lookup_name_proto; 5676 default: 5677 return tracing_prog_func_proto(func_id, prog); 5678 } 5679 } 5680 5681 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 5682 .get_func_proto = syscall_prog_func_proto, 5683 .is_valid_access = syscall_prog_is_valid_access, 5684 }; 5685 5686 const struct bpf_prog_ops bpf_syscall_prog_ops = { 5687 .test_run = bpf_prog_test_run_syscall, 5688 }; 5689 5690 #ifdef CONFIG_SYSCTL 5691 static int bpf_stats_handler(struct ctl_table *table, int write, 5692 void *buffer, size_t *lenp, loff_t *ppos) 5693 { 5694 struct static_key *key = (struct static_key *)table->data; 5695 static int saved_val; 5696 int val, ret; 5697 struct ctl_table tmp = { 5698 .data = &val, 5699 .maxlen = sizeof(val), 5700 .mode = table->mode, 5701 .extra1 = SYSCTL_ZERO, 5702 .extra2 = SYSCTL_ONE, 5703 }; 5704 5705 if (write && !capable(CAP_SYS_ADMIN)) 5706 return -EPERM; 5707 5708 mutex_lock(&bpf_stats_enabled_mutex); 5709 val = saved_val; 5710 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5711 if (write && !ret && val != saved_val) { 5712 if (val) 5713 static_key_slow_inc(key); 5714 else 5715 static_key_slow_dec(key); 5716 saved_val = val; 5717 } 5718 mutex_unlock(&bpf_stats_enabled_mutex); 5719 return ret; 5720 } 5721 5722 void __weak unpriv_ebpf_notify(int new_state) 5723 { 5724 } 5725 5726 static int bpf_unpriv_handler(struct ctl_table *table, int write, 5727 void *buffer, size_t *lenp, loff_t *ppos) 5728 { 5729 int ret, unpriv_enable = *(int *)table->data; 5730 bool locked_state = unpriv_enable == 1; 5731 struct ctl_table tmp = *table; 5732 5733 if (write && !capable(CAP_SYS_ADMIN)) 5734 return -EPERM; 5735 5736 tmp.data = &unpriv_enable; 5737 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 5738 if (write && !ret) { 5739 if (locked_state && unpriv_enable != 1) 5740 return -EPERM; 5741 *(int *)table->data = unpriv_enable; 5742 } 5743 5744 if (write) 5745 unpriv_ebpf_notify(unpriv_enable); 5746 5747 return ret; 5748 } 5749 5750 static struct ctl_table bpf_syscall_table[] = { 5751 { 5752 .procname = "unprivileged_bpf_disabled", 5753 .data = &sysctl_unprivileged_bpf_disabled, 5754 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 5755 .mode = 0644, 5756 .proc_handler = bpf_unpriv_handler, 5757 .extra1 = SYSCTL_ZERO, 5758 .extra2 = SYSCTL_TWO, 5759 }, 5760 { 5761 .procname = "bpf_stats_enabled", 5762 .data = &bpf_stats_enabled_key.key, 5763 .mode = 0644, 5764 .proc_handler = bpf_stats_handler, 5765 }, 5766 { } 5767 }; 5768 5769 static int __init bpf_syscall_sysctl_init(void) 5770 { 5771 register_sysctl_init("kernel", bpf_syscall_table); 5772 return 0; 5773 } 5774 late_initcall(bpf_syscall_sysctl_init); 5775 #endif /* CONFIG_SYSCTL */ 5776
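/* Putting the pieces above together: a minimal, hypothetical user-space
 * walk over all loaded programs via BPF_PROG_GET_NEXT_ID,
 * BPF_PROG_GET_FD_BY_ID and BPF_OBJ_GET_INFO_BY_FD. CAP_SYS_ADMIN is
 * required for the ID lookups, and run_cnt/run_time_ns stay zero unless
 * runtime stats were enabled via BPF_ENABLE_STATS or the
 * kernel.bpf_stats_enabled sysctl handled above:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int main(void)
 *	{
 *		union bpf_attr attr;
 *		__u32 id = 0;
 *
 *		for (;;) {
 *			struct bpf_prog_info info = {};
 *			int fd;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.start_id = id;
 *			if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr,
 *				    sizeof(attr)))
 *				break;
 *			id = attr.next_id;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.prog_id = id;
 *			fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr,
 *				     sizeof(attr));
 *			if (fd < 0)
 *				continue;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.info.bpf_fd = fd;
 *			attr.info.info_len = sizeof(info);
 *			attr.info.info = (__u64)(unsigned long)&info;
 *			if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr,
 *				     sizeof(attr)))
 *				printf("%u %.16s run_cnt=%llu\n", info.id,
 *				       info.name,
 *				       (unsigned long long)info.run_cnt);
 *			close(fd);
 *		}
 *		return 0;
 *	}
 */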