// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct rw_semaphore *sem;
};

static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	up_read_non_owner(work->sem);
	work->sem = NULL;
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	struct bpf_map_memory mem;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_charge;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	bpf_map_charge_move(&smap->map.memory, &mem);

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_charge:
	bpf_map_charge_finish(&mem);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
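
/*
 * Illustrative sketch, not part of this file: from user space such a map
 * would typically be created through the bpf() syscall, e.g. via libbpf's
 * bpf_create_map() helper (names and sizes below are only an example).
 * stack_map_alloc() above requires key_size == 4, value_size to be a
 * multiple of 8 (or of sizeof(struct bpf_stack_build_id) when
 * BPF_F_STACK_BUILD_ID is set), and CAP_SYS_ADMIN:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_STACK_TRACE,
 *				sizeof(__u32),		// key: stack id
 *				127 * sizeof(struct bpf_stack_build_id),
 *				10000,			// max_entries
 *				BPF_F_STACK_BUILD_ID);
 */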

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
					   unsigned char *build_id,
					   void *note_start,
					   Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only support a note segment that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs)	/* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only support phdrs that fit in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only support phdrs that fit in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic: 0x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable files and shared objects */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
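
/*
 * For reference, the note layout that stack_map_parse_build_id() above
 * accepts is the standard .note.gnu.build-id note emitted by ld --build-id:
 *
 *	Elf32_Nhdr { n_namesz = 4, n_descsz = <id length>, n_type = 3 }
 *	"GNU\0"			// name, padded to a 4-byte boundary
 *	<build id bytes>	// descriptor, at most BPF_BUILD_ID_SIZE bytes
 *
 * Shorter ids are zero-padded to BPF_BUILD_ID_SIZE in the output buffer.
 */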

static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&up_read_work);
			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
				/* cannot queue another up_read(), fall back */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow trylocking mmap_sem in an
			 * interrupt-disabled context. Force the fallback code.
			 */
			irq_work_busy = true;
		}
	}

	/*
	 * We cannot do up_read() while irqs are disabled, because of the
	 * risk of deadlocking with rq_lock. To do the build_id lookup with
	 * irqs disabled, we need to run up_read() in irq_work. We use a
	 * percpu variable for the irq_work. If the irq_work is already in
	 * use by another lookup, we fall back to reporting ips.
	 *
	 * The same fallback is used for a kernel stack (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    down_read_trylock(&current->mm->mmap_sem) == 0) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		up_read(&current->mm->mmap_sem);
	} else {
		work->sem = &current->mm->mmap_sem;
		irq_work_queue(&work->irq_work);
		/*
		 * The irq_work will release the mmap_sem with
		 * up_read_non_owner(). The rwsem_release() is called
		 * here to release the lock from lockdep's perspective.
		 */
		rwsem_release(&current->mm->mmap_sem.dep_map, _RET_IP_);
	}
}
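
/*
 * Illustrative sketch, not part of this file: a tracing program feeds this
 * map through the bpf_get_stackid() helper implemented below, e.g.
 *
 *	long id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
 *	if (id >= 0)
 *		;	// id indexes a bucket that user space can read back
 *
 * where "stack_traces" is an assumed BPF_MAP_TYPE_STACK_TRACE map declared
 * by that program; a negative return is one of the errors handled below.
 */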

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / stack_map_data_size(map);
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;
	bool hash_matches;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
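
/*
 * Illustrative sketch, not part of this file: unlike bpf_get_stackid(),
 * the bpf_get_stack() helper below copies the raw trace into a buffer
 * supplied by the program and returns the number of bytes written, e.g.
 *
 *	__u64 ips[64];	// hypothetical buffer in a tracing program
 *	long len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *
 * The buffer size must be a multiple of the element size (8 bytes here,
 * or sizeof(struct bpf_stack_build_id) with BPF_F_USER_BUILD_ID); a
 * negative return maps to the -EINVAL/-EFAULT cases handled below.
 */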

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	num_elem = size / elem_size;
	if (sysctl_perf_event_max_stack < num_elem)
		init_nr = 0;
	else
		init_nr = sysctl_perf_event_max_stack - num_elem;
	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);
	if (unlikely(!trace))
		goto err_fault;

	trace_nr = trace->nr - init_nr;
	if (trace_nr < skip)
		goto err_fault;

	trace_nr -= skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;
	ips = trace->ip + skip + init_nr;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}
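
/*
 * Illustrative sketch, not part of this file: user space usually walks the
 * populated buckets with the get_next_key/lookup pair (the syscall lookup
 * path lands in bpf_stackmap_copy() above). A hedged example using libbpf's
 * bpf_map_get_next_key() and bpf_map_lookup_elem():
 *
 *	__u32 cur, next, *prev = NULL;
 *	__u64 ips[127];		// sized to the map's value_size
 *
 *	while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
 *		if (bpf_map_lookup_elem(map_fd, &next, ips) == 0)
 *			;	// ... symbolize ips[] ...
 *		cur = next;
 *		prev = &cur;
 *	}
 *
 * Starting with a NULL key matters: stack_map_get_next_key() above restarts
 * from bucket 0 for a missing key. The map cannot be written from user
 * space, as stack_map_update_elem() below enforces.
 */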

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);