/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct rw_semaphore *sem;
};

static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	up_read(work->sem);
	work->sem = NULL;
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap)
		return ERR_PTR(-ENOMEM);

	err = -E2BIG;
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_smap;

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;
	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	err = bpf_map_precharge_memlock(smap->map.pages);
	if (err)
		goto free_smap;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_smap;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_smap:
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
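
/*
 * Usage sketch (illustrative only, not part of the original file): a map
 * that passes the checks in stack_map_alloc() above could be created from
 * user space with the legacy libbpf wrapper bpf_create_map(), assuming that
 * helper is available:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_STACK_TRACE,
 *				sizeof(__u32),
 *				PERF_MAX_STACK_DEPTH * sizeof(__u64),
 *				10000, 0);
 *
 * key_size must be 4 (the stack id) and value_size a multiple of 8 holding
 * at most sysctl_perf_event_max_stack instruction pointers; with
 * BPF_F_STACK_BUILD_ID in map_flags the value holds an array of
 * struct bpf_stack_build_id instead.
 */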

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
					    unsigned char *build_id,
					    void *note_start,
					    Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only supports note that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs) /* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}
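
/*
 * For reference, each entry walked above follows the standard ELF note
 * layout; for the GNU build ID note it is:
 *
 *	Elf32_Nhdr { n_namesz = 4, n_descsz = <id length>, n_type = 3 }
 *	"GNU\0"            name, padded to a 4-byte boundary
 *	<build id bytes>   descriptor, at most BPF_BUILD_ID_SIZE accepted here
 *
 * which is why the memcpy() above starts at
 * note_start + note_offs + sizeof(Elf32_Nhdr) + ALIGN(sizeof("GNU"), 4).
 */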

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable file and shared object file */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}

static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (in_nmi()) {
		work = this_cpu_ptr(&up_read_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			/* cannot queue more up_read, fallback */
			irq_work_busy = true;
	}

	/*
	 * We cannot do up_read() in nmi context. To do build_id lookup
	 * in nmi context, we need to run up_read() in irq_work. We use
	 * a percpu variable to do the irq_work. If the irq_work is
	 * already used by another lookup, we fall back to report ips.
	 *
	 * Same fallback is used for kernel stack (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    down_read_trylock(&current->mm->mmap_sem) == 0) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		up_read(&current->mm->mmap_sem);
	} else {
		work->sem = &current->mm->mmap_sem;
		irq_work_queue(&work->irq_work);
	}
}
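
/*
 * Illustrative only: from a BPF program (e.g. one attached to a perf
 * event), the helper below is typically used as
 *
 *	int id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
 *	if (id >= 0)
 *		... use id as a key, e.g. bump a counter in a hash map ...
 *
 * where "stackmap" is a BPF_MAP_TYPE_STACK_TRACE map; the stored trace can
 * later be fetched by id from user space.
 */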

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / stack_map_data_size(map);
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;
	bool hash_matches;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
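
/*
 * Illustrative only: unlike bpf_get_stackid(), the helper below copies the
 * trace straight into a buffer supplied by the BPF program, e.g.
 *
 *	__u64 ips[64];
 *	int len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *
 * The buffer size must be a multiple of 8, or of
 * sizeof(struct bpf_stack_build_id) when BPF_F_USER_BUILD_ID is set; the
 * return value is the number of bytes written, or a negative error.
 */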

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	num_elem = size / elem_size;
	if (sysctl_perf_event_max_stack < num_elem)
		init_nr = 0;
	else
		init_nr = sysctl_perf_event_max_stack - num_elem;
	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);
	if (unlikely(!trace))
		goto err_fault;

	trace_nr = trace->nr - init_nr;
	if (trace_nr < skip)
		goto err_fault;

	trace_nr -= skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;
	ips = trace->ip + skip + init_nr;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
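
/*
 * Note: BPF programs cannot read stack map values directly (see
 * stack_map_lookup_elem() below). User space fetches a stored trace with
 * the BPF_MAP_LOOKUP_ELEM syscall command, e.g. via libbpf's
 * bpf_map_lookup_elem(map_fd, &stack_id, ips), which is serviced by
 * bpf_stackmap_copy().
 */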

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);