// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * Same as oldmem_pfn_is_ram in vmcore.
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}
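
/*
 * A sketch of the file layout that get_kcore_size() computes (offsets are
 * illustrative; exact values depend on nphdr and the note sizes):
 *
 *	0 ............................ ELF header (sizeof(struct elfhdr))
 *	phdrs_offset ................. nphdr program headers (phdrs_len)
 *	notes_offset ................. PT_NOTE segment contents (notes_len)
 *	data_offset (page-aligned) ... memory contents
 *
 * A kclist entry at virtual address v is served from file offset
 * kc_vaddr_to_offset(v) + data_offset, which is how read_kcore() and the
 * PT_LOAD p_offset fields it emits stay in sync.
 */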

#ifdef CONFIG_HIGHMEM
/*
 * With HIGHMEM we can treat [0...max_low_pfn) as one continuous range of
 * memory, because holes in low memory are not as big as in the !HIGHMEM
 * case.  (HIGHMEM is special because part of memory is _invisible_ to the
 * kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Calculate the vmemmap address range for the given System RAM pfns and register it. */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* Overlap check (needed because we page-align the range). */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif
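
/*
 * Illustrative arithmetic for the vmemmap registration above (the numbers
 * are assumptions, not taken from this file): with SPARSEMEM_VMEMMAP the
 * struct page array is virtually contiguous, so the struct page for pfn N
 * sits at vmemmap + N, i.e. at byte offset N * sizeof(struct page).
 * Assuming a 64-byte struct page, a RAM chunk covering pfns
 * [0x100000, 0x140000) leads get_sparsemem_vmemmap_info() to register
 * roughly 0x40000 * 64 bytes = 16 MiB of vmemmap as a KCORE_VMEMMAP entry.
 */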

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);
	if (!memmap_valid_within(pfn, p, page_zone(p)))
		return 1;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* Trim the range so it cannot wrap past ULONG_MAX; adapted from ppc-32 code. */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid, so we know this address
	 * is a valid pointer; therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized; update now: find out the "max pfn". */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;

		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;
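
	/*
	 * A read is served in up to four stages, depending on where *fpos
	 * falls: the ELF header, the program headers, the note segment and,
	 * past data_offset, the memory contents themselves.  For example
	 * (illustrative numbers, assuming a 64-bit kernel with 10 program
	 * headers): the ELF header covers [0, 64), the program headers
	 * [64, 64 + 10 * 56), the notes follow, and the data starts at the
	 * next page boundary.
	 */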

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			if (m->type == KCORE_REMAP)
				phdr->p_vaddr = (size_t)m->vaddr;
			else
				phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
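
		/*
		 * Each call above appends one note.  Sketch of the resulting
		 * bytes for the first note (layout per append_kcore_note()):
		 *
		 *	n_namesz = 5			("CORE" plus NUL)
		 *	n_descsz = sizeof(struct elf_prstatus)
		 *	n_type	 = NT_PRSTATUS
		 *	"CORE\0" + 3 bytes of padding	(name, 4-byte aligned)
		 *	prstatus bytes, padded to a multiple of 4
		 */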

		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;
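
	/*
	 * Worked example (illustrative values, assuming an x86-64 kernel
	 * where PAGE_OFFSET is 0xffff888000000000): a file position of
	 * data_offset + 0x1000 translates back to the direct-map address
	 * 0xffff888000001000 via kc_offset_to_vaddr().
	 */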

	m = NULL;
	while (buflen) {
		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			list_for_each_entry(m, &kclist_head, list) {
				if (start >= m->addr &&
				    start < m->addr + m->size)
					break;
			}
		}

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			m = NULL;	/* skip the list anchor */
		} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_USER) {
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (kern_addr_valid(start)) {
				/*
				 * Using a bounce buffer to bypass the
				 * hardened usercopy kernel text checks.
				 */
				if (copy_from_kernel_nofault(buf, (void *)start,
						tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with the vmalloc area.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);
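
/*
 * Usage sketch (userspace, not part of this file): /proc/kcore can be
 * inspected with ordinary ELF tooling, e.g.:
 *
 *	readelf -l /proc/kcore		# list the PT_LOAD segments
 *	gdb vmlinux /proc/kcore		# examine live kernel memory
 *
 * Both require CAP_SYS_RAWIO, as enforced by open_kcore() above.
 */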