// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}
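
/*
 * Illustrative sketch (not part of this file's build): a hypervisor backend
 * that knows which pages are ballooned could plug into the hook above
 * roughly as follows. The callback and helper names are hypothetical; only
 * register_oldmem_pfn_is_ram()/unregister_oldmem_pfn_is_ram() are the real
 * interface.
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_pfn_is_backed(pfn) ? 1 : 0;
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		return register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_oldmem_pfn_is_ram();
 *	}
 */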

/* Read from the oldmem device at the given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);

			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}
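
/*
 * Illustrative sketch (not part of this file): a caller that needs a small
 * structure from the old kernel's memory can drive read_from_oldmem() with
 * a physical-address cursor. "paddr" and "struct old_state" are
 * hypothetical; the 0/false arguments mean "kernel buffer" and "not
 * encrypted".
 *
 *	struct old_state state;
 *	u64 pos = paddr;
 *	ssize_t n;
 *
 *	n = read_from_oldmem((char *)&state, sizeof(state), &pos, 0, false);
 *	if (n < 0)
 *		return n;
 */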

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, or a negative value on error.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, mem_encrypt_active());
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};
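
/*
 * Illustrative sketch (userspace, not part of this file): dump tools such
 * as makedumpfile consume this interface with read() or, on MMU kernels,
 * by mmapping windows of /proc/vmcore. A minimal reader of the ELF
 * identification bytes, using <elf.h> and <err.h>, might look like:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	unsigned char e_ident[EI_NIDENT];
 *
 *	if (fd < 0 || read(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident))
 *		err(1, "/proc/vmcore");
 *	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0)
 *		errx(1, "not an ELF core");
 */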

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
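
/*
 * Worked example (illustrative only): the per-note size computed in
 * update_note_header_size_elf64() above is the note header plus the name
 * and descriptor payloads, each padded to a 4-byte boundary. For a "CORE"
 * NT_PRSTATUS note on x86_64 (n_namesz = 5, n_descsz = 336) that is
 *
 *	sz = sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *	   = 12 + 8 + 336 = 356 bytes,
 *
 * and real_sz accumulates one such term per note in the segment.
 */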

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf32_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	if (buf)
		vfree(buf);

	if (dump)
		vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
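
/*
 * Illustrative sketch (not part of this file): a crash-aware driver would
 * typically register its device dump from its crash/kdump path roughly as
 * below. The driver-side names (example_collect, example_dd) are
 * hypothetical; struct vmcoredd_data and vmcore_add_device_dump() are the
 * interface declared in <linux/crash_dump.h>.
 *
 *	static int example_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// copy up to data->size bytes of device state into buf
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data example_dd = {
 *		.dump_name = "example_dev",
 *		.size = SZ_1M,
 *		.vmcoredd_callback = example_collect,
 *	};
 *
 *	int ret = vmcore_add_device_dump(&example_dd);
 */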

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}