/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

static void *kdump_buf_page;

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one would crop the
	 * high pte bits and poke unwanted space, counting again from
	 * address 0; we don't want that. The pte must fit into an
	 * unsigned long. In effect the test checks that the high 12 bits
	 * are zero (pfn will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data
 * is first copied to a pre-allocated kernel page and then copied to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr, KM_PTE0);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr, KM_PTE0);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr, KM_PTE0);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
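
/*
 * Usage sketch (illustrative only, compiled out below): callers such as the
 * /proc/vmcore read path consume dump memory one page at a time through
 * copy_oldmem_page(), splitting an arbitrary physical range on page
 * boundaries. The helper read_oldmem_range() is a hypothetical example of
 * such a loop, not a real kernel function.
 */
#if 0
static ssize_t read_oldmem_range(char *buf, size_t count, u64 paddr,
				 int userbuf)
{
	size_t done = 0;

	while (count) {
		/* Split the request at the containing page's boundary. */
		unsigned long pfn = (unsigned long)(paddr >> PAGE_SHIFT);
		unsigned long offset =
			(unsigned long)(paddr & (PAGE_SIZE - 1));
		size_t chunk = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t ret;

		ret = copy_oldmem_page(pfn, buf + done, chunk, offset,
				       userbuf);
		if (ret < 0)
			return ret;
		done += ret;
		paddr += ret;
		count -= ret;
	}

	return done;
}
#endif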