/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>

static void *kdump_buf_page;

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one will crop the high
	 * pte bits and poke unwanted space counting again from address 0;
	 * we don't want that.  The pte must fit into an unsigned long.  In
	 * effect the test checks that the high 12 bits are zero (the pfn
	 * will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence first
 * copy the data to a pre-allocated kernel page and then copy to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
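
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * /proc/vmcore-style reader could drive copy_oldmem_page() to pull an
 * arbitrary byte range out of the crashed kernel's memory, splitting the
 * request at page boundaries so each call stays within one page.  The
 * function name and its placement here are assumptions for illustration
 * only; the real consumer of this interface lives in fs/proc/vmcore.c.
 */
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes, read = 0;
	ssize_t tmp;

	while (count) {
		/* Split the position into a page frame number and offset. */
		pfn = (unsigned long)(*ppos / PAGE_SIZE);
		offset = (unsigned long)(*ppos % PAGE_SIZE);

		/* Never cross a page boundary in a single call. */
		nr_bytes = min(count, (size_t)(PAGE_SIZE - offset));

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		buf += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
	}

	return read;
}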