#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <asm/uaccess.h>
#include <linux/slab.h>

/*
 * "savemaxmem=" boot parameter: the memory size of the crashed kernel
 * (e.g. "savemaxmem=256M"; memparse() accepts K/M/G suffixes). It is
 * converted to the highest page frame number of the old kernel and kept
 * in saved_max_pfn, so the capture kernel knows how much old memory
 * there is to dump.
 */
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

/* Bounce buffer for copying oldmem to user space outside atomic context. */
static void *kdump_buf_page;

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence the
 * data is first copied to the pre-allocated kernel page and then copied
 * to user space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			pr_warning("Kdump: Kdump buffer page not allocated\n");
			/* Drop the atomic mapping before bailing out. */
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

/* Allocate the bounce buffer once at boot, before /proc/vmcore is used. */
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		pr_warning("Kdump: Failed to allocate kdump buffer page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
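
/*
 * Illustrative sketch (an assumption, not part of the original file): how
 * a caller such as the /proc/vmcore read path in fs/proc/vmcore.c
 * typically drives copy_oldmem_page(). The helper name read_oldmem_range()
 * and its signature are hypothetical. A read may straddle page
 * boundaries, so it is split into per-page chunks, each bounded by the
 * space remaining in the current page.
 */
static ssize_t __maybe_unused read_oldmem_range(char *buf, size_t count,
						u64 paddr, int userbuf)
{
	ssize_t total = 0;

	while (count) {
		unsigned long pfn = (unsigned long)(paddr >> PAGE_SHIFT);
		unsigned long offset = (unsigned long)(paddr & (PAGE_SIZE - 1));
		size_t nr = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t ret = copy_oldmem_page(pfn, buf, nr, offset, userbuf);

		if (ret < 0)
			return ret;

		buf += ret;
		paddr += ret;
		count -= ret;
		total += ret;
	}

	return total;
}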