#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

/* Bounce buffer used to stage "oldmem" data before copy_to_user(). */
static void *kdump_buf_page;

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on @pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel.
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data is
 * first copied to a pre-allocated kernel page and then copied to user space
 * after the atomic mapping has been dropped.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			pr_warn("Kdump: Kdump buffer page not allocated\n");
			/* Drop the atomic mapping before bailing out. */
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

/* Pre-allocate the bounce buffer page used by copy_oldmem_page(). */
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		pr_warn("Kdump: Failed to allocate kdump buffer page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
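/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper showing how a /proc/vmcore-style reader might stream a range of
 * old-kernel physical memory to user space via copy_oldmem_page(). The name
 * read_oldmem_range() and its signature are assumptions made for this
 * example; the real caller lives in fs/proc/vmcore.c and differs in detail.
 */
static ssize_t __maybe_unused read_oldmem_range(char *buf, size_t count,
						unsigned long long paddr)
{
	ssize_t total = 0;

	while (count) {
		unsigned long pfn = paddr >> PAGE_SHIFT;
		unsigned long offset = paddr & (PAGE_SIZE - 1);
		/* Never cross a page boundary in a single call. */
		size_t nr = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t ret;

		/* userbuf=1: @buf points into user address space. */
		ret = copy_oldmem_page(pfn, buf, nr, offset, 1);
		if (ret < 0)
			return ret;

		buf += ret;
		paddr += ret;
		count -= ret;
		total += ret;
	}

	return total;
}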