// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

/* Scratch page used to stage data before copy_to_user() (see below). */
static void *kdump_buf_page;

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence first
 * copying the data to a pre-allocated kernel page and then copying to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
		if (!kdump_buf_page) {
			pr_warn("Kdump: Kdump buffer page not allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}

/*
 * Pre-allocate the kernel scratch page used by the user-space copy path of
 * copy_oldmem_page(), so it is available before /proc/vmcore is read.
 */
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		pr_warn("Kdump: Failed to allocate kdump buffer page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);
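
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the /proc/vmcore read path might split an arbitrary byte range of "oldmem"
 * into per-page calls to copy_oldmem_page(), since copy_oldmem_page() never
 * crosses a page boundary.  The function name and its placement here are
 * hypothetical; the real reader lives in fs/proc/vmcore.c.  Helpers such as
 * min_t() and u64 arrive via the includes above.
 */
static ssize_t __maybe_unused oldmem_read_range_sketch(char *buf, size_t count,
						       u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	/* Split the starting position into a page frame and an in-page offset. */
	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	while (count) {
		/* Copy at most up to the end of the current page. */
		nr_bytes = min_t(size_t, count, PAGE_SIZE - offset);

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	}

	return read;
}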