// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single-instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions, it doesn't require any registers.
	 */
	patch_instruction(p, PPC_INST_NOP);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */
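/*
 * Illustrative sketch only (not part of the upstream file): what one
 * trampoline slot looks like after create_trampoline() has patched it.
 * The addresses are assumed example values: a slot at 0x100, with the
 * kdump kernel loaded at PHYSICAL_START = 0x2000000 (32 MB).
 *
 *	0x0100:	nop			// entry: the old exception vector
 *	0x0104:	b	0x2000100	// offset 0x1fffffc = 32 MB - 4,
 *					// i.e. addr + PHYSICAL_START
 *
 * An exception taken through the low-memory vector at 0x100 therefore
 * falls through the nop and branches into the relocated kdump kernel's
 * copy of that vector.
 */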
/*
 * Copy @csize bytes from the old kernel's page mapped at @vaddr, starting
 * @offset bytes in. @buf is a user pointer if @userbuf is set, otherwise a
 * kernel pointer. Returns the number of bytes copied, or -EFAULT (which the
 * caller hands back through a ssize_t) if the user copy faults.
 */
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space and copy_to_user() is
 *	used; otherwise @buf is in kernel address space and memcpy() is used.
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	/* Cast before shifting so a >32-bit physical address isn't truncated. */
	paddr = (phys_addr_t)pfn << PAGE_SHIFT;

	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = ioremap_cache(paddr, PAGE_SIZE);
		if (!vaddr)
			return -EFAULT;
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Skip any page that overlaps the RTAS region. */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif
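/*
 * Usage sketch (illustrative, not part of the upstream file): the hook
 * above is invoked by the generic kexec code when userspace shrinks the
 * reserved crashkernel region via sysfs, e.g. (assumed sizes):
 *
 *	# cat /sys/kernel/kexec_crash_size
 *	268435456
 *	# echo 134217728 > /sys/kernel/kexec_crash_size
 *
 * crash_shrink_memory() then calls crash_free_reserved_phys_range() on
 * the memory being given back; the overlap test above keeps the
 * firmware-owned RTAS pages reserved even when they fall inside that
 * range.
 */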