xref: /openbmc/linux/arch/powerpc/kernel/crash_dump.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

#ifndef CONFIG_RELOCATABLE
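/*
 * The kdump kernel runs at a nonzero PHYSICAL_START, so the first
 * physical pages must stay free for the trampolines patched in below.
 * Reserve everything up to KDUMP_RESERVE_LIMIT so nothing else is
 * allocated over the exception-vector region.
 */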
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum reach of a single branch instruction is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to reach the current address + 32 MB, so we insert a nop at
	 * the trampoline address and have the next instruction (+ 4 bytes)
	 * branch forward by (32 MB - 4) bytes. The net effect is that a
	 * branch to "addr" lands at ("addr" + 32 MB). It costs two
	 * instructions but clobbers no registers.
	 */
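	/*
	 * Illustrative layout for addr == 0, assuming the usual kdump
	 * PHYSICAL_START of 32 MB (the value is config-dependent and not
	 * defined in this file):
	 *
	 *   0x0: nop
	 *   0x4: b +0x1fffffc	<- lands at 0x2000000, the kdump kernel
	 */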
	patch_instruction(p, PPC_INST_NOP);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

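/*
 * Fill the exception-vector region with trampolines, one every 8 bytes
 * (a nop plus a branch), so that any exception taken after the crash is
 * redirected into the kdump kernel at PHYSICAL_START. On pseries the
 * firmware-assisted NMI entry points are patched as well, since fwnmi
 * exceptions are delivered to addresses registered with the firmware
 * rather than to the architected vectors.
 */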
void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8)
		create_trampoline(i);

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_RELOCATABLE */

/*
 * Note: elfcorehdr_addr is not limited to /proc/vmcore. It is also used
 * by is_kdump_kernel() to determine whether we are booting after a
 * panic, which is why this is guarded by CONFIG_CRASH_DUMP rather than
 * CONFIG_PROC_VMCORE.
 */
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);

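/*
 * savemaxmem= records the highest page frame of the crashed kernel
 * (saved_max_pfn), typically supplied by the kexec userspace tools, so
 * the dump code knows how far the old kernel's memory extends.
 */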
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

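/*
 * Copy @csize bytes of old-kernel memory starting at @vaddr + @offset
 * into @buf, using copy_to_user() or memcpy() depending on whether the
 * destination is a user or kernel buffer. Returns the number of bytes
 * copied, or -EFAULT if the user copy faults.
 */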
static ssize_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				 unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else {
		memcpy(buf, (vaddr + offset), csize);
	}

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	ssize_t ret;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);

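	/*
	 * If the page lies in the kernel's direct (linear) mapping we can
	 * read it through __va(); otherwise set up a temporary mapping
	 * with __ioremap() and tear it down again after the copy.
	 */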
	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
		vaddr = __va(pfn << PAGE_SHIFT);
		ret = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		ret = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return ret;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const u32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = *basep;
		rtas_end = *basep + *sizep;
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

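		/*
		 * Give the page back to the kernel: drop the reserved
		 * bit, reset the refcount, free it into the page
		 * allocator and account for it in totalram_pages.
		 */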
		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
		free_page((unsigned long)__va(addr));
		totalram_pages++;
	}
}
#endif