xref: /openbmc/linux/arch/powerpc/kernel/crash_dump.c (revision edcee77f)
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}
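
/*
 * Sizing sketch, assuming the constant from asm/kdump.h at this revision
 * (KDUMP_RESERVE_LIMIT is 0x10000, i.e. 64K). The call above amounts to
 *
 *	memblock_reserve(0, 0x10000);
 *
 * which keeps the early allocator away from the low-memory exception
 * vector region that setup_kdump_trampoline() patches below.
 */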

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch of (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, PPC_INST_NOP);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}
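
/*
 * For example, assuming PHYSICAL_START is KDUMP_KERNELBASE (0x2000000,
 * i.e. 32 MB, the usual kdump load address), the trampoline patched at
 * the 0x100 system reset vector comes out as:
 *
 *	0x100:	nop			(patch_instruction)
 *	0x104:	b	0x2000100	(patch_branch, displacement 32 MB - 4)
 *
 * so an exception delivered to 0x100 in low memory falls through the nop
 * and lands at the same vector offset inside the kdump kernel.
 */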

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8)
		create_trampoline(i);

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */
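
/*
 * Coverage sketch, assuming the constants from asm/kdump.h at this
 * revision (KDUMP_TRAMPOLINE_START 0x0100, KDUMP_TRAMPOLINE_END 0x3000):
 * the loop in setup_kdump_trampoline() drops an 8-byte (two instruction)
 * trampoline at 0x100, 0x108, ..., 0x2ff8, blanketing the architected
 * exception vectors. The fwnmi entry points are patched separately
 * because firmware delivers those NMIs to addresses the crashed kernel
 * registered via RTAS rather than to fixed vectors; the
 * __pa(...) - PHYSICAL_START arithmetic assumes the kdump kernel has the
 * same layout as the kernel that crashed.
 */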

static ssize_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				 unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else {
		memcpy(buf, (vaddr + offset), csize);
	}

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = (phys_addr_t)pfn << PAGE_SHIFT;

	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(paddr, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
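
/*
 * Usage sketch (user space, illustrative): in the kdump kernel, reads of
 * /proc/vmcore are serviced by copy_oldmem_page() with @userbuf set, one
 * page at a time. A minimal reader:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * Tools such as makedumpfile and cp capture the dump the same way.
 */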

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr < rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif
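
/*
 * Worked example with made-up numbers: if the device tree reported
 * linux,rtas-base = 0x1c00000 and rtas-size = 0x200000, then
 * rtas_end = 0x1e00000 and the loop above skips every page that
 * intersects [0x1c00000, 0x1e00000), freeing the rest of the old
 * crashkernel window back to the page allocator.
 */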