xref: /openbmc/linux/arch/powerpc/kernel/crash_dump.c (revision 95f72d1e)
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* Stores the physical address of the ELF header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

#ifndef CONFIG_RELOCATABLE
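/*
 * With CONFIG_RELOCATABLE unset, the kdump kernel is linked to run from
 * PHYSICAL_START (typically 32 MB), so the low region of RAM is reserved
 * here to hold the exception-vector trampolines that create_trampoline()
 * patches in below.
 */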
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	/* The maximum range of a single-instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * branches forward by (32 MB - 4) bytes. The net effect is that when
	 * we branch to "addr" we jump to ("addr" + 32 MB). Although this
	 * requires two instructions, it doesn't require any registers.
	 */
	patch_instruction(p, PPC_INST_NOP);
	patch_branch(++p, addr + PHYSICAL_START, 0);
}
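/*
 * Illustrative sketch, assuming PHYSICAL_START is 0x2000000 (32 MB):
 * after create_trampoline(addr), the low-memory copy of the vector at
 * "addr" looks like
 *
 *	addr + 0:	nop
 *	addr + 4:	b	addr + 0x2000000
 *
 * so an exception that vectors into the first 32 MB falls through the
 * nop and branches into the kdump kernel's copy of the same vector.
 */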

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

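	/* Each trampoline is two 4-byte instructions (a nop and a branch),
	 * so stepping by 8 bytes tiles the KDUMP_TRAMPOLINE_START..END
	 * region with back-to-back trampolines.
	 */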
	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
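	/* On pseries, firmware delivers system reset and machine check
	 * NMIs to the addresses the crashed kernel registered, which lie
	 * in low memory; __pa(handler) - PHYSICAL_START recovers that low
	 * address, so a trampoline is patched there as well.
	 */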
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_RELOCATABLE */

/*
 * Note: elfcorehdr_addr is not limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
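/*
 * Example (illustrative values): kexec-tools typically appends something
 * like "elfcorehdr=48M" to the kdump kernel's command line; memparse()
 * accepts the usual K/M/G suffixes.
 */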

static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);
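/*
 * Example (illustrative value): "savemaxmem=4096M" records that the
 * crashed kernel's memory ended at 4 GB, i.e. saved_max_pfn becomes
 * (4 GB >> PAGE_SHIFT) - 1, the highest page frame of the old kernel.
 */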

static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
                               unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *      space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *      otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	csize = min(csize, PAGE_SIZE);

	if (pfn < max_pfn) {
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
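/*
 * Usage sketch (hypothetical caller, for illustration): the /proc/vmcore
 * read path hands this routine one page of the crashed kernel at a time,
 * roughly
 *
 *	nr_bytes = min(count, PAGE_SIZE - offset);
 *	tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
 *	if (tmp < 0)
 *		return tmp;
 *
 * where a negative return reflects a faulting copy_to_user() in
 * copy_oldmem_vaddr().
 */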