140b0b3f8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
20cc4746cSMichael Ellerman /*
30cc4746cSMichael Ellerman * Routines for doing kexec-based kdump.
40cc4746cSMichael Ellerman *
50cc4746cSMichael Ellerman * Copyright (C) 2005, IBM Corp.
60cc4746cSMichael Ellerman *
70cc4746cSMichael Ellerman * Created by: Michael Ellerman
80cc4746cSMichael Ellerman */
90cc4746cSMichael Ellerman
100cc4746cSMichael Ellerman #undef DEBUG
110cc4746cSMichael Ellerman
12cc532915SMichael Ellerman #include <linux/crash_dump.h>
13edcee77fSMichael Ellerman #include <linux/io.h>
1495f72d1eSYinghai Lu #include <linux/memblock.h>
15*e6f6390aSChristophe Leroy #include <linux/of.h>
16aaddd3eaSMichael Ellerman #include <asm/code-patching.h>
170cc4746cSMichael Ellerman #include <asm/kdump.h>
180cc4746cSMichael Ellerman #include <asm/firmware.h>
195d8de293SMatthew Wilcox (Oracle) #include <linux/uio.h>
20d72e063bSAnton Blanchard #include <asm/rtas.h>
2175346251SJordan Niethe #include <asm/inst.h>
220cc4746cSMichael Ellerman
230cc4746cSMichael Ellerman #ifdef DEBUG
240cc4746cSMichael Ellerman #include <asm/udbg.h>
250cc4746cSMichael Ellerman #define DBG(fmt...) udbg_printf(fmt)
260cc4746cSMichael Ellerman #else
270cc4746cSMichael Ellerman #define DBG(fmt...)
280cc4746cSMichael Ellerman #endif
290cc4746cSMichael Ellerman
300f890c8dSSuzuki Poulose #ifndef CONFIG_NONSTATIC_KERNEL
/*
 * Reserve low memory so the kdump trampolines can be installed there.
 * Reserves physical [0, KDUMP_RESERVE_LIMIT) via memblock early in boot;
 * only needed when the kernel is not relocatable (!CONFIG_NONSTATIC_KERNEL).
 */
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}
3547310413SMichael Ellerman
create_trampoline(unsigned long addr)360cc4746cSMichael Ellerman static void __init create_trampoline(unsigned long addr)
370cc4746cSMichael Ellerman {
3869d4d6e5SChristophe Leroy u32 *p = (u32 *)addr;
39e7a57273SMichael Ellerman
400cc4746cSMichael Ellerman /* The maximum range of a single instruction branch, is the current
410cc4746cSMichael Ellerman * instruction's address + (32 MB - 4) bytes. For the trampoline we
420cc4746cSMichael Ellerman * need to branch to current address + 32 MB. So we insert a nop at
430cc4746cSMichael Ellerman * the trampoline address, then the next instruction (+ 4 bytes)
440cc4746cSMichael Ellerman * does a branch to (32 MB - 4). The net effect is that when we
450cc4746cSMichael Ellerman * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
460cc4746cSMichael Ellerman * two instructions it doesn't require any registers.
470cc4746cSMichael Ellerman */
48f30becb5SChristophe Leroy patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
4969d4d6e5SChristophe Leroy patch_branch(p + 1, addr + PHYSICAL_START, 0);
500cc4746cSMichael Ellerman }
510cc4746cSMichael Ellerman
setup_kdump_trampoline(void)5247310413SMichael Ellerman void __init setup_kdump_trampoline(void)
530cc4746cSMichael Ellerman {
540cc4746cSMichael Ellerman unsigned long i;
550cc4746cSMichael Ellerman
5647310413SMichael Ellerman DBG(" -> setup_kdump_trampoline()\n");
570cc4746cSMichael Ellerman
580cc4746cSMichael Ellerman for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
590cc4746cSMichael Ellerman create_trampoline(i);
600cc4746cSMichael Ellerman }
610cc4746cSMichael Ellerman
629e4859efSStephen Rothwell #ifdef CONFIG_PPC_PSERIES
630cc4746cSMichael Ellerman create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
640cc4746cSMichael Ellerman create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
659e4859efSStephen Rothwell #endif /* CONFIG_PPC_PSERIES */
660cc4746cSMichael Ellerman
6747310413SMichael Ellerman DBG(" <- setup_kdump_trampoline()\n");
680cc4746cSMichael Ellerman }
690f890c8dSSuzuki Poulose #endif /* CONFIG_NONSTATIC_KERNEL */
70cc532915SMichael Ellerman
copy_oldmem_page(struct iov_iter * iter,unsigned long pfn,size_t csize,unsigned long offset)715d8de293SMatthew Wilcox (Oracle) ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
725d8de293SMatthew Wilcox (Oracle) size_t csize, unsigned long offset)
7354c32021SMichael Ellerman {
7454c32021SMichael Ellerman void *vaddr;
75f5295bd8SLaurent Dufour phys_addr_t paddr;
7654c32021SMichael Ellerman
7754c32021SMichael Ellerman if (!csize)
7854c32021SMichael Ellerman return 0;
7954c32021SMichael Ellerman
80bbc8e30fSMatthew McClintock csize = min_t(size_t, csize, PAGE_SIZE);
81f5295bd8SLaurent Dufour paddr = pfn << PAGE_SHIFT;
827230ced4SMichael Ellerman
83f5295bd8SLaurent Dufour if (memblock_is_region_memory(paddr, csize)) {
84f5295bd8SLaurent Dufour vaddr = __va(paddr);
855d8de293SMatthew Wilcox (Oracle) csize = copy_to_iter(vaddr + offset, csize, iter);
867230ced4SMichael Ellerman } else {
87aa91796eSChristophe Leroy vaddr = ioremap_cache(paddr, PAGE_SIZE);
885d8de293SMatthew Wilcox (Oracle) csize = copy_to_iter(vaddr + offset, csize, iter);
8954c32021SMichael Ellerman iounmap(vaddr);
9054c32021SMichael Ellerman }
9154c32021SMichael Ellerman
9254c32021SMichael Ellerman return csize;
9354c32021SMichael Ellerman }
94d72e063bSAnton Blanchard
95d72e063bSAnton Blanchard #ifdef CONFIG_PPC_RTAS
96d72e063bSAnton Blanchard /*
97d72e063bSAnton Blanchard * The crashkernel region will almost always overlap the RTAS region, so
98d72e063bSAnton Blanchard * we have to be careful when shrinking the crashkernel region.
99d72e063bSAnton Blanchard */
crash_free_reserved_phys_range(unsigned long begin,unsigned long end)100d72e063bSAnton Blanchard void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
101d72e063bSAnton Blanchard {
102d72e063bSAnton Blanchard unsigned long addr;
103a29e30efSAnton Blanchard const __be32 *basep, *sizep;
104d72e063bSAnton Blanchard unsigned int rtas_start = 0, rtas_end = 0;
105d72e063bSAnton Blanchard
106d72e063bSAnton Blanchard basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
107d72e063bSAnton Blanchard sizep = of_get_property(rtas.dev, "rtas-size", NULL);
108d72e063bSAnton Blanchard
109d72e063bSAnton Blanchard if (basep && sizep) {
110a29e30efSAnton Blanchard rtas_start = be32_to_cpup(basep);
111a29e30efSAnton Blanchard rtas_end = rtas_start + be32_to_cpup(sizep);
112d72e063bSAnton Blanchard }
113d72e063bSAnton Blanchard
114d72e063bSAnton Blanchard for (addr = begin; addr < end; addr += PAGE_SIZE) {
115d72e063bSAnton Blanchard /* Does this page overlap with the RTAS region? */
116d72e063bSAnton Blanchard if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
117d72e063bSAnton Blanchard continue;
118d72e063bSAnton Blanchard
1195d585e5cSJiang Liu free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
120d72e063bSAnton Blanchard }
121d72e063bSAnton Blanchard }
122d72e063bSAnton Blanchard #endif
123