// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/inst.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	struct ppc_inst *p = (struct ppc_inst *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch to (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, ppc_inst(PPC_INST_NOP));
	patch_branch(++p, addr + PHYSICAL_START, 0);
}

void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */

/* Copy a chunk of the old kernel's memory to either a user or kernel buffer. */
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
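 *
 * Return: number of bytes copied on success, or -EFAULT if copying to a
 *	user buffer fails.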
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = pfn << PAGE_SHIFT;

	/* If the page is part of this kernel's RAM, go through the linear
	 * mapping; otherwise map it temporarily with ioremap_cache().
	 */
	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = ioremap_cache(paddr, PAGE_SIZE);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;

		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif