/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

.section ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * safe memory that has been set up to be preserved during the copy operation.
 *
 * On entry, x0 holds a pointer to the struct kimage.
 */
SYM_CODE_START(arm64_relocate_new_kernel)
	/* Setup the list loop variables. */
	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]	/* x17 = linear map copy */
	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */
	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
	break_before_make_ttbr_switch	x18, x17, x1, x2 /* set linear map */
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x2, x13
	add	x1, x2, #PAGE_SIZE
	dcache_by_myline_op ivac, sy, x2, x1, x15, x20
	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
	b	.Lnext
.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
	mov	x14, x12			/* ptr = addr */
	b	.Lnext
.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext
	mov	x13, x12			/* dest = addr */
.Lnext:
	ldr	x16, [x14], #8			/* entry = *ptr++ */
	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
	/* wait for writes from copy_page to finish */
	dsb	nsh
	ic	iallu				/* the copied pages hold code */
	dsb	nsh
	isb

	/* Start new image. */
	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* x1 = el2 vectors, or 0 */
	cbz	x1, .Lel1			/* no el2 vectors: enter at el1 */
	ldr	x1, [x0, #KIMAGE_START]		/* relocation start */
	ldr	x2, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
	mov	x3, xzr
	mov	x4, xzr
	/* hyp-stub soft-restart: enters at x1 with x2-x4 as the new x0-x2 */
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				/* Jumps from el2 */
.Lel1:
	ldr	x4, [x0, #KIMAGE_START]		/* relocation start */
	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]	/* dtb address */
	mov	x1, xzr				/* x1-x3 must be 0 (boot protocol) */
	mov	x2, xzr
	mov	x3, xzr
	br	x4				/* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
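
/*
 * For reference: the .Lloop walk above is the assembly counterpart of the
 * kimage entry list built by the core kexec code in kernel/kexec_core.c.
 * A minimal C sketch of the loop's semantics follows. It is illustrative
 * only and is not compiled; the function name "walk_kimage_list" is
 * invented for this comment, and error handling and cache maintenance are
 * omitted:
 *
 *	static void walk_kimage_list(kimage_entry_t entry)
 *	{
 *		kimage_entry_t *ptr = NULL;	// x14, entry pointer
 *		unsigned long dest = 0;		// x13, copy destination
 *
 *		while (!(entry & IND_DONE)) {
 *			unsigned long addr = entry & PAGE_MASK;	// x12
 *
 *			if (entry & IND_SOURCE) {
 *				// copy one source page; the asm copy_page
 *				// macro also advances x13 by PAGE_SIZE
 *				copy_page((void *)dest, (void *)addr);
 *				dest += PAGE_SIZE;
 *			} else if (entry & IND_INDIRECTION) {
 *				// switch to a new page of entries
 *				ptr = (kimage_entry_t *)addr;
 *			} else if (entry & IND_DESTINATION) {
 *				// set a new copy destination
 *				dest = addr;
 *			}
 *			entry = *ptr++;		// ldr x16, [x14], #8
 *		}
 *	}
 */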