/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>
#include <asm/pgtable_types.h>

/*
 * Must be relocatable PIC code callable as a C function
 */

#define PTR(x) (x << 3)
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define RSP			DATA(0x0)
#define CR0			DATA(0x8)
#define CR3			DATA(0x10)
#define CR4			DATA(0x18)

/* other data */
#define CP_PA_TABLE_PAGE	DATA(0x20)
#define CP_PA_SWAP_PAGE		DATA(0x28)
#define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)

	.text
	.align PAGE_SIZE
	.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
	/*
	 * %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
	 * %r8  sme_active
	 */

	/* Save the CPU context, used for jumping back */
	pushq	%rbx
	pushq	%rbp
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushf

	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11
	movq	%rsp, RSP(%r11)
	movq	%cr0, %rax
	movq	%rax, CR0(%r11)
	movq	%cr3, %rax
	movq	%rax, CR3(%r11)
	movq	%cr4, %rax
	movq	%rax, CR4(%r11)

	/* Save CR4. Required to enable the right paging mode later. */
	movq	%rax, %r13

	/* zero out flags, and disable interrupts */
	pushq	$0
	popfq

	/* Save SME active flag */
	movq	%r8, %r12

	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8

	/* get physical address of page table now too */
	movq	PTR(PA_TABLE_PAGE)(%rsi), %r9

	/* get physical address of swap page now */
	movq	PTR(PA_SWAP_PAGE)(%rsi), %r10

	/* save some information for jumping back */
	movq	%r9, CP_PA_TABLE_PAGE(%r11)
	movq	%r10, CP_PA_SWAP_PAGE(%r11)
	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)

	/* Switch to the identity mapped page tables */
	movq	%r9, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	PAGE_SIZE(%r8), %rsp

	/* jump to identity mapped page */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	ret
SYM_CODE_END(relocate_kernel)
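
/*
 * For reference, a hedged sketch of the C-side contract. Under the
 * SysV AMD64 calling convention, the register list above corresponds
 * to a prototype along these lines (the authoritative declaration
 * lives in asm/kexec.h and the caller is machine_kexec(); the exact
 * signature varies between kernel versions):
 *
 *	unsigned long relocate_kernel(unsigned long indirection_page,
 *				      unsigned long page_list,
 *				      unsigned long start_address,
 *				      unsigned int preserve_context,
 *				      unsigned int sme_active);
 *
 * The five arguments arrive in %rdi, %rsi, %rdx, %rcx and %r8
 * respectively. On the preserve_context return path, the re-entry
 * point of the peer system comes back in %rax (via %rbp in
 * virtual_mapped below).
 */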

SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	/* set return address to 0 if not preserving context */
	pushq	$0
	/* store the start address on the stack */
	pushq	%rdx

	/*
	 * Set cr0 to a known state:
	 *  - Paging enabled
	 *  - Alignment check disabled
	 *  - Write protect disabled
	 *  - No task switch
	 *  - Don't do FP software emulation.
	 *  - Protected mode enabled
	 */
	movq	%cr0, %rax
	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movq	%rax, %cr0

	/*
	 * Set cr4 to a known state:
	 *  - physical address extension enabled
	 *  - 5-level paging, if it was enabled before
	 */
	movl	$X86_CR4_PAE, %eax
	testq	$X86_CR4_LA57, %r13
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:
	movq	%rax, %cr4

	/* flush any prefetched instructions after the paging mode switch */
	jmp	1f
1:

	/* Flush the TLB (needed?) */
	movq	%r9, %cr3

	/*
	 * If SME is active, there could be old encrypted cache line
	 * entries that will conflict with the now unencrypted memory
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	testq	%r12, %r12
	jz	1f
	wbinvd
1:

	/* save preserve_context; %rcx is clobbered by swap_pages */
	movq	%rcx, %r11
	call	swap_pages

	/*
	 * To be certain of avoiding problems with self-modifying code
	 * I need to execute a serializing instruction here.
	 * So I flush the TLB by reloading %cr3 here, it's handy,
	 * and not processor dependent.
	 */
	movq	%cr3, %rax
	movq	%rax, %cr3

	/*
	 * set all of the registers to known values
	 * leave %rsp alone
	 */

	testq	%r11, %r11
	jnz	1f
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%esi, %esi
	xorl	%edi, %edi
	xorl	%ebp, %ebp
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d
	xorl	%r11d, %r11d
	xorl	%r12d, %r12d
	xorl	%r13d, %r13d
	xorl	%r14d, %r14d
	xorl	%r15d, %r15d

	ret

1:
	popq	%rdx
	leaq	PAGE_SIZE(%r10), %rsp
	call	*%rdx

	/* get the re-entry point of the peer system */
	movq	0(%rsp), %rbp
	call	1f
1:
	popq	%r8
	subq	$(1b - relocate_kernel), %r8
	movq	CP_PA_SWAP_PAGE(%r8), %r10
	movq	CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
	movq	CP_PA_TABLE_PAGE(%r8), %rax
	movq	%rax, %cr3
	lea	PAGE_SIZE(%r8), %rsp
	call	swap_pages
	movq	$virtual_mapped, %rax
	pushq	%rax
	ret
SYM_CODE_END(identity_mapped)

SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	movq	RSP(%r8), %rsp
	movq	CR4(%r8), %rax
	movq	%rax, %cr4
	movq	CR3(%r8), %rax
	movq	CR0(%r8), %r8
	movq	%rax, %cr3
	movq	%r8, %cr0
	movq	%rbp, %rax

	popf
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	ret
SYM_CODE_END(virtual_mapped)

	/* Do the copies */
SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	movq	%rdi, %rcx	/* Put the page_list in %rcx */
	xorl	%edi, %edi
	xorl	%esi, %esi
	jmp	1f

0:	/* top, read another word for the indirection page */

	movq	(%rbx), %rcx
	addq	$8, %rbx
1:
	testb	$0x1, %cl	/* is it a destination page? */
	jz	2f
	movq	%rcx, %rdi
	andq	$0xfffffffffffff000, %rdi
	jmp	0b
2:
	testb	$0x2, %cl	/* is it an indirection page? */
	jz	2f
	movq	%rcx, %rbx
	andq	$0xfffffffffffff000, %rbx
	jmp	0b
2:
	testb	$0x4, %cl	/* is it the done indicator? */
	jz	2f
	jmp	3f
2:
	testb	$0x8, %cl	/* is it the source indicator? */
	jz	0b		/* Ignore it otherwise */
	movq	%rcx, %rsi	/* For every source page do a copy */
	andq	$0xfffffffffffff000, %rsi

	movq	%rdi, %rdx	/* Save destination page to %rdx */
	movq	%rsi, %rax	/* Save source page to %rax */

	/* copy source page to swap page */
	movq	%r10, %rdi
	movl	$512, %ecx
	rep ; movsq

	/* copy destination page to source page */
	movq	%rax, %rdi
	movq	%rdx, %rsi
	movl	$512, %ecx
	rep ; movsq

	/* copy swap page to destination page */
	movq	%rdx, %rdi
	movq	%r10, %rsi
	movl	$512, %ecx
	rep ; movsq

	lea	PAGE_SIZE(%rax), %rsi
	jmp	0b
3:
	ret
SYM_CODE_END(swap_pages)

	.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel
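
/*
 * For reference: the indirection list walked by swap_pages is a chain
 * of pages holding physical addresses tagged in their low bits; the
 * flag values tested above match IND_DESTINATION (0x1), IND_INDIRECTION
 * (0x2), IND_DONE (0x4) and IND_SOURCE (0x8) from include/linux/kexec.h.
 * A hedged C sketch of the same walk (head, dest, entry and
 * swap_page_contents() are illustrative names only; the real copy is
 * the three rep;movsq blocks above, which exchange the source and
 * destination pages through the swap page):
 *
 *	unsigned long word = head;	// the head word itself is tagged
 *	for (;;) {
 *		if (word & 0x1)			// IND_DESTINATION
 *			dest = word & PAGE_MASK;
 *		else if (word & 0x2)		// IND_INDIRECTION
 *			entry = (unsigned long *)(word & PAGE_MASK);
 *		else if (word & 0x4)		// IND_DONE
 *			break;
 *		else if (word & 0x8)		// IND_SOURCE
 *			swap_page_contents(dest, word & PAGE_MASK);
 *		word = *entry++;
 *	}
 */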