/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>
#include <asm/pgtable_types.h>

/*
 * Must be relocatable PIC code callable as a C function
 */

#define PTR(x) (x << 3)
#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define RSP			DATA(0x0)
#define CR0			DATA(0x8)
#define CR3			DATA(0x10)
#define CR4			DATA(0x18)

/* other data */
#define CP_PA_TABLE_PAGE	DATA(0x20)
#define CP_PA_SWAP_PAGE		DATA(0x28)
#define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)

	.text
	.align PAGE_SIZE
	.code64
	.globl relocate_kernel
relocate_kernel:
	/*
	 * %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
	 * %r8  sme_active
	 */

	/* Save the CPU context, used for jumping back */
	pushq	%rbx
	pushq	%rbp
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushf

	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11
	movq	%rsp, RSP(%r11)
	movq	%cr0, %rax
	movq	%rax, CR0(%r11)
	movq	%cr3, %rax
	movq	%rax, CR3(%r11)
	movq	%cr4, %rax
	movq	%rax, CR4(%r11)

	/* Save CR4. Required to enable the right paging mode later. */
	movq	%rax, %r13

	/* zero out flags, and disable interrupts */
	pushq	$0
	popfq

	/* Save SME active flag */
	movq	%r8, %r12

	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8

	/* get physical address of page table now too */
	movq	PTR(PA_TABLE_PAGE)(%rsi), %r9

	/* get physical address of swap page now */
	movq	PTR(PA_SWAP_PAGE)(%rsi), %r10

	/* save some information for jumping back */
	movq	%r9, CP_PA_TABLE_PAGE(%r11)
	movq	%r10, CP_PA_SWAP_PAGE(%r11)
	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)

	/* Switch to the identity mapped page tables */
	movq	%r9, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	PAGE_SIZE(%r8), %rsp

	/* jump to identity mapped page */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	ret

identity_mapped:
	/* set return address to 0 if not preserving context */
	pushq	$0
	/* store the start address on the stack */
	pushq	%rdx

	/*
	 * Set cr0 to a known state:
	 *  - Paging enabled
	 *  - Alignment check disabled
	 *  - Write protect disabled
	 *  - No task switch
	 *  - Don't do FP software emulation.
	 *  - Protected mode enabled
	 */
	movq	%cr0, %rax
	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movq	%rax, %cr0

	/*
	 * Set cr4 to a known state:
	 *  - physical address extension enabled
	 *  - 5-level paging, if it was enabled before
	 */
	movl	$X86_CR4_PAE, %eax
	testq	$X86_CR4_LA57, %r13
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:
	movq	%rax, %cr4

	jmp 1f
1:

	/* Flush the TLB (needed?) */
	movq	%r9, %cr3

	/*
	 * If SME is active, there could be old encrypted cache line
	 * entries that will conflict with the now unencrypted memory
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	testq	%r12, %r12
	jz	1f
	wbinvd
1:

	movq	%rcx, %r11
	call	swap_pages

	/*
	 * To be certain of avoiding problems with self-modifying code
	 * I need to execute a serializing instruction here.
	 * So I flush the TLB by reloading %cr3 here; it's handy,
	 * and not processor dependent.
	 */
	movq	%cr3, %rax
	movq	%rax, %cr3

	/*
	 * set all of the registers to known values
	 * leave %rsp alone
	 */

	testq	%r11, %r11
	jnz	1f
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%esi, %esi
	xorl	%edi, %edi
	xorl	%ebp, %ebp
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d
	xorl	%r11d, %r11d
	xorl	%r12d, %r12d
	xorl	%r13d, %r13d
	xorl	%r14d, %r14d
	xorl	%r15d, %r15d

	ret

1:
	popq	%rdx
	leaq	PAGE_SIZE(%r10), %rsp
	call	*%rdx

	/* get the re-entry point of the peer system */
	movq	0(%rsp), %rbp
	call	1f
1:
	popq	%r8
	subq	$(1b - relocate_kernel), %r8
	movq	CP_PA_SWAP_PAGE(%r8), %r10
	movq	CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
	movq	CP_PA_TABLE_PAGE(%r8), %rax
	movq	%rax, %cr3
	lea	PAGE_SIZE(%r8), %rsp
	call	swap_pages
	movq	$virtual_mapped, %rax
	pushq	%rax
	ret

virtual_mapped:
	movq	RSP(%r8), %rsp
	movq	CR4(%r8), %rax
	movq	%rax, %cr4
	movq	CR3(%r8), %rax
	movq	CR0(%r8), %r8
	movq	%rax, %cr3
	movq	%r8, %cr0
	movq	%rbp, %rax

	popf
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	ret

	/* Do the copies */
swap_pages:
	movq	%rdi, %rcx	/* Put the page_list in %rcx */
	xorl	%edi, %edi
	xorl	%esi, %esi
	jmp	1f

0:	/* top, read another word for the indirection page */
	movq	(%rbx), %rcx
	addq	$8, %rbx
1:
	testb	$0x1, %cl	/* is it a destination page? */
	jz	2f
	movq	%rcx, %rdi
	andq	$0xfffffffffffff000, %rdi
	jmp	0b
2:
	testb	$0x2, %cl	/* is it an indirection page? */
	jz	2f
	movq	%rcx, %rbx
	andq	$0xfffffffffffff000, %rbx
	jmp	0b
2:
	testb	$0x4, %cl	/* is it the done indicator? */
	jz	2f
	jmp	3f
2:
	testb	$0x8, %cl	/* is it the source indicator? */
	jz	0b		/* Ignore it otherwise */
	movq	%rcx, %rsi	/* For every source page do a copy */
	andq	$0xfffffffffffff000, %rsi

	movq	%rdi, %rdx	/* Save destination page to %rdx */
	movq	%rsi, %rax	/* Save source page to %rax */

	/* copy source page to swap page */
	movq	%r10, %rdi
	movl	$512, %ecx
	rep ; movsq

	/* copy destination page to source page */
	movq	%rax, %rdi
	movq	%rdx, %rsi
	movl	$512, %ecx
	rep ; movsq

	/* copy swap page to destination page */
	movq	%rdx, %rdi
	movq	%r10, %rsi
	movl	$512, %ecx
	rep ; movsq

	lea	PAGE_SIZE(%rax), %rsi
	jmp	0b
3:
	ret

	.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel
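
/*
 * Reference sketch (not assembled): the page_list that swap_pages walks is a
 * chain of 8-byte entries whose low bits carry the IND_* flags tested above
 * (0x1 destination, 0x2 indirection, 0x4 done, 0x8 source, as defined in
 * include/linux/kexec.h) and whose upper bits carry a page-aligned physical
 * address.  The C below only mirrors the dispatch logic of the loop, with a
 * plain memcpy standing in for the three-way copy through the swap page that
 * preserves the old contents for jumping back, and it assumes the identity
 * mapping so physical addresses can be used as pointers.  The names
 * walk_page_list, PAGE_SIZE_4K and PAGE_MASK_4K are illustrative, not kernel
 * APIs.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define IND_DESTINATION	0x1	/* entry selects the next destination page */
#define IND_INDIRECTION	0x2	/* entry points at the next indirection page */
#define IND_DONE	0x4	/* end of the list */
#define IND_SOURCE	0x8	/* entry names a source page to copy */

#define PAGE_SIZE_4K	4096UL
#define PAGE_MASK_4K	(~(PAGE_SIZE_4K - 1))	/* 0xfffffffffffff000 */

/*
 * Walk one kexec page list, copying each source page to the current
 * destination.  As in the assembly, the head entry is expected to be an
 * indirection (or done) entry; reads continue from the indirection page
 * once one has been seen.
 */
static void walk_page_list(uint64_t head)
{
	uint64_t e = head;	/* first entry arrives by value (%rcx) */
	uint64_t *ind = NULL;	/* current position in the chain (%rbx) */
	char *dest = NULL;	/* current destination page (%rdi) */

	for (;;) {
		if (e & IND_DESTINATION) {
			dest = (char *)(e & PAGE_MASK_4K);
		} else if (e & IND_INDIRECTION) {
			ind = (uint64_t *)(e & PAGE_MASK_4K);
		} else if (e & IND_DONE) {
			return;
		} else if (e & IND_SOURCE) {
			memcpy(dest, (void *)(e & PAGE_MASK_4K), PAGE_SIZE_4K);
			dest += PAGE_SIZE_4K;	/* the copy leaves %rdi at the next page */
		}
		e = *ind++;	/* read the next word of the chain */
	}
}
#endif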