/*
 * relocate_kernel.S - put the kernel image in place to boot
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/kexec.h>
#include <asm/processor-flags.h>
#include <asm/pgtable_types.h>

/*
 * Must be relocatable PIC code callable as a C function
 */

#define PTR(x)		(x << 3)
#define PAGE_ATTR	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
 * ~ control_page + PAGE_SIZE are used as data storage and stack for
 * jumping back
 */
#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))

/* Minimal CPU state */
#define RSP			DATA(0x0)
#define CR0			DATA(0x8)
#define CR3			DATA(0x10)
#define CR4			DATA(0x18)

/* other data */
#define CP_PA_TABLE_PAGE	DATA(0x20)
#define CP_PA_SWAP_PAGE		DATA(0x28)
#define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)

	.text
	.align PAGE_SIZE
	.code64
	.globl relocate_kernel
relocate_kernel:
	/*
	 * %rdi indirection_page
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
	 * %r8  sme_active
	 */

	/* Save the CPU context, used for jumping back */
	pushq	%rbx
	pushq	%rbp
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	pushf

	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11
	movq	%rsp, RSP(%r11)
	movq	%cr0, %rax
	movq	%rax, CR0(%r11)
	movq	%cr3, %rax
	movq	%rax, CR3(%r11)
	movq	%cr4, %rax
	movq	%rax, CR4(%r11)

	/* zero out flags, and disable interrupts */
	pushq	$0
	popfq

	/* Save SME active flag */
	movq	%r8, %r12

	/*
	 * get physical address of control page now
	 * this is impossible after page table switch
	 */
	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8

	/* get physical address of page table now too */
	movq	PTR(PA_TABLE_PAGE)(%rsi), %r9

	/* get physical address of swap page now */
	movq	PTR(PA_SWAP_PAGE)(%rsi), %r10

	/* save some information for jumping back */
	movq	%r9, CP_PA_TABLE_PAGE(%r11)
	movq	%r10, CP_PA_SWAP_PAGE(%r11)
	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)

	/* Switch to the identity mapped page tables */
	movq	%r9, %cr3

	/* setup a new stack at the end of the physical control page */
	lea	PAGE_SIZE(%r8), %rsp

	/* jump to identity mapped page */
	addq	$(identity_mapped - relocate_kernel), %r8
	pushq	%r8
	ret

identity_mapped:
	/* set return address to 0 if not preserving context */
	pushq	$0
	/* store the start address on the stack */
	pushq	%rdx

	/*
	 * Set cr0 to a known state:
	 *  - Paging enabled
	 *  - Alignment check disabled
	 *  - Write protect disabled
	 *  - No task switch
	 *  - Don't do FP software emulation.
	 *  - Protected mode enabled
	 */
	movq	%cr0, %rax
	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movq	%rax, %cr0

	/*
	 * Set cr4 to a known state:
	 *  - physical address extension enabled
	 */
	movl	$X86_CR4_PAE, %eax
	movq	%rax, %cr4

	jmp	1f
1:

	/* Flush the TLB (needed?) */
	movq	%r9, %cr3

	/*
	 * If SME is active, there could be old encrypted cache line
	 * entries that will conflict with the now unencrypted memory
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	testq	%r12, %r12
	jz	1f
	wbinvd
1:

	movq	%rcx, %r11	/* stash preserve_context; swap_pages clobbers %rcx */
	call	swap_pages

	/*
	 * To be certain of avoiding problems with self-modifying code
	 * I need to execute a serializing instruction here.
	 * So I flush the TLB by reloading %cr3 here, it's handy,
	 * and not processor dependent.
	 */
	movq	%cr3, %rax
	movq	%rax, %cr3

	/*
	 * set all of the registers to known values
	 * leave %rsp alone
	 */

	testq	%r11, %r11
	jnz	1f
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%esi, %esi
	xorl	%edi, %edi
	xorl	%ebp, %ebp
	xorl	%r8d, %r8d
	xorl	%r9d, %r9d
	xorl	%r10d, %r10d
	xorl	%r11d, %r11d
	xorl	%r12d, %r12d
	xorl	%r13d, %r13d
	xorl	%r14d, %r14d
	xorl	%r15d, %r15d

	ret

1:
	popq	%rdx
	leaq	PAGE_SIZE(%r10), %rsp
	call	*%rdx

	/* get the re-entry point of the peer system */
	movq	0(%rsp), %rbp
	call	1f
1:
	popq	%r8
	subq	$(1b - relocate_kernel), %r8
	movq	CP_PA_SWAP_PAGE(%r8), %r10
	movq	CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
	movq	CP_PA_TABLE_PAGE(%r8), %rax
	movq	%rax, %cr3
	lea	PAGE_SIZE(%r8), %rsp
	call	swap_pages
	movq	$virtual_mapped, %rax
	pushq	%rax
	ret

virtual_mapped:
	movq	RSP(%r8), %rsp
	movq	CR4(%r8), %rax
	movq	%rax, %cr4
	movq	CR3(%r8), %rax
	movq	CR0(%r8), %r8
	movq	%rax, %cr3
	movq	%r8, %cr0
	movq	%rbp, %rax

	popf
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	ret

	/* Do the copies */
swap_pages:
	movq	%rdi, %rcx	/* Put the indirection_page in %rcx */
	xorl	%edi, %edi
	xorl	%esi, %esi
	jmp	1f

0:	/* top, read another word for the indirection page */

	movq	(%rbx), %rcx
	addq	$8, %rbx
1:
	testb	$0x1, %cl	/* is it a destination page? */
	jz	2f
	movq	%rcx, %rdi
	andq	$0xfffffffffffff000, %rdi
	jmp	0b
2:
	testb	$0x2, %cl	/* is it an indirection page? */
	jz	2f
	movq	%rcx, %rbx
	andq	$0xfffffffffffff000, %rbx
	jmp	0b
2:
	testb	$0x4, %cl	/* is it the done indicator? */
	jz	2f
	jmp	3f
2:
	testb	$0x8, %cl	/* is it the source indicator? */
	jz	0b		/* Ignore it otherwise */
	movq	%rcx, %rsi	/* For every source page do a copy */
	andq	$0xfffffffffffff000, %rsi

	movq	%rdi, %rdx	/* %rdx = destination page */
	movq	%rsi, %rax	/* %rax = source page */

	movq	%r10, %rdi	/* copy source page to swap page */
	movl	$512, %ecx
	rep ; movsq

	movq	%rax, %rdi	/* copy destination page to source page */
	movq	%rdx, %rsi
	movl	$512, %ecx
	rep ; movsq

	movq	%rdx, %rdi	/* copy swap page (old source) to destination page */
	movq	%r10, %rsi
	movl	$512, %ecx
	rep ; movsq

	lea	PAGE_SIZE(%rax), %rsi
	jmp	0b
3:
	ret

	.globl kexec_control_code_size
.set kexec_control_code_size, . - relocate_kernel
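
/*
 * For reference, a minimal C sketch (not kernel code) of the indirection
 * list that swap_pages walks above.  It assumes the IND_* flag values
 * from include/linux/kexec.h, which correspond to the 0x1/0x2/0x4/0x8
 * testb immediates in swap_pages; the helper name walk_indirection_list
 * is illustrative only, and the three-way exchange through the swap page
 * is simplified here to a plain copy:
 *
 *	#define IND_DESTINATION	0x1	// entry sets the destination page
 *	#define IND_INDIRECTION	0x2	// entry chains to the next indirection page
 *	#define IND_DONE	0x4	// entry terminates the list
 *	#define IND_SOURCE	0x8	// entry names a source page to copy
 *
 *	static void walk_indirection_list(unsigned long head)
 *	{
 *		unsigned long *entry = NULL;
 *		unsigned long e = head;		// first entry arrives in %rdi
 *		unsigned long dest = 0;
 *
 *		for (;;) {
 *			unsigned long page = e & PAGE_MASK;
 *
 *			if (e & IND_DESTINATION)
 *				dest = page;			// following copies land here
 *			else if (e & IND_INDIRECTION)
 *				entry = (unsigned long *)page;	// follow the chain
 *			else if (e & IND_DONE)
 *				break;
 *			else if (e & IND_SOURCE) {
 *				memcpy((void *)dest, (void *)page, PAGE_SIZE);
 *				dest += PAGE_SIZE;	// destination auto-increments
 *			}
 *			e = *entry++;
 *		}
 *	}
 */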