// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

#ifdef CONFIG_X86_5LEVEL
#undef pgtable_l5_enabled
#define pgtable_l5_enabled __pgtable_l5_enabled
#endif

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
EXPORT_SYMBOL(__pgtable_l5_enabled);
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

#define __head __section(.head.text)

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
        return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
        if (native_cpuid_eax(0) < 7)
                return false;

        if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
                return false;

        *fixup_int(&pgtable_l5_enabled, physaddr) = 1;
        *fixup_int(&pgdir_shift, physaddr) = 48;
        *fixup_int(&ptrs_per_p4d, physaddr) = 512;
        *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
        *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
        *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

        return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
        return false;
}
#endif
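/*
 * For example (illustrative values only): if the kernel is linked so
 * that _text == 0xffffffff81000000 but the bootloader placed the image
 * at physaddr == 0x10000000, then for a global linked at
 * 0xffffffff81002000, fixup_pointer() returns (void *)0x10002000 --
 * the place where that object actually sits in memory until the final
 * kernel mappings are installed.
 */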
/* Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        bool la57;
        int i;
        unsigned int *next_pgt_ptr;

        la57 = check_la57_support(physaddr);

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();

        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        p = pgd + pgd_index(__START_KERNEL_map);
        if (la57)
                *p = (unsigned long)level4_kernel_pgt;
        else
                *p = (unsigned long)level3_kernel_pgt;
        *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

        if (la57) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        pmd[506] += load_delta;

        /*
         * Set up the identity mapping for the switchover.  These
         * entries should *NOT* have the global bit set!  This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

        if (la57) {
                p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
                                    physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
                p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
                p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }

        i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
        pud[i + 0] = (pudval_t)pmd + pgtable_flags;
        pud[i + 1] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
                pmd[idx] = pmd_entry + i * PMD_SIZE;
        }
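        /*
         * The loop above covers the whole [_text, _end) range with 2 MiB
         * mappings.  For example (illustrative numbers): a 20 MiB image
         * loaded at physaddr == 0x4000000 needs ten entries; the first
         * lands at index (0x4000000 >> PMD_SHIFT) % 512 == 32 and maps
         * 0x4000000, the next maps 0x4200000, and so on, each entry
         * carrying the SME mask when memory encryption is active.
         */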
        /*
         * Fixup the kernel text+data virtual addresses.  Note that
         * we might write invalid pmds when the kernel is relocated;
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
        }

        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}
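/*
 * Note that reset_early_page_tables() deliberately spares the last PGD
 * slot: pgd_index(__START_KERNEL_map) == PTRS_PER_PGD - 1, so the
 * kernel text/data mapping survives the wipe while every identity
 * mapping built so far is discarded and the early page-table pool is
 * handed back for reuse.
 */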
/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address or early pgt is done? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return -1;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!pgtable_l5_enabled)
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return 0;
}

int __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}
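/*
 * get_cmd_line_ptr() splices the legacy 32-bit command-line pointer
 * together with the ext_cmd_line_ptr high bits.  For example
 * (illustrative values): hdr.cmd_line_ptr == 0x0009d000 with
 * ext_cmd_line_ptr == 0x1 yields a command line at physical address
 * 0x10009d000.
 */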
static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system.  If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* set init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* hdr.version is always non-zero once the boot data has been copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}