// SPDX-License-Identifier: GPL-2.0
/*
 * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/init.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

/*
 * GDT used on the boot CPU before switching to virtual addresses.
 */
static struct desc_struct startup_gdt[GDT_ENTRIES] = {
	[GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]   = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]   = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
};

/*
 * Address needs to be set at runtime because it references the startup_gdt
 * while the kernel still uses a direct mapping.
 */
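/*
 * Note: the GDTR limit is the last valid byte offset of the table, i.e. the
 * table size in bytes minus one, hence the -1 below.
 */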
static struct desc_ptr startup_gdt_descr = {
	.size = sizeof(startup_gdt)-1,
	.address = 0,
};

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}
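/*
 * Illustration with hypothetical numbers: if _text is linked at
 * 0xffffffff81000000 but the image was loaded at physical address 0x5000000
 * (physaddr), a global linked at 0xffffffff81234000 is fixed up to 0x5234000,
 * i.e. where it actually lives right now, reachable through the identity
 * mapping the code is still running on.
 */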

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	*fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
	*fixup_int(&pgdir_shift, physaddr) = 48;
	*fixup_int(&ptrs_per_p4d, physaddr) = 512;
	*fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
	*fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
	*fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

	return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
	return false;
}
#endif
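/*
 * With 5-level paging the layout constants change accordingly: the top-level
 * index is taken from bits 48 and up (pgdir_shift = 48), each p4d table gains
 * 512 entries, and the direct-map/vmalloc/vmemmap bases move to their _L5
 * locations. Otherwise the 4-level defaults above stay in effect.
 */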

static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
{
	unsigned long vaddr, vaddr_end;
	int i;

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (sme_get_me_mask()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;

		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			/*
			 * On SNP, transition the page to shared in the RMP table so that
			 * it is consistent with the page table attribute change.
			 *
			 * __start_bss_decrypted has a virtual address in the high range
			 * mapping (kernel .text). PVALIDATE, by way of
			 * early_snp_set_memory_shared(), requires a valid virtual
			 * address but the kernel is currently running off of the identity
			 * mapping so use __pa() to get a *currently* valid virtual address.
			 */
			early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);

			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	pteval_t *mask_ptr;
	bool la57;
	int i;
	unsigned int *next_pgt_ptr;

	la57 = check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
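	/*
	 * For illustration (hypothetical numbers): with _text linked at
	 * __START_KERNEL_map + 0x1000000 and the kernel loaded at physical
	 * address 0x5000000, load_delta is 0x4000000 -- the amount by which
	 * every physical address stored in the static page tables must be
	 * shifted.
	 */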

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_MASK)
		for (;;);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	p = pgd + pgd_index(__START_KERNEL_map);
	if (la57)
		*p = (unsigned long)level4_kernel_pgt;
	else
		*p = (unsigned long)level3_kernel_pgt;
	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

	if (la57) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		pmd[i] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

	if (la57) {
		p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
				    physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
	pmd_entry &= *mask_ptr;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}
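	/*
	 * The "% PTRS_PER_*" arithmetic above wraps the indices derived from
	 * physaddr into each table; two adjacent entries are written per level
	 * so the mapping still works if the image straddles an entry boundary,
	 * and enough PMDs are written to identity-map _text.._end.
	 */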

	/*
	 * Fixup the kernel text+data virtual addresses. Note that we might
	 * write invalid pmds; when the kernel is relocated, cleanup_highmap()
	 * fixes this up along with the mappings beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

	return sme_postprocess_startup(bp, pmd);
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}
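/*
 * Note: the memset above deliberately stops one entry short (PTRS_PER_PGD-1),
 * leaving the last PGD slot -- the kernel symbol map set up in __startup_64()
 * -- intact while everything else, including the identity mapping, is wiped.
 */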

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}

static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}
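/*
 * In other words: a faulting address is rounded down to a 2M boundary
 * (PMD_MASK) and mapped with a single large early PMD entry, so one early
 * page fault covers the whole 2M region containing the access.
 */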

void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	if (trapnr == X86_TRAP_VE && tdx_early_handle_ve(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
	memset(__brk_base, 0,
	       (unsigned long) __brk_limit - (unsigned long) __brk_base);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
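/*
 * The command line pointer is assembled from two boot_params fields: the
 * legacy 32-bit hdr.cmd_line_ptr provides the low half and ext_cmd_line_ptr
 * the high half, so boot loaders can place the command line above 4G.
 */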

static void __init copy_bootdata(char *real_mode_data)
{
	char * command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
			     (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	/*
	 * This needs to happen *before* kasan_early_init() because the latter
	 * maps stuff into that page.
	 */
	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	/*
	 * Flush global TLB entries which could be left over from the trampoline page
	 * table.
	 *
	 * This needs to happen *after* kasan_early_init() as KASAN-enabled .configs
	 * instrument native_write_cr4() so KASAN must be initialized for that
	 * instrumentation to work.
	 */
	__native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4));

	idt_setup_early_handler();

	/* Needed before cc_platform_has() can be used for TDX */
	tdx_early_init();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init __noreturn x86_64_start_reservations(char *real_mode_data)
{
	/* version is always non-zero if the boot data has already been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

static struct desc_ptr bringup_idt_descr = {
	.size		= (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
	.address	= 0, /* Set at runtime */
};

static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	struct idt_data data;
	gate_desc desc;

	init_idt_data(&data, n, handler);
	idt_init_desc(&desc, &data);
	native_write_idt_entry(idt, n, &desc);
#endif
}
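/*
 * Note: without CONFIG_AMD_MEM_ENCRYPT the body above compiles to nothing, so
 * no handlers are ever installed in the bringup IDT; it is only populated with
 * the early #VC handler needed by SEV-ES guests.
 */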

/* This runs while still in the direct mapping */
static void startup_64_load_idt(unsigned long physbase)
{
	struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
	gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		void *handler;

		/* VMM Communication Exception */
		handler = fixup_pointer(vc_no_ghcb, physbase);
		set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
	}

	desc->address = (unsigned long)idt;
	native_load_idt(desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
	/* VMM Communication Exception */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		setup_ghcb();
		set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);
	}

	bringup_idt_descr.address = (unsigned long)bringup_idt_table;
	native_load_idt(&bringup_idt_descr);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_env(unsigned long physbase)
{
	/* Load GDT */
	startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

	startup_64_load_idt(physbase);
}