xref: /openbmc/linux/arch/x86/kernel/head64.c (revision e0f6d1a5)
// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

#ifdef CONFIG_X86_5LEVEL
#undef pgtable_l5_enabled
#define pgtable_l5_enabled __pgtable_l5_enabled
#endif

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
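/*
 * Rough picture of the scheme: early_dynamic_pgts is a small, fixed pool
 * of page-table pages set aside in head_64.S, and next_early_pgt is a
 * bump-allocator index into it. When the pool is exhausted, the early
 * fault path simply wipes the tables and starts over (see
 * reset_early_page_tables() and __early_make_pgtable() below).
 * early_pmd_flags drops _PAGE_GLOBAL and _PAGE_NX, presumably because
 * CR4.PGE and EFER.NX cannot be relied upon this early in boot.
 */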

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
EXPORT_SYMBOL(__pgtable_l5_enabled);
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

#define __head	__section(.head.text)

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}
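/*
 * For illustration (made-up numbers): if _text is linked at
 * 0xffffffff81000000 and the image is actually loaded at physical
 * 0x0000000004000000, then for a global at link-time address
 * 0xffffffff81002000:
 *
 *	fixup_pointer(&var, 0x4000000)
 *		== (void *)0xffffffff81002000 - 0xffffffff81000000
 *		   + 0x4000000
 *		== (void *)0x0000000004002000
 *
 * i.e. the pointer is rebased from the (not yet usable) kernel virtual
 * mapping onto the identity mapping we are currently running from.
 */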

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
	if (native_cpuid_eax(0) < 7)
		return false;

	if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
		return false;

	*fixup_int(&pgtable_l5_enabled, physaddr) = 1;
	*fixup_int(&pgdir_shift, physaddr) = 48;
	*fixup_int(&ptrs_per_p4d, physaddr) = 512;
	*fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
	*fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
	*fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

	return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
	return false;
}
#endif
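/*
 * Sketch of the detection above: CPUID leaf 7 (subleaf 0) reports LA57
 * in ECX bit 16, and X86_FEATURE_LA57 encodes that bit position, so
 * "X86_FEATURE_LA57 & 31" recovers the bit within the 32-bit ECX word.
 * When 5-level paging is available, the geometry switches from 4-level
 * (pgdir_shift 39, one p4d entry) to 5-level (pgdir_shift 48, 512 p4d
 * entries), and the direct-map/vmalloc/vmemmap bases move to their L5
 * locations. All writes go through fixup_*() because we are still
 * running from the identity mapping here.
 */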

unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	bool la57;
	int i;
	unsigned int *next_pgt_ptr;

	la57 = check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);
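	/*
	 * Worked example with made-up numbers: _text linked at
	 * __START_KERNEL_map + 0x1000000 and the kernel loaded at
	 * physical 0x5000000 gives
	 *
	 *	load_delta = 0x5000000 - 0x1000000 = 0x4000000
	 *
	 * i.e. how far every physical address baked into the page
	 * tables at build time must be shifted. A delta that is not
	 * 2M-aligned cannot be expressed with 2M pages, hence the
	 * hang above.
	 */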

	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable(bp);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	p = pgd + pgd_index(__START_KERNEL_map);
	if (la57)
		*p = (unsigned long)level4_kernel_pgt;
	else
		*p = (unsigned long)level3_kernel_pgt;
	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

	if (la57) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

	if (la57) {
		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[(i + 0) % PTRS_PER_PGD] = (pgdval_t)p4d + pgtable_flags;
		pgd[(i + 1) % PTRS_PER_PGD] = (pgdval_t)p4d + pgtable_flags;

		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[(i + 0) % PTRS_PER_PGD] = (pgdval_t)pud + pgtable_flags;
		pgd[(i + 1) % PTRS_PER_PGD] = (pgdval_t)pud + pgtable_flags;
	}

	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
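	/*
	 * Rough rationale for the paired writes above: the kernel image
	 * may cross a PGD/P4D/PUD boundary, so each level installs two
	 * consecutive slots, both pointing at the same next-level table.
	 * The "% PTRS_PER_*" keeps the second slot in range when the
	 * first one is the last entry of its table; the 2M mappings
	 * written below wrap within the single pmd page, matching
	 * whichever pud slot ends up being used.
	 */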

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	pmd_entry &= __supported_pte_mask;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}
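	/*
	 * Worked example (made-up numbers): a 20M image loaded at
	 * physaddr 0x4000000 needs DIV_ROUND_UP(20M, 2M) = 10 PMD
	 * entries, starting at index 0x4000000 >> 21 = 32, mapping
	 * 0x4000000, 0x4200000, ..., 0x5200000 as 2M pages.
	 */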

	/*
	 * Fix up the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds; when the kernel is relocated,
	 * cleanup_highmap() fixes this up, along with the mappings
	 * beyond _end.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;
	}

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}
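/*
 * A note on PTRS_PER_PGD - 1 above: only the first 511 PGD slots are
 * cleared, so the top slot -- which holds the kernel text/data mapping
 * at __START_KERNEL_map (pgd_index 511) -- survives the wipe. Reloading
 * CR3 afterwards flushes the stale lower mappings.
 */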

/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Is the address invalid, or is early pgt handling already done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return -1;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
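	/*
	 * For illustration: a table page allocated from early_dynamic_pgts
	 * lives in the kernel image, so its virtual address V satisfies
	 * V - __START_KERNEL_map + phys_base == physical address, and the
	 * reverse translation below recovers V from the PFN stored in an
	 * entry. Going through __va() (i.e. __PAGE_OFFSET) instead could
	 * fault on a not-yet-mapped direct-map address and recurse into
	 * this very function.
	 */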
	if (!pgtable_l5_enabled)
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}

int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}
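/*
 * Rough sketch of the flow that lands here (the trigger lives in the
 * early exception path, not in this file): touching an unmapped
 * direct-map address this early takes a page fault, the early handler
 * extracts the faulting address from CR2 and calls early_make_pgtable(),
 * which installs a 2M direct-map entry on demand, e.g.:
 *
 *	void *p = __va(0x12345678);	// no mapping yet
 *	... *(char *)p ...		// #PF
 *	-> early_make_pgtable((unsigned long)p)
 *	   -> __early_make_pgtable((unsigned long)p,
 *				   (0x12345678 & PMD_MASK) + early_pmd_flags)
 */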

/*
 * Don't add a printk() in here: printk() relies on the PDA, which is
 * not initialized yet.
 */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}
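/*
 * For illustration: the boot protocol splits the command-line pointer
 * into a legacy 32-bit field (hdr.cmd_line_ptr) and an upper-half
 * extension (ext_cmd_line_ptr), so e.g. a low half of 0x0009d000
 * combined with a high half of 0x00000001 yields the 64-bit physical
 * address 0x10009d000.
 */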

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (These are purely build-time checks and
	 * produce no code.)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	idt_setup_early_handler();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* Set the init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];
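	/*
	 * The single slot copied above is enough because
	 * pgd_index(__START_KERNEL_map) == 511: that one entry covers
	 * the top 512GB of virtual space, including the kernel text
	 * mapping that __startup_64() fixed up earlier.
	 */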

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* The version is always non-zero if the boot data has been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}