// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016  Yinghai Lu
 * Copyright (C)      2016  Kees Cook
 */

/* No PAGE_TABLE_ISOLATION support needed: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "error.h"
#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <linux/pgtable.h>
#include <asm/cmpxchg.h>
#include <asm/trap_pf.h>
#include <asm/trapnr.h>
#include <asm/init.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

#define _SETUP
#include <asm/setup.h>	/* For COMMAND_LINE_SIZE */
#undef _SETUP

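/* Defined in cmdline.c - returns the address of the kernel command line. */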
extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
	unsigned char *pgt_buf;
	unsigned long pgt_buf_size;
	unsigned long pgt_buf_offset;
};

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
	struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
	unsigned char *entry;

	/* Validate there is space available for a new page. */
	if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
		debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
		return NULL;
	}

	/* Consumed more tables than expected? */
	if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
		debug_putstr("pgt_buf running low in " __FILE__ "\n");
		debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
		debug_putaddr(pages->pgt_buf_offset);
		debug_putaddr(pages->pgt_buf_size);
	}

	entry = pages->pgt_buf + pages->pgt_buf_offset;
	pages->pgt_buf_offset += PAGE_SIZE;

	return entry;
}

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

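/*
 * Mask of valid physical address bits; the SME encryption bit is
 * cleared from it in initialize_identity_maps() below.
 */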
phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time not build time.
 */
static struct x86_mapping_info mapping_info;

/*
 * Adds the specified range to the identity mappings, rounded out to 2M
 * (PMD) granularity.
 */
void kernel_add_identity_map(unsigned long start, unsigned long end)
{
	int ret;

	/* Align boundary to 2M. */
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	if (start >= end)
		return;

	/* Build the mapping. */
	ret = kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, start, end);
	if (ret)
		error("Error: kernel_ident_mapping_init() failed\n");
}

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void *rmode)
{
	unsigned long cmdline;
	struct setup_data *sd;

	/* Exclude the encryption mask from __PHYSICAL_MASK */
	physical_mask &= ~sme_me_mask;

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, '_pgtable' is used to allocate the p4d page
	 * table; the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) covers both the 4- and 5-level
	 * cases. On 4-level paging it is equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}

	/*
	 * New page-table is set up - map the kernel image, boot_params and the
	 * command line. The uncompressed kernel requires boot_params and the
	 * command line to be mapped in the identity mapping. Map them
	 * explicitly here in case the compressed kernel does not touch them,
	 * or does not touch all the pages covering them.
	 */
	kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
	boot_params = rmode;
	kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
	cmdline = get_cmd_line_ptr();
	kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);

	/*
	 * Also map the setup_data entries passed via boot_params in case they
	 * need to be accessed by the uncompressed kernel via the identity
	 * mapping.
	 */
	sd = (struct setup_data *)boot_params->hdr.setup_data;
	while (sd) {
		unsigned long sd_addr = (unsigned long)sd;

		kernel_add_identity_map(sd_addr, sd_addr + sizeof(*sd) + sd->len);
		sd = (struct setup_data *)sd->next;
	}

	sev_prep_identity_maps(top_level_pgt);

	/* Load the new page-table. */
	write_cr3(top_level_pgt);

	/*
	 * Now that the required page table mappings are established and a
	 * GHCB can be used, check for SNP guest/HV feature compatibility.
	 */
	snp_check_features();
}

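/*
 * Splits the 2M (PMD) mapping covering '__address' into 4K PTE mappings
 * so that attributes of a single page can be changed. Returns a pointer
 * to the PTE of '__address' in the new page-table, or NULL if allocating
 * the PTE page failed.
 */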
static pte_t *split_large_pmd(struct x86_mapping_info *info,
			      pmd_t *pmdp, unsigned long __address)
{
	unsigned long page_flags;
	unsigned long address;
	pte_t *pte;
	pmd_t pmd;
	int i;

	pte = (pte_t *)info->alloc_pgt_page(info->context);
	if (!pte)
		return NULL;

	address     = __address & PMD_MASK;
	/* No large page - clear PSE flag */
	page_flags  = info->page_flag & ~_PAGE_PSE;

	/* Populate the PTEs */
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pte(&pte[i], __pte(address | page_flags));
		address += PAGE_SIZE;
	}

	/*
	 * Ideally we would clear the large PMD first and do a TLB flush
	 * before writing the new PMD. But the 2M range of the PMD might
	 * contain the code we execute and/or the stack we are on, so we
	 * can't do that. It is still safe here because we are going from
	 * large to small mappings and we are the only user of the
	 * page-table, so there is no chance of a TLB multihit.
	 */
	pmd = __pmd((unsigned long)pte | info->kernpg_flag);
	set_pmd(pmdp, pmd);
	/* Flush TLB to establish the new PMD */
	write_cr3(top_level_pgt);

	return pte + pte_index(__address);
}

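/*
 * Flushes the page at 'address' from the CPU caches with CLFLUSH; needed
 * before the encryption attribute of the page is changed.
 */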
static void clflush_page(unsigned long address)
{
	unsigned int flush_size;
	char *cl, *start, *end;

	/*
	 * Hardcode cl-size to 64 - CPUID can't be used here because that might
	 * cause another #VC exception and the GHCB is not ready to use yet.
	 */
	flush_size = 64;
	start      = (char *)(address & PAGE_MASK);
	end        = start + PAGE_SIZE;

	/*
	 * First make sure there are no pending writes on the cache-lines to
	 * flush.
	 */
	asm volatile("mfence" : : : "memory");

	for (cl = start; cl != end; cl += flush_size)
		clflush(cl);
}

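/*
 * Sets and/or clears page-table flags of the 4K page at 'address',
 * splitting a large PMD mapping into PTEs first if necessary. When the
 * encryption attribute changes, the page state in the SEV-SNP RMP table
 * is updated to match.
 */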
static int set_clr_page_flags(struct x86_mapping_info *info,
			      unsigned long address,
			      pteval_t set, pteval_t clr)
{
	pgd_t *pgdp = (pgd_t *)top_level_pgt;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;

	/*
	 * First make sure there is a PMD mapping for 'address'.
	 * It should already exist, but keep things generic.
	 *
	 * To map the page just read from it and fault it in if there is no
	 * mapping yet. kernel_add_identity_map() can't be called here because
	 * that would unconditionally map the address on PMD level, destroying
	 * any PTE-level mappings that might already exist. Use assembly here
	 * so the access won't be optimized away.
	 */
	asm volatile("mov %[address], %%r9"
		     :: [address] "g" (*(unsigned long *)address)
		     : "r9", "memory");

	/*
	 * The page is mapped at least with PMD size - so skip checks and walk
	 * directly to the PMD.
	 */
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);

	if (pmd_large(*pmdp))
		ptep = split_large_pmd(info, pmdp, address);
	else
		ptep = pte_offset_kernel(pmdp, address);

	if (!ptep)
		return -ENOMEM;

	/*
	 * Changing the encryption attribute of a page requires flushing it
	 * from the caches.
	 */
	if ((set | clr) & _PAGE_ENC) {
		clflush_page(address);

		/*
		 * If the encryption attribute is being cleared, change the page state
		 * to shared in the RMP table.
		 */
		if (clr)
			snp_set_page_shared(__pa(address & PAGE_MASK));
	}

	/* Update PTE */
	pte = *ptep;
	pte = pte_set_flags(pte, set);
	pte = pte_clear_flags(pte, clr);
	set_pte(ptep, pte);

	/*
	 * If the encryption attribute is being set, then change the page state to
	 * private in the RMP entry. The page state change must be done after the PTE
	 * is updated.
	 */
	if (set & _PAGE_ENC)
		snp_set_page_private(__pa(address & PAGE_MASK));

	/* Flush TLB after changing encryption attribute */
	write_cr3(top_level_pgt);

	return 0;
}

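/*
 * Helpers to change the encryption or presence attribute of a single 4K
 * page in the identity mapping.
 */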
int set_page_decrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_ENC);
}

int set_page_encrypted(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, _PAGE_ENC, 0);
}

int set_page_non_present(unsigned long address)
{
	return set_clr_page_flags(&mapping_info, address, 0, _PAGE_PRESENT);
}

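/* Prints diagnostics for an unexpected boot-time page-fault and halts. */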
static void do_pf_error(const char *msg, unsigned long error_code,
			unsigned long address, unsigned long ip)
{
	error_putstr(msg);

	error_putstr("\nError Code: ");
	error_puthex(error_code);
	error_putstr("\nCR2: 0x");
	error_puthex(address);
	error_putstr("\nRIP relative to _head: 0x");
	error_puthex(ip - (unsigned long)_head);
	error_putstr("\n");

	error("Stopping.\n");
}

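/*
 * Page-fault handler: faults are expected here because the identity
 * mapping is built on demand. Sane faults are resolved by mapping the
 * 2M region around the faulting address; unexpected error codes and
 * faults on the GHCB page are fatal.
 */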
void do_boot_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = native_read_cr2();
	unsigned long end;
	bool ghcb_fault;

	ghcb_fault = sev_es_check_ghcb_fault(address);

	address   &= PMD_MASK;
	end        = address + PMD_SIZE;

	/*
	 * Check for unexpected error codes. Unexpected are:
	 *	- Faults on present pages
	 *	- User faults
	 *	- Reserved bits set
	 */
	if (error_code & (X86_PF_PROT | X86_PF_USER | X86_PF_RSVD))
		do_pf_error("Unexpected page-fault:", error_code, address, regs->ip);
	else if (ghcb_fault)
		do_pf_error("Page-fault on GHCB page:", error_code, address, regs->ip);

	/*
	 * Error code is sane - now identity map the 2M region around
	 * the faulting address.
	 */
	kernel_add_identity_map(address, end);
}

void do_boot_nmi_trap(struct pt_regs *regs, unsigned long error_code)
{
	/* Empty handler to ignore NMI during early boot */
}