xref: /openbmc/linux/arch/x86/mm/fault.c (revision b181f7029bd71238ac2754ce7052dffd69432085)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/memblock.h>		/* max_low_pfn			*/
#include <linux/kfence.h>		/* kfence_handle_page_fault	*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
#include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h>			/* find_and_lock_vma() */

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/
#include <asm/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <asm/desc.h>			/* store_idt(), ...		*/
#include <asm/cpu_entry_area.h>		/* exception stack		*/
#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
#include <asm/vdso.h>			/* fixup_vdso_exception()	*/
#include <asm/irq_stack.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.  This is AMD erratum #91.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
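/*
 * Returns non-zero as long as the byte in @opcode is a valid prefix in
 * the current mode and the scan in is_prefetch() should keep going;
 * returns zero once scanning can stop.  *prefetch is set when the
 * 0x0F 0x0D / 0x0F 0x18 prefetch encodings are found.
 */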
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (get_kernel_nofault(opcode, instr))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

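/*
 * Erratum #91 only affects AMD K8: family 0xf parts before the
 * NPT-capable revisions (model 0x40 and later).
 */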
static bool is_amd_k8_pre_npt(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
			c->x86_vendor == X86_VENDOR_AMD &&
			c->x86 == 0xf && c->x86_model < 0x40);
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/* Erratum #91 affects AMD K8, pre-NPT CPUs */
	if (!is_amd_k8_pre_npt())
		return 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	/*
	 * This code has historically always bailed out if IP points to a
	 * not-present page (e.g. due to a race).  No one has ever
	 * complained about this.
	 */
	pagefault_disable();

	while (instr < max_instr) {
		unsigned char opcode;

		if (user_mode(regs)) {
			if (get_user(opcode, (unsigned char __user *) instr))
				break;
		} else {
			if (get_kernel_nofault(opcode, instr))
				break;
		}

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}

	pagefault_enable();
	return prefetch;
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
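/*
 * Sync the PMD covering @address from the reference (init_mm) page
 * table into the page table behind @pgd.  Returns the reference PMD,
 * or NULL if init_mm has no mapping at that level either.
 */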
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}

/*
 *   Handle a fault on the vmalloc or module mapping area.
 *
 *   This is needed because there is a race condition between the time
 *   when the vmalloc mapping code updates the PMD to the point in time
 *   where it synchronizes this update with the other page-tables in the
 *   system.
 *
 *   In this race window another thread/CPU can map an area on the same
 *   PMD, find it already present, and not synchronize it with the rest
 *   of the system yet. As a result v[mz]alloc might return areas which
 *   are not mapped in every page-table in the system, causing an
 *   unhandled page-fault when they are accessed.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

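/*
 * Called by generic mm code after it updated kernel page tables in
 * [start, end]: propagate the affected PMDs from init_mm into every
 * page table on pgd_list, closing the race described above.
 */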
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & PMD_MASK;
	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
	     addr += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock is only needed for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), addr);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

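/*
 * 32-bit variant: dump the hardware page-table walk for @address to
 * the console, stopping early if an entry is not present, points to
 * highmem, or maps a large page.
 */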
static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}

#else /* CONFIG_X86_64: */

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

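/*
 * Probe whether a page-table entry can be read without faulting:
 * returns non-zero if the address is unreadable ("bad").
 */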
static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

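/*
 * 64-bit variant: print each level of the page-table walk for
 * @address, probing every entry with bad_address() first so that a
 * corrupted table cannot recursively fault during the dump.
 */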
static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_leaf(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (user_mode(regs))
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
	    idt_is_f00f_address(address)) {
		handle_invalid_op(regs);
		return 1;
	}
#endif
	return 0;
}

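/*
 * Decode the LDT or TSS descriptor selected by @index in the GDT and
 * print its base/limit for the oops below.  The descriptor is read
 * with copy_from_kernel_nofault() since the GDT itself may be part of
 * what is broken.
 */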
static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
	u32 offset = (index >> 3) * sizeof(struct desc_struct);
	unsigned long addr;
	struct ldttss_desc desc;

	if (index == 0) {
		pr_alert("%s: NULL\n", name);
		return;
	}

	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
		return;
	}

	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
			      sizeof(struct ldttss_desc))) {
		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
			 name, index);
		return;
	}

	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
	addr |= ((u64)desc.base3 << 32);
#endif
	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	if (address < PAGE_SIZE && !user_mode(regs))
		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
			(void *)address);
	else
		pr_alert("BUG: unable to handle page fault for address: %px\n",
			(void *)address);

	pr_alert("#PF: %s %s in %s mode\n",
		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write access" :
					       "read access",
			     user_mode(regs) ? "user" : "kernel");
	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
		 !(error_code & X86_PF_PROT) ? "not-present page" :
		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
		 (error_code & X86_PF_PK)    ? "protection keys violation" :
					       "permissions violation");

	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
		struct desc_ptr idt, gdt;
		u16 ldtr, tr;

		/*
		 * This can happen for quite a few reasons.  The more obvious
		 * ones are faults accessing the GDT, or LDT.  Perhaps
		 * surprisingly, if the CPU tries to deliver a benign or
		 * contributory exception from user code and gets a page fault
		 * during delivery, the page fault can be delivered as though
		 * it originated directly from user code.  This could happen
		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
		 * kernel or IST stack.
		 */
		store_idt(&idt);

		/* Usable even on Xen PV -- it's just slow. */
		native_store_gdt(&gdt);

		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
			 idt.address, idt.size, gdt.address, gdt.size);

		store_ldt(ldtr);
		show_ldttss(&gdt, "LDTR", ldtr);

		store_tr(tr);
		show_ldttss(&gdt, "TR", tr);
	}

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static void sanitize_error_code(unsigned long address,
				unsigned long *error_code)
{
	/*
	 * To avoid leaking information about the kernel page
	 * table layout, pretend that user-mode accesses to
	 * kernel addresses are always protection faults.
	 *
	 * NB: This means that failed vsyscalls with vsyscall=none
	 * will have the PROT bit.  This doesn't leak any
	 * information and does not appear to cause any problems.
	 */
	if (address >= TASK_SIZE_MAX)
		*error_code |= X86_PF_PROT;
}

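/*
 * Record the fault details in the task for the signal code to report
 * to userspace.  X86_PF_USER is OR'd in: once a signal is being sent,
 * the error code is presented as a user-mode fault.
 */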
static void set_signal_archinfo(unsigned long address,
				unsigned long error_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_nr = X86_TRAP_PF;
	tsk->thread.error_code = error_code | X86_PF_USER;
	tsk->thread.cr2 = address;
}

static noinline void
page_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
#ifdef CONFIG_VMAP_STACK
	struct stack_info info;
#endif
	unsigned long flags;
	int sig;

	if (user_mode(regs)) {
		/*
		 * Implicit kernel access from user mode?  Skip the stack
		 * overflow and EFI special cases.
		 */
		goto oops;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    get_stack_guard_info((void *)address, &info)) {
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
			      handle_stack_overflow,
			      ASM_CALL_ARG3,
			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));

		unreachable();
	}
#endif

	/*
	 * Buggy firmware could access regions which might page fault.  If
	 * this happens, EFI has a special OOPS path that will try to
	 * avoid hanging the system.
	 */
	if (IS_ENABLED(CONFIG_EFI))
		efi_crash_gracefully_on_page_fault(address);

	/* Only not-present faults should be handled by KFENCE. */
	if (!(error_code & X86_PF_PROT) &&
	    kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
		return;

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(current))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

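/*
 * Handle a fault taken in kernel mode: consume it via an
 * exception-table fixup or the AMD erratum #91 prefetch quirk,
 * and oops if neither applies.
 */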
static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address, int signal, int si_code,
			 u32 pkey)
{
	WARN_ON_ONCE(user_mode(regs));

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	/*
	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
	 * instruction.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	page_fault_oops(regs, error_code, address);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
	/* This is a racy snapshot, but it's better than nothing. */
	int cpu = raw_smp_processor_id();

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	/*
	 * Dump the likely CPU where the fatal segfault happened.
	 * This can help identify faulty hardware.
	 */
	printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
	       topology_core_id(cpu), topology_physical_package_id(cpu));

	printk(KERN_CONT "\n");

	show_opcodes(regs, loglvl);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 pkey, int si_code)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address,
					 SIGSEGV, si_code, pkey);
		return;
	}

	if (!(error_code & X86_PF_USER)) {
		/* Implicit user access to kernel memory -- just oops */
		page_fault_oops(regs, error_code, address);
		return;
	}

	/*
	 * User mode accesses just cause a SIGSEGV.
	 * It's possible to have interrupts off here:
	 */
	local_irq_enable();

	/*
	 * Valid to do another page fault here because this one came
	 * from user space:
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata100(regs, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	if (likely(show_unhandled_signals))
		show_signal_msg(regs, error_code, address, tsk);

	set_signal_archinfo(address, error_code);

	if (si_code == SEGV_PKUERR)
		force_sig_pkuerr((void __user *)address, pkey);
	else
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);

	local_irq_disable();
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, u32 pkey, int si_code)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}

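/*
 * Decide whether a bad-access fault should be reported as SEGV_PKUERR:
 * true if the hardware flagged a protection-key violation (X86_PF_PK)
 * or if the VMA's protection key does not permit this access.
 */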
static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma)) {
		/*
		 * A protection key fault means that the PKRU value did not allow
		 * access to some PTE.  Userspace can figure out what PKRU was
		 * from the XSAVE state.  This function captures the pkey from
		 * the vma and passes it to userspace so userspace can discover
		 * which protection key was set on the PTE.
		 *
		 * If we get here, we know that the hardware signaled an X86_PF_PK
		 * fault and that there was a VMA once we got in the fault
		 * handler.  It does *not* guarantee that the VMA we find here
		 * was the one that we faulted on.
		 *
		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
		 * 3. T1   : faults...
		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
		 *	     faulted on a pte with its pkey=4.
		 */
		u32 pkey = vma_pkey(vma);

		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
	} else {
		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
	}
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  vm_fault_t fault)
{
	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address,
					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	set_signal_archinfo(address, error_code);

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		struct task_struct *tsk = current;
		unsigned lsb = 0;

		pr_err(
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return;
	}
#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}

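/*
 * Returns 1 if the entry (a PTE, or a huge upper-level entry cast to
 * pte_t) grants the access that faulted, i.e. the fault was spurious.
 */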
static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
static noinline int
spurious_kernel_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/*
	 * Only writes to RO or instruction fetches from NX may cause
	 * spurious faults.
	 *
	 * These could be from user or supervisor accesses but the TLB
	 * is only lazily flushed after a kernel mapping protection
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;

	if (p4d_large(*p4d))
		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_leaf(*pud))
		return spurious_kernel_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_kernel_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
NOKPROBE_SYMBOL(spurious_kernel_fault);

int show_unhandled_signals = 1;

10402d4a7167SIngo Molnar static inline int
104168da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
104292181f19SNick Piggin {
104307f146f5SDave Hansen 	/* This is only called for the current mm, so: */
104407f146f5SDave Hansen 	bool foreign = false;
1045e8c6226dSDave Hansen 
1046e8c6226dSDave Hansen 	/*
1047e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1048e8c6226dSDave Hansen 	 * an unconditional error and can never result in
1049e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1050e8c6226dSDave Hansen 	 */
10511067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1052e8c6226dSDave Hansen 		return 1;
1053e8c6226dSDave Hansen 
105433a709b2SDave Hansen 	/*
105574faeee0SSean Christopherson 	 * SGX hardware blocked the access.  This usually happens
105674faeee0SSean Christopherson 	 * when the enclave memory contents have been destroyed, like
105774faeee0SSean Christopherson 	 * after a suspend/resume cycle. In any case, the kernel can't
105874faeee0SSean Christopherson 	 * fix the cause of the fault.  Handle the fault as an access
105974faeee0SSean Christopherson 	 * error even in cases where no actual access violation
106074faeee0SSean Christopherson 	 * occurred.  This allows userspace to rebuild the enclave in
106174faeee0SSean Christopherson 	 * response to the signal.
106274faeee0SSean Christopherson 	 */
106374faeee0SSean Christopherson 	if (unlikely(error_code & X86_PF_SGX))
106474faeee0SSean Christopherson 		return 1;
106574faeee0SSean Christopherson 
106674faeee0SSean Christopherson 	/*
106707f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
10681067f030SRicardo Neri 	 * faults just to hit a X86_PF_PK as soon as we fill in a
106907f146f5SDave Hansen 	 * page.
107007f146f5SDave Hansen 	 */
10711067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
10721067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
107307f146f5SDave Hansen 		return 1;
107433a709b2SDave Hansen 
1075fd5439e0SRick Edgecombe 	/*
1076fd5439e0SRick Edgecombe 	 * Shadow stack accesses (PF_SHSTK=1) are only permitted to
1077fd5439e0SRick Edgecombe 	 * shadow stack VMAs. All other accesses result in an error.
1078fd5439e0SRick Edgecombe 	 */
1079fd5439e0SRick Edgecombe 	if (error_code & X86_PF_SHSTK) {
1080fd5439e0SRick Edgecombe 		if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK)))
1081fd5439e0SRick Edgecombe 			return 1;
1082fd5439e0SRick Edgecombe 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
1083fd5439e0SRick Edgecombe 			return 1;
1084fd5439e0SRick Edgecombe 		return 0;
1085fd5439e0SRick Edgecombe 	}
1086fd5439e0SRick Edgecombe 
10871067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
10882d4a7167SIngo Molnar 		/* write, present and write, not present: */
1089fd5439e0SRick Edgecombe 		if (unlikely(vma->vm_flags & VM_SHADOW_STACK))
1090fd5439e0SRick Edgecombe 			return 1;
109192181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
109292181f19SNick Piggin 			return 1;
10932d4a7167SIngo Molnar 		return 0;
10942d4a7167SIngo Molnar 	}
10952d4a7167SIngo Molnar 
10962d4a7167SIngo Molnar 	/* read, present: */
10971067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
109892181f19SNick Piggin 		return 1;
10992d4a7167SIngo Molnar 
11002d4a7167SIngo Molnar 	/* read, not present: */
11013122e80eSAnshuman Khandual 	if (unlikely(!vma_is_accessible(vma)))
110292181f19SNick Piggin 		return 1;
110392181f19SNick Piggin 
110492181f19SNick Piggin 	return 0;
110592181f19SNick Piggin }
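/*
 * Illustrative summary (editorial addition, not in the original source)
 * of how access_error() combines the error code with the VMA flags:
 *
 *	X86_PF_PK set, any VMA              -> 1 (pkey fault, not fixable)
 *	X86_PF_SGX set, any VMA             -> 1 (enclave must be rebuilt)
 *	X86_PF_SHSTK, no VM_SHADOW_STACK    -> 1 (shadow stack access to a
 *						  normal VMA)
 *	X86_PF_WRITE, VM_SHADOW_STACK       -> 1 (normal write to a shadow
 *						  stack VMA)
 *	X86_PF_WRITE, VM_WRITE              -> 0 (may be a COW break)
 *	read, X86_PF_PROT set               -> 1 (protection violation)
 *	read, VMA not accessible            -> 1 (e.g. PROT_NONE)
 */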
110692181f19SNick Piggin 
110730063810STony Luck bool fault_in_kernel_space(unsigned long address)
11080973a06cSHiroshi Shimamoto {
11093ae0ad92SDave Hansen 	/*
11103ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
11113ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
11123ae0ad92SDave Hansen 	 * address space.
11133ae0ad92SDave Hansen 	 */
11143ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
11153ae0ad92SDave Hansen 		return false;
11163ae0ad92SDave Hansen 
1117d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
11180973a06cSHiroshi Shimamoto }
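/*
 * Usage sketch (editorial addition, not in the original source): this
 * predicate routes the fault in handle_page_fault() further below:
 *
 *	if (fault_in_kernel_space(address))
 *		do_kern_addr_fault(regs, error_code, address);
 *	else
 *		do_user_addr_fault(regs, error_code, address);
 *
 * Note that the vsyscall page (0xffffffffff600000 on 64-bit) reports
 * false here on purpose, so it takes the user path and can be emulated.
 */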
11190973a06cSHiroshi Shimamoto 
1120c61e211dSHarvey Harrison /*
11218fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11228fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11238fed6200SDave Hansen  * ran in userspace or the kernel.
1124c61e211dSHarvey Harrison  */
11258fed6200SDave Hansen static void
11268fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11270ac09f9fSJiri Olsa 		   unsigned long address)
1128c61e211dSHarvey Harrison {
11298fed6200SDave Hansen 	/*
1130367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1131367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1132367e3f1dSDave Hansen 	 * space, so do not expect them here.
1133367e3f1dSDave Hansen 	 */
1134367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1135367e3f1dSDave Hansen 
11364819e15fSJoerg Roedel #ifdef CONFIG_X86_32
11374819e15fSJoerg Roedel 	/*
11384819e15fSJoerg Roedel 	 * We can fault-in kernel-space virtual memory on-demand. The
11394819e15fSJoerg Roedel 	 * 'reference' page table is init_mm.pgd.
11404819e15fSJoerg Roedel 	 *
11414819e15fSJoerg Roedel 	 * NOTE! We MUST NOT take any locks for this case. We may
11424819e15fSJoerg Roedel 	 * be in an interrupt or a critical region, and should
11434819e15fSJoerg Roedel 	 * only copy the information from the master page table,
11444819e15fSJoerg Roedel 	 * nothing more.
11454819e15fSJoerg Roedel 	 *
11464819e15fSJoerg Roedel 	 * Before doing this on-demand faulting, ensure that the
11474819e15fSJoerg Roedel 	 * fault is not any of the following:
11484819e15fSJoerg Roedel 	 * 1. A fault on a PTE with a reserved bit set.
11494819e15fSJoerg Roedel 	 * 2. A fault caused by a user-mode access.  (Do not demand-
11504819e15fSJoerg Roedel 	 *    fault kernel memory due to user-mode accesses).
11514819e15fSJoerg Roedel 	 * 3. A fault caused by a page-level protection violation.
11524819e15fSJoerg Roedel 	 *    (A demand fault would be on a non-present page which
11534819e15fSJoerg Roedel 	 *     would have X86_PF_PROT==0).
11544819e15fSJoerg Roedel 	 *
11554819e15fSJoerg Roedel 	 * This is only needed to close a race condition on x86-32 in
11564819e15fSJoerg Roedel 	 * the vmalloc mapping/unmapping code. See the comment above
11574819e15fSJoerg Roedel 	 * vmalloc_fault() for details. On x86-64 the race does not
11584819e15fSJoerg Roedel 	 * exist as the vmalloc mappings don't need to be synchronized
11594819e15fSJoerg Roedel 	 * there.
11604819e15fSJoerg Roedel 	 */
11614819e15fSJoerg Roedel 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
11624819e15fSJoerg Roedel 		if (vmalloc_fault(address) >= 0)
11634819e15fSJoerg Roedel 			return;
11644819e15fSJoerg Roedel 	}
11654819e15fSJoerg Roedel #endif
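	/*
	 * Illustrative note (editorial addition, not in the original
	 * source): the x86-32 filter above only lets pure not-present
	 * supervisor faults attempt the vmalloc fix-up, e.g.:
	 *
	 *	error_code 0x00 (supervisor read, not present)  -> try it
	 *	error_code 0x02 (supervisor write, not present) -> try it
	 *	error_code 0x04 (user-mode access)              -> skip
	 *	error_code 0x09 (reserved bit set)              -> skip
	 */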
11664819e15fSJoerg Roedel 
1167f42a40fdSAndy Lutomirski 	if (is_f00f_bug(regs, hw_error_code, address))
1168f42a40fdSAndy Lutomirski 		return;
1169f42a40fdSAndy Lutomirski 
11708fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
11718fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
11728fed6200SDave Hansen 		return;
11738fed6200SDave Hansen 
11748fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
117500afe830SPeter Zijlstra 	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
11768fed6200SDave Hansen 		return;
11778fed6200SDave Hansen 
11788fed6200SDave Hansen 	/*
11798fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
11808fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
11818fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
11828fed6200SDave Hansen 	 *
11838fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
11848fed6200SDave Hansen 	 * fault we could otherwise deadlock:
11858fed6200SDave Hansen 	 */
1186ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
11878fed6200SDave Hansen }
11888fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
11898fed6200SDave Hansen 
119056e62cd2SAndy Lutomirski /*
119156e62cd2SAndy Lutomirski  * Handle faults in the user portion of the address space.  Nothing in here
119256e62cd2SAndy Lutomirski  * should check X86_PF_USER without a specific justification: for almost
119356e62cd2SAndy Lutomirski  * all purposes, we should treat a normal kernel access to user memory
119456e62cd2SAndy Lutomirski  * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
119556e62cd2SAndy Lutomirski  * The one exception is AC flag handling, which is, per the x86
119656e62cd2SAndy Lutomirski  * architecture, special for WRUSS.
119756e62cd2SAndy Lutomirski  */
1198aa37c51bSDave Hansen static inline
1199aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1200ec352711SAndy Lutomirski 			unsigned long error_code,
1201c61e211dSHarvey Harrison 			unsigned long address)
1202c61e211dSHarvey Harrison {
1203c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1204c61e211dSHarvey Harrison 	struct task_struct *tsk;
12052d4a7167SIngo Molnar 	struct mm_struct *mm;
1206968614fcSPeter Xu 	vm_fault_t fault;
1207dde16072SPeter Xu 	unsigned int flags = FAULT_FLAG_DEFAULT;
1208c61e211dSHarvey Harrison 
1209c61e211dSHarvey Harrison 	tsk = current;
1210c61e211dSHarvey Harrison 	mm = tsk->mm;
12112d4a7167SIngo Molnar 
121203c81ea3SAndy Lutomirski 	if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
121303c81ea3SAndy Lutomirski 		/*
121403c81ea3SAndy Lutomirski 		 * Whoops, this is kernel mode code trying to execute from
121503c81ea3SAndy Lutomirski 		 * user memory.  Unless this is AMD erratum #93, which
121603c81ea3SAndy Lutomirski 		 * corrupts RIP such that it looks like a user address,
121703c81ea3SAndy Lutomirski 		 * this is unrecoverable.  Don't even try to look up the
121866fcd988SAndy Lutomirski 		 * VMA or look for extable entries.
121903c81ea3SAndy Lutomirski 		 */
122003c81ea3SAndy Lutomirski 		if (is_errata93(regs, address))
122103c81ea3SAndy Lutomirski 			return;
122203c81ea3SAndy Lutomirski 
122366fcd988SAndy Lutomirski 		page_fault_oops(regs, error_code, address);
122403c81ea3SAndy Lutomirski 		return;
122503c81ea3SAndy Lutomirski 	}
122603c81ea3SAndy Lutomirski 
12272d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
122800afe830SPeter Zijlstra 	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
12299be260a6SMasami Hiramatsu 		return;
1230e00b12e6SPeter Zijlstra 
12315b0c2cacSDave Hansen 	/*
12325b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
12335b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
12345b0c2cacSDave Hansen 	 */
1235ec352711SAndy Lutomirski 	if (unlikely(error_code & X86_PF_RSVD))
1236ec352711SAndy Lutomirski 		pgtable_bad(regs, error_code, address);
1237e00b12e6SPeter Zijlstra 
12385b0c2cacSDave Hansen 	/*
1239e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1240e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1241e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1242e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1243e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
12445b0c2cacSDave Hansen 	 */
1245a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1246ec352711SAndy Lutomirski 		     !(error_code & X86_PF_USER) &&
1247ca247283SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC))) {
1248ca247283SAndy Lutomirski 		/*
1249ca247283SAndy Lutomirski 		 * No extable entry here.  This was a kernel access to an
1250ca247283SAndy Lutomirski 		 * invalid pointer.  get_kernel_nofault() will not get here.
1251ca247283SAndy Lutomirski 		 */
1252ca247283SAndy Lutomirski 		page_fault_oops(regs, error_code, address);
1253e00b12e6SPeter Zijlstra 		return;
1254e00b12e6SPeter Zijlstra 	}
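	/*
	 * Illustrative note (editorial addition, not in the original
	 * source): a typical hit of the check above is a stray kernel
	 * dereference of a user pointer outside a stac()/clac() region,
	 * e.g.:
	 *
	 *	int val = *(int __user *)user_ptr;
	 *
	 * Legitimate accessors such as get_user() set EFLAGS.AC via stac()
	 * first, so they do not take this branch.
	 */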
1255e00b12e6SPeter Zijlstra 
1256e00b12e6SPeter Zijlstra 	/*
1257e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context or are running
125870ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled then we must not take the fault
1259e00b12e6SPeter Zijlstra 	 */
126070ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1261ec352711SAndy Lutomirski 		bad_area_nosemaphore(regs, error_code, address);
1262e00b12e6SPeter Zijlstra 		return;
1263e00b12e6SPeter Zijlstra 	}
1264e00b12e6SPeter Zijlstra 
1265c61e211dSHarvey Harrison 	/*
1266891cffbdSLinus Torvalds 	 * It's safe to allow irq's after cr2 has been saved and the
1266891cffbdSLinus Torvalds 	 * It's safe to allow IRQs after cr2 has been saved and the
1268891cffbdSLinus Torvalds 	 *
1269891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
12702d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1271c61e211dSHarvey Harrison 	 */
1272f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1273891cffbdSLinus Torvalds 		local_irq_enable();
1274759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
12752d4a7167SIngo Molnar 	} else {
12762d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1277c61e211dSHarvey Harrison 			local_irq_enable();
12782d4a7167SIngo Molnar 	}
1279c61e211dSHarvey Harrison 
1280a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
12817dd1fcc2SPeter Zijlstra 
1282fd5439e0SRick Edgecombe 	/*
1283fd5439e0SRick Edgecombe 	 * Read-only permissions can not be expressed in shadow stack PTEs.
1284fd5439e0SRick Edgecombe 	 * Treat all shadow stack accesses as WRITE faults. This ensures
1285fd5439e0SRick Edgecombe 	 * that the MM will prepare everything (e.g., break COW) such that
1286fd5439e0SRick Edgecombe 	 * maybe_mkwrite() can create a proper shadow stack PTE.
1287fd5439e0SRick Edgecombe 	 */
1288fd5439e0SRick Edgecombe 	if (error_code & X86_PF_SHSTK)
1289fd5439e0SRick Edgecombe 		flags |= FAULT_FLAG_WRITE;
1290ec352711SAndy Lutomirski 	if (error_code & X86_PF_WRITE)
1291759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
1292ec352711SAndy Lutomirski 	if (error_code & X86_PF_INSTR)
1293d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
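	/*
	 * Illustrative summary (editorial addition, not in the original
	 * source) of the error-code-to-fault-flag translation above:
	 *
	 *	X86_PF_SHSTK -> FAULT_FLAG_WRITE (shadow stacks imply write)
	 *	X86_PF_WRITE -> FAULT_FLAG_WRITE
	 *	X86_PF_INSTR -> FAULT_FLAG_INSTRUCTION
	 */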
1294759496baSJohannes Weiner 
12953ae0ad92SDave Hansen #ifdef CONFIG_X86_64
12963a1dfe6eSIngo Molnar 	/*
1297918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1298918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1299918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1300c61e211dSHarvey Harrison 	 *
13013ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13023ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1303e0a446ceSAndy Lutomirski 	 *
1304e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1305e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
13063ae0ad92SDave Hansen 	 */
1307918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1308ec352711SAndy Lutomirski 		if (emulate_vsyscall(error_code, regs, address))
13093ae0ad92SDave Hansen 			return;
13103ae0ad92SDave Hansen 	}
13113ae0ad92SDave Hansen #endif
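	/*
	 * Illustrative note (editorial addition, not in the original
	 * source): a legacy binary calling the fixed vsyscall entry, e.g.
	 * gettimeofday() at 0xffffffffff600000, faults here and is handled
	 * by emulate_vsyscall() instead of being reported as a bad area.
	 */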
13123ae0ad92SDave Hansen 
13130bff0aaeSSuren Baghdasaryan 	if (!(flags & FAULT_FLAG_USER))
13140bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13150bff0aaeSSuren Baghdasaryan 
13160bff0aaeSSuren Baghdasaryan 	vma = lock_vma_under_rcu(mm, address);
13170bff0aaeSSuren Baghdasaryan 	if (!vma)
13180bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13190bff0aaeSSuren Baghdasaryan 
13200bff0aaeSSuren Baghdasaryan 	if (unlikely(access_error(error_code, vma))) {
13210bff0aaeSSuren Baghdasaryan 		vma_end_read(vma);
13220bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13230bff0aaeSSuren Baghdasaryan 	}
13240bff0aaeSSuren Baghdasaryan 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
13254089eef0SSuren Baghdasaryan 	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
13260bff0aaeSSuren Baghdasaryan 		vma_end_read(vma);
13270bff0aaeSSuren Baghdasaryan 
13280bff0aaeSSuren Baghdasaryan 	if (!(fault & VM_FAULT_RETRY)) {
13290bff0aaeSSuren Baghdasaryan 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
13300bff0aaeSSuren Baghdasaryan 		goto done;
13310bff0aaeSSuren Baghdasaryan 	}
13320bff0aaeSSuren Baghdasaryan 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
13330bff0aaeSSuren Baghdasaryan 
13340bff0aaeSSuren Baghdasaryan 	/* Quick path to respond to signals */
13350bff0aaeSSuren Baghdasaryan 	if (fault_signal_pending(fault, regs)) {
13360bff0aaeSSuren Baghdasaryan 		if (!user_mode(regs))
13370bff0aaeSSuren Baghdasaryan 			kernelmode_fixup_or_oops(regs, error_code, address,
13380bff0aaeSSuren Baghdasaryan 						 SIGBUS, BUS_ADRERR,
13390bff0aaeSSuren Baghdasaryan 						 ARCH_DEFAULT_PKEY);
13400bff0aaeSSuren Baghdasaryan 		return;
13410bff0aaeSSuren Baghdasaryan 	}
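	/*
	 * Illustrative flow summary (editorial addition, not in the
	 * original source) for the VMA-lock fast path above:
	 *
	 *	lock_vma_under_rcu() fails        -> fall back to mmap_lock
	 *	access_error() on the locked VMA  -> drop VMA lock, fall back
	 *	fault done (no VM_FAULT_RETRY)    -> return, mmap_lock untouched
	 *	VM_FAULT_RETRY                    -> retry under mmap_lock
	 */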
13420bff0aaeSSuren Baghdasaryan lock_mmap:
13430bff0aaeSSuren Baghdasaryan 
1344d065bd81SMichel Lespinasse retry:
1345c2508ec5SLinus Torvalds 	vma = lock_mm_and_find_vma(mm, address, regs);
134692181f19SNick Piggin 	if (unlikely(!vma)) {
1347c2508ec5SLinus Torvalds 		bad_area_nosemaphore(regs, error_code, address);
134892181f19SNick Piggin 		return;
134992181f19SNick Piggin 	}
135092181f19SNick Piggin 
1351c61e211dSHarvey Harrison 	/*
1352c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1353c61e211dSHarvey Harrison 	 * we can handle it.
1354c61e211dSHarvey Harrison 	 */
1355ec352711SAndy Lutomirski 	if (unlikely(access_error(error_code, vma))) {
1356ec352711SAndy Lutomirski 		bad_area_access_error(regs, error_code, address, vma);
135792181f19SNick Piggin 		return;
1358c61e211dSHarvey Harrison 	}
1359c61e211dSHarvey Harrison 
1360c61e211dSHarvey Harrison 	/*
1361c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1362c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
13639a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1364c1e8d7c6SMichel Lespinasse 	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1365cb0631fdSVlastimil Babka 	 *
1366c1e8d7c6SMichel Lespinasse 	 * Note that handle_userfault() may also release and reacquire mmap_lock
1367cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1368cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1369cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1370cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1371cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1372c61e211dSHarvey Harrison 	 */
1373968614fcSPeter Xu 	fault = handle_mm_fault(vma, address, flags, regs);
13742d4a7167SIngo Molnar 
137539678191SPeter Xu 	if (fault_signal_pending(fault, regs)) {
1376ef2544fbSAndy Lutomirski 		/*
1377ef2544fbSAndy Lutomirski 		 * Quick path to respond to signals.  The core mm code
1378ef2544fbSAndy Lutomirski 		 * has unlocked the mm for us if we get here.
1379ef2544fbSAndy Lutomirski 		 */
138039678191SPeter Xu 		if (!user_mode(regs))
13816456a2a6SAndy Lutomirski 			kernelmode_fixup_or_oops(regs, error_code, address,
1382d4ffd5dfSJiashuo Liang 						 SIGBUS, BUS_ADRERR,
1383d4ffd5dfSJiashuo Liang 						 ARCH_DEFAULT_PKEY);
138439678191SPeter Xu 		return;
138539678191SPeter Xu 	}
138639678191SPeter Xu 
1387d9272525SPeter Xu 	/* The fault is fully completed (including releasing mmap lock) */
1388d9272525SPeter Xu 	if (fault & VM_FAULT_COMPLETED)
1389d9272525SPeter Xu 		return;
1390d9272525SPeter Xu 
13913a13c4d7SJohannes Weiner 	/*
1392c1e8d7c6SMichel Lespinasse 	 * If we need to retry the mmap_lock has already been released,
139326178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
139426178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
13953a13c4d7SJohannes Weiner 	 */
139636ef159fSQi Zheng 	if (unlikely(fault & VM_FAULT_RETRY)) {
139726178ec1SLinus Torvalds 		flags |= FAULT_FLAG_TRIED;
139826178ec1SLinus Torvalds 		goto retry;
139926178ec1SLinus Torvalds 	}
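	/*
	 * Illustrative note (editorial addition, not in the original
	 * source): on VM_FAULT_RETRY the mmap_lock was already dropped, so
	 * the goto above re-enters lock_mm_and_find_vma() and must look the
	 * VMA up again; FAULT_FLAG_TRIED tells the core mm that this is a
	 * repeat attempt.
	 */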
140026178ec1SLinus Torvalds 
1401d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
14020bff0aaeSSuren Baghdasaryan done:
1403ec352711SAndy Lutomirski 	if (likely(!(fault & VM_FAULT_ERROR)))
140437b23e05SKOSAKI Motohiro 		return;
1405ec352711SAndy Lutomirski 
140656e62cd2SAndy Lutomirski 	if (fatal_signal_pending(current) && !user_mode(regs)) {
1407d4ffd5dfSJiashuo Liang 		kernelmode_fixup_or_oops(regs, error_code, address,
1408d4ffd5dfSJiashuo Liang 					 0, 0, ARCH_DEFAULT_PKEY);
1409ec352711SAndy Lutomirski 		return;
1410ec352711SAndy Lutomirski 	}
1411ec352711SAndy Lutomirski 
1412ec352711SAndy Lutomirski 	if (fault & VM_FAULT_OOM) {
1413ec352711SAndy Lutomirski 		/* Kernel mode? Handle exceptions or die: */
141456e62cd2SAndy Lutomirski 		if (!user_mode(regs)) {
14156456a2a6SAndy Lutomirski 			kernelmode_fixup_or_oops(regs, error_code, address,
1416d4ffd5dfSJiashuo Liang 						 SIGSEGV, SEGV_MAPERR,
1417d4ffd5dfSJiashuo Liang 						 ARCH_DEFAULT_PKEY);
1418ec352711SAndy Lutomirski 			return;
1419ec352711SAndy Lutomirski 		}
1420ec352711SAndy Lutomirski 
1421ec352711SAndy Lutomirski 		/*
1422ec352711SAndy Lutomirski 		 * We ran out of memory, call the OOM killer, and return to
1423ec352711SAndy Lutomirski 		 * userspace (which will retry the fault, or kill us if we got
1424ec352711SAndy Lutomirski 		 * oom-killed):
1425ec352711SAndy Lutomirski 		 */
1426ec352711SAndy Lutomirski 		pagefault_out_of_memory();
1427ec352711SAndy Lutomirski 	} else {
1428ec352711SAndy Lutomirski 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1429ec352711SAndy Lutomirski 			     VM_FAULT_HWPOISON_LARGE))
1430ec352711SAndy Lutomirski 			do_sigbus(regs, error_code, address, fault);
1431ec352711SAndy Lutomirski 		else if (fault & VM_FAULT_SIGSEGV)
1432ec352711SAndy Lutomirski 			bad_area_nosemaphore(regs, error_code, address);
1433ec352711SAndy Lutomirski 		else
1434ec352711SAndy Lutomirski 			BUG();
143537b23e05SKOSAKI Motohiro 	}
1436c61e211dSHarvey Harrison }
1437aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1438aa37c51bSDave Hansen 
1439a0d14b89SPeter Zijlstra static __always_inline void
1440a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1441a0d14b89SPeter Zijlstra 			 unsigned long address)
1442d34603b0SSeiji Aguchi {
1443a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1444a0d14b89SPeter Zijlstra 		return;
1445a0d14b89SPeter Zijlstra 
1446d34603b0SSeiji Aguchi 	if (user_mode(regs))
1447d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1448d34603b0SSeiji Aguchi 	else
1449d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1450d34603b0SSeiji Aguchi }
1451d34603b0SSeiji Aguchi 
145291eeafeaSThomas Gleixner static __always_inline void
145391eeafeaSThomas Gleixner handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1454ee6352b2SFrederic Weisbecker 			      unsigned long address)
145511a7ffb0SThomas Gleixner {
145691eeafeaSThomas Gleixner 	trace_page_fault_entries(regs, error_code, address);
145791eeafeaSThomas Gleixner 
145891eeafeaSThomas Gleixner 	if (unlikely(kmmio_fault(regs, address)))
145991eeafeaSThomas Gleixner 		return;
146091eeafeaSThomas Gleixner 
146191eeafeaSThomas Gleixner 	/* Was the fault on kernel-controlled part of the address space? */
146291eeafeaSThomas Gleixner 	if (unlikely(fault_in_kernel_space(address))) {
146391eeafeaSThomas Gleixner 		do_kern_addr_fault(regs, error_code, address);
146491eeafeaSThomas Gleixner 	} else {
146591eeafeaSThomas Gleixner 		do_user_addr_fault(regs, error_code, address);
146691eeafeaSThomas Gleixner 		/*
146791eeafeaSThomas Gleixner 		 * User address page fault handling might have reenabled
146891eeafeaSThomas Gleixner 		 * interrupts. Fixing up all potential exit points of
146991eeafeaSThomas Gleixner 		 * do_user_addr_fault() and its leaf functions is just not
147091eeafeaSThomas Gleixner 		 * doable w/o creating an unholy mess or turning the code
147191eeafeaSThomas Gleixner 		 * upside down.
147291eeafeaSThomas Gleixner 		 */
147391eeafeaSThomas Gleixner 		local_irq_disable();
147491eeafeaSThomas Gleixner 	}
147591eeafeaSThomas Gleixner }
147691eeafeaSThomas Gleixner 
147791eeafeaSThomas Gleixner DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
147891eeafeaSThomas Gleixner {
147991eeafeaSThomas Gleixner 	unsigned long address = read_cr2();
1480a27a0a55SThomas Gleixner 	irqentry_state_t state;
148191eeafeaSThomas Gleixner 
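	/*
	 * Illustrative note (editorial addition, not in the original
	 * source): prefetchw() pulls the mmap_lock cacheline in while
	 * asking for exclusive ownership, since a user-space fault is
	 * likely to take that lock shortly.
	 */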
1482da1c55f1SMichel Lespinasse 	prefetchw(&current->mm->mmap_lock);
148391eeafeaSThomas Gleixner 
1484ef68017eSAndy Lutomirski 	/*
148566af4f5cSVitaly Kuznetsov 	 * KVM uses #PF vector to deliver 'page not present' events to guests
148666af4f5cSVitaly Kuznetsov 	 * (asynchronous page fault mechanism). The event happens when a
148766af4f5cSVitaly Kuznetsov 	 * userspace task is trying to access some valid (from guest's point of
148866af4f5cSVitaly Kuznetsov 	 * view) memory which is not currently mapped by the host (e.g. the
148966af4f5cSVitaly Kuznetsov 	 * memory is swapped out). Note that the corresponding "page ready" event,
1490163b0991SIngo Molnar 	 * which is injected when the memory becomes available, is delivered via
149166af4f5cSVitaly Kuznetsov 	 * an interrupt mechanism and not a #PF exception
149266af4f5cSVitaly Kuznetsov 	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1493ef68017eSAndy Lutomirski 	 *
1494ef68017eSAndy Lutomirski 	 * We are relying on the interrupted context being sane (valid RSP,
1495ef68017eSAndy Lutomirski 	 * relevant locks not held, etc.), which is fine as long as the
1496ef68017eSAndy Lutomirski 	 * interrupted context had IF=1.  We are also relying on the KVM
1497ef68017eSAndy Lutomirski 	 * async pf type field and CR2 being read consistently instead of
1498ef68017eSAndy Lutomirski 	 * getting values from real and async page faults mixed up.
1499ef68017eSAndy Lutomirski 	 *
1500ef68017eSAndy Lutomirski 	 * Fingers crossed.
150191eeafeaSThomas Gleixner 	 *
150291eeafeaSThomas Gleixner 	 * The async #PF handling code takes care of idtentry handling
150391eeafeaSThomas Gleixner 	 * itself.
1504ef68017eSAndy Lutomirski 	 */
1505ef68017eSAndy Lutomirski 	if (kvm_handle_async_pf(regs, (u32)address))
1506ef68017eSAndy Lutomirski 		return;
1507ef68017eSAndy Lutomirski 
1508ca4c6a98SThomas Gleixner 	/*
150991eeafeaSThomas Gleixner 	 * Entry handling for valid #PF from kernel mode is slightly
15106f0e6c15SFrederic Weisbecker 	 * different: RCU is already watching and ct_irq_enter() must not
151191eeafeaSThomas Gleixner 	 * be invoked because a kernel fault on a user space address might
151291eeafeaSThomas Gleixner 	 * sleep.
151391eeafeaSThomas Gleixner 	 *
151491eeafeaSThomas Gleixner 	 * In case the fault hit an RCU idle region, the conditional entry
151591eeafeaSThomas Gleixner 	 * code reenables RCU to avoid subsequent wreckage, which helps
1516d9f6e12fSIngo Molnar 	 * debuggability.
1517ca4c6a98SThomas Gleixner 	 */
1518a27a0a55SThomas Gleixner 	state = irqentry_enter(regs);
151991eeafeaSThomas Gleixner 
152091eeafeaSThomas Gleixner 	instrumentation_begin();
152191eeafeaSThomas Gleixner 	handle_page_fault(regs, error_code, address);
152291eeafeaSThomas Gleixner 	instrumentation_end();
152391eeafeaSThomas Gleixner 
1524a27a0a55SThomas Gleixner 	irqentry_exit(regs, state);
1525ca4c6a98SThomas Gleixner }
1526