xref: /openbmc/linux/arch/x86/mm/fault.c (revision c2508ec5a58db67093f4fb8bf89a9a7c53a109e9)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/memblock.h>		/* max_low_pfn			*/
#include <linux/kfence.h>		/* kfence_handle_page_fault	*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
#include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <linux/mm_types.h>
#include <linux/mm.h>			/* find_and_lock_vma() */

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/
#include <asm/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
#include <asm/desc.h>			/* store_idt(), ...		*/
#include <asm/cpu_entry_area.h>		/* exception stack		*/
#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
#include <asm/vdso.h>			/* fixup_vdso_exception()	*/
#include <asm/irq_stack.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.  This is AMD erratum #91.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26, 0x2E, 0x36 and 0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (get_kernel_nofault(opcode, instr))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

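/*
 * Illustrative walk-through (not from the original file): for a 3DNow!
 * prefetch encoded as 3E 0F 0D 00 (a DS segment override followed by
 * 0F 0D), the scan in is_prefetch() below first sees 0x3E -- case 0x30
 * above, (instr_lo & 7) == 6 -- and keeps walking; the next byte 0x0F
 * hits case 0x00, the byte after it (0x0D) is fetched, and *prefetch
 * is set.
 */
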
static bool is_amd_k8_pre_npt(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	return unlikely(IS_ENABLED(CONFIG_CPU_SUP_AMD) &&
			c->x86_vendor == X86_VENDOR_AMD &&
			c->x86 == 0xf && c->x86_model < 0x40);
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/* Erratum #91 affects AMD K8, pre-NPT CPUs */
	if (!is_amd_k8_pre_npt())
		return 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	/*
	 * This code has historically always bailed out if IP points to a
	 * not-present page (e.g. due to a race).  No one has ever
	 * complained about this.
	 */
	pagefault_disable();

	while (instr < max_instr) {
		unsigned char opcode;

		if (user_mode(regs)) {
			if (get_user(opcode, (unsigned char __user *) instr))
				break;
		} else {
			if (get_kernel_nofault(opcode, instr))
				break;
		}

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}

	pagefault_enable();
	return prefetch;
}

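/*
 * Aside (editorial): when is_prefetch() above returns 1, the callers
 * further down simply return without sending a signal or oopsing --
 * the spurious fault generated by erratum #91 is silently dropped.
 */
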
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}

/*
 *   Handle a fault on the vmalloc or module mapping area
 *
 *   This is needed because there is a race condition between the time
 *   when the vmalloc mapping code updates the PMD to the point in time
 *   where it synchronizes this update with the other page-tables in the
 *   system.
 *
 *   In this race window another thread/CPU can map an area on the same
 *   PMD, find it already present and therefore not synchronize it with
 *   the rest of the system yet. As a result v[mz]alloc might return areas
 *   which are not mapped in every page-table in the system, causing an
 *   unhandled page-fault when they are accessed.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

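/*
 * Concrete (illustrative) instance of the race described above: CPU0
 * vmalloc()s a new area and installs a fresh PMD entry in init_mm;
 * CPU1 runs a task whose pgd lacks that entry and touches the new
 * address.  The resulting fault lands in vmalloc_fault(), which copies
 * the PMD entry from init_mm via vmalloc_sync_one(), and the access is
 * then retried transparently.
 */
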
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & PMD_MASK;
	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
	     addr += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), addr);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

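/*
 * Editorial note: arch_sync_kernel_mappings() above is the arch hook
 * invoked from the generic vmalloc/ioremap code when it has modified
 * kernel page-tables, so the update is propagated eagerly to every
 * pgd on pgd_list instead of waiting for each task to go through
 * vmalloc_fault().
 */
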
static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}

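/*
 * Illustrative output of dump_pagetable() above (values invented),
 * PAE case:
 *
 *   *pdpt = 0000000012345001 *pde = 00000000abcde067 *pte = 00000000deadb065
 *
 * i.e. one line walking down whichever levels are present and low
 * enough to be dereferenced safely.
 */
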
#else /* CONFIG_X86_64: */

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel mode here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (user_mode(regs))
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

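/*
 * Worked example (hypothetical addresses): if the kernel was executing
 * at RIP 0xffffffff81012345 and buggy SMM code truncated it, the CPU
 * faults fetching from 0x81012345.  is_errata93() above sees a
 * kernel-mode fault whose address equals RIP with the upper 32 bits
 * clear, ORs those bits back in, and lets execution resume at the
 * repaired RIP.
 */
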
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in the LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

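/*
 * Aside: bit 2 of a segment selector is the Table Indicator, so the
 * (regs->cs & (1<<2)) test above is true for any CS that lives in the
 * LDT -- which, per the comment above, implies compatibility mode.
 */
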
/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	if (boot_cpu_has_bug(X86_BUG_F00F) && !(error_code & X86_PF_USER) &&
	    idt_is_f00f_address(address)) {
		handle_invalid_op(regs);
		return 1;
	}
#endif
	return 0;
}

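/*
 * Background (summarized from the well-known erratum, not this file):
 * F0 0F C7 C8 is a locked CMPXCHG8B with a register operand that can
 * hang original Pentiums.  With the workaround enabled, the IDT is
 * mapped so the affected sequence takes a page fault on the IDT's
 * address instead; idt_is_f00f_address() recognizes that address and
 * the fault is rerouted to the invalid-opcode handler above.
 */
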
static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
	u32 offset = (index >> 3) * sizeof(struct desc_struct);
	unsigned long addr;
	struct ldttss_desc desc;

	if (index == 0) {
		pr_alert("%s: NULL\n", name);
		return;
	}

	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
		return;
	}

	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
			      sizeof(struct ldttss_desc))) {
		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
			 name, index);
		return;
	}

	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
	addr |= ((u64)desc.base3 << 32);
#endif
	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}

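/*
 * Aside: an LDT/TSS descriptor scatters its base address across the
 * descriptor -- base0 holds bits 0-15, base1 bits 16-23, base2 bits
 * 24-31, and (on 64-bit, where these descriptors are 16 bytes) base3
 * holds bits 32-63 -- which is what the reassembly above undoes.
 */
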
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	if (address < PAGE_SIZE && !user_mode(regs))
		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
			(void *)address);
	else
		pr_alert("BUG: unable to handle page fault for address: %px\n",
			(void *)address);

	pr_alert("#PF: %s %s in %s mode\n",
		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write access" :
					       "read access",
			     user_mode(regs) ? "user" : "kernel");
	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
		 !(error_code & X86_PF_PROT) ? "not-present page" :
		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
		 (error_code & X86_PF_PK)    ? "protection keys violation" :
					       "permissions violation");

	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
		struct desc_ptr idt, gdt;
		u16 ldtr, tr;

		/*
		 * This can happen for quite a few reasons.  The more obvious
		 * ones are faults accessing the GDT, or LDT.  Perhaps
		 * surprisingly, if the CPU tries to deliver a benign or
		 * contributory exception from user code and gets a page fault
		 * during delivery, the page fault can be delivered as though
		 * it originated directly from user code.  This could happen
		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
		 * kernel or IST stack.
		 */
		store_idt(&idt);

		/* Usable even on Xen PV -- it's just slow. */
		native_store_gdt(&gdt);

		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
			 idt.address, idt.size, gdt.address, gdt.size);

		store_ldt(ldtr);
		show_ldttss(&gdt, "LDTR", ldtr);

		store_tr(tr);
		show_ldttss(&gdt, "TR", tr);
	}

	dump_pagetable(address);
}

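/*
 * Illustrative oops header (address invented) as produced by
 * show_fault_oops() above for a kernel write to an unmapped address:
 *
 *   BUG: unable to handle page fault for address: ffffb7a48e30c000
 *   #PF: supervisor write access in kernel mode
 *   #PF: error_code(0x0002) - not-present page
 */
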
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static void sanitize_error_code(unsigned long address,
				unsigned long *error_code)
{
	/*
	 * To avoid leaking information about the kernel page
	 * table layout, pretend that user-mode accesses to
	 * kernel addresses are always protection faults.
	 *
	 * NB: This means that failed vsyscalls with vsyscall=none
	 * will have the PROT bit.  This doesn't leak any
	 * information and does not appear to cause any problems.
	 */
	if (address >= TASK_SIZE_MAX)
		*error_code |= X86_PF_PROT;
}

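/*
 * Concretely: without the adjustment in sanitize_error_code() above, a
 * user-mode probe of an unmapped kernel address would report a
 * not-present fault while a mapped one would report a protection
 * fault, letting userspace infer the kernel's layout; forcing
 * X86_PF_PROT makes the two cases indistinguishable.
 */
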
static void set_signal_archinfo(unsigned long address,
				unsigned long error_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_nr = X86_TRAP_PF;
	tsk->thread.error_code = error_code | X86_PF_USER;
	tsk->thread.cr2 = address;
}

static noinline void
page_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
#ifdef CONFIG_VMAP_STACK
	struct stack_info info;
#endif
	unsigned long flags;
	int sig;

	if (user_mode(regs)) {
		/*
		 * Implicit kernel access from user mode?  Skip the stack
		 * overflow and EFI special cases.
		 */
		goto oops;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    get_stack_guard_info((void *)address, &info)) {
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
			      handle_stack_overflow,
			      ASM_CALL_ARG3,
			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));

		unreachable();
	}
#endif

	/*
	 * Buggy firmware could access regions which might page fault.  If
	 * this happens, EFI has a special OOPS path that will try to
	 * avoid hanging the system.
	 */
	if (IS_ENABLED(CONFIG_EFI))
		efi_crash_gracefully_on_page_fault(address);

	/* Only not-present faults should be handled by KFENCE. */
	if (!(error_code & X86_PF_PROT) &&
	    kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
		return;

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(current))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

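/*
 * Aside: the stack-overflow path in page_fault_oops() above runs
 * handle_stack_overflow() on the double-fault IST stack precisely
 * because the task stack it would normally use is the resource that
 * just ran out.
 */
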
static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address, int signal, int si_code,
			 u32 pkey)
{
	WARN_ON_ONCE(user_mode(regs));

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			sanitize_error_code(address, &error_code);

			set_signal_archinfo(address, error_code);

			if (si_code == SEGV_PKUERR) {
				force_sig_pkuerr((void __user *)address, pkey);
			} else {
				/* XXX: hwpoison faults will set the wrong code. */
				force_sig_fault(signal, si_code, (void __user *)address);
			}
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
	 * instruction.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	page_fault_oops(regs, error_code, address);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
	/* This is a racy snapshot, but it's better than nothing. */
	int cpu = raw_smp_processor_id();

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	/*
	 * Dump the likely CPU where the fatal segfault happened.
	 * This can help identify faulty hardware.
	 */
	printk(KERN_CONT " likely on CPU %d (core %d, socket %d)", cpu,
	       topology_core_id(cpu), topology_physical_package_id(cpu));

	printk(KERN_CONT "\n");

	show_opcodes(regs, loglvl);
}

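/*
 * Illustrative log line (all values invented, printed as one line)
 * from the printks above:
 *
 *   a.out[1234]: segfault at 0 ip 000055d0a1a2b12a sp 00007ffce0b2c550
 *   error 4 in a.out[55d0a1a2b000+1000] likely on CPU 2 (core 2, socket 0)
 *
 * "error 4" is the raw hardware error code: X86_PF_USER set, X86_PF_PROT
 * and X86_PF_WRITE clear -- a user-mode read of a not-present page.
 */
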
/*
 * The (legacy) vsyscall page is the lone page in the kernel portion
 * of the address space that has user-accessible permissions.
 */
static bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
}

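/*
 * For reference: VSYSCALL_ADDR is the fixed address 0xffffffffff600000,
 * so is_vsyscall_vaddr() above simply page-masks the fault address and
 * compares.
 */
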
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 pkey, int si_code)
{
	struct task_struct *tsk = current;

	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address,
					 SIGSEGV, si_code, pkey);
		return;
	}

	if (!(error_code & X86_PF_USER)) {
		/* Implicit user access to kernel memory -- just oops */
		page_fault_oops(regs, error_code, address);
		return;
	}

	/*
	 * User mode accesses just cause a SIGSEGV.
	 * It's possible to have interrupts off here:
	 */
	local_irq_enable();

	/*
	 * Valid to do another page fault here because this one came
	 * from user space:
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata100(regs, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	if (likely(show_unhandled_signals))
		show_signal_msg(regs, error_code, address, tsk);

	set_signal_archinfo(address, error_code);

	if (si_code == SEGV_PKUERR)
		force_sig_pkuerr((void __user *)address, pkey);
	else
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);

	local_irq_disable();
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, u32 pkey, int si_code)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma)) {
		/*
		 * A protection key fault means that the PKRU value did not allow
		 * access to some PTE.  Userspace can figure out what PKRU was
		 * from the XSAVE state.  This function captures the pkey from
		 * the vma and passes it to userspace so userspace can discover
		 * which protection key was set on the PTE.
		 *
		 * If we get here, we know that the hardware signaled an X86_PF_PK
		 * fault and that there was a VMA once we got in the fault
		 * handler.  It does *not* guarantee that the VMA we find here
		 * was the one that we faulted on.
		 *
		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
		 * 3. T1   : faults...
		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
		 *	     faulted on a pte with its pkey=4.
		 */
		u32 pkey = vma_pkey(vma);

		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
	} else {
		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
	}
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  vm_fault_t fault)
{
	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs)) {
		kernelmode_fixup_or_oops(regs, error_code, address,
					 SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	set_signal_archinfo(address, error_code);

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		struct task_struct *tsk = current;
		unsigned lsb = 0;

		pr_err(
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return;
	}
#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
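
/*
 * Aside: the "lsb" passed to force_sig_mceerr() above lands in
 * siginfo's si_addr_lsb and tells the handler how large the poisoned
 * region around si_addr is: PAGE_SHIFT for a normal page, or the huge
 * page's shift when a huge mapping was poisoned.
 */
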
static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * Spurious faults may only occur if the TLB contains an entry with
 * fewer permissions than the page table entry.  Non-present (P = 0)
 * and reserved bit (R = 1) faults are never spurious.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 *
 * Returns non-zero if a spurious fault was handled, zero otherwise.
 *
 * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
 * (Optional Invalidation).
 */
10119326638cSMasami Hiramatsu static noinline int
10128fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
10135b727a3bSJeremy Fitzhardinge {
10145b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
1015e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
10165b727a3bSJeremy Fitzhardinge 	pud_t *pud;
10175b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
10185b727a3bSJeremy Fitzhardinge 	pte_t *pte;
10193c3e5694SSteven Rostedt 	int ret;
10205b727a3bSJeremy Fitzhardinge 
102131668511SDavid Vrabel 	/*
102231668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
102331668511SDavid Vrabel 	 * spurious faults.
102431668511SDavid Vrabel 	 *
102531668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
102631668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
102731668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
102831668511SDavid Vrabel 	 * faults.
102931668511SDavid Vrabel 	 */
10301067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
10311067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
10325b727a3bSJeremy Fitzhardinge 		return 0;
10335b727a3bSJeremy Fitzhardinge 
10345b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
10355b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
10365b727a3bSJeremy Fitzhardinge 		return 0;
10375b727a3bSJeremy Fitzhardinge 
1038e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1039e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1040e0c4f675SKirill A. Shutemov 		return 0;
1041e0c4f675SKirill A. Shutemov 
1042e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
10438fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1044e0c4f675SKirill A. Shutemov 
1045e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
10465b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
10475b727a3bSJeremy Fitzhardinge 		return 0;
10485b727a3bSJeremy Fitzhardinge 
1049d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
10508fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1051d8b57bb7SThomas Gleixner 
10525b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
10535b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
10545b727a3bSJeremy Fitzhardinge 		return 0;
10555b727a3bSJeremy Fitzhardinge 
1056d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
10578fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1058d8b57bb7SThomas Gleixner 
10595b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1060954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
10615b727a3bSJeremy Fitzhardinge 		return 0;
10625b727a3bSJeremy Fitzhardinge 
10638fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
10643c3e5694SSteven Rostedt 	if (!ret)
10653c3e5694SSteven Rostedt 		return 0;
10663c3e5694SSteven Rostedt 
10673c3e5694SSteven Rostedt 	/*
10682d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
10692d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
10703c3e5694SSteven Rostedt 	 */
10718fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
10723c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
10732d4a7167SIngo Molnar 
10743c3e5694SSteven Rostedt 	return ret;
10755b727a3bSJeremy Fitzhardinge }
10768fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
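
/*
 * Editorial sketch (not part of the original file): how lazy TLB
 * invalidation produces a fault that spurious_kernel_fault() dismisses.
 * set_memory_rw() is the real <asm/set_memory.h> helper; the timeline
 * itself is illustrative:
 *
 *   CPU 0                               CPU 1
 *   -----                               -----
 *   read from addr fills the TLB
 *   with the old read-only PTE
 *                                       set_memory_rw(addr, 1);
 *                                       (permissions only increased,
 *                                        so no global TLB flush)
 *   write to addr -> #PF with
 *   X86_PF_WRITE | X86_PF_PROT
 *   spurious_kernel_fault() walks
 *   init_mm, finds the PTE writable,
 *   returns non-zero; the faulting
 *   instruction is simply retried
 *   with a fresh translation.
 */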
10775b727a3bSJeremy Fitzhardinge 
1078c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1079c61e211dSHarvey Harrison 
10802d4a7167SIngo Molnar static inline int
108168da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
108292181f19SNick Piggin {
108307f146f5SDave Hansen 	/* This is only called for the current mm, so: */
108407f146f5SDave Hansen 	bool foreign = false;
1085e8c6226dSDave Hansen 
1086e8c6226dSDave Hansen 	/*
1087e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1088e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1089e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1090e8c6226dSDave Hansen 	 */
10911067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1092e8c6226dSDave Hansen 		return 1;
1093e8c6226dSDave Hansen 
109433a709b2SDave Hansen 	/*
109574faeee0SSean Christopherson 	 * SGX hardware blocked the access.  This usually happens
109674faeee0SSean Christopherson 	 * when the enclave memory contents have been destroyed, like
109774faeee0SSean Christopherson 	 * after a suspend/resume cycle. In any case, the kernel can't
109874faeee0SSean Christopherson 	 * fix the cause of the fault.  Handle the fault as an access
109974faeee0SSean Christopherson 	 * error even in cases where no actual access violation
110074faeee0SSean Christopherson 	 * occurred.  This allows userspace to rebuild the enclave in
110174faeee0SSean Christopherson 	 * response to the signal.
110274faeee0SSean Christopherson 	 */
110374faeee0SSean Christopherson 	if (unlikely(error_code & X86_PF_SGX))
110474faeee0SSean Christopherson 		return 1;
110574faeee0SSean Christopherson 
110674faeee0SSean Christopherson 	/*
110707f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
11081067f030SRicardo Neri 	 * faults just to hit an X86_PF_PK fault as soon as we fill in a
110907f146f5SDave Hansen 	 * page.
111007f146f5SDave Hansen 	 */
11111067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11121067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
111307f146f5SDave Hansen 		return 1;
111433a709b2SDave Hansen 
11151067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
11162d4a7167SIngo Molnar 		/* write, present and write, not present: */
111792181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
111892181f19SNick Piggin 			return 1;
11192d4a7167SIngo Molnar 		return 0;
11202d4a7167SIngo Molnar 	}
11212d4a7167SIngo Molnar 
11222d4a7167SIngo Molnar 	/* read, present: */
11231067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
112492181f19SNick Piggin 		return 1;
11252d4a7167SIngo Molnar 
11262d4a7167SIngo Molnar 	/* read, not present: */
11273122e80eSAnshuman Khandual 	if (unlikely(!vma_is_accessible(vma)))
112892181f19SNick Piggin 		return 1;
112992181f19SNick Piggin 
113092181f19SNick Piggin 	return 0;
113192181f19SNick Piggin }
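
/*
 * Editorial sketch: a minimal userspace sequence that would make
 * access_error() take the unconditional X86_PF_PK branch above.  Kept
 * inside a comment since it is not kernel code; it assumes the glibc
 * pkey_alloc()/pkey_mprotect() wrappers and pkey-capable hardware (the
 * resulting signal is SIGSEGV with si_code SEGV_PKUERR):
 *
 *   #include <sys/mman.h>
 *
 *   char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
 *
 *   pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey);
 *   *p = 1;		// PKRU denies the access: #PF with X86_PF_PK
 */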
113292181f19SNick Piggin 
113330063810STony Luck bool fault_in_kernel_space(unsigned long address)
11340973a06cSHiroshi Shimamoto {
11353ae0ad92SDave Hansen 	/*
11363ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
11373ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
11383ae0ad92SDave Hansen 	 * address space.
11393ae0ad92SDave Hansen 	 */
11403ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
11413ae0ad92SDave Hansen 		return false;
11423ae0ad92SDave Hansen 
1143d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
11440973a06cSHiroshi Shimamoto }
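
/*
 * Editorial example of the asymmetry handled above, using the
 * conventional x86-64 constants (quoted for illustration; the real
 * values come from the headers):
 *
 *   fault_in_kernel_space(0xffffffffff600000) == false  // vsyscall page
 *   fault_in_kernel_space(VMALLOC_START)      == true   // kernel mapping
 *   fault_in_kernel_space(0x00007fffffffe000) == false  // user stack area
 */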
11450973a06cSHiroshi Shimamoto 
1146c61e211dSHarvey Harrison /*
11478fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11488fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11498fed6200SDave Hansen  * ran in userspace or the kernel.
1150c61e211dSHarvey Harrison  */
11518fed6200SDave Hansen static void
11528fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11530ac09f9fSJiri Olsa 		   unsigned long address)
1154c61e211dSHarvey Harrison {
11558fed6200SDave Hansen 	/*
1156367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1157367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1158367e3f1dSDave Hansen 	 * space, so do not expect them here.
1159367e3f1dSDave Hansen 	 */
1160367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1161367e3f1dSDave Hansen 
11624819e15fSJoerg Roedel #ifdef CONFIG_X86_32
11634819e15fSJoerg Roedel 	/*
11644819e15fSJoerg Roedel 	 * We can fault-in kernel-space virtual memory on-demand. The
11654819e15fSJoerg Roedel 	 * 'reference' page table is init_mm.pgd.
11664819e15fSJoerg Roedel 	 *
11674819e15fSJoerg Roedel 	 * NOTE! We MUST NOT take any locks for this case. We may
11684819e15fSJoerg Roedel 	 * be in an interrupt or a critical region, and should
11694819e15fSJoerg Roedel 	 * only copy the information from the master page table,
11704819e15fSJoerg Roedel 	 * nothing more.
11714819e15fSJoerg Roedel 	 *
11724819e15fSJoerg Roedel 	 * Before doing this on-demand faulting, ensure that the
11734819e15fSJoerg Roedel 	 * fault is not any of the following:
11744819e15fSJoerg Roedel 	 * 1. A fault on a PTE with a reserved bit set.
11754819e15fSJoerg Roedel 	 * 2. A fault caused by a user-mode access.  (Do not demand-
11764819e15fSJoerg Roedel 	 *    fault kernel memory due to user-mode accesses).
11774819e15fSJoerg Roedel 	 * 3. A fault caused by a page-level protection violation.
11784819e15fSJoerg Roedel 	 *    (A demand fault would be on a non-present page which
11794819e15fSJoerg Roedel 	 *     would have X86_PF_PROT==0).
11804819e15fSJoerg Roedel 	 *
11814819e15fSJoerg Roedel 	 * This is only needed to close a race condition on x86-32 in
11824819e15fSJoerg Roedel 	 * the vmalloc mapping/unmapping code. See the comment above
11834819e15fSJoerg Roedel 	 * vmalloc_fault() for details. On x86-64 the race does not
11844819e15fSJoerg Roedel 	 * exist as the vmalloc mappings don't need to be synchronized
11854819e15fSJoerg Roedel 	 * there.
11864819e15fSJoerg Roedel 	 */
11874819e15fSJoerg Roedel 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
11884819e15fSJoerg Roedel 		if (vmalloc_fault(address) >= 0)
11894819e15fSJoerg Roedel 			return;
11904819e15fSJoerg Roedel 	}
11914819e15fSJoerg Roedel #endif
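
	/*
	 * Editorial sketch of what a successful vmalloc_fault() above
	 * boils down to (simplified; the real helper lives earlier in
	 * this file and this is illustration, not extra code):
	 *
	 *   pgd     = page table root currently loaded in CR3
	 *   pgd_ref = init_mm.pgd + pgd_index(address);
	 *   if the entry is missing locally but present in pgd_ref:
	 *           copy it from the reference table and return 0;
	 *   otherwise return -1 and fall through to the oops path.
	 */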
11924819e15fSJoerg Roedel 
1193f42a40fdSAndy Lutomirski 	if (is_f00f_bug(regs, hw_error_code, address))
1194f42a40fdSAndy Lutomirski 		return;
1195f42a40fdSAndy Lutomirski 
11968fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
11978fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
11988fed6200SDave Hansen 		return;
11998fed6200SDave Hansen 
12008fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
120100afe830SPeter Zijlstra 	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
12028fed6200SDave Hansen 		return;
12038fed6200SDave Hansen 
12048fed6200SDave Hansen 	/*
12058fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
12068fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
12078fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
12088fed6200SDave Hansen 	 *
12098fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
12108fed6200SDave Hansen 	 * fault we could otherwise deadlock:
12118fed6200SDave Hansen 	 */
1212ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
12138fed6200SDave Hansen }
12148fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
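
/*
 * Editorial sketch of the "kernel code that can fault" case mentioned
 * above: a get_user() handed a bogus kernel-range pointer faults into
 * do_kern_addr_fault(), and the exception table fixup turns that into
 * an error return instead of an oops.  'uptr' and 'val' are
 * placeholders:
 *
 *   int __user *uptr;	// untrusted pointer, may be a kernel address
 *   int val;
 *
 *   if (get_user(val, uptr))	// fault lands here; extable fixup
 *           return -EFAULT;	// makes get_user() return nonzero
 */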
12158fed6200SDave Hansen 
121656e62cd2SAndy Lutomirski /*
121756e62cd2SAndy Lutomirski  * Handle faults in the user portion of the address space.  Nothing in here
121856e62cd2SAndy Lutomirski  * should check X86_PF_USER without a specific justification: for almost
121956e62cd2SAndy Lutomirski  * all purposes, we should treat a normal kernel access to user memory
122056e62cd2SAndy Lutomirski  * (e.g. get_user(), put_user(), etc.) the same as the WRUSS instruction.
122156e62cd2SAndy Lutomirski  * The one exception is AC flag handling, which is, per the x86
122256e62cd2SAndy Lutomirski  * architecture, special for WRUSS.
122356e62cd2SAndy Lutomirski  */
1224aa37c51bSDave Hansen static inline
1225aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1226ec352711SAndy Lutomirski 			unsigned long error_code,
1227c61e211dSHarvey Harrison 			unsigned long address)
1228c61e211dSHarvey Harrison {
1229c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1230c61e211dSHarvey Harrison 	struct task_struct *tsk;
12312d4a7167SIngo Molnar 	struct mm_struct *mm;
1232968614fcSPeter Xu 	vm_fault_t fault;
1233dde16072SPeter Xu 	unsigned int flags = FAULT_FLAG_DEFAULT;
1234c61e211dSHarvey Harrison 
1235c61e211dSHarvey Harrison 	tsk = current;
1236c61e211dSHarvey Harrison 	mm = tsk->mm;
12372d4a7167SIngo Molnar 
123803c81ea3SAndy Lutomirski 	if (unlikely((error_code & (X86_PF_USER | X86_PF_INSTR)) == X86_PF_INSTR)) {
123903c81ea3SAndy Lutomirski 		/*
124003c81ea3SAndy Lutomirski 		 * Whoops, this is kernel mode code trying to execute from
124103c81ea3SAndy Lutomirski 		 * user memory.  Unless this is AMD erratum #93, which
124203c81ea3SAndy Lutomirski 		 * corrupts RIP such that it looks like a user address,
124303c81ea3SAndy Lutomirski 		 * this is unrecoverable.  Don't even try to look up the
124466fcd988SAndy Lutomirski 		 * VMA or look for extable entries.
124503c81ea3SAndy Lutomirski 		 */
124603c81ea3SAndy Lutomirski 		if (is_errata93(regs, address))
124703c81ea3SAndy Lutomirski 			return;
124803c81ea3SAndy Lutomirski 
124966fcd988SAndy Lutomirski 		page_fault_oops(regs, error_code, address);
125003c81ea3SAndy Lutomirski 		return;
125103c81ea3SAndy Lutomirski 	}
125203c81ea3SAndy Lutomirski 
12532d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
125400afe830SPeter Zijlstra 	if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
12559be260a6SMasami Hiramatsu 		return;
1256e00b12e6SPeter Zijlstra 
12575b0c2cacSDave Hansen 	/*
12585b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
12595b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
12605b0c2cacSDave Hansen 	 */
1261ec352711SAndy Lutomirski 	if (unlikely(error_code & X86_PF_RSVD))
1262ec352711SAndy Lutomirski 		pgtable_bad(regs, error_code, address);
1263e00b12e6SPeter Zijlstra 
12645b0c2cacSDave Hansen 	/*
1265e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1266e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1267e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1268e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1269e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set, so in all cases SMAP
12705b0c2cacSDave Hansen 	 */
1271a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1272ec352711SAndy Lutomirski 		     !(error_code & X86_PF_USER) &&
1273ca247283SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC))) {
1274ca247283SAndy Lutomirski 		/*
1275ca247283SAndy Lutomirski 		 * No extable entry here.  This was a kernel access to an
1276ca247283SAndy Lutomirski 		 * invalid pointer.  get_kernel_nofault() will not get here.
1277ca247283SAndy Lutomirski 		 */
1278ca247283SAndy Lutomirski 		page_fault_oops(regs, error_code, address);
1279e00b12e6SPeter Zijlstra 		return;
1280e00b12e6SPeter Zijlstra 	}
1281e00b12e6SPeter Zijlstra 
1282e00b12e6SPeter Zijlstra 	/*
1283e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
128470ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled, then we must not take the fault.
1285e00b12e6SPeter Zijlstra 	 */
128670ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1287ec352711SAndy Lutomirski 		bad_area_nosemaphore(regs, error_code, address);
1288e00b12e6SPeter Zijlstra 		return;
1289e00b12e6SPeter Zijlstra 	}
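
	/*
	 * Editorial sketch of code that reaches the bail-out above: an
	 * atomic user access under pagefault_disable() must get an error
	 * back instead of sleeping fault handling.  Illustration only:
	 *
	 *   pagefault_disable();
	 *   ret = __copy_from_user_inatomic(dst, usrc, len);
	 *   pagefault_enable();
	 *   if (ret)
	 *           ... retry from a sleepable context ...
	 */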
1290e00b12e6SPeter Zijlstra 
1291c61e211dSHarvey Harrison 	/*
1292891cffbdSLinus Torvalds 	 * It's safe to allow irq's after cr2 has been saved and the
1293891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1294891cffbdSLinus Torvalds 	 *
1295891cffbdSLinus Torvalds 	 * User-mode registers count as a user access, even when the
12962d4a7167SIngo Molnar 	 * underlying cause is some potential system fault or CPU buglet:
1297c61e211dSHarvey Harrison 	 */
1298f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1299891cffbdSLinus Torvalds 		local_irq_enable();
1300759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
13012d4a7167SIngo Molnar 	} else {
13022d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1303c61e211dSHarvey Harrison 			local_irq_enable();
13042d4a7167SIngo Molnar 	}
1305c61e211dSHarvey Harrison 
1306a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
13077dd1fcc2SPeter Zijlstra 
1308ec352711SAndy Lutomirski 	if (error_code & X86_PF_WRITE)
1309759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
1310ec352711SAndy Lutomirski 	if (error_code & X86_PF_INSTR)
1311d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1312759496baSJohannes Weiner 
13133ae0ad92SDave Hansen #ifdef CONFIG_X86_64
13143a1dfe6eSIngo Molnar 	/*
1315918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1316918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1317918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1318c61e211dSHarvey Harrison 	 *
13193ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13203ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1321e0a446ceSAndy Lutomirski 	 *
1322e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1323e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
13243ae0ad92SDave Hansen 	 */
1325918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1326ec352711SAndy Lutomirski 		if (emulate_vsyscall(error_code, regs, address))
13273ae0ad92SDave Hansen 			return;
13283ae0ad92SDave Hansen 	}
13293ae0ad92SDave Hansen #endif
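
	/*
	 * Editorial example: a legacy binary reaches the emulation above
	 * by calling through a fixed vsyscall slot, e.g. the traditional
	 * 'time' entry (address quoted for illustration):
	 *
	 *   time_t (*vtime)(time_t *) = (void *)0xffffffffff600400;
	 *   time_t t = vtime(NULL);	// faults; emulate_vsyscall()
	 *				// performs the syscall and
	 *				// fixes up RIP past the call
	 */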
13303ae0ad92SDave Hansen 
13310bff0aaeSSuren Baghdasaryan #ifdef CONFIG_PER_VMA_LOCK
13320bff0aaeSSuren Baghdasaryan 	if (!(flags & FAULT_FLAG_USER))
13330bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13340bff0aaeSSuren Baghdasaryan 
13350bff0aaeSSuren Baghdasaryan 	vma = lock_vma_under_rcu(mm, address);
13360bff0aaeSSuren Baghdasaryan 	if (!vma)
13370bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13380bff0aaeSSuren Baghdasaryan 
13390bff0aaeSSuren Baghdasaryan 	if (unlikely(access_error(error_code, vma))) {
13400bff0aaeSSuren Baghdasaryan 		vma_end_read(vma);
13410bff0aaeSSuren Baghdasaryan 		goto lock_mmap;
13420bff0aaeSSuren Baghdasaryan 	}
13430bff0aaeSSuren Baghdasaryan 	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
13440bff0aaeSSuren Baghdasaryan 	vma_end_read(vma);
13450bff0aaeSSuren Baghdasaryan 
13460bff0aaeSSuren Baghdasaryan 	if (!(fault & VM_FAULT_RETRY)) {
13470bff0aaeSSuren Baghdasaryan 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
13480bff0aaeSSuren Baghdasaryan 		goto done;
13490bff0aaeSSuren Baghdasaryan 	}
13500bff0aaeSSuren Baghdasaryan 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
13510bff0aaeSSuren Baghdasaryan 
13520bff0aaeSSuren Baghdasaryan 	/* Quick path to respond to signals */
13530bff0aaeSSuren Baghdasaryan 	if (fault_signal_pending(fault, regs)) {
13540bff0aaeSSuren Baghdasaryan 		if (!user_mode(regs))
13550bff0aaeSSuren Baghdasaryan 			kernelmode_fixup_or_oops(regs, error_code, address,
13560bff0aaeSSuren Baghdasaryan 						 SIGBUS, BUS_ADRERR,
13570bff0aaeSSuren Baghdasaryan 						 ARCH_DEFAULT_PKEY);
13580bff0aaeSSuren Baghdasaryan 		return;
13590bff0aaeSSuren Baghdasaryan 	}
13600bff0aaeSSuren Baghdasaryan lock_mmap:
13610bff0aaeSSuren Baghdasaryan #endif /* CONFIG_PER_VMA_LOCK */
13620bff0aaeSSuren Baghdasaryan 
1363d065bd81SMichel Lespinasse retry:
1364*c2508ec5SLinus Torvalds 	vma = lock_mm_and_find_vma(mm, address, regs);
136592181f19SNick Piggin 	if (unlikely(!vma)) {
1366*c2508ec5SLinus Torvalds 		bad_area_nosemaphore(regs, error_code, address);
136792181f19SNick Piggin 		return;
136892181f19SNick Piggin 	}
136992181f19SNick Piggin 
1370c61e211dSHarvey Harrison 	/*
1371c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1372c61e211dSHarvey Harrison 	 * we can handle it.
1373c61e211dSHarvey Harrison 	 */
1374ec352711SAndy Lutomirski 	if (unlikely(access_error(error_code, vma))) {
1375ec352711SAndy Lutomirski 		bad_area_access_error(regs, error_code, address, vma);
137692181f19SNick Piggin 		return;
1377c61e211dSHarvey Harrison 	}
1378c61e211dSHarvey Harrison 
1379c61e211dSHarvey Harrison 	/*
1380c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1381c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
13829a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1383c1e8d7c6SMichel Lespinasse 	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1384cb0631fdSVlastimil Babka 	 *
1385c1e8d7c6SMichel Lespinasse 	 * Note that handle_userfault() may also release and reacquire mmap_lock
1386cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1387cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1388cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1389cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1390cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1391c61e211dSHarvey Harrison 	 */
1392968614fcSPeter Xu 	fault = handle_mm_fault(vma, address, flags, regs);
13932d4a7167SIngo Molnar 
139439678191SPeter Xu 	if (fault_signal_pending(fault, regs)) {
1395ef2544fbSAndy Lutomirski 		/*
1396ef2544fbSAndy Lutomirski 		 * Quick path to respond to signals.  The core mm code
1397ef2544fbSAndy Lutomirski 		 * has unlocked the mm for us if we get here.
1398ef2544fbSAndy Lutomirski 		 */
139939678191SPeter Xu 		if (!user_mode(regs))
14006456a2a6SAndy Lutomirski 			kernelmode_fixup_or_oops(regs, error_code, address,
1401d4ffd5dfSJiashuo Liang 						 SIGBUS, BUS_ADRERR,
1402d4ffd5dfSJiashuo Liang 						 ARCH_DEFAULT_PKEY);
140339678191SPeter Xu 		return;
140439678191SPeter Xu 	}
140539678191SPeter Xu 
1406d9272525SPeter Xu 	/* The fault is fully completed (including releasing mmap lock) */
1407d9272525SPeter Xu 	if (fault & VM_FAULT_COMPLETED)
1408d9272525SPeter Xu 		return;
1409d9272525SPeter Xu 
14103a13c4d7SJohannes Weiner 	/*
1411c1e8d7c6SMichel Lespinasse 	 * If we need to retry the mmap_lock has already been released,
141226178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
141326178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
14143a13c4d7SJohannes Weiner 	 */
141536ef159fSQi Zheng 	if (unlikely(fault & VM_FAULT_RETRY)) {
141626178ec1SLinus Torvalds 		flags |= FAULT_FLAG_TRIED;
141726178ec1SLinus Torvalds 		goto retry;
141826178ec1SLinus Torvalds 	}
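
	/*
	 * Editorial recap of the retry protocol, as a sketch of the shape
	 * of the loop above (not extra code in this file):
	 *
	 *   retry:
	 *       vma = lock_mm_and_find_vma(mm, address, regs);
	 *       fault = handle_mm_fault(vma, address, flags, regs);
	 *       if (fault & VM_FAULT_RETRY) {	// mmap_lock already dropped
	 *           flags |= FAULT_FLAG_TRIED;	// at most one more try
	 *           goto retry;
	 *       }
	 *       mmap_read_unlock(mm);
	 */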
141926178ec1SLinus Torvalds 
1420d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
14210bff0aaeSSuren Baghdasaryan #ifdef CONFIG_PER_VMA_LOCK
14220bff0aaeSSuren Baghdasaryan done:
14230bff0aaeSSuren Baghdasaryan #endif
1424ec352711SAndy Lutomirski 	if (likely(!(fault & VM_FAULT_ERROR)))
142537b23e05SKOSAKI Motohiro 		return;
1426ec352711SAndy Lutomirski 
142756e62cd2SAndy Lutomirski 	if (fatal_signal_pending(current) && !user_mode(regs)) {
1428d4ffd5dfSJiashuo Liang 		kernelmode_fixup_or_oops(regs, error_code, address,
1429d4ffd5dfSJiashuo Liang 					 0, 0, ARCH_DEFAULT_PKEY);
1430ec352711SAndy Lutomirski 		return;
1431ec352711SAndy Lutomirski 	}
1432ec352711SAndy Lutomirski 
1433ec352711SAndy Lutomirski 	if (fault & VM_FAULT_OOM) {
1434ec352711SAndy Lutomirski 		/* Kernel mode? Handle exceptions or die: */
143556e62cd2SAndy Lutomirski 		if (!user_mode(regs)) {
14366456a2a6SAndy Lutomirski 			kernelmode_fixup_or_oops(regs, error_code, address,
1437d4ffd5dfSJiashuo Liang 						 SIGSEGV, SEGV_MAPERR,
1438d4ffd5dfSJiashuo Liang 						 ARCH_DEFAULT_PKEY);
1439ec352711SAndy Lutomirski 			return;
1440ec352711SAndy Lutomirski 		}
1441ec352711SAndy Lutomirski 
1442ec352711SAndy Lutomirski 		/*
1443ec352711SAndy Lutomirski 		 * We ran out of memory, call the OOM killer, and return to
1444ec352711SAndy Lutomirski 		 * userspace (which will retry the fault, or kill us if we got
1445ec352711SAndy Lutomirski 		 * oom-killed):
1446ec352711SAndy Lutomirski 		 */
1447ec352711SAndy Lutomirski 		pagefault_out_of_memory();
1448ec352711SAndy Lutomirski 	} else {
1449ec352711SAndy Lutomirski 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1450ec352711SAndy Lutomirski 			     VM_FAULT_HWPOISON_LARGE))
1451ec352711SAndy Lutomirski 			do_sigbus(regs, error_code, address, fault);
1452ec352711SAndy Lutomirski 		else if (fault & VM_FAULT_SIGSEGV)
1453ec352711SAndy Lutomirski 			bad_area_nosemaphore(regs, error_code, address);
1454ec352711SAndy Lutomirski 		else
1455ec352711SAndy Lutomirski 			BUG();
145637b23e05SKOSAKI Motohiro 	}
1457c61e211dSHarvey Harrison }
1458aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1459aa37c51bSDave Hansen 
1460a0d14b89SPeter Zijlstra static __always_inline void
1461a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1462a0d14b89SPeter Zijlstra 			 unsigned long address)
1463d34603b0SSeiji Aguchi {
1464a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1465a0d14b89SPeter Zijlstra 		return;
1466a0d14b89SPeter Zijlstra 
1467d34603b0SSeiji Aguchi 	if (user_mode(regs))
1468d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1469d34603b0SSeiji Aguchi 	else
1470d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1471d34603b0SSeiji Aguchi }
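
/*
 * Editorial usage note: the two tracepoints above are exposed through
 * tracefs (paths assume the standard trace-event layout):
 *
 *   # echo 1 > /sys/kernel/tracing/events/exceptions/page_fault_user/enable
 *   # echo 1 > /sys/kernel/tracing/events/exceptions/page_fault_kernel/enable
 *   # cat /sys/kernel/tracing/trace_pipe
 */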
1472d34603b0SSeiji Aguchi 
147391eeafeaSThomas Gleixner static __always_inline void
147491eeafeaSThomas Gleixner handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1475ee6352b2SFrederic Weisbecker 			      unsigned long address)
147611a7ffb0SThomas Gleixner {
147791eeafeaSThomas Gleixner 	trace_page_fault_entries(regs, error_code, address);
147891eeafeaSThomas Gleixner 
147991eeafeaSThomas Gleixner 	if (unlikely(kmmio_fault(regs, address)))
148091eeafeaSThomas Gleixner 		return;
148191eeafeaSThomas Gleixner 
148291eeafeaSThomas Gleixner 	/* Was the fault on kernel-controlled part of the address space? */
148391eeafeaSThomas Gleixner 	if (unlikely(fault_in_kernel_space(address))) {
148491eeafeaSThomas Gleixner 		do_kern_addr_fault(regs, error_code, address);
148591eeafeaSThomas Gleixner 	} else {
148691eeafeaSThomas Gleixner 		do_user_addr_fault(regs, error_code, address);
148791eeafeaSThomas Gleixner 		/*
148891eeafeaSThomas Gleixner 		 * User address page fault handling might have reenabled
148991eeafeaSThomas Gleixner 		 * interrupts. Fixing up all potential exit points of
149091eeafeaSThomas Gleixner 		 * do_user_addr_fault() and its leaf functions is just not
149191eeafeaSThomas Gleixner 		 * doable w/o creating an unholy mess or turning the code
149291eeafeaSThomas Gleixner 		 * upside down.
149391eeafeaSThomas Gleixner 		 */
149491eeafeaSThomas Gleixner 		local_irq_disable();
149591eeafeaSThomas Gleixner 	}
149691eeafeaSThomas Gleixner }
149791eeafeaSThomas Gleixner 
149891eeafeaSThomas Gleixner DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
149991eeafeaSThomas Gleixner {
150091eeafeaSThomas Gleixner 	unsigned long address = read_cr2();
1501a27a0a55SThomas Gleixner 	irqentry_state_t state;
150291eeafeaSThomas Gleixner 
1503da1c55f1SMichel Lespinasse 	prefetchw(&current->mm->mmap_lock);
150491eeafeaSThomas Gleixner 
1505ef68017eSAndy Lutomirski 	/*
150666af4f5cSVitaly Kuznetsov 	 * KVM uses #PF vector to deliver 'page not present' events to guests
150766af4f5cSVitaly Kuznetsov 	 * (asynchronous page fault mechanism). The event happens when a
150866af4f5cSVitaly Kuznetsov 	 * userspace task is trying to access some valid (from guest's point of
150966af4f5cSVitaly Kuznetsov 	 * view) memory which is not currently mapped by the host (e.g. the
151066af4f5cSVitaly Kuznetsov 	 * memory is swapped out). Note, the corresponding "page ready" event,
1511163b0991SIngo Molnar 	 * which is injected when the memory becomes available, is delivered via
151266af4f5cSVitaly Kuznetsov 	 * an interrupt mechanism and not a #PF exception
151366af4f5cSVitaly Kuznetsov 	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1514ef68017eSAndy Lutomirski 	 *
1515ef68017eSAndy Lutomirski 	 * We are relying on the interrupted context being sane (valid RSP,
1516ef68017eSAndy Lutomirski 	 * relevant locks not held, etc.), which is fine as long as the
1517ef68017eSAndy Lutomirski 	 * interrupted context had IF=1.  We are also relying on the KVM
1518ef68017eSAndy Lutomirski 	 * async pf type field and CR2 being read consistently instead of
1519ef68017eSAndy Lutomirski 	 * getting values from real and async page faults mixed up.
1520ef68017eSAndy Lutomirski 	 *
1521ef68017eSAndy Lutomirski 	 * Fingers crossed.
152291eeafeaSThomas Gleixner 	 *
152391eeafeaSThomas Gleixner 	 * The async #PF handling code takes care of idtentry handling
152491eeafeaSThomas Gleixner 	 * itself.
1525ef68017eSAndy Lutomirski 	 */
1526ef68017eSAndy Lutomirski 	if (kvm_handle_async_pf(regs, (u32)address))
1527ef68017eSAndy Lutomirski 		return;
1528ef68017eSAndy Lutomirski 
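
	/*
	 * Editorial sketch of the guest-side check kvm_handle_async_pf()
	 * performs (names quoted from arch/x86/kernel/kvm.c; simplified
	 * and illustrative, details elided):
	 *
	 *   flags = kvm_read_and_reset_apf_flags();	// 0 for a real #PF
	 *   if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
	 *           kvm_async_pf_task_wait_schedule((u32)address);
	 *           return true;	// handled, not a real fault
	 *   }
	 */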
1529ca4c6a98SThomas Gleixner 	/*
153091eeafeaSThomas Gleixner 	 * Entry handling for valid #PF from kernel mode is slightly
15316f0e6c15SFrederic Weisbecker 	 * different: RCU is already watching and ct_irq_enter() must not
153291eeafeaSThomas Gleixner 	 * be invoked, because handling a kernel fault on a user space address might
153391eeafeaSThomas Gleixner 	 * sleep.
153491eeafeaSThomas Gleixner 	 *
153591eeafeaSThomas Gleixner 	 * In case the fault hit an RCU-idle region, the conditional entry
153691eeafeaSThomas Gleixner 	 * code re-enables RCU to avoid subsequent wreckage, which helps
1537d9f6e12fSIngo Molnar 	 * debuggability.
1538ca4c6a98SThomas Gleixner 	 */
1539a27a0a55SThomas Gleixner 	state = irqentry_enter(regs);
154091eeafeaSThomas Gleixner 
154191eeafeaSThomas Gleixner 	instrumentation_begin();
154291eeafeaSThomas Gleixner 	handle_page_fault(regs, error_code, address);
154391eeafeaSThomas Gleixner 	instrumentation_end();
154491eeafeaSThomas Gleixner 
1545a27a0a55SThomas Gleixner 	irqentry_exit(regs, state);
1546ca4c6a98SThomas Gleixner }
1547