/* xref: /openbmc/linux/arch/x86/mm/fault.c (revision 35f1c89b0cce247bf0213df243ed902989b1dcda) */
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/extable.h>		/* search_exception_tables	*/
#include <linux/memblock.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
#include <linux/efi.h>			/* efi_recover_from_page_fault()*/
#include <linux/mm_types.h>

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/
#include <asm/efi.h>			/* efi_recover_from_page_fault()*/
#include <asm/desc.h>			/* store_idt(), ...		*/
#include <asm/cpu_entry_area.h>		/* exception stack		*/
#include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
#include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
#include <asm/vdso.h>			/* fixup_vdso_exception()	*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}
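
/*
 * Note: a non-zero (-1) return reports that kmmio_handler() consumed the
 * fault; callers are expected to bail out of the page-fault path early in
 * that case.
 */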

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.  This is AMD erratum #91.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In 64-bit mode 0x40..0x4F are valid REX prefixes
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (get_kernel_nofault(opcode, instr))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & X86_PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	/*
	 * This code has historically always bailed out if IP points to a
	 * not-present page (e.g. due to a race).  No one has ever
	 * complained about this.
	 */
	pagefault_disable();

	while (instr < max_instr) {
		unsigned char opcode;

		if (user_mode(regs)) {
			if (get_user(opcode, instr))
				break;
		} else {
			if (get_kernel_nofault(opcode, instr))
				break;
		}

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}

	pagefault_enable();
	return prefetch;
}
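
/*
 * Illustrative walk-through (hypothetical instruction bytes): for the
 * sequence "3E 0F 0D 08" -- a DS-prefixed AMD PREFETCH -- the loop above
 * first sees 0x3E (instr_hi == 0x30, (instr_lo & 7) == 6, a valid prefix,
 * so keep scanning), then 0x0F: check_prefetch_opcode() peeks at the next
 * byte, finds 0x0D, and sets *prefetch = 1, so the spurious fault raised
 * by erratum #91 is silently ignored.
 */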

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_p4d/set_pud.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);

	if (pmd_present(*pmd) != pmd_present(*pmd_k))
		set_pmd(pmd, *pmd_k);

	if (!pmd_present(*pmd_k))
		return NULL;
	else
		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));

	return pmd_k;
}

/*
 *   Handle a fault on the vmalloc or module mapping area
 *
 *   This is needed because there is a race condition between the time
 *   when the vmalloc mapping code updates the PMD and the point in time
 *   where it synchronizes this update with the other page-tables in the
 *   system.
 *
 *   In this race window another thread/CPU can map an area on the same
 *   PMD, find it already present, and not synchronize it with the
 *   rest of the system yet. As a result v[mz]alloc might return areas
 *   which are not mapped in every page-table in the system, causing an
 *   unhandled page-fault when they are accessed.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);
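
/*
 * Illustrative timeline of the race described above (hypothetical
 * CPUs/tasks):
 *
 *	CPU 0 (task A)				CPU 1 (task B)
 *	vmalloc() installs a new PMD
 *	entry in init_mm.pgd
 *						touches the new area; B's PGD
 *						has no entry for that PMD yet,
 *						so it faults
 *						vmalloc_fault() copies the PMD
 *						from init_mm via
 *						vmalloc_sync_one() and the
 *						access is retried
 */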

void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & PMD_MASK;
	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
	     addr += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;

			/* the pgt_lock is only needed on Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			vmalloc_sync_one(page_address(page), addr);
			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(address)];
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#define pr_pde pr_cont
#else
#define pr_pde pr_info
#endif
	p4d = p4d_offset(pgd, address);
	pud = pud_offset(p4d, address);
	pmd = pmd_offset(pud, address);
	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
#undef pr_pde

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	pr_cont("\n");
}
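
/*
 * Example of the resulting dmesg line on a PAE kernel (all values
 * hypothetical):
 *
 *	*pdpt = 0000000032b7d001 *pde = 000000012a34c067 *pte = 800000012a345025
 */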

#else /* CONFIG_X86_64: */

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = base + pgd_index(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	pr_info("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (bad_address(p4d))
		goto bad;

	pr_cont("P4D %lx ", p4d_val(*p4d));
	if (!p4d_present(*p4d) || p4d_large(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (bad_address(pud))
		goto bad;

	pr_cont("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	pr_cont("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	pr_cont("PTE %lx", pte_val(*pte));
out:
	pr_cont("\n");
	return;
bad:
	pr_info("BAD\n");
}
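
/*
 * Example output (hypothetical values; with a folded p4d the PGD and P4D
 * entries print identically):
 *
 *	PGD 80000000b66fe067 P4D 80000000b66fe067 PUD b66fd067 PMD 0
 */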

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
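
/*
 * Worked example (hypothetical addresses): the erratum clears the upper
 * 32 bits of RIP, so a jump to 0xffffffff81234567 faults with
 * address == regs->ip == 0x0000000081234567.  OR-ing 0xffffffff << 32
 * back in yields 0xffffffff81234567 again; since that falls inside the
 * kernel text, regs->ip is repaired and execution resumes.
 */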

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

/* Pentium F0 0F C7 C8 bug workaround: */
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
		handle_invalid_op(regs);
		return 1;
	}
#endif
	return 0;
}
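
/*
 * Background: "F0 0F C7 C8" encodes LOCK CMPXCHG8B with a register
 * operand, an invalid encoding that hangs unpatched Pentiums while they
 * try to deliver the #UD.  Linux maps the IDT read-only, which turns the
 * errant locked IDT access into a page fault; idt_is_f00f_address()
 * recognizes that fault and handle_invalid_op() then delivers the #UD
 * the instruction should have raised in the first place.
 */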

static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
{
	u32 offset = (index >> 3) * sizeof(struct desc_struct);
	unsigned long addr;
	struct ldttss_desc desc;

	if (index == 0) {
		pr_alert("%s: NULL\n", name);
		return;
	}

	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
		return;
	}

	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
			      sizeof(struct ldttss_desc))) {
		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
			 name, index);
		return;
	}

	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
#ifdef CONFIG_X86_64
	addr |= ((u64)desc.base3 << 32);
#endif
	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}
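
/*
 * Sample output (hypothetical selector/base/limit values):
 *
 *	LDTR: NULL
 *	TR: 0x40 -- base=0xfffffe0000003000 limit=0x206f
 */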

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3_pa());
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
				from_kuid(&init_user_ns, current_uid()));
	}

	if (address < PAGE_SIZE && !user_mode(regs))
		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
			(void *)address);
	else
		pr_alert("BUG: unable to handle page fault for address: %px\n",
			(void *)address);

	pr_alert("#PF: %s %s in %s mode\n",
		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write access" :
					       "read access",
			     user_mode(regs) ? "user" : "kernel");
	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
		 !(error_code & X86_PF_PROT) ? "not-present page" :
		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
		 (error_code & X86_PF_PK)    ? "protection keys violation" :
					       "permissions violation");

	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
		struct desc_ptr idt, gdt;
		u16 ldtr, tr;

		/*
		 * This can happen for quite a few reasons.  The more obvious
		 * ones are faults accessing the GDT, or LDT.  Perhaps
		 * surprisingly, if the CPU tries to deliver a benign or
		 * contributory exception from user code and gets a page fault
		 * during delivery, the page fault can be delivered as though
		 * it originated directly from user code.  This could happen
		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
		 * kernel or IST stack.
		 */
		store_idt(&idt);

		/* Usable even on Xen PV -- it's just slow. */
		native_store_gdt(&gdt);

		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
			 idt.address, idt.size, gdt.address, gdt.size);

		store_ldt(ldtr);
		show_ldttss(&gdt, "LDTR", ldtr);

		store_tr(tr);
		show_ldttss(&gdt, "TR", tr);
	}

	dump_pagetable(address);
}
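
/*
 * The first lines of a typical oops produced above look like this
 * (hypothetical fault address):
 *
 *	BUG: kernel NULL pointer dereference, address: 0000000000000008
 *	#PF: supervisor read access in kernel mode
 *	#PF: error_code(0x0000) - not-present page
 */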

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static void sanitize_error_code(unsigned long address,
				unsigned long *error_code)
{
	/*
	 * To avoid leaking information about the kernel page
	 * table layout, pretend that user-mode accesses to
	 * kernel addresses are always protection faults.
	 *
	 * NB: This means that failed vsyscalls with vsyscall=none
	 * will have the PROT bit.  This doesn't leak any
	 * information and does not appear to cause any problems.
	 */
	if (address >= TASK_SIZE_MAX)
		*error_code |= X86_PF_PROT;
}

static void set_signal_archinfo(unsigned long address,
				unsigned long error_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_nr = X86_TRAP_PF;
	tsk->thread.error_code = error_code | X86_PF_USER;
	tsk->thread.cr2 = address;
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;

	if (user_mode(regs)) {
		/*
		 * This is an implicit supervisor-mode access from user
		 * mode.  Bypass all the kernel-mode recovery code and just
		 * OOPS.
		 */
		goto oops;
	}

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current->thread.sig_on_uaccess_err && signal) {
			sanitize_error_code(address, &error_code);

			set_signal_archinfo(address, error_code);

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_fault(signal, si_code, (void __user *)address);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

#ifdef CONFIG_VMAP_STACK
	/*
	 * Stack overflow?  During boot, we can fault near the initial
	 * stack in the direct map, but that's not an overflow -- check
	 * that we're in vmalloc space to avoid this.
	 */
	if (is_vmalloc_addr((void *)address) &&
	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
		/*
		 * We're likely to be running with very little stack space
		 * left.  It's plausible that we'd hit this condition but
		 * double-fault even before we get this far, in which case
		 * we're fine: the double-fault handler will deal with it.
		 *
		 * We don't want to make it all the way into the oops code
		 * and then double-fault, though, because we're likely to
		 * break the console driver and lose most of the stack dump.
		 */
		asm volatile ("movq %[stack], %%rsp\n\t"
			      "call handle_stack_overflow\n\t"
			      "1: jmp 1b"
			      : ASM_CALL_CONSTRAINT
			      : "D" ("kernel stack overflow (page fault)"),
				"S" (regs), "d" (address),
				[stack] "rm" (stack));
		unreachable();
	}
#endif

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch(), fixup_exception() would
	 *   have handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Buggy firmware could access regions which might page fault; try to
	 * recover from such faults.
	 */
	if (IS_ENABLED(CONFIG_EFI))
		efi_recover_from_page_fault(address);

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
		loglvl, tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");

	show_opcodes(regs, loglvl);
}

/*
 * The (legacy) vsyscall page is the lone page in the kernel portion
 * of the address space that has user-accessible permissions.
 */
static bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
}
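
/*
 * For reference: VSYSCALL_ADDR is the fixed ABI address 0xffffffffff600000,
 * so e.g. a legacy binary calling the vsyscall time() entry at
 * 0xffffffffff600400 matches here.
 */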

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, u32 pkey, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs) && (error_code & X86_PF_USER)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		sanitize_error_code(address, &error_code);

		if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
			return;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		set_signal_archinfo(address, error_code);

		if (si_code == SEGV_PKUERR)
			force_sig_pkuerr((void __user *)address, pkey);

		force_sig_fault(SIGSEGV, si_code, (void __user *)address);

		local_irq_disable();

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, u32 pkey, int si_code)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
}

static inline bool bad_area_access_from_pkeys(unsigned long error_code,
		struct vm_area_struct *vma)
{
	/* This code is always called on the current mm */
	bool foreign = false;

	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (bad_area_access_from_pkeys(error_code, vma)) {
		/*
		 * A protection key fault means that the PKRU value did not allow
		 * access to some PTE.  Userspace can figure out what PKRU was
		 * from the XSAVE state.  This function captures the pkey from
		 * the vma and passes it to userspace so userspace can discover
		 * which protection key was set on the PTE.
		 *
		 * If we get here, we know that the hardware signaled an
		 * X86_PF_PK fault and that there was a VMA once we got in the
		 * fault handler.  It does *not* guarantee that the VMA we find
		 * here was the one that we faulted on.
		 *
		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
		 * 3. T1   : faults...
		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
		 *	     faulted on a pte with its pkey=4.
		 */
		u32 pkey = vma_pkey(vma);

		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
	} else {
		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
	}
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  vm_fault_t fault)
{
	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	sanitize_error_code(address, &error_code);

	if (fixup_vdso_exception(regs, X86_TRAP_PF, error_code, address))
		return;

	set_signal_archinfo(address, error_code);

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		struct task_struct *tsk = current;
		unsigned lsb = 0;

		pr_err(
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return;
	}
#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address);
		else
			BUG();
	}
}

static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
1015d8b57bb7SThomas Gleixner 
1016c61e211dSHarvey Harrison /*
10172d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
10182d4a7167SIngo Molnar  *
10192d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
10202d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
10212d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
10222d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
10232d4a7167SIngo Molnar  * on other processors.
10242d4a7167SIngo Molnar  *
102531668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
102631668511SDavid Vrabel  * fewer permission than the page table entry.  Non-present (P = 0)
102731668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
102831668511SDavid Vrabel  *
10295b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
10305b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
103131668511SDavid Vrabel  *
103231668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
103331668511SDavid Vrabel  *
103431668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
103531668511SDavid Vrabel  * (Optional Invalidation).
10365b727a3bSJeremy Fitzhardinge  */
10379326638cSMasami Hiramatsu static noinline int
10388fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
10395b727a3bSJeremy Fitzhardinge {
10405b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
1041e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
10425b727a3bSJeremy Fitzhardinge 	pud_t *pud;
10435b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
10445b727a3bSJeremy Fitzhardinge 	pte_t *pte;
10453c3e5694SSteven Rostedt 	int ret;
10465b727a3bSJeremy Fitzhardinge 
104731668511SDavid Vrabel 	/*
104831668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
104931668511SDavid Vrabel 	 * spurious faults.
105031668511SDavid Vrabel 	 *
105131668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
105231668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
105331668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
105431668511SDavid Vrabel 	 * faults.
105531668511SDavid Vrabel 	 */
10561067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
10571067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
10585b727a3bSJeremy Fitzhardinge 		return 0;
10595b727a3bSJeremy Fitzhardinge 
10605b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
10615b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
10625b727a3bSJeremy Fitzhardinge 		return 0;
10635b727a3bSJeremy Fitzhardinge 
1064e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1065e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1066e0c4f675SKirill A. Shutemov 		return 0;
1067e0c4f675SKirill A. Shutemov 
1068e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
10698fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1070e0c4f675SKirill A. Shutemov 
1071e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
10725b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
10735b727a3bSJeremy Fitzhardinge 		return 0;
10745b727a3bSJeremy Fitzhardinge 
1075d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
10768fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1077d8b57bb7SThomas Gleixner 
10785b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
10795b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
10805b727a3bSJeremy Fitzhardinge 		return 0;
10815b727a3bSJeremy Fitzhardinge 
1082d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
10838fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1084d8b57bb7SThomas Gleixner 
10855b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1086954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
10875b727a3bSJeremy Fitzhardinge 		return 0;
10885b727a3bSJeremy Fitzhardinge 
10898fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
10903c3e5694SSteven Rostedt 	if (!ret)
10913c3e5694SSteven Rostedt 		return 0;
10923c3e5694SSteven Rostedt 
10933c3e5694SSteven Rostedt 	/*
10942d4a7167SIngo Molnar 	 * Make sure we have permissions in the PMD.
10952d4a7167SIngo Molnar 	 * If not, there's a bug in the page tables:
10963c3e5694SSteven Rostedt 	 */
10978fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
10983c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
10992d4a7167SIngo Molnar 
11003c3e5694SSteven Rostedt 	return ret;
11015b727a3bSJeremy Fitzhardinge }
11028fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
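
/*
 * For illustration only: the per-level helper used above,
 * spurious_kernel_fault_check() (defined earlier in this file),
 * essentially performs:
 *
 *	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 *		return 0;
 *	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 *		return 0;
 *	return 1;
 *
 * Handing it a p4d/pud/pmd cast to pte_t * is fine for this purpose
 * because the bits it inspects (_PAGE_RW and _PAGE_NX) sit at the same
 * bit positions at every paging level.  A write fault on an address
 * mapped by a 2MB kernel page therefore stops at pmd_large() and has
 * its RW/NX bits checked directly in the PMD.
 */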
11035b727a3bSJeremy Fitzhardinge 
1104c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1105c61e211dSHarvey Harrison 
11062d4a7167SIngo Molnar static inline int
110768da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
110892181f19SNick Piggin {
110907f146f5SDave Hansen 	/* This is only called for the current mm, so: */
111007f146f5SDave Hansen 	bool foreign = false;
1111e8c6226dSDave Hansen 
1112e8c6226dSDave Hansen 	/*
1113e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1114e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1115e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1116e8c6226dSDave Hansen 	 */
11171067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1118e8c6226dSDave Hansen 		return 1;
1119e8c6226dSDave Hansen 
112033a709b2SDave Hansen 	/*
112174faeee0SSean Christopherson 	 * SGX hardware blocked the access.  This usually happens
112274faeee0SSean Christopherson 	 * when the enclave memory contents have been destroyed, like
112374faeee0SSean Christopherson 	 * after a suspend/resume cycle. In any case, the kernel can't
112474faeee0SSean Christopherson 	 * fix the cause of the fault.  Handle the fault as an access
112574faeee0SSean Christopherson 	 * error even in cases where no actual access violation
112674faeee0SSean Christopherson 	 * occurred.  This allows userspace to rebuild the enclave in
112774faeee0SSean Christopherson 	 * response to the signal.
112874faeee0SSean Christopherson 	 */
112974faeee0SSean Christopherson 	if (unlikely(error_code & X86_PF_SGX))
113074faeee0SSean Christopherson 		return 1;
113174faeee0SSean Christopherson 
113274faeee0SSean Christopherson 	/*
113307f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not take
11341067f030SRicardo Neri 	 * faults just to hit an X86_PF_PK fault as soon as we
113507f146f5SDave Hansen 	 * fill in a page.
113607f146f5SDave Hansen 	 */
11371067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11381067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
113907f146f5SDave Hansen 		return 1;
114033a709b2SDave Hansen 
11411067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
11422d4a7167SIngo Molnar 		/* write, present and write, not present: */
114392181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
114492181f19SNick Piggin 			return 1;
11452d4a7167SIngo Molnar 		return 0;
11462d4a7167SIngo Molnar 	}
11472d4a7167SIngo Molnar 
11482d4a7167SIngo Molnar 	/* read, present: */
11491067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
115092181f19SNick Piggin 		return 1;
11512d4a7167SIngo Molnar 
11522d4a7167SIngo Molnar 	/* read, not present: */
11533122e80eSAnshuman Khandual 	if (unlikely(!vma_is_accessible(vma)))
115492181f19SNick Piggin 		return 1;
115592181f19SNick Piggin 
115692181f19SNick Piggin 	return 0;
115792181f19SNick Piggin }
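
/*
 * Rough decision table for the checks above (illustrative, not
 * exhaustive):
 *
 *	X86_PF_PK or X86_PF_SGX set           -> error, never fixable here
 *	arch_vma_access_permitted() denies    -> error (e.g. pkey in VMA)
 *	write and !(vm_flags & VM_WRITE)      -> error
 *	write and  (vm_flags & VM_WRITE)      -> OK (possibly a COW fault)
 *	read, present (X86_PF_PROT set)       -> error (protection violation)
 *	read, !vma_is_accessible()            -> error (e.g. PROT_NONE)
 *	read, not present, accessible VMA     -> OK (normal demand fault)
 */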
115892181f19SNick Piggin 
115930063810STony Luck bool fault_in_kernel_space(unsigned long address)
11600973a06cSHiroshi Shimamoto {
11613ae0ad92SDave Hansen 	/*
11623ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
11633ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
11643ae0ad92SDave Hansen 	 * address space.
11653ae0ad92SDave Hansen 	 */
11663ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
11673ae0ad92SDave Hansen 		return false;
11683ae0ad92SDave Hansen 
1169d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
11700973a06cSHiroshi Shimamoto }
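
/*
 * Concrete example, assuming 4-level paging on x86-64: TASK_SIZE_MAX
 * ends just below the non-canonical hole, while the vsyscall page sits
 * at 0xffffffffff600000.  The vsyscall page is therefore numerically a
 * kernel address, but it is deliberately reported as user space here
 * so that do_user_addr_fault() gets a chance to emulate accesses to it.
 */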
11710973a06cSHiroshi Shimamoto 
1172c61e211dSHarvey Harrison /*
11738fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11748fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11758fed6200SDave Hansen  * ran in userspace or the kernel.
1176c61e211dSHarvey Harrison  */
11778fed6200SDave Hansen static void
11788fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11790ac09f9fSJiri Olsa 		   unsigned long address)
1180c61e211dSHarvey Harrison {
11818fed6200SDave Hansen 	/*
1182367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1183367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1184367e3f1dSDave Hansen 	 * space, so do not expect them here.
1185367e3f1dSDave Hansen 	 */
1186367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1187367e3f1dSDave Hansen 
11884819e15fSJoerg Roedel #ifdef CONFIG_X86_32
11894819e15fSJoerg Roedel 	/*
11904819e15fSJoerg Roedel 	 * We can fault-in kernel-space virtual memory on-demand. The
11914819e15fSJoerg Roedel 	 * 'reference' page table is init_mm.pgd.
11924819e15fSJoerg Roedel 	 *
11934819e15fSJoerg Roedel 	 * NOTE! We MUST NOT take any locks for this case. We may
11944819e15fSJoerg Roedel 	 * be in an interrupt or a critical region, and should
11954819e15fSJoerg Roedel 	 * only copy the information from the master page table,
11964819e15fSJoerg Roedel 	 * nothing more.
11974819e15fSJoerg Roedel 	 *
11984819e15fSJoerg Roedel 	 * Before doing this on-demand faulting, ensure that the
11994819e15fSJoerg Roedel 	 * fault is not any of the following:
12004819e15fSJoerg Roedel 	 * 1. A fault on a PTE with a reserved bit set.
12014819e15fSJoerg Roedel 	 * 2. A fault caused by a user-mode access.  (Do not demand-
12024819e15fSJoerg Roedel 	 *    fault kernel memory due to user-mode accesses).
12034819e15fSJoerg Roedel 	 * 3. A fault caused by a page-level protection violation.
12044819e15fSJoerg Roedel 	 *    (A demand fault would be on a non-present page which
12054819e15fSJoerg Roedel 	 *     would have X86_PF_PROT==0).
12064819e15fSJoerg Roedel 	 *
12074819e15fSJoerg Roedel 	 * This is only needed to close a race condition on x86-32 in
12084819e15fSJoerg Roedel 	 * the vmalloc mapping/unmapping code. See the comment above
12094819e15fSJoerg Roedel 	 * vmalloc_fault() for details. On x86-64 the race does not
12104819e15fSJoerg Roedel 	 * exist as the vmalloc mappings don't need to be synchronized
12114819e15fSJoerg Roedel 	 * there.
12124819e15fSJoerg Roedel 	 */
12134819e15fSJoerg Roedel 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
12144819e15fSJoerg Roedel 		if (vmalloc_fault(address) >= 0)
12154819e15fSJoerg Roedel 			return;
12164819e15fSJoerg Roedel 	}
12174819e15fSJoerg Roedel #endif
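
	/*
	 * Sketch of the fix-up above, for illustration: vmalloc_fault()
	 * (defined earlier in this file for 32-bit) looks the faulting
	 * address up in the init_mm.pgd reference table and copies the
	 * missing top-level entry into the current page tables, after
	 * which returning from the fault retries the access.
	 */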
12184819e15fSJoerg Roedel 
12198fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
12208fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
12218fed6200SDave Hansen 		return;
12228fed6200SDave Hansen 
12238fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
1224b98cca44SAnshuman Khandual 	if (kprobe_page_fault(regs, X86_TRAP_PF))
12258fed6200SDave Hansen 		return;
12268fed6200SDave Hansen 
12278fed6200SDave Hansen 	/*
12288fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
12298fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
12308fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
12318fed6200SDave Hansen 	 *
12328fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
12338fed6200SDave Hansen 	 * fault we could otherwise deadlock:
12348fed6200SDave Hansen 	 */
1235ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
12368fed6200SDave Hansen }
12378fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
12388fed6200SDave Hansen 
1239aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1240aa37c51bSDave Hansen static inline
1241aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1242aa37c51bSDave Hansen 			unsigned long hw_error_code,
1243c61e211dSHarvey Harrison 			unsigned long address)
1244c61e211dSHarvey Harrison {
1245c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1246c61e211dSHarvey Harrison 	struct task_struct *tsk;
12472d4a7167SIngo Molnar 	struct mm_struct *mm;
1248968614fcSPeter Xu 	vm_fault_t fault;
1249dde16072SPeter Xu 	unsigned int flags = FAULT_FLAG_DEFAULT;
1250c61e211dSHarvey Harrison 
1251c61e211dSHarvey Harrison 	tsk = current;
1252c61e211dSHarvey Harrison 	mm = tsk->mm;
12532d4a7167SIngo Molnar 
12542d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1255b98cca44SAnshuman Khandual 	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
12569be260a6SMasami Hiramatsu 		return;
1257e00b12e6SPeter Zijlstra 
12585b0c2cacSDave Hansen 	/*
12595b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
12605b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
12615b0c2cacSDave Hansen 	 */
1262164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1263164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1264e00b12e6SPeter Zijlstra 
12655b0c2cacSDave Hansen 	/*
1266e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1267e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1268e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1269e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1270e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
12715b0c2cacSDave Hansen 	 */
1272a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1273a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1274e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1275a15781b5SAndy Lutomirski 	{
1276ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1277e00b12e6SPeter Zijlstra 		return;
1278e00b12e6SPeter Zijlstra 	}
1279e00b12e6SPeter Zijlstra 
1280e00b12e6SPeter Zijlstra 	/*
1281e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
128270ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled, then we must not take the fault.
1283e00b12e6SPeter Zijlstra 	 */
128470ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1285ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1286e00b12e6SPeter Zijlstra 		return;
1287e00b12e6SPeter Zijlstra 	}
1288e00b12e6SPeter Zijlstra 
1289c61e211dSHarvey Harrison 	/*
1290891cffbdSLinus Torvalds 	 * It's safe to allow IRQs after CR2 has been saved and the
1291891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1292891cffbdSLinus Torvalds 	 *
1293891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
12942d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1295c61e211dSHarvey Harrison 	 */
1296f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1297891cffbdSLinus Torvalds 		local_irq_enable();
1298759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
12992d4a7167SIngo Molnar 	} else {
13002d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1301c61e211dSHarvey Harrison 			local_irq_enable();
13022d4a7167SIngo Molnar 	}
1303c61e211dSHarvey Harrison 
1304a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
13057dd1fcc2SPeter Zijlstra 
13060ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1307759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
13080ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1309d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
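
	/*
	 * Example: a user-mode write to a private page that still maps
	 * the shared zero/COW copy reaches handle_mm_fault() with
	 * flags == FAULT_FLAG_DEFAULT | FAULT_FLAG_USER | FAULT_FLAG_WRITE,
	 * i.e. a killable, retryable write fault from user space.
	 */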
1310759496baSJohannes Weiner 
13113ae0ad92SDave Hansen #ifdef CONFIG_X86_64
13123a1dfe6eSIngo Molnar 	/*
1313918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1314918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1315918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1316c61e211dSHarvey Harrison 	 *
13173ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13183ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1319e0a446ceSAndy Lutomirski 	 *
1320e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1321e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
13223ae0ad92SDave Hansen 	 */
1323918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1324918ce325SAndy Lutomirski 		if (emulate_vsyscall(hw_error_code, regs, address))
13253ae0ad92SDave Hansen 			return;
13263ae0ad92SDave Hansen 	}
13273ae0ad92SDave Hansen #endif
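
	/*
	 * Loosely, emulate_vsyscall() (see arch/x86/entry/vsyscall/)
	 * turns attempts to enter the legacy gettimeofday/time/getcpu
	 * vsyscall entry points into real syscalls; other accesses to
	 * the page are refused, depending on the configured vsyscall
	 * mode.
	 */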
13283ae0ad92SDave Hansen 
1329c61e211dSHarvey Harrison 	/*
133088259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
133188259744SDave Hansen 	 * on well-defined single instructions listed in the exception
133288259744SDave Hansen 	 * tables.  But an erroneous kernel fault occurring outside one of
1333c1e8d7c6SMichel Lespinasse 	 * those areas, in a context that may also hold mmap_lock, might
133488259744SDave Hansen 	 * deadlock attempting to validate the fault against the address space.
1335c61e211dSHarvey Harrison 	 *
133688259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
133788259744SDave Hansen 	 * risk of a deadlock.  This happens if we
1338c1e8d7c6SMichel Lespinasse 	 * 1. Failed to acquire mmap_lock, and
13396344be60SAndy Lutomirski 	 * 2. The access did not originate in userspace.
1340c61e211dSHarvey Harrison 	 */
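	/*
	 * Concrete case this avoids: buggy kernel code dereferences a
	 * user pointer directly (no exception-table fixup) while its
	 * task already holds mmap_lock.  The trylock below then fails,
	 * and since the faulting instruction has no fixup we oops via
	 * bad_area_nosemaphore() instead of self-deadlocking on
	 * mmap_read_lock().
	 */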
1341d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm))) {
13426344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
134388259744SDave Hansen 			/*
134488259744SDave Hansen 			 * Fault from code in kernel from
134588259744SDave Hansen 			 * which we do not expect faults.
134688259744SDave Hansen 			 */
13470ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
134892181f19SNick Piggin 			return;
134992181f19SNick Piggin 		}
1350d065bd81SMichel Lespinasse retry:
1351d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
135201006074SPeter Zijlstra 	} else {
135301006074SPeter Zijlstra 		/*
13542d4a7167SIngo Molnar 		 * The above mmap_read_trylock() might have succeeded in
13552d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
13562d4a7167SIngo Molnar 		 * mmap_read_lock():
135701006074SPeter Zijlstra 		 */
135801006074SPeter Zijlstra 		might_sleep();
1359c61e211dSHarvey Harrison 	}
1360c61e211dSHarvey Harrison 
1361c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
136292181f19SNick Piggin 	if (unlikely(!vma)) {
13630ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
136492181f19SNick Piggin 		return;
136592181f19SNick Piggin 	}
136692181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1367c61e211dSHarvey Harrison 		goto good_area;
136892181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
13690ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
137092181f19SNick Piggin 		return;
137192181f19SNick Piggin 	}
137292181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
13730ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
137492181f19SNick Piggin 		return;
137592181f19SNick Piggin 	}
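
	/*
	 * Illustration of the expand_stack() path: a push to an address
	 * just below the stack VMA.  find_vma() returns the stack VMA
	 * with vm_start still above the address; because the VMA is
	 * VM_GROWSDOWN, expand_stack() grows it downwards to cover the
	 * address and the access continues as a normal demand fault.
	 */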
137692181f19SNick Piggin 
1377c61e211dSHarvey Harrison 	/*
1378c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1379c61e211dSHarvey Harrison 	 * we can handle it.
1380c61e211dSHarvey Harrison 	 */
1381c61e211dSHarvey Harrison good_area:
13820ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
13830ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
138492181f19SNick Piggin 		return;
1385c61e211dSHarvey Harrison 	}
1386c61e211dSHarvey Harrison 
1387c61e211dSHarvey Harrison 	/*
1388c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1389c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
13909a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1391c1e8d7c6SMichel Lespinasse 	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1392cb0631fdSVlastimil Babka 	 *
1393c1e8d7c6SMichel Lespinasse 	 * Note that handle_userfault() may also release and reacquire mmap_lock
1394cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1395cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1396cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1397cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1398cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1399c61e211dSHarvey Harrison 	 */
1400968614fcSPeter Xu 	fault = handle_mm_fault(vma, address, flags, regs);
14012d4a7167SIngo Molnar 
140239678191SPeter Xu 	/* Quick path to respond to signals */
140339678191SPeter Xu 	if (fault_signal_pending(fault, regs)) {
140439678191SPeter Xu 		if (!user_mode(regs))
140539678191SPeter Xu 			no_context(regs, hw_error_code, address, SIGBUS,
140639678191SPeter Xu 				   BUS_ADRERR);
140739678191SPeter Xu 		return;
140839678191SPeter Xu 	}
140939678191SPeter Xu 
14103a13c4d7SJohannes Weiner 	/*
1411c1e8d7c6SMichel Lespinasse 	 * If we need to retry the mmap_lock has already been released,
141226178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
141326178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
14143a13c4d7SJohannes Weiner 	 */
141539678191SPeter Xu 	if (unlikely((fault & VM_FAULT_RETRY) &&
141639678191SPeter Xu 		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
141726178ec1SLinus Torvalds 		flags |= FAULT_FLAG_TRIED;
141826178ec1SLinus Torvalds 		goto retry;
141926178ec1SLinus Torvalds 	}
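
	/*
	 * Retry walk-through, roughly: the first attempt carries
	 * FAULT_FLAG_ALLOW_RETRY (part of FAULT_FLAG_DEFAULT).  If the
	 * fault must wait, e.g. for page I/O, the fault handler can drop
	 * mmap_lock and return VM_FAULT_RETRY; we then retake the lock
	 * and repeat with FAULT_FLAG_TRIED added so the handler knows an
	 * attempt has already been made.
	 */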
142026178ec1SLinus Torvalds 
1421d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
142226178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
14230ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
142437b23e05SKOSAKI Motohiro 		return;
142537b23e05SKOSAKI Motohiro 	}
142637b23e05SKOSAKI Motohiro 
14278c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1428c61e211dSHarvey Harrison }
1429aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1430aa37c51bSDave Hansen 
1431a0d14b89SPeter Zijlstra static __always_inline void
1432a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1433a0d14b89SPeter Zijlstra 			 unsigned long address)
1434d34603b0SSeiji Aguchi {
1435a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1436a0d14b89SPeter Zijlstra 		return;
1437a0d14b89SPeter Zijlstra 
1438d34603b0SSeiji Aguchi 	if (user_mode(regs))
1439d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1440d34603b0SSeiji Aguchi 	else
1441d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1442d34603b0SSeiji Aguchi }
1443d34603b0SSeiji Aguchi 
144491eeafeaSThomas Gleixner static __always_inline void
144591eeafeaSThomas Gleixner handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1446ee6352b2SFrederic Weisbecker 			      unsigned long address)
144711a7ffb0SThomas Gleixner {
144891eeafeaSThomas Gleixner 	trace_page_fault_entries(regs, error_code, address);
144991eeafeaSThomas Gleixner 
145091eeafeaSThomas Gleixner 	if (unlikely(kmmio_fault(regs, address)))
145191eeafeaSThomas Gleixner 		return;
145291eeafeaSThomas Gleixner 
145391eeafeaSThomas Gleixner 	/* Was the fault on kernel-controlled part of the address space? */
145491eeafeaSThomas Gleixner 	if (unlikely(fault_in_kernel_space(address))) {
145591eeafeaSThomas Gleixner 		do_kern_addr_fault(regs, error_code, address);
145691eeafeaSThomas Gleixner 	} else {
145791eeafeaSThomas Gleixner 		do_user_addr_fault(regs, error_code, address);
145891eeafeaSThomas Gleixner 		/*
145991eeafeaSThomas Gleixner 		 * User address page fault handling might have re-enabled
146091eeafeaSThomas Gleixner 		 * interrupts. Fixing up all potential exit points of
146191eeafeaSThomas Gleixner 		 * do_user_addr_fault() and its leaf functions is just not
146291eeafeaSThomas Gleixner 		 * doable w/o creating an unholy mess or turning the code
146391eeafeaSThomas Gleixner 		 * upside down.
146491eeafeaSThomas Gleixner 		 */
146591eeafeaSThomas Gleixner 		local_irq_disable();
146691eeafeaSThomas Gleixner 	}
146791eeafeaSThomas Gleixner }
146891eeafeaSThomas Gleixner 
146991eeafeaSThomas Gleixner DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
147091eeafeaSThomas Gleixner {
147191eeafeaSThomas Gleixner 	unsigned long address = read_cr2();
1472a27a0a55SThomas Gleixner 	irqentry_state_t state;
147391eeafeaSThomas Gleixner 
1474da1c55f1SMichel Lespinasse 	prefetchw(&current->mm->mmap_lock);
147591eeafeaSThomas Gleixner 
1476ef68017eSAndy Lutomirski 	/*
147766af4f5cSVitaly Kuznetsov 	 * KVM uses the #PF vector to deliver 'page not present' events to guests
147866af4f5cSVitaly Kuznetsov 	 * (asynchronous page fault mechanism). The event happens when a
147966af4f5cSVitaly Kuznetsov 	 * userspace task is trying to access some valid (from guest's point of
148066af4f5cSVitaly Kuznetsov 	 * view) memory which is not currently mapped by the host (e.g. the
148166af4f5cSVitaly Kuznetsov 	 * memory is swapped out). Note, the corresponding "page ready" event
148266af4f5cSVitaly Kuznetsov 	 * which is injected when the memory becomes available, is delivered via
148366af4f5cSVitaly Kuznetsov 	 * an interrupt mechanism and not a #PF exception
148466af4f5cSVitaly Kuznetsov 	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1485ef68017eSAndy Lutomirski 	 *
1486ef68017eSAndy Lutomirski 	 * We are relying on the interrupted context being sane (valid RSP,
1487ef68017eSAndy Lutomirski 	 * relevant locks not held, etc.), which is fine as long as the
1488ef68017eSAndy Lutomirski 	 * interrupted context had IF=1.  We are also relying on the KVM
1489ef68017eSAndy Lutomirski 	 * async pf type field and CR2 being read consistently instead of
1490ef68017eSAndy Lutomirski 	 * getting values from real and async page faults mixed up.
1491ef68017eSAndy Lutomirski 	 *
1492ef68017eSAndy Lutomirski 	 * Fingers crossed.
149391eeafeaSThomas Gleixner 	 *
149491eeafeaSThomas Gleixner 	 * The async #PF handling code takes care of idtentry handling
149591eeafeaSThomas Gleixner 	 * itself.
1496ef68017eSAndy Lutomirski 	 */
1497ef68017eSAndy Lutomirski 	if (kvm_handle_async_pf(regs, (u32)address))
1498ef68017eSAndy Lutomirski 		return;
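
	/*
	 * In the async case, CR2 carries a 32-bit token identifying the
	 * outstanding page rather than a real faulting address, which is
	 * why only the low 32 bits of 'address' are passed above.
	 */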
1499ef68017eSAndy Lutomirski 
1500ca4c6a98SThomas Gleixner 	/*
150191eeafeaSThomas Gleixner 	 * Entry handling for valid #PF from kernel mode is slightly
150291eeafeaSThomas Gleixner 	 * different: RCU is already watching and rcu_irq_enter() must not
150391eeafeaSThomas Gleixner 	 * be invoked because a kernel fault on a user space address might
150491eeafeaSThomas Gleixner 	 * sleep.
150591eeafeaSThomas Gleixner 	 *
150691eeafeaSThomas Gleixner 	 * In case the fault hit an RCU idle region, the conditional entry
150791eeafeaSThomas Gleixner 	 * code re-enables RCU to avoid subsequent wreckage, which helps
150891eeafeaSThomas Gleixner 	 * debuggability.
1509ca4c6a98SThomas Gleixner 	 */
1510a27a0a55SThomas Gleixner 	state = irqentry_enter(regs);
151191eeafeaSThomas Gleixner 
151291eeafeaSThomas Gleixner 	instrumentation_begin();
151391eeafeaSThomas Gleixner 	handle_page_fault(regs, error_code, address);
151491eeafeaSThomas Gleixner 	instrumentation_end();
151591eeafeaSThomas Gleixner 
1516a27a0a55SThomas Gleixner 	irqentry_exit(regs, state);
1517ca4c6a98SThomas Gleixner }
1518