xref: /openbmc/linux/arch/x86/mm/fault.c (revision 74faeee06db81a06add0def6a394210c8fef0ab7)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c61e211dSHarvey Harrison /*
3c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
4c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
5f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
6c61e211dSHarvey Harrison  */
7a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
868db0cf1SIngo Molnar #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
9a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
104cdf8dbeSLinus Torvalds #include <linux/extable.h>		/* search_exception_tables	*/
1157c8a661SMike Rapoport #include <linux/memblock.h>		/* max_low_pfn			*/
129326638cSMasami Hiramatsu #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
13a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
14cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
15f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
16268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
193425d934SSai Praneeth #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
2050a7ca3cSSouptick Joarder #include <linux/mm_types.h>
21c61e211dSHarvey Harrison 
22019132ffSDave Hansen #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
23a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
24f40c3300SAndy Lutomirski #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
25f40c3300SAndy Lutomirski #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
26ba3e127eSBrian Gerst #include <asm/vm86.h>			/* struct vm86			*/
27019132ffSDave Hansen #include <asm/mmu_context.h>		/* vma_pkey()			*/
283425d934SSai Praneeth #include <asm/efi.h>			/* efi_recover_from_page_fault()*/
29a1a371c4SAndy Lutomirski #include <asm/desc.h>			/* store_idt(), ...		*/
30d876b673SThomas Gleixner #include <asm/cpu_entry_area.h>		/* exception stack		*/
31186525bdSIngo Molnar #include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
32ef68017eSAndy Lutomirski #include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
33c61e211dSHarvey Harrison 
34d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS
35d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h>
36d34603b0SSeiji Aguchi 
37c61e211dSHarvey Harrison /*
38b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
39b319eed0SIngo Molnar  * handled by mmiotrace:
40b814d41fSIngo Molnar  */
419326638cSMasami Hiramatsu static nokprobe_inline int
4262c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
4386069782SPekka Paalanen {
440fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
450fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
460fd0e3daSPekka Paalanen 			return -1;
470fd0e3daSPekka Paalanen 	return 0;
4886069782SPekka Paalanen }
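
/*
 * Illustrative sketch (not part of the original file): the kernel-address
 * fault path calls this helper first and bails out when mmiotrace claims
 * the fault, roughly:
 *
 *	if (kmmio_fault(regs, address))
 *		return;
 */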
4986069782SPekka Paalanen 
50c61e211dSHarvey Harrison /*
512d4a7167SIngo Molnar  * Prefetch quirks:
522d4a7167SIngo Molnar  *
532d4a7167SIngo Molnar  * 32-bit mode:
542d4a7167SIngo Molnar  *
55c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
56c61e211dSHarvey Harrison  *   Check that here and ignore it.
57c61e211dSHarvey Harrison  *
582d4a7167SIngo Molnar  * 64-bit mode:
592d4a7167SIngo Molnar  *
60c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
61c61e211dSHarvey Harrison  *   Check that here and ignore it.
62c61e211dSHarvey Harrison  *
632d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
64c61e211dSHarvey Harrison  */
65107a0367SIngo Molnar static inline int
66107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
67107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
68c61e211dSHarvey Harrison {
69107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
70107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
71c61e211dSHarvey Harrison 
72c61e211dSHarvey Harrison 	switch (instr_hi) {
73c61e211dSHarvey Harrison 	case 0x20:
74c61e211dSHarvey Harrison 	case 0x30:
75c61e211dSHarvey Harrison 		/*
76c61e211dSHarvey Harrison 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
77c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
78c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present, so
79c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway.
80c61e211dSHarvey Harrison 		 */
81107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
82c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
83c61e211dSHarvey Harrison 	case 0x40:
84c61e211dSHarvey Harrison 		/*
85c61e211dSHarvey Harrison 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
86c61e211dSHarvey Harrison 		 * We need to figure out under which instruction mode the
87c61e211dSHarvey Harrison 		 * instruction was issued. We could check the LDT for lm,
88c61e211dSHarvey Harrison 		 * but for now it's good enough to assume that long
89c61e211dSHarvey Harrison 		 * mode only uses well-known segments or the kernel.
90c61e211dSHarvey Harrison 		 */
91318f5a2aSAndy Lutomirski 		return (!user_mode(regs) || user_64bit_mode(regs));
92c61e211dSHarvey Harrison #endif
93c61e211dSHarvey Harrison 	case 0x60:
94c61e211dSHarvey Harrison 		/* 0x64 through 0x67 are valid prefixes in all modes. */
95107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
96c61e211dSHarvey Harrison 	case 0xF0:
97c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
98107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
99c61e211dSHarvey Harrison 	case 0x00:
100c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
10125f12ae4SChristoph Hellwig 		if (get_kernel_nofault(opcode, instr))
102107a0367SIngo Molnar 			return 0;
103107a0367SIngo Molnar 
104107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
105107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
106107a0367SIngo Molnar 		return 0;
107107a0367SIngo Molnar 	default:
108107a0367SIngo Molnar 		return 0;
109107a0367SIngo Molnar 	}
110107a0367SIngo Molnar }
111107a0367SIngo Molnar 
112107a0367SIngo Molnar static int
113107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
114107a0367SIngo Molnar {
115107a0367SIngo Molnar 	unsigned char *max_instr;
116107a0367SIngo Molnar 	unsigned char *instr;
117107a0367SIngo Molnar 	int prefetch = 0;
118107a0367SIngo Molnar 
119107a0367SIngo Molnar 	/*
120107a0367SIngo Molnar 	 * If it was an exec (instruction fetch) fault on an NX page, then
121107a0367SIngo Molnar 	 * do not ignore the fault:
122107a0367SIngo Molnar 	 */
1231067f030SRicardo Neri 	if (error_code & X86_PF_INSTR)
124107a0367SIngo Molnar 		return 0;
125107a0367SIngo Molnar 
126107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
127107a0367SIngo Molnar 	max_instr = instr + 15;
128107a0367SIngo Molnar 
129d31bf07fSAndy Lutomirski 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
130107a0367SIngo Molnar 		return 0;
131107a0367SIngo Molnar 
132107a0367SIngo Molnar 	while (instr < max_instr) {
133107a0367SIngo Molnar 		unsigned char opcode;
134c61e211dSHarvey Harrison 
13525f12ae4SChristoph Hellwig 		if (get_kernel_nofault(opcode, instr))
136c61e211dSHarvey Harrison 			break;
137107a0367SIngo Molnar 
138107a0367SIngo Molnar 		instr++;
139107a0367SIngo Molnar 
140107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
141c61e211dSHarvey Harrison 			break;
142c61e211dSHarvey Harrison 	}
143c61e211dSHarvey Harrison 	return prefetch;
144c61e211dSHarvey Harrison }
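
/*
 * Illustrative sketch (not part of the original file): the byte patterns
 * the loop above hunts for.  AMD's PREFETCH/PREFETCHW encode as 0x0F 0x0D
 * and the PREFETCHh family as 0x0F 0x18, optionally behind prefix bytes:
 *
 *	unsigned char insn[] = { 0x66, 0x0f, 0x0d };
 *
 * 0x66 passes the 0x60-case prefix check, 0x0F falls into the 0x00 case,
 * and the following 0x0D makes check_prefetch_opcode() set *prefetch.
 */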
145c61e211dSHarvey Harrison 
146f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
147f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1482d4a7167SIngo Molnar 
149f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
150f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
151f2f13a85SIngo Molnar {
152f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
153f2f13a85SIngo Molnar 	pgd_t *pgd_k;
154e0c4f675SKirill A. Shutemov 	p4d_t *p4d, *p4d_k;
155f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
156f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
157f2f13a85SIngo Molnar 
158f2f13a85SIngo Molnar 	pgd += index;
159f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
160f2f13a85SIngo Molnar 
161f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
162f2f13a85SIngo Molnar 		return NULL;
163f2f13a85SIngo Molnar 
164f2f13a85SIngo Molnar 	/*
165f2f13a85SIngo Molnar 	 * A set_pgd(pgd, *pgd_k) here would be useless on PAE
166f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() on non-PAE, as would
167e0c4f675SKirill A. Shutemov 	 * set_p4d()/set_pud().
168f2f13a85SIngo Molnar 	 */
169e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
170e0c4f675SKirill A. Shutemov 	p4d_k = p4d_offset(pgd_k, address);
171e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d_k))
172e0c4f675SKirill A. Shutemov 		return NULL;
173e0c4f675SKirill A. Shutemov 
174e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
175e0c4f675SKirill A. Shutemov 	pud_k = pud_offset(p4d_k, address);
176f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
177f2f13a85SIngo Molnar 		return NULL;
178f2f13a85SIngo Molnar 
179f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
180f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
1818e998fc2SJoerg Roedel 
1828e998fc2SJoerg Roedel 	if (pmd_present(*pmd) != pmd_present(*pmd_k))
1838e998fc2SJoerg Roedel 		set_pmd(pmd, *pmd_k);
1848e998fc2SJoerg Roedel 
185f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
186f2f13a85SIngo Molnar 		return NULL;
187b8bcfe99SJeremy Fitzhardinge 	else
18851b75b5bSJoerg Roedel 		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
189f2f13a85SIngo Molnar 
190f2f13a85SIngo Molnar 	return pmd_k;
191f2f13a85SIngo Molnar }
192f2f13a85SIngo Molnar 
1934819e15fSJoerg Roedel /*
1944819e15fSJoerg Roedel  *   Handle a fault on the vmalloc or module mapping area
1954819e15fSJoerg Roedel  *
1964819e15fSJoerg Roedel  *   This is needed because there is a race condition between the time
1974819e15fSJoerg Roedel  *   when the vmalloc mapping code updates the PMD to the point in time
1984819e15fSJoerg Roedel  *   where it synchronizes this update with the other page-tables in the
1994819e15fSJoerg Roedel  *   system.
2004819e15fSJoerg Roedel  *
2014819e15fSJoerg Roedel  *   In this race window another thread/CPU can map an area on the same
2024819e15fSJoerg Roedel  *   PMD, find it already present, and not synchronize it with the
2034819e15fSJoerg Roedel  *   rest of the system yet. As a result v[mz]alloc might return areas
2044819e15fSJoerg Roedel  *   which are not mapped in every page-table in the system, causing an
2054819e15fSJoerg Roedel  *   unhandled page-fault when they are accessed.
2064819e15fSJoerg Roedel  */
2074819e15fSJoerg Roedel static noinline int vmalloc_fault(unsigned long address)
2084819e15fSJoerg Roedel {
2094819e15fSJoerg Roedel 	unsigned long pgd_paddr;
2104819e15fSJoerg Roedel 	pmd_t *pmd_k;
2114819e15fSJoerg Roedel 	pte_t *pte_k;
2124819e15fSJoerg Roedel 
2134819e15fSJoerg Roedel 	/* Make sure we are in the vmalloc area: */
2144819e15fSJoerg Roedel 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
2154819e15fSJoerg Roedel 		return -1;
2164819e15fSJoerg Roedel 
2174819e15fSJoerg Roedel 	/*
2184819e15fSJoerg Roedel 	 * Synchronize this task's top level page-table
2194819e15fSJoerg Roedel 	 * with the 'reference' page table.
2204819e15fSJoerg Roedel 	 *
2214819e15fSJoerg Roedel 	 * Do _not_ use "current" here. We might be inside
2224819e15fSJoerg Roedel 	 * an interrupt in the middle of a task switch.
2234819e15fSJoerg Roedel 	 */
2244819e15fSJoerg Roedel 	pgd_paddr = read_cr3_pa();
2254819e15fSJoerg Roedel 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
2264819e15fSJoerg Roedel 	if (!pmd_k)
2274819e15fSJoerg Roedel 		return -1;
2284819e15fSJoerg Roedel 
2294819e15fSJoerg Roedel 	if (pmd_large(*pmd_k))
2304819e15fSJoerg Roedel 		return 0;
2314819e15fSJoerg Roedel 
2324819e15fSJoerg Roedel 	pte_k = pte_offset_kernel(pmd_k, address);
2334819e15fSJoerg Roedel 	if (!pte_present(*pte_k))
2344819e15fSJoerg Roedel 		return -1;
2354819e15fSJoerg Roedel 
2364819e15fSJoerg Roedel 	return 0;
2374819e15fSJoerg Roedel }
2384819e15fSJoerg Roedel NOKPROBE_SYMBOL(vmalloc_fault);
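
/*
 * A sketch of the race described above (timing is illustrative):
 *
 *	CPU 0 (vmalloc)				CPU 1 (process P)
 *	---------------				-----------------
 *	installs a new PMD in init_mm
 *						P touches the new area, but
 *						P's PGD lacks the PMD -> #PF
 *						vmalloc_fault() copies the
 *						entry from init_mm and resumes
 */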
2394819e15fSJoerg Roedel 
24086cf69f1SJoerg Roedel void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
241f2f13a85SIngo Molnar {
24286cf69f1SJoerg Roedel 	unsigned long addr;
243f2f13a85SIngo Molnar 
24486cf69f1SJoerg Roedel 	for (addr = start & PMD_MASK;
24586cf69f1SJoerg Roedel 	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
24686cf69f1SJoerg Roedel 	     addr += PMD_SIZE) {
247f2f13a85SIngo Molnar 		struct page *page;
248f2f13a85SIngo Molnar 
249a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
250f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
251617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
252617d34d9SJeremy Fitzhardinge 
253a79e53d8SAndrea Arcangeli 			/* the pgt_lock is only needed for Xen */
254617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
255617d34d9SJeremy Fitzhardinge 
256617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
25786cf69f1SJoerg Roedel 			vmalloc_sync_one(page_address(page), addr);
258617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
259f2f13a85SIngo Molnar 		}
260a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
261f2f13a85SIngo Molnar 	}
262f2f13a85SIngo Molnar }
263f2f13a85SIngo Molnar 
264f2f13a85SIngo Molnar /*
265f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
266f2f13a85SIngo Molnar  */
267f2f13a85SIngo Molnar static inline void
268f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
269f2f13a85SIngo Molnar 		 struct task_struct *tsk)
270f2f13a85SIngo Molnar {
2719fda6a06SBrian Gerst #ifdef CONFIG_VM86
272f2f13a85SIngo Molnar 	unsigned long bit;
273f2f13a85SIngo Molnar 
2749fda6a06SBrian Gerst 	if (!v8086_mode(regs) || !tsk->thread.vm86)
275f2f13a85SIngo Molnar 		return;
276f2f13a85SIngo Molnar 
277f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
278f2f13a85SIngo Molnar 	if (bit < 32)
2799fda6a06SBrian Gerst 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
2809fda6a06SBrian Gerst #endif
281f2f13a85SIngo Molnar }
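
/*
 * Worked example for the bitmap math above (hypothetical fault): a vm86
 * access to 0xA3000 gives bit = (0xA3000 - 0xA0000) >> PAGE_SHIFT = 3,
 * so bit 3 of screen_bitmap is set.  The 32-bit bitmap covers exactly
 * 0xA0000..0xBFFFF, the legacy VGA window.
 */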
282c61e211dSHarvey Harrison 
283087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
284087975b0SAkinobu Mita {
285087975b0SAkinobu Mita 	return pfn < max_low_pfn;
286087975b0SAkinobu Mita }
287087975b0SAkinobu Mita 
288cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
289c61e211dSHarvey Harrison {
2906c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
291087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
292e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
293e0c4f675SKirill A. Shutemov 	pud_t *pud;
294087975b0SAkinobu Mita 	pmd_t *pmd;
295087975b0SAkinobu Mita 	pte_t *pte;
2962d4a7167SIngo Molnar 
297c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
29839e48d9bSJan Beulich 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
299087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
300087975b0SAkinobu Mita 		goto out;
30139e48d9bSJan Beulich #define pr_pde pr_cont
30239e48d9bSJan Beulich #else
30339e48d9bSJan Beulich #define pr_pde pr_info
304c61e211dSHarvey Harrison #endif
305e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
306e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
307e0c4f675SKirill A. Shutemov 	pmd = pmd_offset(pud, address);
30839e48d9bSJan Beulich 	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
30939e48d9bSJan Beulich #undef pr_pde
310c61e211dSHarvey Harrison 
311c61e211dSHarvey Harrison 	/*
312c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
313c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
314c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
3152d4a7167SIngo Molnar 	 * it's allocated already:
316c61e211dSHarvey Harrison 	 */
317087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
318087975b0SAkinobu Mita 		goto out;
3192d4a7167SIngo Molnar 
320087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
32139e48d9bSJan Beulich 	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
322087975b0SAkinobu Mita out:
32339e48d9bSJan Beulich 	pr_cont("\n");
324f2f13a85SIngo Molnar }
325f2f13a85SIngo Molnar 
326f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
327f2f13a85SIngo Molnar 
328e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD
329f2f13a85SIngo Molnar static const char errata93_warning[] =
330ad361c98SJoe Perches KERN_ERR
331ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
332ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
333ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
334ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
335e05139f2SJan Beulich #endif
336f2f13a85SIngo Molnar 
337f2f13a85SIngo Molnar /*
338f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
339f2f13a85SIngo Molnar  */
340f2f13a85SIngo Molnar static inline void
341f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
342f2f13a85SIngo Molnar 		 struct task_struct *tsk)
343f2f13a85SIngo Molnar {
344f2f13a85SIngo Molnar }
345f2f13a85SIngo Molnar 
346f2f13a85SIngo Molnar static int bad_address(void *p)
347f2f13a85SIngo Molnar {
348f2f13a85SIngo Molnar 	unsigned long dummy;
349f2f13a85SIngo Molnar 
35025f12ae4SChristoph Hellwig 	return get_kernel_nofault(dummy, (unsigned long *)p);
351f2f13a85SIngo Molnar }
352f2f13a85SIngo Molnar 
353f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
354f2f13a85SIngo Molnar {
3556c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
356087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
357e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
358c61e211dSHarvey Harrison 	pud_t *pud;
359c61e211dSHarvey Harrison 	pmd_t *pmd;
360c61e211dSHarvey Harrison 	pte_t *pte;
361c61e211dSHarvey Harrison 
3622d4a7167SIngo Molnar 	if (bad_address(pgd))
3632d4a7167SIngo Molnar 		goto bad;
3642d4a7167SIngo Molnar 
36539e48d9bSJan Beulich 	pr_info("PGD %lx ", pgd_val(*pgd));
3662d4a7167SIngo Molnar 
3672d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
3682d4a7167SIngo Molnar 		goto out;
369c61e211dSHarvey Harrison 
370e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
371e0c4f675SKirill A. Shutemov 	if (bad_address(p4d))
372e0c4f675SKirill A. Shutemov 		goto bad;
373e0c4f675SKirill A. Shutemov 
37439e48d9bSJan Beulich 	pr_cont("P4D %lx ", p4d_val(*p4d));
375e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d) || p4d_large(*p4d))
376e0c4f675SKirill A. Shutemov 		goto out;
377e0c4f675SKirill A. Shutemov 
378e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
3792d4a7167SIngo Molnar 	if (bad_address(pud))
3802d4a7167SIngo Molnar 		goto bad;
3812d4a7167SIngo Molnar 
38239e48d9bSJan Beulich 	pr_cont("PUD %lx ", pud_val(*pud));
383b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
3842d4a7167SIngo Molnar 		goto out;
385c61e211dSHarvey Harrison 
386c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
3872d4a7167SIngo Molnar 	if (bad_address(pmd))
3882d4a7167SIngo Molnar 		goto bad;
3892d4a7167SIngo Molnar 
39039e48d9bSJan Beulich 	pr_cont("PMD %lx ", pmd_val(*pmd));
3912d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
3922d4a7167SIngo Molnar 		goto out;
393c61e211dSHarvey Harrison 
394c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
3952d4a7167SIngo Molnar 	if (bad_address(pte))
3962d4a7167SIngo Molnar 		goto bad;
3972d4a7167SIngo Molnar 
39839e48d9bSJan Beulich 	pr_cont("PTE %lx", pte_val(*pte));
3992d4a7167SIngo Molnar out:
40039e48d9bSJan Beulich 	pr_cont("\n");
401c61e211dSHarvey Harrison 	return;
402c61e211dSHarvey Harrison bad:
40339e48d9bSJan Beulich 	pr_info("BAD\n");
404c61e211dSHarvey Harrison }
405c61e211dSHarvey Harrison 
406f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
407c61e211dSHarvey Harrison 
4082d4a7167SIngo Molnar /*
4092d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
4102d4a7167SIngo Molnar  *
4112d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
4122d4a7167SIngo Molnar  * to avoid corruption of the 64-bit RIP register on C stepping K8.
4132d4a7167SIngo Molnar  *
4142d4a7167SIngo Molnar  * A lot of BIOSes that didn't get tested properly miss this.
4152d4a7167SIngo Molnar  *
4162d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
4172d4a7167SIngo Molnar  * Try to work around it here.
4182d4a7167SIngo Molnar  *
4192d4a7167SIngo Molnar  * Note that we only handle faults in the kernel here.
4202d4a7167SIngo Molnar  * Does nothing on 32-bit.
421c61e211dSHarvey Harrison  */
422c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
423c61e211dSHarvey Harrison {
424e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
425e05139f2SJan Beulich 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
426e05139f2SJan Beulich 	    || boot_cpu_data.x86 != 0xf)
427e05139f2SJan Beulich 		return 0;
428e05139f2SJan Beulich 
429c61e211dSHarvey Harrison 	if (address != regs->ip)
430c61e211dSHarvey Harrison 		return 0;
4312d4a7167SIngo Molnar 
432c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
433c61e211dSHarvey Harrison 		return 0;
4342d4a7167SIngo Molnar 
435c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
436c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
437c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
438a454ab31SIngo Molnar 		printk_once(errata93_warning);
439c61e211dSHarvey Harrison 		regs->ip = address;
440c61e211dSHarvey Harrison 		return 1;
441c61e211dSHarvey Harrison 	}
442c61e211dSHarvey Harrison #endif
443c61e211dSHarvey Harrison 	return 0;
444c61e211dSHarvey Harrison }
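
/*
 * Worked example (hypothetical addresses): if the CPU was about to jump
 * to 0xffffffff81000000 but erratum #93 clears the upper half, the fault
 * arrives with address == regs->ip == 0x0000000081000000.  OR-ing in
 * 0xffffffff00000000 reconstructs the intended kernel address; if it
 * falls within _stext.._etext or the module area, execution is resumed
 * there.
 */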
445c61e211dSHarvey Harrison 
446c61e211dSHarvey Harrison /*
4472d4a7167SIngo Molnar  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
4482d4a7167SIngo Molnar  * to illegal addresses >4GB.
4492d4a7167SIngo Molnar  *
4502d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
4512d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
452c61e211dSHarvey Harrison  * segment in the LDT is in compatibility mode.
453c61e211dSHarvey Harrison  */
454c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
455c61e211dSHarvey Harrison {
456c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
4572d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
458c61e211dSHarvey Harrison 		return 1;
459c61e211dSHarvey Harrison #endif
460c61e211dSHarvey Harrison 	return 0;
461c61e211dSHarvey Harrison }
462c61e211dSHarvey Harrison 
4633e77abdaSThomas Gleixner /* Pentium F0 0F C7 C8 bug workaround: */
464c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
465c61e211dSHarvey Harrison {
466c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
4673e77abdaSThomas Gleixner 	if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
46849893c5cSThomas Gleixner 		handle_invalid_op(regs);
469c61e211dSHarvey Harrison 		return 1;
470c61e211dSHarvey Harrison 	}
471c61e211dSHarvey Harrison #endif
472c61e211dSHarvey Harrison 	return 0;
473c61e211dSHarvey Harrison }
474c61e211dSHarvey Harrison 
475a1a371c4SAndy Lutomirski static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
476a1a371c4SAndy Lutomirski {
477a1a371c4SAndy Lutomirski 	u32 offset = (index >> 3) * sizeof(struct desc_struct);
478a1a371c4SAndy Lutomirski 	unsigned long addr;
479a1a371c4SAndy Lutomirski 	struct ldttss_desc desc;
480a1a371c4SAndy Lutomirski 
481a1a371c4SAndy Lutomirski 	if (index == 0) {
482a1a371c4SAndy Lutomirski 		pr_alert("%s: NULL\n", name);
483a1a371c4SAndy Lutomirski 		return;
484a1a371c4SAndy Lutomirski 	}
485a1a371c4SAndy Lutomirski 
486a1a371c4SAndy Lutomirski 	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
487a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
488a1a371c4SAndy Lutomirski 		return;
489a1a371c4SAndy Lutomirski 	}
490a1a371c4SAndy Lutomirski 
491fe557319SChristoph Hellwig 	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
492a1a371c4SAndy Lutomirski 			      sizeof(struct ldttss_desc))) {
493a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
494a1a371c4SAndy Lutomirski 			 name, index);
495a1a371c4SAndy Lutomirski 		return;
496a1a371c4SAndy Lutomirski 	}
497a1a371c4SAndy Lutomirski 
4985ccd3528SColin Ian King 	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
499a1a371c4SAndy Lutomirski #ifdef CONFIG_X86_64
500a1a371c4SAndy Lutomirski 	addr |= ((u64)desc.base3 << 32);
501a1a371c4SAndy Lutomirski #endif
502a1a371c4SAndy Lutomirski 	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
503a1a371c4SAndy Lutomirski 		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
504a1a371c4SAndy Lutomirski }
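
/*
 * Worked example for the base reassembly above (hypothetical descriptor):
 * base0=0x8000, base1=0x12, base2=0x34 and base3=0x1 yield
 * 0x8000 | (0x12 << 16) | (0x34 << 24) | (0x1ULL << 32) = 0x134128000.
 * The scattered fields are a legacy of the 286/386 descriptor layout.
 */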
505a1a371c4SAndy Lutomirski 
5062d4a7167SIngo Molnar static void
507a2aa52abSIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
508c61e211dSHarvey Harrison {
509c61e211dSHarvey Harrison 	if (!oops_may_print())
510c61e211dSHarvey Harrison 		return;
511c61e211dSHarvey Harrison 
5121067f030SRicardo Neri 	if (error_code & X86_PF_INSTR) {
51393809be8SHarvey Harrison 		unsigned int level;
514426e34ccSMatt Fleming 		pgd_t *pgd;
515426e34ccSMatt Fleming 		pte_t *pte;
5162d4a7167SIngo Molnar 
5176c690ee1SAndy Lutomirski 		pgd = __va(read_cr3_pa());
518426e34ccSMatt Fleming 		pgd += pgd_index(address);
519426e34ccSMatt Fleming 
520426e34ccSMatt Fleming 		pte = lookup_address_in_pgd(pgd, address, &level);
521c61e211dSHarvey Harrison 
5228f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
523d79d0d8aSDmitry Vyukov 			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
524d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
525eff50c34SJiri Kosina 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
526eff50c34SJiri Kosina 				(pgd_flags(*pgd) & _PAGE_USER) &&
5271e02ce4cSAndy Lutomirski 				(__read_cr4() & X86_CR4_SMEP))
528d79d0d8aSDmitry Vyukov 			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
529d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
530c61e211dSHarvey Harrison 	}
531fd40d6e3SHarvey Harrison 
532f28b11a2SSean Christopherson 	if (address < PAGE_SIZE && !user_mode(regs))
533ea2f8d60SBorislav Petkov 		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
534f28b11a2SSean Christopherson 			(void *)address);
535f28b11a2SSean Christopherson 	else
536ea2f8d60SBorislav Petkov 		pr_alert("BUG: unable to handle page fault for address: %px\n",
5374188f063SDmitry Vyukov 			(void *)address);
5382d4a7167SIngo Molnar 
539ea2f8d60SBorislav Petkov 	pr_alert("#PF: %s %s in %s mode\n",
54018ea35c5SSean Christopherson 		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
54118ea35c5SSean Christopherson 		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
54218ea35c5SSean Christopherson 		 (error_code & X86_PF_WRITE) ? "write access" :
54318ea35c5SSean Christopherson 					       "read access",
54418ea35c5SSean Christopherson 			     user_mode(regs) ? "user" : "kernel");
54518ea35c5SSean Christopherson 	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
54618ea35c5SSean Christopherson 		 !(error_code & X86_PF_PROT) ? "not-present page" :
54718ea35c5SSean Christopherson 		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
54818ea35c5SSean Christopherson 		 (error_code & X86_PF_PK)    ? "protection keys violation" :
54918ea35c5SSean Christopherson 					       "permissions violation");
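
	/*
	 * Example output (hypothetical fault): a kernel write to an
	 * unmapped address has error_code == 0x0002 (X86_PF_WRITE only),
	 * so the two lines above read:
	 *
	 *	#PF: supervisor write access in kernel mode
	 *	#PF: error_code(0x0002) - not-present page
	 */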
550a2aa52abSIngo Molnar 
551a1a371c4SAndy Lutomirski 	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
552a1a371c4SAndy Lutomirski 		struct desc_ptr idt, gdt;
553a1a371c4SAndy Lutomirski 		u16 ldtr, tr;
554a1a371c4SAndy Lutomirski 
555a1a371c4SAndy Lutomirski 		/*
556a1a371c4SAndy Lutomirski 		 * This can happen for quite a few reasons.  The more obvious
557a1a371c4SAndy Lutomirski 		 * ones are faults accessing the GDT or LDT.  Perhaps
558a1a371c4SAndy Lutomirski 		 * surprisingly, if the CPU tries to deliver a benign or
559a1a371c4SAndy Lutomirski 		 * contributory exception from user code and gets a page fault
560a1a371c4SAndy Lutomirski 		 * during delivery, the page fault can be delivered as though
561a1a371c4SAndy Lutomirski 		 * it originated directly from user code.  This could happen
562a1a371c4SAndy Lutomirski 		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
563a1a371c4SAndy Lutomirski 		 * kernel or IST stack.
564a1a371c4SAndy Lutomirski 		 */
565a1a371c4SAndy Lutomirski 		store_idt(&idt);
566a1a371c4SAndy Lutomirski 
567a1a371c4SAndy Lutomirski 		/* Usable even on Xen PV -- it's just slow. */
568a1a371c4SAndy Lutomirski 		native_store_gdt(&gdt);
569a1a371c4SAndy Lutomirski 
570a1a371c4SAndy Lutomirski 		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
571a1a371c4SAndy Lutomirski 			 idt.address, idt.size, gdt.address, gdt.size);
572a1a371c4SAndy Lutomirski 
573a1a371c4SAndy Lutomirski 		store_ldt(ldtr);
574a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "LDTR", ldtr);
575a1a371c4SAndy Lutomirski 
576a1a371c4SAndy Lutomirski 		store_tr(tr);
577a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "TR", tr);
578a1a371c4SAndy Lutomirski 	}
579a1a371c4SAndy Lutomirski 
580c61e211dSHarvey Harrison 	dump_pagetable(address);
581c61e211dSHarvey Harrison }
582c61e211dSHarvey Harrison 
5832d4a7167SIngo Molnar static noinline void
5842d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
5852d4a7167SIngo Molnar 	    unsigned long address)
586c61e211dSHarvey Harrison {
5872d4a7167SIngo Molnar 	struct task_struct *tsk;
5882d4a7167SIngo Molnar 	unsigned long flags;
5892d4a7167SIngo Molnar 	int sig;
5902d4a7167SIngo Molnar 
5912d4a7167SIngo Molnar 	flags = oops_begin();
5922d4a7167SIngo Molnar 	tsk = current;
5932d4a7167SIngo Molnar 	sig = SIGKILL;
594c61e211dSHarvey Harrison 
595c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
59692181f19SNick Piggin 	       tsk->comm, address);
597c61e211dSHarvey Harrison 	dump_pagetable(address);
5982d4a7167SIngo Molnar 
599c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
600874d93d1SAlexander van Heukelum 		sig = 0;
6012d4a7167SIngo Molnar 
602874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
603c61e211dSHarvey Harrison }
604c61e211dSHarvey Harrison 
605e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address,
606e49d3cbeSAndy Lutomirski 				unsigned long error_code)
607e49d3cbeSAndy Lutomirski {
608e49d3cbeSAndy Lutomirski 	struct task_struct *tsk = current;
609e49d3cbeSAndy Lutomirski 
610e49d3cbeSAndy Lutomirski 	/*
611e49d3cbeSAndy Lutomirski 	 * To avoid leaking information about the kernel page
612e49d3cbeSAndy Lutomirski 	 * table layout, pretend that user-mode accesses to
613e49d3cbeSAndy Lutomirski 	 * kernel addresses are always protection faults.
614e0a446ceSAndy Lutomirski 	 *
615e0a446ceSAndy Lutomirski 	 * NB: This means that failed vsyscalls with vsyscall=none
616e0a446ceSAndy Lutomirski 	 * will have the PROT bit.  This doesn't leak any
617e0a446ceSAndy Lutomirski 	 * information and does not appear to cause any problems.
618e49d3cbeSAndy Lutomirski 	 */
619e49d3cbeSAndy Lutomirski 	if (address >= TASK_SIZE_MAX)
620e49d3cbeSAndy Lutomirski 		error_code |= X86_PF_PROT;
621e49d3cbeSAndy Lutomirski 
622e49d3cbeSAndy Lutomirski 	tsk->thread.trap_nr = X86_TRAP_PF;
623e49d3cbeSAndy Lutomirski 	tsk->thread.error_code = error_code | X86_PF_USER;
624e49d3cbeSAndy Lutomirski 	tsk->thread.cr2 = address;
625e49d3cbeSAndy Lutomirski }
626e49d3cbeSAndy Lutomirski 
6272d4a7167SIngo Molnar static noinline void
6282d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
6294fc34901SAndy Lutomirski 	   unsigned long address, int signal, int si_code)
63092181f19SNick Piggin {
63192181f19SNick Piggin 	struct task_struct *tsk = current;
63292181f19SNick Piggin 	unsigned long flags;
63392181f19SNick Piggin 	int sig;
63492181f19SNick Piggin 
635ebb53e25SAndy Lutomirski 	if (user_mode(regs)) {
636ebb53e25SAndy Lutomirski 		/*
637ebb53e25SAndy Lutomirski 		 * This is an implicit supervisor-mode access from user
638ebb53e25SAndy Lutomirski 		 * mode.  Bypass all the kernel-mode recovery code and just
639ebb53e25SAndy Lutomirski 		 * OOPS.
640ebb53e25SAndy Lutomirski 		 */
641ebb53e25SAndy Lutomirski 		goto oops;
642ebb53e25SAndy Lutomirski 	}
643ebb53e25SAndy Lutomirski 
64492181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
64581fd9c18SJann Horn 	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
646c026b359SPeter Zijlstra 		/*
647c026b359SPeter Zijlstra 		 * Any interrupt that takes a fault gets the fixup. This makes
648c026b359SPeter Zijlstra 		 * the below recursive fault logic only apply to faults from
649c026b359SPeter Zijlstra 		 * task context.
650c026b359SPeter Zijlstra 		 */
651c026b359SPeter Zijlstra 		if (in_interrupt())
652c026b359SPeter Zijlstra 			return;
653c026b359SPeter Zijlstra 
654c026b359SPeter Zijlstra 		/*
655c026b359SPeter Zijlstra 		 * Per the above we're !in_interrupt(), aka. task context.
656c026b359SPeter Zijlstra 		 *
657c026b359SPeter Zijlstra 		 * In this case we need to make sure we're not recursively
658c026b359SPeter Zijlstra 		 * faulting through the emulate_vsyscall() logic.
659c026b359SPeter Zijlstra 		 */
6602a53ccbcSIngo Molnar 		if (current->thread.sig_on_uaccess_err && signal) {
661e49d3cbeSAndy Lutomirski 			set_signal_archinfo(address, error_code);
6624fc34901SAndy Lutomirski 
6634fc34901SAndy Lutomirski 			/* XXX: hwpoison faults will set the wrong code. */
6642e1661d2SEric W. Biederman 			force_sig_fault(signal, si_code, (void __user *)address);
6654fc34901SAndy Lutomirski 		}
666c026b359SPeter Zijlstra 
667c026b359SPeter Zijlstra 		/*
668c026b359SPeter Zijlstra 		 * Barring that, we can do the fixup and be happy.
669c026b359SPeter Zijlstra 		 */
67092181f19SNick Piggin 		return;
6714fc34901SAndy Lutomirski 	}
67292181f19SNick Piggin 
6736271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK
6746271cfdfSAndy Lutomirski 	/*
6756271cfdfSAndy Lutomirski 	 * Stack overflow?  During boot, we can fault near the initial
6766271cfdfSAndy Lutomirski 	 * stack in the direct map, but that's not an overflow -- check
6776271cfdfSAndy Lutomirski 	 * that we're in vmalloc space to avoid this.
6786271cfdfSAndy Lutomirski 	 */
6796271cfdfSAndy Lutomirski 	if (is_vmalloc_addr((void *)address) &&
6806271cfdfSAndy Lutomirski 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
6816271cfdfSAndy Lutomirski 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
682d876b673SThomas Gleixner 		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
6836271cfdfSAndy Lutomirski 		/*
6846271cfdfSAndy Lutomirski 		 * We're likely to be running with very little stack space
6856271cfdfSAndy Lutomirski 		 * left.  It's plausible that we'd hit this condition but
6866271cfdfSAndy Lutomirski 		 * double-fault even before we get this far, in which case
6876271cfdfSAndy Lutomirski 		 * we're fine: the double-fault handler will deal with it.
6886271cfdfSAndy Lutomirski 		 *
6896271cfdfSAndy Lutomirski 		 * We don't want to make it all the way into the oops code
6906271cfdfSAndy Lutomirski 		 * and then double-fault, though, because we're likely to
6916271cfdfSAndy Lutomirski 		 * break the console driver and lose most of the stack dump.
6926271cfdfSAndy Lutomirski 		 */
6936271cfdfSAndy Lutomirski 		asm volatile ("movq %[stack], %%rsp\n\t"
6946271cfdfSAndy Lutomirski 			      "call handle_stack_overflow\n\t"
6956271cfdfSAndy Lutomirski 			      "1: jmp 1b"
696f5caf621SJosh Poimboeuf 			      : ASM_CALL_CONSTRAINT
6976271cfdfSAndy Lutomirski 			      : "D" ("kernel stack overflow (page fault)"),
6986271cfdfSAndy Lutomirski 				"S" (regs), "d" (address),
6996271cfdfSAndy Lutomirski 				[stack] "rm" (stack));
7006271cfdfSAndy Lutomirski 		unreachable();
7016271cfdfSAndy Lutomirski 	}
7026271cfdfSAndy Lutomirski #endif
7036271cfdfSAndy Lutomirski 
70492181f19SNick Piggin 	/*
7052d4a7167SIngo Molnar 	 * 32-bit:
7062d4a7167SIngo Molnar 	 *
70792181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
70892181f19SNick Piggin 	 *   had been triggered by is_prefetch, fixup_exception would have
70992181f19SNick Piggin 	 *   handled it.
71092181f19SNick Piggin 	 *
7112d4a7167SIngo Molnar 	 * 64-bit:
7122d4a7167SIngo Molnar 	 *
71392181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
71492181f19SNick Piggin 	 */
71592181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
71692181f19SNick Piggin 		return;
71792181f19SNick Piggin 
71892181f19SNick Piggin 	if (is_errata93(regs, address))
71992181f19SNick Piggin 		return;
72092181f19SNick Piggin 
72192181f19SNick Piggin 	/*
7223425d934SSai Praneeth 	 * Buggy firmware could access regions which might page fault; try to
7233425d934SSai Praneeth 	 * recover from such faults.
7243425d934SSai Praneeth 	 */
7253425d934SSai Praneeth 	if (IS_ENABLED(CONFIG_EFI))
7263425d934SSai Praneeth 		efi_recover_from_page_fault(address);
7273425d934SSai Praneeth 
728ebb53e25SAndy Lutomirski oops:
7293425d934SSai Praneeth 	/*
73092181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
7312d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
73292181f19SNick Piggin 	 */
73392181f19SNick Piggin 	flags = oops_begin();
73492181f19SNick Piggin 
73592181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
73692181f19SNick Piggin 
737a70857e4SAaron Tomlin 	if (task_stack_end_corrupted(tsk))
738b0f4c4b3SPrarit Bhargava 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
73919803078SIngo Molnar 
74092181f19SNick Piggin 	sig = SIGKILL;
74192181f19SNick Piggin 	if (__die("Oops", regs, error_code))
74292181f19SNick Piggin 		sig = 0;
7432d4a7167SIngo Molnar 
74492181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
745b0f4c4b3SPrarit Bhargava 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
7462d4a7167SIngo Molnar 
74792181f19SNick Piggin 	oops_end(flags, regs, sig);
74892181f19SNick Piggin }
74992181f19SNick Piggin 
7502d4a7167SIngo Molnar /*
7512d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
7522d4a7167SIngo Molnar  * sysctl is set:
7532d4a7167SIngo Molnar  */
7542d4a7167SIngo Molnar static inline void
7552d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
7562d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
7572d4a7167SIngo Molnar {
758ba54d856SBorislav Petkov 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
759ba54d856SBorislav Petkov 
7602d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
7612d4a7167SIngo Molnar 		return;
7622d4a7167SIngo Molnar 
7632d4a7167SIngo Molnar 	if (!printk_ratelimit())
7642d4a7167SIngo Molnar 		return;
7652d4a7167SIngo Molnar 
76610a7e9d8SKees Cook 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
767ba54d856SBorislav Petkov 		loglvl, tsk->comm, task_pid_nr(tsk), address,
7682d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
7692d4a7167SIngo Molnar 
7702d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
7712d4a7167SIngo Molnar 
7722d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
773ba54d856SBorislav Petkov 
774342db04aSJann Horn 	show_opcodes(regs, loglvl);
7752d4a7167SIngo Molnar }
7762d4a7167SIngo Molnar 
77702e983b7SDave Hansen /*
77802e983b7SDave Hansen  * The (legacy) vsyscall page is the long page in the kernel portion
77902e983b7SDave Hansen  * The (legacy) vsyscall page is the lone page in the kernel portion
78002e983b7SDave Hansen  */
78102e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr)
78202e983b7SDave Hansen {
7833ae0ad92SDave Hansen 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
78402e983b7SDave Hansen }
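
/*
 * For example, with VSYSCALL_ADDR == 0xffffffffff600000, both
 * 0xffffffffff600000 and 0xffffffffff600400 are vsyscall addresses:
 * masking with PAGE_MASK maps both to the page's base address.
 */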
78502e983b7SDave Hansen 
7862d4a7167SIngo Molnar static void
7872d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
788419ceeb1SEric W. Biederman 		       unsigned long address, u32 pkey, int si_code)
78992181f19SNick Piggin {
79092181f19SNick Piggin 	struct task_struct *tsk = current;
79192181f19SNick Piggin 
79292181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
7936ea59b07SAndy Lutomirski 	if (user_mode(regs) && (error_code & X86_PF_USER)) {
79492181f19SNick Piggin 		/*
7952d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
79692181f19SNick Piggin 		 */
79792181f19SNick Piggin 		local_irq_enable();
79892181f19SNick Piggin 
79992181f19SNick Piggin 		/*
80092181f19SNick Piggin 		 * Valid to do another page fault here because this one came
8012d4a7167SIngo Molnar 		 * from user space:
80292181f19SNick Piggin 		 */
80392181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
80492181f19SNick Piggin 			return;
80592181f19SNick Piggin 
80692181f19SNick Piggin 		if (is_errata100(regs, address))
80792181f19SNick Piggin 			return;
80892181f19SNick Piggin 
809dc4fac84SAndy Lutomirski 		/*
810dc4fac84SAndy Lutomirski 		 * To avoid leaking information about the kernel page table
811dc4fac84SAndy Lutomirski 		 * layout, pretend that user-mode accesses to kernel addresses
812dc4fac84SAndy Lutomirski 		 * are always protection faults.
813dc4fac84SAndy Lutomirski 		 */
814dc4fac84SAndy Lutomirski 		if (address >= TASK_SIZE_MAX)
8151067f030SRicardo Neri 			error_code |= X86_PF_PROT;
8163ae36655SAndy Lutomirski 
817e575a86fSKees Cook 		if (likely(show_unhandled_signals))
8182d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
81992181f19SNick Piggin 
820e49d3cbeSAndy Lutomirski 		set_signal_archinfo(address, error_code);
8212d4a7167SIngo Molnar 
8229db812dbSEric W. Biederman 		if (si_code == SEGV_PKUERR)
823419ceeb1SEric W. Biederman 			force_sig_pkuerr((void __user *)address, pkey);
8249db812dbSEric W. Biederman 
8252e1661d2SEric W. Biederman 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
8262d4a7167SIngo Molnar 
827ca4c6a98SThomas Gleixner 		local_irq_disable();
828ca4c6a98SThomas Gleixner 
82992181f19SNick Piggin 		return;
83092181f19SNick Piggin 	}
83192181f19SNick Piggin 
83292181f19SNick Piggin 	if (is_f00f_bug(regs, address))
83392181f19SNick Piggin 		return;
83492181f19SNick Piggin 
8354fc34901SAndy Lutomirski 	no_context(regs, error_code, address, SIGSEGV, si_code);
83692181f19SNick Piggin }
83792181f19SNick Piggin 
8382d4a7167SIngo Molnar static noinline void
8392d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
840768fd9c6SEric W. Biederman 		     unsigned long address)
84192181f19SNick Piggin {
842419ceeb1SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
84392181f19SNick Piggin }
84492181f19SNick Piggin 
8452d4a7167SIngo Molnar static void
8462d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
847419ceeb1SEric W. Biederman 	   unsigned long address, u32 pkey, int si_code)
84892181f19SNick Piggin {
84992181f19SNick Piggin 	struct mm_struct *mm = current->mm;
85092181f19SNick Piggin 	/*
85192181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map.
85292181f19SNick Piggin 	 * Fix it, but check if it's kernel or user first.
85392181f19SNick Piggin 	 */
854d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
85592181f19SNick Piggin 
856aba1ecd3SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
85792181f19SNick Piggin }
85892181f19SNick Piggin 
8592d4a7167SIngo Molnar static noinline void
8602d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
86192181f19SNick Piggin {
862419ceeb1SEric W. Biederman 	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
86392181f19SNick Piggin }
86492181f19SNick Piggin 
86533a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code,
86633a709b2SDave Hansen 		struct vm_area_struct *vma)
86733a709b2SDave Hansen {
86807f146f5SDave Hansen 	/* This code is always called on the current mm */
86907f146f5SDave Hansen 	bool foreign = false;
87007f146f5SDave Hansen 
87133a709b2SDave Hansen 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
87233a709b2SDave Hansen 		return false;
8731067f030SRicardo Neri 	if (error_code & X86_PF_PK)
87433a709b2SDave Hansen 		return true;
87507f146f5SDave Hansen 	/* this checks permission keys on the VMA: */
8761067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
8771067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
87807f146f5SDave Hansen 		return true;
87933a709b2SDave Hansen 	return false;
88092181f19SNick Piggin }
88192181f19SNick Piggin 
8822d4a7167SIngo Molnar static noinline void
8832d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
8847b2d0dbaSDave Hansen 		      unsigned long address, struct vm_area_struct *vma)
88592181f19SNick Piggin {
886019132ffSDave Hansen 	/*
887019132ffSDave Hansen 	 * This OSPKE check is not strictly necessary at runtime.
888019132ffSDave Hansen 	 * But, doing it this way allows compiler optimizations
889019132ffSDave Hansen 	 * if pkeys are compiled out.
890019132ffSDave Hansen 	 */
891aba1ecd3SEric W. Biederman 	if (bad_area_access_from_pkeys(error_code, vma)) {
8929db812dbSEric W. Biederman 		/*
8939db812dbSEric W. Biederman 		 * A protection key fault means that the PKRU value did not allow
8949db812dbSEric W. Biederman 		 * access to some PTE.  Userspace can figure out what PKRU was
8959db812dbSEric W. Biederman 		 * from the XSAVE state.  This function captures the pkey from
8969db812dbSEric W. Biederman 		 * the vma and passes it to userspace so userspace can discover
8979db812dbSEric W. Biederman 		 * which protection key was set on the PTE.
8989db812dbSEric W. Biederman 		 *
8999db812dbSEric W. Biederman 		 * If we get here, we know that the hardware signaled a X86_PF_PK
9009db812dbSEric W. Biederman 		 * fault and that there was a VMA once we got in the fault
9019db812dbSEric W. Biederman 		 * handler.  It does *not* guarantee that the VMA we find here
9029db812dbSEric W. Biederman 		 * was the one that we faulted on.
9039db812dbSEric W. Biederman 		 *
9049db812dbSEric W. Biederman 		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
9059db812dbSEric W. Biederman 		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
9069db812dbSEric W. Biederman 		 * 3. T1   : faults...
9079db812dbSEric W. Biederman 		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
908c1e8d7c6SMichel Lespinasse 		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
9099db812dbSEric W. Biederman 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
9109db812dbSEric W. Biederman 		 *	     faulted on a pte with its pkey=4.
9119db812dbSEric W. Biederman 		 */
912aba1ecd3SEric W. Biederman 		u32 pkey = vma_pkey(vma);
9139db812dbSEric W. Biederman 
914419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
915aba1ecd3SEric W. Biederman 	} else {
916419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
917aba1ecd3SEric W. Biederman 	}
91892181f19SNick Piggin }
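
/*
 * A minimal user-space sketch of the scenario above (assumes pkey
 * support; names are the glibc wrappers):
 *
 *	int pkey = pkey_alloc(0, 0);
 *	pkey_mprotect(foo, PAGE_SIZE, PROT_READ | PROT_WRITE, pkey);
 *	pkey_set(pkey, PKEY_DISABLE_ACCESS);
 *	*(volatile char *)foo;	   // faults; SIGSEGV with SEGV_PKUERR
 */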
91992181f19SNick Piggin 
9202d4a7167SIngo Molnar static void
921a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
9223d353901SSouptick Joarder 	  vm_fault_t fault)
92392181f19SNick Piggin {
9242d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
9251067f030SRicardo Neri 	if (!(error_code & X86_PF_USER)) {
9264fc34901SAndy Lutomirski 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
92796054569SLinus Torvalds 		return;
92896054569SLinus Torvalds 	}
9292d4a7167SIngo Molnar 
930cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
93192181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
93292181f19SNick Piggin 		return;
9332d4a7167SIngo Molnar 
934e49d3cbeSAndy Lutomirski 	set_signal_archinfo(address, error_code);
9352d4a7167SIngo Molnar 
936a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
937f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
938318759b4SEric W. Biederman 		struct task_struct *tsk = current;
93940e55394SEric W. Biederman 		unsigned lsb = 0;
94040e55394SEric W. Biederman 
94140e55394SEric W. Biederman 		pr_err(
942a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
943a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
94440e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON_LARGE)
94540e55394SEric W. Biederman 			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
94640e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON)
94740e55394SEric W. Biederman 			lsb = PAGE_SHIFT;
948f8eac901SEric W. Biederman 		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
94940e55394SEric W. Biederman 		return;
950a6e04aa9SAndi Kleen 	}
951a6e04aa9SAndi Kleen #endif
9522e1661d2SEric W. Biederman 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
95392181f19SNick Piggin }
95492181f19SNick Piggin 
9553a13c4d7SJohannes Weiner static noinline void
9562d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
95725c102d8SEric W. Biederman 	       unsigned long address, vm_fault_t fault)
95892181f19SNick Piggin {
9591067f030SRicardo Neri 	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
9604fc34901SAndy Lutomirski 		no_context(regs, error_code, address, 0, 0);
9613a13c4d7SJohannes Weiner 		return;
962b80ef10eSKOSAKI Motohiro 	}
963b80ef10eSKOSAKI Motohiro 
9642d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
965f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
9661067f030SRicardo Neri 		if (!(error_code & X86_PF_USER)) {
9674fc34901SAndy Lutomirski 			no_context(regs, error_code, address,
9684fc34901SAndy Lutomirski 				   SIGSEGV, SEGV_MAPERR);
9693a13c4d7SJohannes Weiner 			return;
970f8626854SAndrey Vagin 		}
971f8626854SAndrey Vagin 
972c2d23f91SDavid Rientjes 		/*
973c2d23f91SDavid Rientjes 		 * We ran out of memory, call the OOM killer, and return to
974c2d23f91SDavid Rientjes 		 * userspace (which will retry the fault, or kill us if we got
975c2d23f91SDavid Rientjes 		 * oom-killed):
976c2d23f91SDavid Rientjes 		 */
977c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
9782d4a7167SIngo Molnar 	} else {
979f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
980f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
98127274f73SEric W. Biederman 			do_sigbus(regs, error_code, address, fault);
98233692f27SLinus Torvalds 		else if (fault & VM_FAULT_SIGSEGV)
983768fd9c6SEric W. Biederman 			bad_area_nosemaphore(regs, error_code, address);
98492181f19SNick Piggin 		else
98592181f19SNick Piggin 			BUG();
98692181f19SNick Piggin 	}
9872d4a7167SIngo Molnar }
98892181f19SNick Piggin 
9898fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
990d8b57bb7SThomas Gleixner {
9911067f030SRicardo Neri 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
992d8b57bb7SThomas Gleixner 		return 0;
9932d4a7167SIngo Molnar 
9941067f030SRicardo Neri 	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
995d8b57bb7SThomas Gleixner 		return 0;
996d8b57bb7SThomas Gleixner 
997d8b57bb7SThomas Gleixner 	return 1;
998d8b57bb7SThomas Gleixner }
999d8b57bb7SThomas Gleixner 
1000c61e211dSHarvey Harrison /*
10012d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
10022d4a7167SIngo Molnar  *
10032d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
10042d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
10052d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
10062d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
10072d4a7167SIngo Molnar  * on other processors.
10082d4a7167SIngo Molnar  *
100931668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
101031668511SDavid Vrabel  * fewer permissions than the page table entry.  Non-present (P = 0)
101131668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
101231668511SDavid Vrabel  *
10135b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
10145b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
101531668511SDavid Vrabel  *
101631668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
101731668511SDavid Vrabel  *
101831668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
101931668511SDavid Vrabel  * (Optional Invalidation).
10205b727a3bSJeremy Fitzhardinge  */
10219326638cSMasami Hiramatsu static noinline int
10228fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
10235b727a3bSJeremy Fitzhardinge {
10245b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
1025e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
10265b727a3bSJeremy Fitzhardinge 	pud_t *pud;
10275b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
10285b727a3bSJeremy Fitzhardinge 	pte_t *pte;
10293c3e5694SSteven Rostedt 	int ret;
10305b727a3bSJeremy Fitzhardinge 
103131668511SDavid Vrabel 	/*
103231668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
103331668511SDavid Vrabel 	 * spurious faults.
103431668511SDavid Vrabel 	 *
103531668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
103631668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
103731668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
103831668511SDavid Vrabel 	 * faults.
103931668511SDavid Vrabel 	 */
10401067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
10411067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
10425b727a3bSJeremy Fitzhardinge 		return 0;
10435b727a3bSJeremy Fitzhardinge 
10445b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
10455b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
10465b727a3bSJeremy Fitzhardinge 		return 0;
10475b727a3bSJeremy Fitzhardinge 
1048e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1049e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1050e0c4f675SKirill A. Shutemov 		return 0;
1051e0c4f675SKirill A. Shutemov 
1052e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
10538fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1054e0c4f675SKirill A. Shutemov 
1055e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
10565b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
10575b727a3bSJeremy Fitzhardinge 		return 0;
10585b727a3bSJeremy Fitzhardinge 
1059d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
10608fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1061d8b57bb7SThomas Gleixner 
10625b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
10635b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
10645b727a3bSJeremy Fitzhardinge 		return 0;
10655b727a3bSJeremy Fitzhardinge 
1066d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
10678fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1068d8b57bb7SThomas Gleixner 
10695b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1070954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
10715b727a3bSJeremy Fitzhardinge 		return 0;
10725b727a3bSJeremy Fitzhardinge 
10738fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
10743c3e5694SSteven Rostedt 	if (!ret)
10753c3e5694SSteven Rostedt 		return 0;
10763c3e5694SSteven Rostedt 
10773c3e5694SSteven Rostedt 	/*
10782d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
10792d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
10803c3e5694SSteven Rostedt 	 */
10818fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
10823c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
10832d4a7167SIngo Molnar 
10843c3e5694SSteven Rostedt 	return ret;
10855b727a3bSJeremy Fitzhardinge }
10868fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
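
/*
 * Editor's illustration (no blame history; not part of this file):
 * each spurious_kernel_fault_check() call above compares the faulting
 * access against the permissions in the given paging-structure entry.
 * Assuming the stock pte_write()/pte_exec() accessors, the helper
 * behaves roughly like the sketch below; its actual definition lives
 * earlier in this file.
 */
#if 0	/* illustrative sketch only */
static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
{
	/* A write fault is only spurious if the entry now allows writes. */
	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;

	/* A fetch fault is only spurious if the entry is now executable. */
	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
#endif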
10875b727a3bSJeremy Fitzhardinge 
1088c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1089c61e211dSHarvey Harrison 
10902d4a7167SIngo Molnar static inline int
109168da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
109292181f19SNick Piggin {
109307f146f5SDave Hansen 	/* This is only called for the current mm, so: */
109407f146f5SDave Hansen 	bool foreign = false;
1095e8c6226dSDave Hansen 
1096e8c6226dSDave Hansen 	/*
1097e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1098e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1099e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1100e8c6226dSDave Hansen 	 */
11011067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1102e8c6226dSDave Hansen 		return 1;
1103e8c6226dSDave Hansen 
110433a709b2SDave Hansen 	/*
1105*74faeee0SSean Christopherson 	 * SGX hardware blocked the access.  This usually happens
1106*74faeee0SSean Christopherson 	 * when the enclave memory contents have been destroyed, like
1107*74faeee0SSean Christopherson 	 * after a suspend/resume cycle. In any case, the kernel can't
1108*74faeee0SSean Christopherson 	 * fix the cause of the fault.  Handle the fault as an access
1109*74faeee0SSean Christopherson 	 * error even in cases where no actual access violation
1110*74faeee0SSean Christopherson 	 * occurred.  This allows userspace to rebuild the enclave in
1111*74faeee0SSean Christopherson 	 * response to the signal.
1112*74faeee0SSean Christopherson 	 */
1113*74faeee0SSean Christopherson 	if (unlikely(error_code & X86_PF_SGX))
1114*74faeee0SSean Christopherson 		return 1;
1115*74faeee0SSean Christopherson 
1116*74faeee0SSean Christopherson 	/*
111707f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
11181067f030SRicardo Neri 	 * faults just to hit an X86_PF_PK as soon as we fill in a
111907f146f5SDave Hansen 	 * page.
112007f146f5SDave Hansen 	 */
11211067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11221067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
112307f146f5SDave Hansen 		return 1;
112433a709b2SDave Hansen 
11251067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
11262d4a7167SIngo Molnar 		/* write, present and write, not present: */
112792181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
112892181f19SNick Piggin 			return 1;
11292d4a7167SIngo Molnar 		return 0;
11302d4a7167SIngo Molnar 	}
11312d4a7167SIngo Molnar 
11322d4a7167SIngo Molnar 	/* read, present: */
11331067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
113492181f19SNick Piggin 		return 1;
11352d4a7167SIngo Molnar 
11362d4a7167SIngo Molnar 	/* read, not present: */
11373122e80eSAnshuman Khandual 	if (unlikely(!vma_is_accessible(vma)))
113892181f19SNick Piggin 		return 1;
113992181f19SNick Piggin 
114092181f19SNick Piggin 	return 0;
114192181f19SNick Piggin }
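
/*
 * Editor's illustration (not part of this file): the X86_PF_* bits
 * tested above come straight from the hardware #PF error code.  On
 * x86-64, userspace can observe the same low bits via the REG_ERR
 * slot of the ucontext in a SIGSEGV handler.  A minimal, hypothetical
 * demo program (glibc, needs _GNU_SOURCE):
 */
#if 0	/* illustrative userspace demo, not kernel code */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <ucontext.h>
#include <unistd.h>

static void segv(int sig, siginfo_t *si, void *ctx)
{
	unsigned long err = ((ucontext_t *)ctx)->uc_mcontext.gregs[REG_ERR];

	/* printf() is not async-signal-safe; fine for a throwaway demo. */
	printf("fault at %p, error_code=%#lx: %s %s, %s page\n",
	       si->si_addr, err,
	       (err & 4) ? "user-mode" : "kernel-mode",	/* X86_PF_USER  */
	       (err & 2) ? "write" : "read",		/* X86_PF_WRITE */
	       (err & 1) ? "present" : "not-present");	/* X86_PF_PROT  */
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv, .sa_flags = SA_SIGINFO };
	char *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	sigaction(SIGSEGV, &sa, NULL);
	*p = 1;	/* write to a read-only page: PROT|WRITE|USER == 0x7 */
	return 0;
}
#endif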
114292181f19SNick Piggin 
114330063810STony Luck bool fault_in_kernel_space(unsigned long address)
11440973a06cSHiroshi Shimamoto {
11453ae0ad92SDave Hansen 	/*
11463ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
11473ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
11483ae0ad92SDave Hansen 	 * address space.
11493ae0ad92SDave Hansen 	 */
11503ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
11513ae0ad92SDave Hansen 		return false;
11523ae0ad92SDave Hansen 
1153d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
11540973a06cSHiroshi Shimamoto }
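
/*
 * Editor's illustration (not part of this file): with the vsyscall
 * carve-out above, typical x86-64 (4-level paging) addresses dispatch
 * as follows:
 *
 *   fault_in_kernel_space(0x00007f1234567000) == false  (user mapping)
 *   fault_in_kernel_space(0xffffffffff600000) == false  (vsyscall page)
 *   fault_in_kernel_space(0xffff888000100000) == true   (direct map)
 */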
11550973a06cSHiroshi Shimamoto 
1156c61e211dSHarvey Harrison /*
11578fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11588fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11598fed6200SDave Hansen  * ran in userspace or the kernel.
1160c61e211dSHarvey Harrison  */
11618fed6200SDave Hansen static void
11628fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11630ac09f9fSJiri Olsa 		   unsigned long address)
1164c61e211dSHarvey Harrison {
11658fed6200SDave Hansen 	/*
1166367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1167367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1168367e3f1dSDave Hansen 	 * space, so do not expect them here.
1169367e3f1dSDave Hansen 	 */
1170367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1171367e3f1dSDave Hansen 
11724819e15fSJoerg Roedel #ifdef CONFIG_X86_32
11734819e15fSJoerg Roedel 	/*
11744819e15fSJoerg Roedel 	 * We can fault-in kernel-space virtual memory on-demand. The
11754819e15fSJoerg Roedel 	 * 'reference' page table is init_mm.pgd.
11764819e15fSJoerg Roedel 	 *
11774819e15fSJoerg Roedel 	 * NOTE! We MUST NOT take any locks for this case. We may
11784819e15fSJoerg Roedel 	 * be in an interrupt or a critical region, and should
11794819e15fSJoerg Roedel 	 * only copy the information from the master page table,
11804819e15fSJoerg Roedel 	 * nothing more.
11814819e15fSJoerg Roedel 	 *
11824819e15fSJoerg Roedel 	 * Before doing this on-demand faulting, ensure that the
11834819e15fSJoerg Roedel 	 * fault is not any of the following:
11844819e15fSJoerg Roedel 	 * 1. A fault on a PTE with a reserved bit set.
11854819e15fSJoerg Roedel 	 * 2. A fault caused by a user-mode access.  (Do not demand-
11864819e15fSJoerg Roedel 	 *    fault kernel memory due to user-mode accesses).
11874819e15fSJoerg Roedel 	 * 3. A fault caused by a page-level protection violation.
11884819e15fSJoerg Roedel 	 *    (A demand fault would be on a non-present page which
11894819e15fSJoerg Roedel 	 *     would have X86_PF_PROT==0).
11904819e15fSJoerg Roedel 	 *
11914819e15fSJoerg Roedel 	 * This is only needed to close a race condition on x86-32 in
11924819e15fSJoerg Roedel 	 * the vmalloc mapping/unmapping code. See the comment above
11934819e15fSJoerg Roedel 	 * vmalloc_fault() for details. On x86-64 the race does not
11944819e15fSJoerg Roedel 	 * exist as the vmalloc mappings don't need to be synchronized
11954819e15fSJoerg Roedel 	 * there. A hedged sketch of the 32-bit helper follows this function.
11964819e15fSJoerg Roedel 	 */
11974819e15fSJoerg Roedel 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
11984819e15fSJoerg Roedel 		if (vmalloc_fault(address) >= 0)
11994819e15fSJoerg Roedel 			return;
12004819e15fSJoerg Roedel 	}
12014819e15fSJoerg Roedel #endif
12024819e15fSJoerg Roedel 
12038fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
12048fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
12058fed6200SDave Hansen 		return;
12068fed6200SDave Hansen 
12078fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
1208b98cca44SAnshuman Khandual 	if (kprobe_page_fault(regs, X86_TRAP_PF))
12098fed6200SDave Hansen 		return;
12108fed6200SDave Hansen 
12118fed6200SDave Hansen 	/*
12128fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
12138fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
12148fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
12158fed6200SDave Hansen 	 *
12168fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
12178fed6200SDave Hansen 	 * fault we could otherwise deadlock:
12188fed6200SDave Hansen 	 */
1219ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
12208fed6200SDave Hansen }
12218fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
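
/*
 * Editor's sketch (hedged; not part of this file's history): the
 * x86-32 vmalloc_fault() referenced above copies the missing PMD
 * entry from the init_mm 'reference' page table into the faulting
 * context's page table, taking no locks.  It behaves roughly like:
 */
#if 0	/* illustrative sketch only */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in the vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top-level page table with the
	 * 'reference' page table.  Do not use "current" here; we
	 * might be inside an interrupt in the middle of a task switch.
	 */
	pgd_paddr = read_cr3_pa();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	if (pmd_large(*pmd_k))
		return 0;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
#endif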
12228fed6200SDave Hansen 
1223aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1224aa37c51bSDave Hansen static inline
1225aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1226aa37c51bSDave Hansen 			unsigned long hw_error_code,
1227c61e211dSHarvey Harrison 			unsigned long address)
1228c61e211dSHarvey Harrison {
1229c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1230c61e211dSHarvey Harrison 	struct task_struct *tsk;
12312d4a7167SIngo Molnar 	struct mm_struct *mm;
1232968614fcSPeter Xu 	vm_fault_t fault;
1233dde16072SPeter Xu 	unsigned int flags = FAULT_FLAG_DEFAULT;
1234c61e211dSHarvey Harrison 
1235c61e211dSHarvey Harrison 	tsk = current;
1236c61e211dSHarvey Harrison 	mm = tsk->mm;
12372d4a7167SIngo Molnar 
12382d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1239b98cca44SAnshuman Khandual 	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
12409be260a6SMasami Hiramatsu 		return;
1241e00b12e6SPeter Zijlstra 
12425b0c2cacSDave Hansen 	/*
12435b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
12445b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
12455b0c2cacSDave Hansen 	 */
1246164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1247164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1248e00b12e6SPeter Zijlstra 
12495b0c2cacSDave Hansen 	/*
1250e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1251e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1252e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1253e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1254e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
12555b0c2cacSDave Hansen 	 */
1256a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1257a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1258e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1259a15781b5SAndy Lutomirski 	{
1260ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1261e00b12e6SPeter Zijlstra 		return;
1262e00b12e6SPeter Zijlstra 	}
1263e00b12e6SPeter Zijlstra 
1264e00b12e6SPeter Zijlstra 	/*
1265e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
126670ffdb93SDavid Hildenbrand 	 * in a region with page faults disabled, then we must not take the fault.
1267e00b12e6SPeter Zijlstra 	 */
126870ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1269ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1270e00b12e6SPeter Zijlstra 		return;
1271e00b12e6SPeter Zijlstra 	}
1272e00b12e6SPeter Zijlstra 
1273c61e211dSHarvey Harrison 	/*
1274891cffbdSLinus Torvalds 	 * It's safe to allow irq's after cr2 has been saved and the
1275891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1276891cffbdSLinus Torvalds 	 *
1277891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
12782d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1279c61e211dSHarvey Harrison 	 */
1280f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1281891cffbdSLinus Torvalds 		local_irq_enable();
1282759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
12832d4a7167SIngo Molnar 	} else {
12842d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1285c61e211dSHarvey Harrison 			local_irq_enable();
12862d4a7167SIngo Molnar 	}
1287c61e211dSHarvey Harrison 
1288a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
12897dd1fcc2SPeter Zijlstra 
12900ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1291759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
12920ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1293d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1294759496baSJohannes Weiner 
12953ae0ad92SDave Hansen #ifdef CONFIG_X86_64
12963a1dfe6eSIngo Molnar 	/*
1297918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1298918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1299918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1300c61e211dSHarvey Harrison 	 *
13013ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13023ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1303e0a446ceSAndy Lutomirski 	 *
1304e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1305e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
13063ae0ad92SDave Hansen 	 */
1307918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1308918ce325SAndy Lutomirski 		if (emulate_vsyscall(hw_error_code, regs, address))
13093ae0ad92SDave Hansen 			return;
13103ae0ad92SDave Hansen 	}
13113ae0ad92SDave Hansen #endif
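
	/*
	 * Editor's note (illustrative): the legacy vsyscall ABI places
	 * its three entry points at fixed offsets within that page:
	 *
	 *   0xffffffffff600000  gettimeofday()
	 *   0xffffffffff600400  time()
	 *   0xffffffffff600800  getcpu()
	 *
	 * emulate_vsyscall() recognizes a fetch from one of these
	 * addresses and performs the equivalent system call on the
	 * task's behalf instead of executing the page.
	 */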
13123ae0ad92SDave Hansen 
1313c61e211dSHarvey Harrison 	/*
131488259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
131588259744SDave Hansen 	 * on well-defined single instructions listed in the exception
131688259744SDave Hansen 	 * tables.  But, an erroneous kernel fault occurring outside one of
1317c1e8d7c6SMichel Lespinasse 	 * those areas which also holds mmap_lock might deadlock attempting
131888259744SDave Hansen 	 * to validate the fault against the address space.
1319c61e211dSHarvey Harrison 	 *
132088259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
132188259744SDave Hansen 	 * risk of a deadlock.  This happens if we
1322c1e8d7c6SMichel Lespinasse 	 * 1. Failed to acquire mmap_lock, and
13236344be60SAndy Lutomirski 	 * 2. The access did not originate in userspace.
1324c61e211dSHarvey Harrison 	 */
1325d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm))) {
13266344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
132788259744SDave Hansen 			/*
132888259744SDave Hansen 			 * Fault from code in kernel from
132988259744SDave Hansen 			 * which we do not expect faults.
133088259744SDave Hansen 			 */
13310ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
133292181f19SNick Piggin 			return;
133392181f19SNick Piggin 		}
1334d065bd81SMichel Lespinasse retry:
1335d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
133601006074SPeter Zijlstra 	} else {
133701006074SPeter Zijlstra 		/*
13382d4a7167SIngo Molnar 		 * The above mmap_read_trylock() might have succeeded in
13392d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
13402d4a7167SIngo Molnar 		 * mmap_read_lock():
134101006074SPeter Zijlstra 		 */
134201006074SPeter Zijlstra 		might_sleep();
1343c61e211dSHarvey Harrison 	}
1344c61e211dSHarvey Harrison 
1345c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
134692181f19SNick Piggin 	if (unlikely(!vma)) {
13470ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
134892181f19SNick Piggin 		return;
134992181f19SNick Piggin 	}
135092181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1351c61e211dSHarvey Harrison 		goto good_area;
135292181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
13530ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
135492181f19SNick Piggin 		return;
135592181f19SNick Piggin 	}
135692181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
13570ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
135892181f19SNick Piggin 		return;
135992181f19SNick Piggin 	}
136092181f19SNick Piggin 
1361c61e211dSHarvey Harrison 	/*
1362c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1363c61e211dSHarvey Harrison 	 * we can handle it.
1364c61e211dSHarvey Harrison 	 */
1365c61e211dSHarvey Harrison good_area:
13660ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
13670ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
136892181f19SNick Piggin 		return;
1369c61e211dSHarvey Harrison 	}
1370c61e211dSHarvey Harrison 
1371c61e211dSHarvey Harrison 	/*
1372c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1373c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
13749a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1375c1e8d7c6SMichel Lespinasse 	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1376cb0631fdSVlastimil Babka 	 *
1377c1e8d7c6SMichel Lespinasse 	 * Note that handle_userfault() may also release and reacquire mmap_lock
1378cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1379cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1380cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1381cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1382cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1383c61e211dSHarvey Harrison 	 */
1384968614fcSPeter Xu 	fault = handle_mm_fault(vma, address, flags, regs);
13852d4a7167SIngo Molnar 
138639678191SPeter Xu 	/* Quick path to respond to signals */
138739678191SPeter Xu 	if (fault_signal_pending(fault, regs)) {
138839678191SPeter Xu 		if (!user_mode(regs))
138939678191SPeter Xu 			no_context(regs, hw_error_code, address, SIGBUS,
139039678191SPeter Xu 				   BUS_ADRERR);
139139678191SPeter Xu 		return;
139239678191SPeter Xu 	}
139339678191SPeter Xu 
13943a13c4d7SJohannes Weiner 	/*
1395c1e8d7c6SMichel Lespinasse 	 * If we need to retry the mmap_lock has already been released,
139626178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
139726178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
13983a13c4d7SJohannes Weiner 	 */
139939678191SPeter Xu 	if (unlikely((fault & VM_FAULT_RETRY) &&
140039678191SPeter Xu 		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
140126178ec1SLinus Torvalds 		flags |= FAULT_FLAG_TRIED;
140226178ec1SLinus Torvalds 		goto retry;
140326178ec1SLinus Torvalds 	}
140426178ec1SLinus Torvalds 
1405d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
140626178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
14070ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
140837b23e05SKOSAKI Motohiro 		return;
140937b23e05SKOSAKI Motohiro 	}
141037b23e05SKOSAKI Motohiro 
14118c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1412c61e211dSHarvey Harrison }
1413aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
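
/*
 * Editor's illustration (not part of this file): each first touch of
 * a fresh anonymous page takes the do_user_addr_fault() ->
 * handle_mm_fault() path above and is accounted as a minor fault,
 * which a process can see via getrusage().  Hypothetical userspace
 * demo:
 */
#if 0	/* illustrative userspace demo, not kernel code */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage before, after;
	size_t len = 64 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	getrusage(RUSAGE_SELF, &before);
	memset(p, 1, len);	/* demand-fault all 64 pages */
	getrusage(RUSAGE_SELF, &after);

	printf("minor faults taken: %ld\n",
	       after.ru_minflt - before.ru_minflt);
	munmap(p, len);
	return 0;
}
#endif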
1414aa37c51bSDave Hansen 
1415a0d14b89SPeter Zijlstra static __always_inline void
1416a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1417a0d14b89SPeter Zijlstra 			 unsigned long address)
1418d34603b0SSeiji Aguchi {
1419a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1420a0d14b89SPeter Zijlstra 		return;
1421a0d14b89SPeter Zijlstra 
1422d34603b0SSeiji Aguchi 	if (user_mode(regs))
1423d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1424d34603b0SSeiji Aguchi 	else
1425d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1426d34603b0SSeiji Aguchi }
1427d34603b0SSeiji Aguchi 
142891eeafeaSThomas Gleixner static __always_inline void
142991eeafeaSThomas Gleixner handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1430ee6352b2SFrederic Weisbecker 			      unsigned long address)
143111a7ffb0SThomas Gleixner {
143291eeafeaSThomas Gleixner 	trace_page_fault_entries(regs, error_code, address);
143391eeafeaSThomas Gleixner 
143491eeafeaSThomas Gleixner 	if (unlikely(kmmio_fault(regs, address)))
143591eeafeaSThomas Gleixner 		return;
143691eeafeaSThomas Gleixner 
143791eeafeaSThomas Gleixner 	/* Was the fault on kernel-controlled part of the address space? */
143891eeafeaSThomas Gleixner 	if (unlikely(fault_in_kernel_space(address))) {
143991eeafeaSThomas Gleixner 		do_kern_addr_fault(regs, error_code, address);
144091eeafeaSThomas Gleixner 	} else {
144191eeafeaSThomas Gleixner 		do_user_addr_fault(regs, error_code, address);
144291eeafeaSThomas Gleixner 		/*
144391eeafeaSThomas Gleixner 		 * User address page fault handling might have re-enabled
144491eeafeaSThomas Gleixner 		 * interrupts. Fixing up all potential exit points of
144591eeafeaSThomas Gleixner 		 * do_user_addr_fault() and its leaf functions is just not
144691eeafeaSThomas Gleixner 		 * doable w/o creating an unholy mess or turning the code
144791eeafeaSThomas Gleixner 		 * upside down.
144891eeafeaSThomas Gleixner 		 */
144991eeafeaSThomas Gleixner 		local_irq_disable();
145091eeafeaSThomas Gleixner 	}
145191eeafeaSThomas Gleixner }
145291eeafeaSThomas Gleixner 
145391eeafeaSThomas Gleixner DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
145491eeafeaSThomas Gleixner {
145591eeafeaSThomas Gleixner 	unsigned long address = read_cr2();
1456a27a0a55SThomas Gleixner 	irqentry_state_t state;
145791eeafeaSThomas Gleixner 
1458da1c55f1SMichel Lespinasse 	prefetchw(&current->mm->mmap_lock);
145991eeafeaSThomas Gleixner 
1460ef68017eSAndy Lutomirski 	/*
146166af4f5cSVitaly Kuznetsov 	 * KVM uses #PF vector to deliver 'page not present' events to guests
146266af4f5cSVitaly Kuznetsov 	 * (asynchronous page fault mechanism). The event happens when a
146366af4f5cSVitaly Kuznetsov 	 * userspace task is trying to access some valid (from guest's point of
146466af4f5cSVitaly Kuznetsov 	 * view) memory which is not currently mapped by the host (e.g. the
146566af4f5cSVitaly Kuznetsov 	 * memory is swapped out). Note, the corresponding "page ready" event,
146666af4f5cSVitaly Kuznetsov 	 * which is injected when the memory becomes available, is delivered via
146766af4f5cSVitaly Kuznetsov 	 * an interrupt mechanism and not a #PF exception
146866af4f5cSVitaly Kuznetsov 	 * (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
1469ef68017eSAndy Lutomirski 	 *
1470ef68017eSAndy Lutomirski 	 * We are relying on the interrupted context being sane (valid RSP,
1471ef68017eSAndy Lutomirski 	 * relevant locks not held, etc.), which is fine as long as the
1472ef68017eSAndy Lutomirski 	 * interrupted context had IF=1.  We are also relying on the KVM
1473ef68017eSAndy Lutomirski 	 * async pf type field and CR2 being read consistently instead of
1474ef68017eSAndy Lutomirski 	 * getting values from real and async page faults mixed up.
1475ef68017eSAndy Lutomirski 	 *
1476ef68017eSAndy Lutomirski 	 * Fingers crossed.
147791eeafeaSThomas Gleixner 	 *
147891eeafeaSThomas Gleixner 	 * The async #PF handling code takes care of idtentry handling
147991eeafeaSThomas Gleixner 	 * itself.
1480ef68017eSAndy Lutomirski 	 */
1481ef68017eSAndy Lutomirski 	if (kvm_handle_async_pf(regs, (u32)address))
1482ef68017eSAndy Lutomirski 		return;
1483ef68017eSAndy Lutomirski 
1484ca4c6a98SThomas Gleixner 	/*
148591eeafeaSThomas Gleixner 	 * Entry handling for valid #PF from kernel mode is slightly
148691eeafeaSThomas Gleixner 	 * different: RCU is already watching and rcu_irq_enter() must not
148791eeafeaSThomas Gleixner 	 * be invoked because a kernel fault on a user space address might
148891eeafeaSThomas Gleixner 	 * sleep.
148991eeafeaSThomas Gleixner 	 *
149091eeafeaSThomas Gleixner 	 * In case the fault hit an RCU idle region, the conditional entry
149191eeafeaSThomas Gleixner 	 * code re-enables RCU to avoid subsequent wreckage, which helps
149291eeafeaSThomas Gleixner 	 * debuggability.
1493ca4c6a98SThomas Gleixner 	 */
1494a27a0a55SThomas Gleixner 	state = irqentry_enter(regs);
149591eeafeaSThomas Gleixner 
149691eeafeaSThomas Gleixner 	instrumentation_begin();
149791eeafeaSThomas Gleixner 	handle_page_fault(regs, error_code, address);
149891eeafeaSThomas Gleixner 	instrumentation_end();
149991eeafeaSThomas Gleixner 
1500a27a0a55SThomas Gleixner 	irqentry_exit(regs, state);
1501ca4c6a98SThomas Gleixner }
1502