xref: /openbmc/linux/arch/x86/mm/fault.c (revision fe557319aa06c23cffc9346000f119547e0f289a)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c61e211dSHarvey Harrison /*
3c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
4c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
5f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
6c61e211dSHarvey Harrison  */
7a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
868db0cf1SIngo Molnar #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
9a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
104cdf8dbeSLinus Torvalds #include <linux/extable.h>		/* search_exception_tables	*/
1157c8a661SMike Rapoport #include <linux/memblock.h>		/* max_low_pfn			*/
129326638cSMasami Hiramatsu #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
13a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
14cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
15f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
16268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
193425d934SSai Praneeth #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
2050a7ca3cSSouptick Joarder #include <linux/mm_types.h>
21c61e211dSHarvey Harrison 
22019132ffSDave Hansen #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
23a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
24a2bcd473SIngo Molnar #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
25f40c3300SAndy Lutomirski #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
26f40c3300SAndy Lutomirski #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
27ba3e127eSBrian Gerst #include <asm/vm86.h>			/* struct vm86			*/
28019132ffSDave Hansen #include <asm/mmu_context.h>		/* vma_pkey()			*/
293425d934SSai Praneeth #include <asm/efi.h>			/* efi_recover_from_page_fault()*/
30a1a371c4SAndy Lutomirski #include <asm/desc.h>			/* store_idt(), ...		*/
31d876b673SThomas Gleixner #include <asm/cpu_entry_area.h>		/* exception stack		*/
32186525bdSIngo Molnar #include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
33ef68017eSAndy Lutomirski #include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
34c61e211dSHarvey Harrison 
35d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS
36d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h>
37d34603b0SSeiji Aguchi 
38c61e211dSHarvey Harrison /*
39b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
40b319eed0SIngo Molnar  * handled by mmiotrace:
41b814d41fSIngo Molnar  */
429326638cSMasami Hiramatsu static nokprobe_inline int
4362c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
4486069782SPekka Paalanen {
450fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
460fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
470fd0e3daSPekka Paalanen 			return -1;
480fd0e3daSPekka Paalanen 	return 0;
4986069782SPekka Paalanen }
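
/*
 * Call-site sketch (hedged; the actual caller sits later in this
 * file's entry path): the fault handler consults this first and bails
 * out when mmiotrace consumed the fault:
 *
 *	if (unlikely(kmmio_fault(regs, address)))
 *		return;
 */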
5086069782SPekka Paalanen 
51c61e211dSHarvey Harrison /*
522d4a7167SIngo Molnar  * Prefetch quirks:
532d4a7167SIngo Molnar  *
542d4a7167SIngo Molnar  * 32-bit mode:
552d4a7167SIngo Molnar  *
56c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
57c61e211dSHarvey Harrison  *   Check that here and ignore it.
58c61e211dSHarvey Harrison  *
592d4a7167SIngo Molnar  * 64-bit mode:
602d4a7167SIngo Molnar  *
61c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
62c61e211dSHarvey Harrison  *   Check that here and ignore it.
63c61e211dSHarvey Harrison  *
642d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
65c61e211dSHarvey Harrison  */
66107a0367SIngo Molnar static inline int
67107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
68107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
69c61e211dSHarvey Harrison {
70107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
71107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
72c61e211dSHarvey Harrison 
73c61e211dSHarvey Harrison 	switch (instr_hi) {
74c61e211dSHarvey Harrison 	case 0x20:
75c61e211dSHarvey Harrison 	case 0x30:
76c61e211dSHarvey Harrison 		/*
77c61e211dSHarvey Harrison 		 * Values 0x26, 0x2E, 0x36 and 0x3E are valid x86 prefixes.
78c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
79c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present, so
80c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway.
81c61e211dSHarvey Harrison 		 */
82107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
83c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
84c61e211dSHarvey Harrison 	case 0x40:
85c61e211dSHarvey Harrison 		/*
86c61e211dSHarvey Harrison 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
87c61e211dSHarvey Harrison 		 * We need to figure out under what instruction mode the
88c61e211dSHarvey Harrison 		 * instruction was issued. We could check the LDT for lm,
89c61e211dSHarvey Harrison 		 * but for now it's good enough to assume that long
90c61e211dSHarvey Harrison 		 * mode only uses well-known segments or the kernel.
91c61e211dSHarvey Harrison 		 */
92318f5a2aSAndy Lutomirski 		return (!user_mode(regs) || user_64bit_mode(regs));
93c61e211dSHarvey Harrison #endif
94c61e211dSHarvey Harrison 	case 0x60:
95c61e211dSHarvey Harrison 		/* 0x64 through 0x67 are valid prefixes in all modes. */
96107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
97c61e211dSHarvey Harrison 	case 0xF0:
98c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
99107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
100c61e211dSHarvey Harrison 	case 0x00:
101c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
102107a0367SIngo Molnar 		if (probe_kernel_address(instr, opcode))
103107a0367SIngo Molnar 			return 0;
104107a0367SIngo Molnar 
105107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
106107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
107107a0367SIngo Molnar 		return 0;
108107a0367SIngo Molnar 	default:
109107a0367SIngo Molnar 		return 0;
110107a0367SIngo Molnar 	}
111107a0367SIngo Molnar }
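
/*
 * Illustrative walk-through, assuming the faulting fetch was
 * "prefetchnta (%rax)" = 0F 18 00: is_prefetch() below reads 0x0F and
 * advances instr, so here instr_hi == 0x00 and instr_lo == 0x0F; the
 * 0x00 case then probes the next byte (0x18) and sets
 *
 *	*prefetch = (instr_lo == 0xF) &&
 *		(opcode == 0x0D || opcode == 0x18);	// true
 */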
112107a0367SIngo Molnar 
113107a0367SIngo Molnar static int
114107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
115107a0367SIngo Molnar {
116107a0367SIngo Molnar 	unsigned char *max_instr;
117107a0367SIngo Molnar 	unsigned char *instr;
118107a0367SIngo Molnar 	int prefetch = 0;
119107a0367SIngo Molnar 
120107a0367SIngo Molnar 	/*
121107a0367SIngo Molnar 	 * If it was an exec (instruction fetch) fault on an NX page, then
122107a0367SIngo Molnar 	 * do not ignore the fault:
123107a0367SIngo Molnar 	 */
1241067f030SRicardo Neri 	if (error_code & X86_PF_INSTR)
125107a0367SIngo Molnar 		return 0;
126107a0367SIngo Molnar 
127107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
128107a0367SIngo Molnar 	max_instr = instr + 15;
129107a0367SIngo Molnar 
130d31bf07fSAndy Lutomirski 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
131107a0367SIngo Molnar 		return 0;
132107a0367SIngo Molnar 
133107a0367SIngo Molnar 	while (instr < max_instr) {
134107a0367SIngo Molnar 		unsigned char opcode;
135c61e211dSHarvey Harrison 
136c61e211dSHarvey Harrison 		if (probe_kernel_address(instr, opcode))
137c61e211dSHarvey Harrison 			break;
138107a0367SIngo Molnar 
139107a0367SIngo Molnar 		instr++;
140107a0367SIngo Molnar 
141107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
142c61e211dSHarvey Harrison 			break;
143c61e211dSHarvey Harrison 	}
144c61e211dSHarvey Harrison 	return prefetch;
145c61e211dSHarvey Harrison }
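
/*
 * Usage sketch: callers below (no_context(), __bad_area_nosemaphore(),
 * do_sigbus()) drop the fault entirely when this returns true:
 *
 *	if (is_prefetch(regs, error_code, address))
 *		return;
 *
 * i.e. the bogus #PF reported for the prefetch is silently ignored.
 */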
146c61e211dSHarvey Harrison 
147f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
148f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1492d4a7167SIngo Molnar 
150f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
151f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
152f2f13a85SIngo Molnar {
153f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
154f2f13a85SIngo Molnar 	pgd_t *pgd_k;
155e0c4f675SKirill A. Shutemov 	p4d_t *p4d, *p4d_k;
156f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
157f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
158f2f13a85SIngo Molnar 
159f2f13a85SIngo Molnar 	pgd += index;
160f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
161f2f13a85SIngo Molnar 
162f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
163f2f13a85SIngo Molnar 		return NULL;
164f2f13a85SIngo Molnar 
165f2f13a85SIngo Molnar 	/*
166f2f13a85SIngo Molnar 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
167f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() on non-PAE. As would
168e0c4f675SKirill A. Shutemov 	 * set_p4d/set_pud.
169f2f13a85SIngo Molnar 	 */
170e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
171e0c4f675SKirill A. Shutemov 	p4d_k = p4d_offset(pgd_k, address);
172e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d_k))
173e0c4f675SKirill A. Shutemov 		return NULL;
174e0c4f675SKirill A. Shutemov 
175e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
176e0c4f675SKirill A. Shutemov 	pud_k = pud_offset(p4d_k, address);
177f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
178f2f13a85SIngo Molnar 		return NULL;
179f2f13a85SIngo Molnar 
180f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
181f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
1828e998fc2SJoerg Roedel 
1838e998fc2SJoerg Roedel 	if (pmd_present(*pmd) != pmd_present(*pmd_k))
1848e998fc2SJoerg Roedel 		set_pmd(pmd, *pmd_k);
1858e998fc2SJoerg Roedel 
186f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
187f2f13a85SIngo Molnar 		return NULL;
188b8bcfe99SJeremy Fitzhardinge 	else
18951b75b5bSJoerg Roedel 		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
190f2f13a85SIngo Molnar 
191f2f13a85SIngo Molnar 	return pmd_k;
192f2f13a85SIngo Molnar }
193f2f13a85SIngo Molnar 
19486cf69f1SJoerg Roedel void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
195f2f13a85SIngo Molnar {
19686cf69f1SJoerg Roedel 	unsigned long addr;
197f2f13a85SIngo Molnar 
19886cf69f1SJoerg Roedel 	for (addr = start & PMD_MASK;
19986cf69f1SJoerg Roedel 	     addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
20086cf69f1SJoerg Roedel 	     addr += PMD_SIZE) {
201f2f13a85SIngo Molnar 		struct page *page;
202f2f13a85SIngo Molnar 
203a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
204f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
205617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
206617d34d9SJeremy Fitzhardinge 
207a79e53d8SAndrea Arcangeli 			/* the pgt_lock only for Xen */
208617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
209617d34d9SJeremy Fitzhardinge 
210617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
21186cf69f1SJoerg Roedel 			vmalloc_sync_one(page_address(page), addr);
212617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
213f2f13a85SIngo Molnar 		}
214a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
215f2f13a85SIngo Molnar 	}
216f2f13a85SIngo Molnar }
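
/*
 * Sketch of the call side, assuming the generic mm behaviour gated by
 * ARCH_PAGE_TABLE_SYNC_MASK: after vmalloc instantiates new kernel
 * page table levels, every pgd on pgd_list is brought up to date via
 *
 *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *		arch_sync_kernel_mappings(start, end);
 */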
217f2f13a85SIngo Molnar 
218f2f13a85SIngo Molnar /*
219f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
220f2f13a85SIngo Molnar  */
221f2f13a85SIngo Molnar static inline void
222f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
223f2f13a85SIngo Molnar 		 struct task_struct *tsk)
224f2f13a85SIngo Molnar {
2259fda6a06SBrian Gerst #ifdef CONFIG_VM86
226f2f13a85SIngo Molnar 	unsigned long bit;
227f2f13a85SIngo Molnar 
2289fda6a06SBrian Gerst 	if (!v8086_mode(regs) || !tsk->thread.vm86)
229f2f13a85SIngo Molnar 		return;
230f2f13a85SIngo Molnar 
231f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
232f2f13a85SIngo Molnar 	if (bit < 32)
2339fda6a06SBrian Gerst 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
2349fda6a06SBrian Gerst #endif
235f2f13a85SIngo Molnar }
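
/*
 * Worked example for the bitmap math above: a vm86 fault at 0xA3000
 * gives bit = (0xA3000 - 0xA0000) >> PAGE_SHIFT = 3, so bit 3 of
 * screen_bitmap is set, marking page 0xA3000-0xA3FFF of the 32-page
 * legacy VGA window (0xA0000-0xBFFFF) as touched.
 */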
236c61e211dSHarvey Harrison 
237087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
238087975b0SAkinobu Mita {
239087975b0SAkinobu Mita 	return pfn < max_low_pfn;
240087975b0SAkinobu Mita }
241087975b0SAkinobu Mita 
242cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
243c61e211dSHarvey Harrison {
2446c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
245087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
246e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
247e0c4f675SKirill A. Shutemov 	pud_t *pud;
248087975b0SAkinobu Mita 	pmd_t *pmd;
249087975b0SAkinobu Mita 	pte_t *pte;
2502d4a7167SIngo Molnar 
251c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
25239e48d9bSJan Beulich 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
253087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
254087975b0SAkinobu Mita 		goto out;
25539e48d9bSJan Beulich #define pr_pde pr_cont
25639e48d9bSJan Beulich #else
25739e48d9bSJan Beulich #define pr_pde pr_info
258c61e211dSHarvey Harrison #endif
259e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
260e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
261e0c4f675SKirill A. Shutemov 	pmd = pmd_offset(pud, address);
26239e48d9bSJan Beulich 	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
26339e48d9bSJan Beulich #undef pr_pde
264c61e211dSHarvey Harrison 
265c61e211dSHarvey Harrison 	/*
266c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
267c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
268c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
2692d4a7167SIngo Molnar 	 * it's allocated already:
270c61e211dSHarvey Harrison 	 */
271087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
272087975b0SAkinobu Mita 		goto out;
2732d4a7167SIngo Molnar 
274087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
27539e48d9bSJan Beulich 	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
276087975b0SAkinobu Mita out:
27739e48d9bSJan Beulich 	pr_cont("\n");
278f2f13a85SIngo Molnar }
279f2f13a85SIngo Molnar 
280f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
281f2f13a85SIngo Molnar 
282e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD
283f2f13a85SIngo Molnar static const char errata93_warning[] =
284ad361c98SJoe Perches KERN_ERR
285ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
286ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
287ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
288ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
289e05139f2SJan Beulich #endif
290f2f13a85SIngo Molnar 
291f2f13a85SIngo Molnar /*
292f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
293f2f13a85SIngo Molnar  */
294f2f13a85SIngo Molnar static inline void
295f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
296f2f13a85SIngo Molnar 		 struct task_struct *tsk)
297f2f13a85SIngo Molnar {
298f2f13a85SIngo Molnar }
299f2f13a85SIngo Molnar 
300f2f13a85SIngo Molnar static int bad_address(void *p)
301f2f13a85SIngo Molnar {
302f2f13a85SIngo Molnar 	unsigned long dummy;
303f2f13a85SIngo Molnar 
304f2f13a85SIngo Molnar 	return probe_kernel_address((unsigned long *)p, dummy);
305f2f13a85SIngo Molnar }
306f2f13a85SIngo Molnar 
307f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
308f2f13a85SIngo Molnar {
3096c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
310087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
311e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
312c61e211dSHarvey Harrison 	pud_t *pud;
313c61e211dSHarvey Harrison 	pmd_t *pmd;
314c61e211dSHarvey Harrison 	pte_t *pte;
315c61e211dSHarvey Harrison 
3162d4a7167SIngo Molnar 	if (bad_address(pgd))
3172d4a7167SIngo Molnar 		goto bad;
3182d4a7167SIngo Molnar 
31939e48d9bSJan Beulich 	pr_info("PGD %lx ", pgd_val(*pgd));
3202d4a7167SIngo Molnar 
3212d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
3222d4a7167SIngo Molnar 		goto out;
323c61e211dSHarvey Harrison 
324e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
325e0c4f675SKirill A. Shutemov 	if (bad_address(p4d))
326e0c4f675SKirill A. Shutemov 		goto bad;
327e0c4f675SKirill A. Shutemov 
32839e48d9bSJan Beulich 	pr_cont("P4D %lx ", p4d_val(*p4d));
329e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d) || p4d_large(*p4d))
330e0c4f675SKirill A. Shutemov 		goto out;
331e0c4f675SKirill A. Shutemov 
332e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
3332d4a7167SIngo Molnar 	if (bad_address(pud))
3342d4a7167SIngo Molnar 		goto bad;
3352d4a7167SIngo Molnar 
33639e48d9bSJan Beulich 	pr_cont("PUD %lx ", pud_val(*pud));
337b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
3382d4a7167SIngo Molnar 		goto out;
339c61e211dSHarvey Harrison 
340c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
3412d4a7167SIngo Molnar 	if (bad_address(pmd))
3422d4a7167SIngo Molnar 		goto bad;
3432d4a7167SIngo Molnar 
34439e48d9bSJan Beulich 	pr_cont("PMD %lx ", pmd_val(*pmd));
3452d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
3462d4a7167SIngo Molnar 		goto out;
347c61e211dSHarvey Harrison 
348c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
3492d4a7167SIngo Molnar 	if (bad_address(pte))
3502d4a7167SIngo Molnar 		goto bad;
3512d4a7167SIngo Molnar 
35239e48d9bSJan Beulich 	pr_cont("PTE %lx", pte_val(*pte));
3532d4a7167SIngo Molnar out:
35439e48d9bSJan Beulich 	pr_cont("\n");
355c61e211dSHarvey Harrison 	return;
356c61e211dSHarvey Harrison bad:
35739e48d9bSJan Beulich 	pr_info("BAD\n");
358c61e211dSHarvey Harrison }
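
/*
 * Typical oops output from the walk above: for a non-present address,
 *
 *	PGD 0 P4D 0
 *
 * or, when more levels are populated,
 *
 *	PGD 7ff9e067 P4D 7ff9e067 PUD 7ff9d067 PMD 0
 *
 * (values here are illustrative).
 */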
359c61e211dSHarvey Harrison 
360f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
361c61e211dSHarvey Harrison 
3622d4a7167SIngo Molnar /*
3632d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
3642d4a7167SIngo Molnar  *
3652d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
3662d4a7167SIngo Molnar  * to avoid corruption of the 64-bit RIP register on C stepping K8.
3672d4a7167SIngo Molnar  *
3682d4a7167SIngo Molnar  * A lot of BIOSes that didn't get tested properly miss this.
3692d4a7167SIngo Molnar  *
3702d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
3712d4a7167SIngo Molnar  * Try to work around it here.
3722d4a7167SIngo Molnar  *
3732d4a7167SIngo Molnar  * Note that we only handle kernel faults here.
3742d4a7167SIngo Molnar  * Does nothing on 32-bit.
375c61e211dSHarvey Harrison  */
376c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
377c61e211dSHarvey Harrison {
378e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
379e05139f2SJan Beulich 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
380e05139f2SJan Beulich 	    || boot_cpu_data.x86 != 0xf)
381e05139f2SJan Beulich 		return 0;
382e05139f2SJan Beulich 
383c61e211dSHarvey Harrison 	if (address != regs->ip)
384c61e211dSHarvey Harrison 		return 0;
3852d4a7167SIngo Molnar 
386c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
387c61e211dSHarvey Harrison 		return 0;
3882d4a7167SIngo Molnar 
389c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
390c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
391c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
392a454ab31SIngo Molnar 		printk_once(errata93_warning);
393c61e211dSHarvey Harrison 		regs->ip = address;
394c61e211dSHarvey Harrison 		return 1;
395c61e211dSHarvey Harrison 	}
396c61e211dSHarvey Harrison #endif
397c61e211dSHarvey Harrison 	return 0;
398c61e211dSHarvey Harrison }
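
/*
 * Worked example, with a made-up address: the erratum clears the upper
 * half of RIP, so the fault arrives with address == regs->ip ==
 * 0x00000000812345f0.  OR-ing in 0xffffffff00000000 yields
 * 0xffffffff812345f0; if that falls in kernel text (_stext.._etext)
 * or module space, regs->ip is patched and execution resumes, hiding
 * the BIOS bug.
 */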
399c61e211dSHarvey Harrison 
400c61e211dSHarvey Harrison /*
4012d4a7167SIngo Molnar  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
4022d4a7167SIngo Molnar  * to illegal addresses >4GB.
4032d4a7167SIngo Molnar  *
4042d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
4052d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
406c61e211dSHarvey Harrison  * segment in the LDT is compatibility mode.
407c61e211dSHarvey Harrison  */
408c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
409c61e211dSHarvey Harrison {
410c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
4112d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
412c61e211dSHarvey Harrison 		return 1;
413c61e211dSHarvey Harrison #endif
414c61e211dSHarvey Harrison 	return 0;
415c61e211dSHarvey Harrison }
416c61e211dSHarvey Harrison 
4173e77abdaSThomas Gleixner /* Pentium F0 0F C7 C8 bug workaround: */
418c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
419c61e211dSHarvey Harrison {
420c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
4213e77abdaSThomas Gleixner 	if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
42249893c5cSThomas Gleixner 		handle_invalid_op(regs);
423c61e211dSHarvey Harrison 		return 1;
424c61e211dSHarvey Harrison 	}
425c61e211dSHarvey Harrison #endif
426c61e211dSHarvey Harrison 	return 0;
427c61e211dSHarvey Harrison }
428c61e211dSHarvey Harrison 
429a1a371c4SAndy Lutomirski static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
430a1a371c4SAndy Lutomirski {
431a1a371c4SAndy Lutomirski 	u32 offset = (index >> 3) * sizeof(struct desc_struct);
432a1a371c4SAndy Lutomirski 	unsigned long addr;
433a1a371c4SAndy Lutomirski 	struct ldttss_desc desc;
434a1a371c4SAndy Lutomirski 
435a1a371c4SAndy Lutomirski 	if (index == 0) {
436a1a371c4SAndy Lutomirski 		pr_alert("%s: NULL\n", name);
437a1a371c4SAndy Lutomirski 		return;
438a1a371c4SAndy Lutomirski 	}
439a1a371c4SAndy Lutomirski 
440a1a371c4SAndy Lutomirski 	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
441a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
442a1a371c4SAndy Lutomirski 		return;
443a1a371c4SAndy Lutomirski 	}
444a1a371c4SAndy Lutomirski 
445*fe557319SChristoph Hellwig 	if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
446a1a371c4SAndy Lutomirski 			      sizeof(struct ldttss_desc))) {
447a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
448a1a371c4SAndy Lutomirski 			 name, index);
449a1a371c4SAndy Lutomirski 		return;
450a1a371c4SAndy Lutomirski 	}
451a1a371c4SAndy Lutomirski 
4525ccd3528SColin Ian King 	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
453a1a371c4SAndy Lutomirski #ifdef CONFIG_X86_64
454a1a371c4SAndy Lutomirski 	addr |= ((u64)desc.base3 << 32);
455a1a371c4SAndy Lutomirski #endif
456a1a371c4SAndy Lutomirski 	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
457a1a371c4SAndy Lutomirski 		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
458a1a371c4SAndy Lutomirski }
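
/*
 * Sketch of the base reassembly above, with made-up descriptor fields
 * base0=0x1000, base1=0x02, base2=0x03, base3=0x4:
 *
 *	addr = 0x1000 | (0x02 << 16) | (0x03UL << 24) | (0x4UL << 32)
 *	     = 0x403021000
 *
 * i.e. the base bytes scattered across the LDT/TSS descriptor become
 * one linear address (base3 contributes only on 64-bit).
 */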
459a1a371c4SAndy Lutomirski 
4602d4a7167SIngo Molnar static void
461a2aa52abSIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
462c61e211dSHarvey Harrison {
463c61e211dSHarvey Harrison 	if (!oops_may_print())
464c61e211dSHarvey Harrison 		return;
465c61e211dSHarvey Harrison 
4661067f030SRicardo Neri 	if (error_code & X86_PF_INSTR) {
46793809be8SHarvey Harrison 		unsigned int level;
468426e34ccSMatt Fleming 		pgd_t *pgd;
469426e34ccSMatt Fleming 		pte_t *pte;
4702d4a7167SIngo Molnar 
4716c690ee1SAndy Lutomirski 		pgd = __va(read_cr3_pa());
472426e34ccSMatt Fleming 		pgd += pgd_index(address);
473426e34ccSMatt Fleming 
474426e34ccSMatt Fleming 		pte = lookup_address_in_pgd(pgd, address, &level);
475c61e211dSHarvey Harrison 
4768f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
477d79d0d8aSDmitry Vyukov 			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
478d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
479eff50c34SJiri Kosina 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
480eff50c34SJiri Kosina 				(pgd_flags(*pgd) & _PAGE_USER) &&
4811e02ce4cSAndy Lutomirski 				(__read_cr4() & X86_CR4_SMEP))
482d79d0d8aSDmitry Vyukov 			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
483d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
484c61e211dSHarvey Harrison 	}
485fd40d6e3SHarvey Harrison 
486f28b11a2SSean Christopherson 	if (address < PAGE_SIZE && !user_mode(regs))
487ea2f8d60SBorislav Petkov 		pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
488f28b11a2SSean Christopherson 			(void *)address);
489f28b11a2SSean Christopherson 	else
490ea2f8d60SBorislav Petkov 		pr_alert("BUG: unable to handle page fault for address: %px\n",
4914188f063SDmitry Vyukov 			(void *)address);
4922d4a7167SIngo Molnar 
493ea2f8d60SBorislav Petkov 	pr_alert("#PF: %s %s in %s mode\n",
49418ea35c5SSean Christopherson 		 (error_code & X86_PF_USER)  ? "user" : "supervisor",
49518ea35c5SSean Christopherson 		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
49618ea35c5SSean Christopherson 		 (error_code & X86_PF_WRITE) ? "write access" :
49718ea35c5SSean Christopherson 					       "read access",
49818ea35c5SSean Christopherson 			     user_mode(regs) ? "user" : "kernel");
49918ea35c5SSean Christopherson 	pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
50018ea35c5SSean Christopherson 		 !(error_code & X86_PF_PROT) ? "not-present page" :
50118ea35c5SSean Christopherson 		 (error_code & X86_PF_RSVD)  ? "reserved bit violation" :
50218ea35c5SSean Christopherson 		 (error_code & X86_PF_PK)    ? "protection keys violation" :
50318ea35c5SSean Christopherson 					       "permissions violation");
504a2aa52abSIngo Molnar 
505a1a371c4SAndy Lutomirski 	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
506a1a371c4SAndy Lutomirski 		struct desc_ptr idt, gdt;
507a1a371c4SAndy Lutomirski 		u16 ldtr, tr;
508a1a371c4SAndy Lutomirski 
509a1a371c4SAndy Lutomirski 		/*
510a1a371c4SAndy Lutomirski 		 * This can happen for quite a few reasons.  The more obvious
511a1a371c4SAndy Lutomirski 		 * ones are faults accessing the GDT or LDT.  Perhaps
512a1a371c4SAndy Lutomirski 		 * surprisingly, if the CPU tries to deliver a benign or
513a1a371c4SAndy Lutomirski 		 * contributory exception from user code and gets a page fault
514a1a371c4SAndy Lutomirski 		 * during delivery, the page fault can be delivered as though
515a1a371c4SAndy Lutomirski 		 * it originated directly from user code.  This could happen
516a1a371c4SAndy Lutomirski 		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
517a1a371c4SAndy Lutomirski 		 * kernel or IST stack.
518a1a371c4SAndy Lutomirski 		 */
519a1a371c4SAndy Lutomirski 		store_idt(&idt);
520a1a371c4SAndy Lutomirski 
521a1a371c4SAndy Lutomirski 		/* Usable even on Xen PV -- it's just slow. */
522a1a371c4SAndy Lutomirski 		native_store_gdt(&gdt);
523a1a371c4SAndy Lutomirski 
524a1a371c4SAndy Lutomirski 		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
525a1a371c4SAndy Lutomirski 			 idt.address, idt.size, gdt.address, gdt.size);
526a1a371c4SAndy Lutomirski 
527a1a371c4SAndy Lutomirski 		store_ldt(ldtr);
528a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "LDTR", ldtr);
529a1a371c4SAndy Lutomirski 
530a1a371c4SAndy Lutomirski 		store_tr(tr);
531a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "TR", tr);
532a1a371c4SAndy Lutomirski 	}
533a1a371c4SAndy Lutomirski 
534c61e211dSHarvey Harrison 	dump_pagetable(address);
535c61e211dSHarvey Harrison }
536c61e211dSHarvey Harrison 
5372d4a7167SIngo Molnar static noinline void
5382d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
5392d4a7167SIngo Molnar 	    unsigned long address)
540c61e211dSHarvey Harrison {
5412d4a7167SIngo Molnar 	struct task_struct *tsk;
5422d4a7167SIngo Molnar 	unsigned long flags;
5432d4a7167SIngo Molnar 	int sig;
5442d4a7167SIngo Molnar 
5452d4a7167SIngo Molnar 	flags = oops_begin();
5462d4a7167SIngo Molnar 	tsk = current;
5472d4a7167SIngo Molnar 	sig = SIGKILL;
548c61e211dSHarvey Harrison 
549c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
55092181f19SNick Piggin 	       tsk->comm, address);
551c61e211dSHarvey Harrison 	dump_pagetable(address);
5522d4a7167SIngo Molnar 
553c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
554874d93d1SAlexander van Heukelum 		sig = 0;
5552d4a7167SIngo Molnar 
556874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
557c61e211dSHarvey Harrison }
558c61e211dSHarvey Harrison 
559e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address,
560e49d3cbeSAndy Lutomirski 				unsigned long error_code)
561e49d3cbeSAndy Lutomirski {
562e49d3cbeSAndy Lutomirski 	struct task_struct *tsk = current;
563e49d3cbeSAndy Lutomirski 
564e49d3cbeSAndy Lutomirski 	/*
565e49d3cbeSAndy Lutomirski 	 * To avoid leaking information about the kernel page
566e49d3cbeSAndy Lutomirski 	 * table layout, pretend that user-mode accesses to
567e49d3cbeSAndy Lutomirski 	 * kernel addresses are always protection faults.
568e0a446ceSAndy Lutomirski 	 *
569e0a446ceSAndy Lutomirski 	 * NB: This means that failed vsyscalls with vsyscall=none
570e0a446ceSAndy Lutomirski 	 * will have the PROT bit.  This doesn't leak any
571e0a446ceSAndy Lutomirski 	 * information and does not appear to cause any problems.
572e49d3cbeSAndy Lutomirski 	 */
573e49d3cbeSAndy Lutomirski 	if (address >= TASK_SIZE_MAX)
574e49d3cbeSAndy Lutomirski 		error_code |= X86_PF_PROT;
575e49d3cbeSAndy Lutomirski 
576e49d3cbeSAndy Lutomirski 	tsk->thread.trap_nr = X86_TRAP_PF;
577e49d3cbeSAndy Lutomirski 	tsk->thread.error_code = error_code | X86_PF_USER;
578e49d3cbeSAndy Lutomirski 	tsk->thread.cr2 = address;
579e49d3cbeSAndy Lutomirski }
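
/*
 * Example of the fix-up above: a user-mode read of a kernel address
 * such as 0xffff888000000000 arrives without X86_PF_PROT, but since
 * the address is >= TASK_SIZE_MAX the recorded code becomes
 * X86_PF_PROT | X86_PF_USER, so the SIGSEGV siginfo cannot be used to
 * probe whether a kernel PTE was present at that address.
 */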
580e49d3cbeSAndy Lutomirski 
5812d4a7167SIngo Molnar static noinline void
5822d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
5834fc34901SAndy Lutomirski 	   unsigned long address, int signal, int si_code)
58492181f19SNick Piggin {
58592181f19SNick Piggin 	struct task_struct *tsk = current;
58692181f19SNick Piggin 	unsigned long flags;
58792181f19SNick Piggin 	int sig;
58892181f19SNick Piggin 
589ebb53e25SAndy Lutomirski 	if (user_mode(regs)) {
590ebb53e25SAndy Lutomirski 		/*
591ebb53e25SAndy Lutomirski 		 * This is an implicit supervisor-mode access from user
592ebb53e25SAndy Lutomirski 		 * mode.  Bypass all the kernel-mode recovery code and just
593ebb53e25SAndy Lutomirski 		 * OOPS.
594ebb53e25SAndy Lutomirski 		 */
595ebb53e25SAndy Lutomirski 		goto oops;
596ebb53e25SAndy Lutomirski 	}
597ebb53e25SAndy Lutomirski 
59892181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
59981fd9c18SJann Horn 	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
600c026b359SPeter Zijlstra 		/*
601c026b359SPeter Zijlstra 		 * Any interrupt that takes a fault gets the fixup. This makes
602c026b359SPeter Zijlstra 		 * the below recursive fault logic only apply to faults from
603c026b359SPeter Zijlstra 		 * task context.
604c026b359SPeter Zijlstra 		 */
605c026b359SPeter Zijlstra 		if (in_interrupt())
606c026b359SPeter Zijlstra 			return;
607c026b359SPeter Zijlstra 
608c026b359SPeter Zijlstra 		/*
609c026b359SPeter Zijlstra 		 * Per the above, we're !in_interrupt(), i.e. task context.
610c026b359SPeter Zijlstra 		 *
611c026b359SPeter Zijlstra 		 * In this case we need to make sure we're not recursively
612c026b359SPeter Zijlstra 		 * faulting through the emulate_vsyscall() logic.
613c026b359SPeter Zijlstra 		 */
6142a53ccbcSIngo Molnar 		if (current->thread.sig_on_uaccess_err && signal) {
615e49d3cbeSAndy Lutomirski 			set_signal_archinfo(address, error_code);
6164fc34901SAndy Lutomirski 
6174fc34901SAndy Lutomirski 			/* XXX: hwpoison faults will set the wrong code. */
6182e1661d2SEric W. Biederman 			force_sig_fault(signal, si_code, (void __user *)address);
6194fc34901SAndy Lutomirski 		}
620c026b359SPeter Zijlstra 
621c026b359SPeter Zijlstra 		/*
622c026b359SPeter Zijlstra 		 * Barring that, we can do the fixup and be happy.
623c026b359SPeter Zijlstra 		 */
62492181f19SNick Piggin 		return;
6254fc34901SAndy Lutomirski 	}
62692181f19SNick Piggin 
6276271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK
6286271cfdfSAndy Lutomirski 	/*
6296271cfdfSAndy Lutomirski 	 * Stack overflow?  During boot, we can fault near the initial
6306271cfdfSAndy Lutomirski 	 * stack in the direct map, but that's not an overflow -- check
6316271cfdfSAndy Lutomirski 	 * that we're in vmalloc space to avoid this.
6326271cfdfSAndy Lutomirski 	 */
6336271cfdfSAndy Lutomirski 	if (is_vmalloc_addr((void *)address) &&
6346271cfdfSAndy Lutomirski 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
6356271cfdfSAndy Lutomirski 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
636d876b673SThomas Gleixner 		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
6376271cfdfSAndy Lutomirski 		/*
6386271cfdfSAndy Lutomirski 		 * We're likely to be running with very little stack space
6396271cfdfSAndy Lutomirski 		 * left.  It's plausible that we'd hit this condition but
6406271cfdfSAndy Lutomirski 		 * double-fault even before we get this far, in which case
6416271cfdfSAndy Lutomirski 		 * we're fine: the double-fault handler will deal with it.
6426271cfdfSAndy Lutomirski 		 *
6436271cfdfSAndy Lutomirski 		 * We don't want to make it all the way into the oops code
6446271cfdfSAndy Lutomirski 		 * and then double-fault, though, because we're likely to
6456271cfdfSAndy Lutomirski 		 * break the console driver and lose most of the stack dump.
6466271cfdfSAndy Lutomirski 		 */
6476271cfdfSAndy Lutomirski 		asm volatile ("movq %[stack], %%rsp\n\t"
6486271cfdfSAndy Lutomirski 			      "call handle_stack_overflow\n\t"
6496271cfdfSAndy Lutomirski 			      "1: jmp 1b"
650f5caf621SJosh Poimboeuf 			      : ASM_CALL_CONSTRAINT
6516271cfdfSAndy Lutomirski 			      : "D" ("kernel stack overflow (page fault)"),
6526271cfdfSAndy Lutomirski 				"S" (regs), "d" (address),
6536271cfdfSAndy Lutomirski 				[stack] "rm" (stack));
6546271cfdfSAndy Lutomirski 		unreachable();
6556271cfdfSAndy Lutomirski 	}
6566271cfdfSAndy Lutomirski #endif
6576271cfdfSAndy Lutomirski 
65892181f19SNick Piggin 	/*
6592d4a7167SIngo Molnar 	 * 32-bit:
6602d4a7167SIngo Molnar 	 *
66192181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
66292181f19SNick Piggin 	 *   had been triggered by is_prefetch(), fixup_exception() would have
66392181f19SNick Piggin 	 *   handled it.
66492181f19SNick Piggin 	 *
6652d4a7167SIngo Molnar 	 * 64-bit:
6662d4a7167SIngo Molnar 	 *
66792181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
66892181f19SNick Piggin 	 */
66992181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
67092181f19SNick Piggin 		return;
67192181f19SNick Piggin 
67292181f19SNick Piggin 	if (is_errata93(regs, address))
67392181f19SNick Piggin 		return;
67492181f19SNick Piggin 
67592181f19SNick Piggin 	/*
6763425d934SSai Praneeth 	 * Buggy firmware could access regions which might page fault; try to
6773425d934SSai Praneeth 	 * recover from such faults.
6783425d934SSai Praneeth 	 */
6793425d934SSai Praneeth 	if (IS_ENABLED(CONFIG_EFI))
6803425d934SSai Praneeth 		efi_recover_from_page_fault(address);
6813425d934SSai Praneeth 
682ebb53e25SAndy Lutomirski oops:
6833425d934SSai Praneeth 	/*
68492181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
6852d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
68692181f19SNick Piggin 	 */
68792181f19SNick Piggin 	flags = oops_begin();
68892181f19SNick Piggin 
68992181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
69092181f19SNick Piggin 
691a70857e4SAaron Tomlin 	if (task_stack_end_corrupted(tsk))
692b0f4c4b3SPrarit Bhargava 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
69319803078SIngo Molnar 
69492181f19SNick Piggin 	sig = SIGKILL;
69592181f19SNick Piggin 	if (__die("Oops", regs, error_code))
69692181f19SNick Piggin 		sig = 0;
6972d4a7167SIngo Molnar 
69892181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
699b0f4c4b3SPrarit Bhargava 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
7002d4a7167SIngo Molnar 
70192181f19SNick Piggin 	oops_end(flags, regs, sig);
70292181f19SNick Piggin }
70392181f19SNick Piggin 
7042d4a7167SIngo Molnar /*
7052d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
7062d4a7167SIngo Molnar  * sysctl is set:
7072d4a7167SIngo Molnar  */
7082d4a7167SIngo Molnar static inline void
7092d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
7102d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
7112d4a7167SIngo Molnar {
712ba54d856SBorislav Petkov 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
713ba54d856SBorislav Petkov 
7142d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
7152d4a7167SIngo Molnar 		return;
7162d4a7167SIngo Molnar 
7172d4a7167SIngo Molnar 	if (!printk_ratelimit())
7182d4a7167SIngo Molnar 		return;
7192d4a7167SIngo Molnar 
72010a7e9d8SKees Cook 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
721ba54d856SBorislav Petkov 		loglvl, tsk->comm, task_pid_nr(tsk), address,
7222d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
7232d4a7167SIngo Molnar 
7242d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
7252d4a7167SIngo Molnar 
7262d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
727ba54d856SBorislav Petkov 
728342db04aSJann Horn 	show_opcodes(regs, loglvl);
7292d4a7167SIngo Molnar }
7302d4a7167SIngo Molnar 
73102e983b7SDave Hansen /*
73302e983b7SDave Hansen  * The (legacy) vsyscall page is the lone page in the kernel portion
73302e983b7SDave Hansen  * of the address space that has user-accessible permissions.
73402e983b7SDave Hansen  */
73502e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr)
73602e983b7SDave Hansen {
7373ae0ad92SDave Hansen 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
73802e983b7SDave Hansen }
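
/*
 * Example, assuming the usual VSYSCALL_ADDR of 0xffffffffff600000:
 * a fault at e.g. 0xffffffffff600400 satisfies
 * (vaddr & PAGE_MASK) == VSYSCALL_ADDR and is therefore routed to the
 * user-address path even though it lies above TASK_SIZE_MAX.
 */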
73902e983b7SDave Hansen 
7402d4a7167SIngo Molnar static void
7412d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
742419ceeb1SEric W. Biederman 		       unsigned long address, u32 pkey, int si_code)
74392181f19SNick Piggin {
74492181f19SNick Piggin 	struct task_struct *tsk = current;
74592181f19SNick Piggin 
74692181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
7476ea59b07SAndy Lutomirski 	if (user_mode(regs) && (error_code & X86_PF_USER)) {
74892181f19SNick Piggin 		/*
7492d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
75092181f19SNick Piggin 		 */
75192181f19SNick Piggin 		local_irq_enable();
75292181f19SNick Piggin 
75392181f19SNick Piggin 		/*
75492181f19SNick Piggin 		 * Valid to do another page fault here because this one came
7552d4a7167SIngo Molnar 		 * from user space:
75692181f19SNick Piggin 		 */
75792181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
75892181f19SNick Piggin 			return;
75992181f19SNick Piggin 
76092181f19SNick Piggin 		if (is_errata100(regs, address))
76192181f19SNick Piggin 			return;
76292181f19SNick Piggin 
763dc4fac84SAndy Lutomirski 		/*
764dc4fac84SAndy Lutomirski 		 * To avoid leaking information about the kernel page table
765dc4fac84SAndy Lutomirski 		 * layout, pretend that user-mode accesses to kernel addresses
766dc4fac84SAndy Lutomirski 		 * are always protection faults.
767dc4fac84SAndy Lutomirski 		 */
768dc4fac84SAndy Lutomirski 		if (address >= TASK_SIZE_MAX)
7691067f030SRicardo Neri 			error_code |= X86_PF_PROT;
7703ae36655SAndy Lutomirski 
771e575a86fSKees Cook 		if (likely(show_unhandled_signals))
7722d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
77392181f19SNick Piggin 
774e49d3cbeSAndy Lutomirski 		set_signal_archinfo(address, error_code);
7752d4a7167SIngo Molnar 
7769db812dbSEric W. Biederman 		if (si_code == SEGV_PKUERR)
777419ceeb1SEric W. Biederman 			force_sig_pkuerr((void __user *)address, pkey);
7789db812dbSEric W. Biederman 
7792e1661d2SEric W. Biederman 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
7802d4a7167SIngo Molnar 
781ca4c6a98SThomas Gleixner 		local_irq_disable();
782ca4c6a98SThomas Gleixner 
78392181f19SNick Piggin 		return;
78492181f19SNick Piggin 	}
78592181f19SNick Piggin 
78692181f19SNick Piggin 	if (is_f00f_bug(regs, address))
78792181f19SNick Piggin 		return;
78892181f19SNick Piggin 
7894fc34901SAndy Lutomirski 	no_context(regs, error_code, address, SIGSEGV, si_code);
79092181f19SNick Piggin }
79192181f19SNick Piggin 
7922d4a7167SIngo Molnar static noinline void
7932d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
794768fd9c6SEric W. Biederman 		     unsigned long address)
79592181f19SNick Piggin {
796419ceeb1SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
79792181f19SNick Piggin }
79892181f19SNick Piggin 
7992d4a7167SIngo Molnar static void
8002d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
801419ceeb1SEric W. Biederman 	   unsigned long address, u32 pkey, int si_code)
80292181f19SNick Piggin {
80392181f19SNick Piggin 	struct mm_struct *mm = current->mm;
80492181f19SNick Piggin 	/*
80592181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map.
80692181f19SNick Piggin 	 * Fix it, but check whether it's a kernel or user address first.
80792181f19SNick Piggin 	 */
808d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
80992181f19SNick Piggin 
810aba1ecd3SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
81192181f19SNick Piggin }
81292181f19SNick Piggin 
8132d4a7167SIngo Molnar static noinline void
8142d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
81592181f19SNick Piggin {
816419ceeb1SEric W. Biederman 	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
81792181f19SNick Piggin }
81892181f19SNick Piggin 
81933a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code,
82033a709b2SDave Hansen 		struct vm_area_struct *vma)
82133a709b2SDave Hansen {
82207f146f5SDave Hansen 	/* This code is always called on the current mm */
82307f146f5SDave Hansen 	bool foreign = false;
82407f146f5SDave Hansen 
82533a709b2SDave Hansen 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
82633a709b2SDave Hansen 		return false;
8271067f030SRicardo Neri 	if (error_code & X86_PF_PK)
82833a709b2SDave Hansen 		return true;
82907f146f5SDave Hansen 	/* this checks permission keys on the VMA: */
8301067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
8311067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
83207f146f5SDave Hansen 		return true;
83333a709b2SDave Hansen 	return false;
83492181f19SNick Piggin }
83592181f19SNick Piggin 
8362d4a7167SIngo Molnar static noinline void
8372d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
8387b2d0dbaSDave Hansen 		      unsigned long address, struct vm_area_struct *vma)
83992181f19SNick Piggin {
840019132ffSDave Hansen 	/*
841019132ffSDave Hansen 	 * This OSPKE check is not strictly necessary at runtime.
842019132ffSDave Hansen 	 * But, doing it this way allows compiler optimizations
843019132ffSDave Hansen 	 * if pkeys are compiled out.
844019132ffSDave Hansen 	 */
845aba1ecd3SEric W. Biederman 	if (bad_area_access_from_pkeys(error_code, vma)) {
8469db812dbSEric W. Biederman 		/*
8479db812dbSEric W. Biederman 		 * A protection key fault means that the PKRU value did not allow
8489db812dbSEric W. Biederman 		 * access to some PTE.  Userspace can figure out what PKRU was
8499db812dbSEric W. Biederman 		 * from the XSAVE state.  This function captures the pkey from
8509db812dbSEric W. Biederman 		 * the vma and passes it to userspace so userspace can discover
8519db812dbSEric W. Biederman 		 * which protection key was set on the PTE.
8529db812dbSEric W. Biederman 		 *
8539db812dbSEric W. Biederman 		 * If we get here, we know that the hardware signaled a X86_PF_PK
8549db812dbSEric W. Biederman 		 * fault and that there was a VMA once we got in the fault
8559db812dbSEric W. Biederman 		 * handler.  It does *not* guarantee that the VMA we find here
8569db812dbSEric W. Biederman 		 * was the one that we faulted on.
8579db812dbSEric W. Biederman 		 *
8589db812dbSEric W. Biederman 		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
8599db812dbSEric W. Biederman 		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
8609db812dbSEric W. Biederman 		 * 3. T1   : faults...
8619db812dbSEric W. Biederman 		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
862c1e8d7c6SMichel Lespinasse 		 * 5. T1   : enters fault handler, takes mmap_lock, etc...
8639db812dbSEric W. Biederman 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
8649db812dbSEric W. Biederman 		 *	     faulted on a pte with its pkey=4.
8659db812dbSEric W. Biederman 		 */
866aba1ecd3SEric W. Biederman 		u32 pkey = vma_pkey(vma);
8679db812dbSEric W. Biederman 
868419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
869aba1ecd3SEric W. Biederman 	} else {
870419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
871aba1ecd3SEric W. Biederman 	}
87292181f19SNick Piggin }
87392181f19SNick Piggin 
8742d4a7167SIngo Molnar static void
875a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
8763d353901SSouptick Joarder 	  vm_fault_t fault)
87792181f19SNick Piggin {
8782d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
8791067f030SRicardo Neri 	if (!(error_code & X86_PF_USER)) {
8804fc34901SAndy Lutomirski 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
88196054569SLinus Torvalds 		return;
88296054569SLinus Torvalds 	}
8832d4a7167SIngo Molnar 
884cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
88592181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
88692181f19SNick Piggin 		return;
8872d4a7167SIngo Molnar 
888e49d3cbeSAndy Lutomirski 	set_signal_archinfo(address, error_code);
8892d4a7167SIngo Molnar 
890a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
891f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
892318759b4SEric W. Biederman 		struct task_struct *tsk = current;
89340e55394SEric W. Biederman 		unsigned lsb = 0;
89440e55394SEric W. Biederman 
89540e55394SEric W. Biederman 		pr_err(
896a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
897a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
89840e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON_LARGE)
89940e55394SEric W. Biederman 			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
90040e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON)
90140e55394SEric W. Biederman 			lsb = PAGE_SHIFT;
902f8eac901SEric W. Biederman 		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
90340e55394SEric W. Biederman 		return;
904a6e04aa9SAndi Kleen 	}
905a6e04aa9SAndi Kleen #endif
9062e1661d2SEric W. Biederman 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
90792181f19SNick Piggin }
90892181f19SNick Piggin 
9093a13c4d7SJohannes Weiner static noinline void
9102d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
91125c102d8SEric W. Biederman 	       unsigned long address, vm_fault_t fault)
91292181f19SNick Piggin {
9131067f030SRicardo Neri 	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
9144fc34901SAndy Lutomirski 		no_context(regs, error_code, address, 0, 0);
9153a13c4d7SJohannes Weiner 		return;
916b80ef10eSKOSAKI Motohiro 	}
917b80ef10eSKOSAKI Motohiro 
9182d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
919f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
9201067f030SRicardo Neri 		if (!(error_code & X86_PF_USER)) {
9214fc34901SAndy Lutomirski 			no_context(regs, error_code, address,
9224fc34901SAndy Lutomirski 				   SIGSEGV, SEGV_MAPERR);
9233a13c4d7SJohannes Weiner 			return;
924f8626854SAndrey Vagin 		}
925f8626854SAndrey Vagin 
926c2d23f91SDavid Rientjes 		/*
927c2d23f91SDavid Rientjes 		 * We ran out of memory, call the OOM killer, and return to
928c2d23f91SDavid Rientjes 		 * userspace (which will retry the fault, or kill us if we got
929c2d23f91SDavid Rientjes 		 * oom-killed):
930c2d23f91SDavid Rientjes 		 */
931c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
9322d4a7167SIngo Molnar 	} else {
933f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
934f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
93527274f73SEric W. Biederman 			do_sigbus(regs, error_code, address, fault);
93633692f27SLinus Torvalds 		else if (fault & VM_FAULT_SIGSEGV)
937768fd9c6SEric W. Biederman 			bad_area_nosemaphore(regs, error_code, address);
93892181f19SNick Piggin 		else
93992181f19SNick Piggin 			BUG();
94092181f19SNick Piggin 	}
9412d4a7167SIngo Molnar }
94292181f19SNick Piggin 
9438fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
944d8b57bb7SThomas Gleixner {
9451067f030SRicardo Neri 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
946d8b57bb7SThomas Gleixner 		return 0;
9472d4a7167SIngo Molnar 
9481067f030SRicardo Neri 	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
949d8b57bb7SThomas Gleixner 		return 0;
950d8b57bb7SThomas Gleixner 
951d8b57bb7SThomas Gleixner 	return 1;
952d8b57bb7SThomas Gleixner }
953d8b57bb7SThomas Gleixner 
954c61e211dSHarvey Harrison /*
9552d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
9562d4a7167SIngo Molnar  *
9572d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
9582d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
9592d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
9602d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
9612d4a7167SIngo Molnar  * on other processors.
9622d4a7167SIngo Molnar  *
96331668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
96431668511SDavid Vrabel  * fewer permissions than the page table entry.  Non-present (P = 0)
96531668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
96631668511SDavid Vrabel  *
9675b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
9685b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
96931668511SDavid Vrabel  *
97031668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
97131668511SDavid Vrabel  *
97231668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
97331668511SDavid Vrabel  * (Optional Invalidation).
9745b727a3bSJeremy Fitzhardinge  */
9759326638cSMasami Hiramatsu static noinline int
9768fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
9775b727a3bSJeremy Fitzhardinge {
9785b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
979e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
9805b727a3bSJeremy Fitzhardinge 	pud_t *pud;
9815b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
9825b727a3bSJeremy Fitzhardinge 	pte_t *pte;
9833c3e5694SSteven Rostedt 	int ret;
9845b727a3bSJeremy Fitzhardinge 
98531668511SDavid Vrabel 	/*
98631668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
98731668511SDavid Vrabel 	 * spurious faults.
98831668511SDavid Vrabel 	 *
98931668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
99031668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
99131668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
99231668511SDavid Vrabel 	 * faults.
99331668511SDavid Vrabel 	 */
9941067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
9951067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
9965b727a3bSJeremy Fitzhardinge 		return 0;
9975b727a3bSJeremy Fitzhardinge 
9985b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
9995b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
10005b727a3bSJeremy Fitzhardinge 		return 0;
10015b727a3bSJeremy Fitzhardinge 
1002e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1003e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1004e0c4f675SKirill A. Shutemov 		return 0;
1005e0c4f675SKirill A. Shutemov 
1006e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
10078fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1008e0c4f675SKirill A. Shutemov 
1009e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
10105b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
10115b727a3bSJeremy Fitzhardinge 		return 0;
10125b727a3bSJeremy Fitzhardinge 
1013d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
10148fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1015d8b57bb7SThomas Gleixner 
10165b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
10175b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
10185b727a3bSJeremy Fitzhardinge 		return 0;
10195b727a3bSJeremy Fitzhardinge 
1020d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
10218fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1022d8b57bb7SThomas Gleixner 
10235b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1024954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
10255b727a3bSJeremy Fitzhardinge 		return 0;
10265b727a3bSJeremy Fitzhardinge 
10278fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
10283c3e5694SSteven Rostedt 	if (!ret)
10293c3e5694SSteven Rostedt 		return 0;
10303c3e5694SSteven Rostedt 
10313c3e5694SSteven Rostedt 	/*
10322d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
10332d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
10343c3e5694SSteven Rostedt 	 */
10358fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
10363c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
10372d4a7167SIngo Molnar 
10383c3e5694SSteven Rostedt 	return ret;
10395b727a3bSJeremy Fitzhardinge }
10408fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
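
/*
 * Concrete scenario for the lazy-flush logic above, in sketch form:
 * CPU0 upgrades a kernel page from RO to RW but sends no TLB-flush
 * IPI.  CPU1 still holds the stale RO translation and takes a write
 * fault with error_code == (X86_PF_WRITE | X86_PF_PROT).  The walk
 * above finds pte_write() true in the live page tables, returns 1,
 * and the fault is dismissed; the retry refetches the updated PTE.
 */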
10415b727a3bSJeremy Fitzhardinge 
1042c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1043c61e211dSHarvey Harrison 
10442d4a7167SIngo Molnar static inline int
104568da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
104692181f19SNick Piggin {
104707f146f5SDave Hansen 	/* This is only called for the current mm, so: */
104807f146f5SDave Hansen 	bool foreign = false;
1049e8c6226dSDave Hansen 
1050e8c6226dSDave Hansen 	/*
1051e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1052e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1053e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1054e8c6226dSDave Hansen 	 */
10551067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1056e8c6226dSDave Hansen 		return 1;
1057e8c6226dSDave Hansen 
105833a709b2SDave Hansen 	/*
105907f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
10601067f030SRicardo Neri 	 * faults just to hit an X86_PF_PK fault as soon as we fill in a
106107f146f5SDave Hansen 	 * page.
106207f146f5SDave Hansen 	 */
10631067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
10641067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
106507f146f5SDave Hansen 		return 1;
106633a709b2SDave Hansen 
10671067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
10682d4a7167SIngo Molnar 		/* write, present and write, not present: */
106992181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
107092181f19SNick Piggin 			return 1;
10712d4a7167SIngo Molnar 		return 0;
10722d4a7167SIngo Molnar 	}
10732d4a7167SIngo Molnar 
10742d4a7167SIngo Molnar 	/* read, present: */
10751067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
107692181f19SNick Piggin 		return 1;
10772d4a7167SIngo Molnar 
10782d4a7167SIngo Molnar 	/* read, not present: */
10793122e80eSAnshuman Khandual 	if (unlikely(!vma_is_accessible(vma)))
108092181f19SNick Piggin 		return 1;
108192181f19SNick Piggin 
108292181f19SNick Piggin 	return 0;
108392181f19SNick Piggin }
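/*
 * Editor's summary of access_error() above (illustrative, derived
 * directly from the checks in the function):
 *
 *   condition                                     result
 *   ---------                                     ------
 *   X86_PF_PK set                                 1 (pkey fault, fatal)
 *   arch_vma_access_permitted() denies access     1
 *   write to a VMA without VM_WRITE               1
 *   write to a writable VMA                       0 (handled normally)
 *   read of a present page (X86_PF_PROT set)      1 (protection fault)
 *   read of an inaccessible VMA                   1
 *   read, not present, accessible VMA             0 (demand fault)
 */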
108492181f19SNick Piggin 
10850973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
10860973a06cSHiroshi Shimamoto {
10873ae0ad92SDave Hansen 	/*
10883ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
10893ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
10903ae0ad92SDave Hansen 	 * address space.
10913ae0ad92SDave Hansen 	 */
10923ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
10933ae0ad92SDave Hansen 		return false;
10943ae0ad92SDave Hansen 
1095d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
10960973a06cSHiroshi Shimamoto }
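/*
 * Editor's sketch with assumed x86-64 4-level-paging values, purely
 * illustrative:
 *
 *   0x00007f1234560000  typical user mapping  -> false (< TASK_SIZE_MAX)
 *   0xffffffffff600000  vsyscall page         -> false (special-cased)
 *   0xffffc90000001000  vmalloc range         -> true  (kernel space)
 */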
10970973a06cSHiroshi Shimamoto 
1098c61e211dSHarvey Harrison /*
10998fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11008fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11018fed6200SDave Hansen  * ran in userspace or the kernel.
1102c61e211dSHarvey Harrison  */
11038fed6200SDave Hansen static void
11048fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11050ac09f9fSJiri Olsa 		   unsigned long address)
1106c61e211dSHarvey Harrison {
11078fed6200SDave Hansen 	/*
1108367e3f1dSDave Hansen 	 * Protection key exceptions only happen on user pages.  We
1109367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1110367e3f1dSDave Hansen 	 * space, so do not expect them here.
1111367e3f1dSDave Hansen 	 */
1112367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1113367e3f1dSDave Hansen 
11148fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
11158fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
11168fed6200SDave Hansen 		return;
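	/*
	 * Editor's note: "spurious" means the page tables already permit
	 * the access, but this CPU still holds a stale, more restrictive
	 * TLB entry.  x86 allows permission-*increasing* updates to skip
	 * the immediate TLB flush, so such faults are expected and are
	 * simply dismissed once the walk confirms the permissions.
	 */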
11178fed6200SDave Hansen 
11188fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
1119b98cca44SAnshuman Khandual 	if (kprobe_page_fault(regs, X86_TRAP_PF))
11208fed6200SDave Hansen 		return;
11218fed6200SDave Hansen 
11228fed6200SDave Hansen 	/*
11238fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
11248fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
11258fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
11268fed6200SDave Hansen 	 *
11278fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
11288fed6200SDave Hansen 	 * fault we could otherwise deadlock:
11298fed6200SDave Hansen 	 */
1130ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
11318fed6200SDave Hansen }
11328fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
11338fed6200SDave Hansen 
1134aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1135aa37c51bSDave Hansen static inline
1136aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1137aa37c51bSDave Hansen 			unsigned long hw_error_code,
1138c61e211dSHarvey Harrison 			unsigned long address)
1139c61e211dSHarvey Harrison {
1140c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1141c61e211dSHarvey Harrison 	struct task_struct *tsk;
11422d4a7167SIngo Molnar 	struct mm_struct *mm;
114350a7ca3cSSouptick Joarder 	vm_fault_t fault, major = 0;
1144dde16072SPeter Xu 	unsigned int flags = FAULT_FLAG_DEFAULT;
1145c61e211dSHarvey Harrison 
1146c61e211dSHarvey Harrison 	tsk = current;
1147c61e211dSHarvey Harrison 	mm = tsk->mm;
11482d4a7167SIngo Molnar 
11492d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1150b98cca44SAnshuman Khandual 	if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
11519be260a6SMasami Hiramatsu 		return;
1152e00b12e6SPeter Zijlstra 
11535b0c2cacSDave Hansen 	/*
11545b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
11555b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
11565b0c2cacSDave Hansen 	 */
1157164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1158164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1159e00b12e6SPeter Zijlstra 
11605b0c2cacSDave Hansen 	/*
1161e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1162e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1163e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1164e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set, so in all cases SMAP
1165e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
11665b0c2cacSDave Hansen 	 */
1167a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1168a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1169e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1170a15781b5SAndy Lutomirski 	{
1171ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1172e00b12e6SPeter Zijlstra 		return;
1173e00b12e6SPeter Zijlstra 	}
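	/*
	 * Editor's note: with SMAP active, supervisor-mode accesses to
	 * user pages fault unless EFLAGS.AC is set.  The kernel sets AC
	 * only around sanctioned uaccess regions (stac()/clac()), so a
	 * clear AC here means the kernel touched user memory where it
	 * should not have.
	 */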
1174e00b12e6SPeter Zijlstra 
1175e00b12e6SPeter Zijlstra 	/*
1176e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
117770ffdb93SDavid Hildenbrand 	 * in a region with page faults disabled, then we must not take the fault.
1178e00b12e6SPeter Zijlstra 	 */
117970ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1180ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1181e00b12e6SPeter Zijlstra 		return;
1182e00b12e6SPeter Zijlstra 	}
1183e00b12e6SPeter Zijlstra 
1184c61e211dSHarvey Harrison 	/*
1185891cffbdSLinus Torvalds 	 * It's safe to allow IRQs after CR2 has been saved and the
1186891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1187891cffbdSLinus Torvalds 	 *
1188891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
11892d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1190c61e211dSHarvey Harrison 	 */
1191f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1192891cffbdSLinus Torvalds 		local_irq_enable();
1193759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
11942d4a7167SIngo Molnar 	} else {
11952d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1196c61e211dSHarvey Harrison 			local_irq_enable();
11972d4a7167SIngo Molnar 	}
1198c61e211dSHarvey Harrison 
1199a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
12007dd1fcc2SPeter Zijlstra 
12010ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1202759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
12030ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1204d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1205759496baSJohannes Weiner 
12063ae0ad92SDave Hansen #ifdef CONFIG_X86_64
12073a1dfe6eSIngo Molnar 	/*
1208918ce325SAndy Lutomirski 	 * Faults in the vsyscall page might need emulation.  The
1209918ce325SAndy Lutomirski 	 * vsyscall page is at a high address (>PAGE_OFFSET), but is
1210918ce325SAndy Lutomirski 	 * considered to be part of the user address space.
1211c61e211dSHarvey Harrison 	 *
12123ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
12133ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
1214e0a446ceSAndy Lutomirski 	 *
1215e0a446ceSAndy Lutomirski 	 * PKRU never rejects instruction fetches, so we don't need
1216e0a446ceSAndy Lutomirski 	 * to consider the PF_PK bit.
12173ae0ad92SDave Hansen 	 */
1218918ce325SAndy Lutomirski 	if (is_vsyscall_vaddr(address)) {
1219918ce325SAndy Lutomirski 		if (emulate_vsyscall(hw_error_code, regs, address))
12203ae0ad92SDave Hansen 			return;
12213ae0ad92SDave Hansen 	}
12223ae0ad92SDave Hansen #endif
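/*
 * Editor's note: when the legacy vsyscall page is in "emulate" mode it
 * is mapped non-executable, so calls to its fixed entry points
 * (gettimeofday/time/getcpu) fault here and emulate_vsyscall()
 * performs the equivalent syscall before resuming the task.
 */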
12233ae0ad92SDave Hansen 
1224c61e211dSHarvey Harrison 	/*
122588259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
122688259744SDave Hansen 	 * on well-defined single instructions listed in the exception
122788259744SDave Hansen 	 * tables.  But an erroneous kernel fault outside one of those
1228c1e8d7c6SMichel Lespinasse 	 * areas, in a context that also holds mmap_lock, might deadlock
122988259744SDave Hansen 	 * attempting to validate the fault against the address space.
1230c61e211dSHarvey Harrison 	 *
123188259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
123288259744SDave Hansen 	 * risk of a deadlock.  This happens if we
1233c1e8d7c6SMichel Lespinasse 	 * 1. Failed to acquire mmap_lock, and
12346344be60SAndy Lutomirski 	 * 2. The access did not originate in userspace.
1235c61e211dSHarvey Harrison 	 */
1236d8ed45c5SMichel Lespinasse 	if (unlikely(!mmap_read_trylock(mm))) {
12376344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
123888259744SDave Hansen 			/*
123988259744SDave Hansen 			 * Fault from kernel code that we do
124088259744SDave Hansen 			 * not expect to fault.
124188259744SDave Hansen 			 */
12420ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
124392181f19SNick Piggin 			return;
124492181f19SNick Piggin 		}
1245d065bd81SMichel Lespinasse retry:
1246d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
124701006074SPeter Zijlstra 	} else {
124801006074SPeter Zijlstra 		/*
12492d4a7167SIngo Molnar 		 * The above mmap_read_trylock() might have succeeded in
12502d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
12512d4a7167SIngo Molnar 		 * mmap_read_lock():
125201006074SPeter Zijlstra 		 */
125301006074SPeter Zijlstra 		might_sleep();
1254c61e211dSHarvey Harrison 	}
1255c61e211dSHarvey Harrison 
1256c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
125792181f19SNick Piggin 	if (unlikely(!vma)) {
12580ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
125992181f19SNick Piggin 		return;
126092181f19SNick Piggin 	}
126192181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1262c61e211dSHarvey Harrison 		goto good_area;
126392181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
12640ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
126592181f19SNick Piggin 		return;
126692181f19SNick Piggin 	}
126792181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
12680ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
126992181f19SNick Piggin 		return;
127092181f19SNick Piggin 	}
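	/*
	 * Editor's note: the three checks above distinguish (1) no VMA at
	 * or above the address, (2) the address falling below a VMA that
	 * is not a grow-down stack, and (3) a stack VMA that cannot be
	 * legally expanded down to cover the address.  Only when
	 * expand_stack() succeeds do we fall through with a valid VMA.
	 */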
127192181f19SNick Piggin 
1272c61e211dSHarvey Harrison 	/*
1273c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1274c61e211dSHarvey Harrison 	 * we can handle it.
1275c61e211dSHarvey Harrison 	 */
1276c61e211dSHarvey Harrison good_area:
12770ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
12780ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
127992181f19SNick Piggin 		return;
1280c61e211dSHarvey Harrison 	}
1281c61e211dSHarvey Harrison 
1282c61e211dSHarvey Harrison 	/*
1283c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1284c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
12859a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1286c1e8d7c6SMichel Lespinasse 	 * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
1287cb0631fdSVlastimil Babka 	 *
1288c1e8d7c6SMichel Lespinasse 	 * Note that handle_userfault() may also release and reacquire mmap_lock
1289cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1290cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1291cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1292cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1293cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1294c61e211dSHarvey Harrison 	 */
1295dcddffd4SKirill A. Shutemov 	fault = handle_mm_fault(vma, address, flags);
129626178ec1SLinus Torvalds 	major |= fault & VM_FAULT_MAJOR;
12972d4a7167SIngo Molnar 
129839678191SPeter Xu 	/* Quick path to respond to signals */
129939678191SPeter Xu 	if (fault_signal_pending(fault, regs)) {
130039678191SPeter Xu 		if (!user_mode(regs))
130139678191SPeter Xu 			no_context(regs, hw_error_code, address, SIGBUS,
130239678191SPeter Xu 				   BUS_ADRERR);
130339678191SPeter Xu 		return;
130439678191SPeter Xu 	}
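	/*
	 * Editor's note: fault_signal_pending() is true when the fault
	 * core returned VM_FAULT_RETRY because a signal (a fatal one, or
	 * any signal for a user-mode fault) interrupted it.  User tasks
	 * simply return and handle the signal; kernel-mode faults must be
	 * fixed up via no_context() as above.
	 */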
130539678191SPeter Xu 
13063a13c4d7SJohannes Weiner 	/*
1307c1e8d7c6SMichel Lespinasse 	 * If we need to retry, the mmap_lock has already been released,
130826178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
130926178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
13103a13c4d7SJohannes Weiner 	 */
131139678191SPeter Xu 	if (unlikely((fault & VM_FAULT_RETRY) &&
131239678191SPeter Xu 		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
131326178ec1SLinus Torvalds 		flags |= FAULT_FLAG_TRIED;
131426178ec1SLinus Torvalds 		goto retry;
131526178ec1SLinus Torvalds 	}
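	/*
	 * Editor's note on the retry protocol (illustrative summary): in
	 * this kernel, FAULT_FLAG_DEFAULT includes FAULT_FLAG_ALLOW_RETRY,
	 * so on VM_FAULT_RETRY the core fault code has already dropped
	 * mmap_lock; we mark the attempt with FAULT_FLAG_TRIED and loop
	 * back to the 'retry' label to take the lock and try again.
	 */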
131626178ec1SLinus Torvalds 
1317d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
131826178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
13190ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
132037b23e05SKOSAKI Motohiro 		return;
132137b23e05SKOSAKI Motohiro 	}
132237b23e05SKOSAKI Motohiro 
132337b23e05SKOSAKI Motohiro 	/*
132426178ec1SLinus Torvalds 	 * Major/minor page fault accounting.  If any of the fault
132526178ec1SLinus Torvalds 	 * attempts returned VM_FAULT_MAJOR, we account it as a major fault.
1326d065bd81SMichel Lespinasse 	 */
132726178ec1SLinus Torvalds 	if (major) {
1328c61e211dSHarvey Harrison 		tsk->maj_flt++;
132926178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1330ac17dc8eSPeter Zijlstra 	} else {
1331c61e211dSHarvey Harrison 		tsk->min_flt++;
133226178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1333d065bd81SMichel Lespinasse 	}
1334c61e211dSHarvey Harrison 
13358c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1336c61e211dSHarvey Harrison }
1337aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1338aa37c51bSDave Hansen 
1339a0d14b89SPeter Zijlstra static __always_inline void
1340a0d14b89SPeter Zijlstra trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
1341a0d14b89SPeter Zijlstra 			 unsigned long address)
1342d34603b0SSeiji Aguchi {
1343a0d14b89SPeter Zijlstra 	if (!trace_pagefault_enabled())
1344a0d14b89SPeter Zijlstra 		return;
1345a0d14b89SPeter Zijlstra 
1346d34603b0SSeiji Aguchi 	if (user_mode(regs))
1347d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1348d34603b0SSeiji Aguchi 	else
1349d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1350d34603b0SSeiji Aguchi }
1351d34603b0SSeiji Aguchi 
135291eeafeaSThomas Gleixner static __always_inline void
135391eeafeaSThomas Gleixner handle_page_fault(struct pt_regs *regs, unsigned long error_code,
1354ee6352b2SFrederic Weisbecker 			      unsigned long address)
135511a7ffb0SThomas Gleixner {
135691eeafeaSThomas Gleixner 	trace_page_fault_entries(regs, error_code, address);
135791eeafeaSThomas Gleixner 
135891eeafeaSThomas Gleixner 	if (unlikely(kmmio_fault(regs, address)))
135991eeafeaSThomas Gleixner 		return;
136091eeafeaSThomas Gleixner 
136191eeafeaSThomas Gleixner 	/* Was the fault on kernel-controlled part of the address space? */
136291eeafeaSThomas Gleixner 	if (unlikely(fault_in_kernel_space(address))) {
136391eeafeaSThomas Gleixner 		do_kern_addr_fault(regs, error_code, address);
136491eeafeaSThomas Gleixner 	} else {
136591eeafeaSThomas Gleixner 		do_user_addr_fault(regs, error_code, address);
136691eeafeaSThomas Gleixner 		/*
136791eeafeaSThomas Gleixner 		 * User address page fault handling might have reenabled
136891eeafeaSThomas Gleixner 		 * interrupts. Fixing up all potential exit points of
136991eeafeaSThomas Gleixner 		 * do_user_addr_fault() and its leaf functions is just not
137091eeafeaSThomas Gleixner 		 * doable w/o creating an unholy mess or turning the code
137191eeafeaSThomas Gleixner 		 * doable without creating an unholy mess or turning the code
137291eeafeaSThomas Gleixner 		 */
137391eeafeaSThomas Gleixner 		local_irq_disable();
137491eeafeaSThomas Gleixner 	}
137591eeafeaSThomas Gleixner }
137691eeafeaSThomas Gleixner 
137791eeafeaSThomas Gleixner DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
137891eeafeaSThomas Gleixner {
137991eeafeaSThomas Gleixner 	unsigned long address = read_cr2();
138091eeafeaSThomas Gleixner 	bool rcu_exit;
138191eeafeaSThomas Gleixner 
1382da1c55f1SMichel Lespinasse 	prefetchw(&current->mm->mmap_lock);
138391eeafeaSThomas Gleixner 
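	/*
	 * Editor's note: CR2 must be read before anything here can fault
	 * again and overwrite it, which is why read_cr2() is the very
	 * first action above; the prefetchw() merely warms the cache
	 * line of mmap_lock for the likely acquisition later.
	 */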
1384ef68017eSAndy Lutomirski 	/*
1385ef68017eSAndy Lutomirski 	 * KVM has two types of events that are, logically, interrupts, but
1386ef68017eSAndy Lutomirski 	 * are unfortunately delivered using the #PF vector.  These events are
1387ef68017eSAndy Lutomirski 	 * "you just accessed valid memory, but the host doesn't have it right
1388ef68017eSAndy Lutomirski 	 * now, so I'll put you to sleep if you continue" and "that memory
1389ef68017eSAndy Lutomirski 	 * you tried to access earlier is available now."
1390ef68017eSAndy Lutomirski 	 *
1391ef68017eSAndy Lutomirski 	 * We are relying on the interrupted context being sane (valid RSP,
1392ef68017eSAndy Lutomirski 	 * relevant locks not held, etc.), which is fine as long as the
1393ef68017eSAndy Lutomirski 	 * interrupted context had IF=1.  We are also relying on the KVM
1394ef68017eSAndy Lutomirski 	 * async pf type field and CR2 being read consistently instead of
1395ef68017eSAndy Lutomirski 	 * getting values from real and async page faults mixed up.
1396ef68017eSAndy Lutomirski 	 *
1397ef68017eSAndy Lutomirski 	 * Fingers crossed.
139891eeafeaSThomas Gleixner 	 *
139991eeafeaSThomas Gleixner 	 * The async #PF handling code takes care of idtentry handling
140091eeafeaSThomas Gleixner 	 * itself.
1401ef68017eSAndy Lutomirski 	 */
1402ef68017eSAndy Lutomirski 	if (kvm_handle_async_pf(regs, (u32)address))
1403ef68017eSAndy Lutomirski 		return;
1404ef68017eSAndy Lutomirski 
1405ca4c6a98SThomas Gleixner 	/*
140691eeafeaSThomas Gleixner 	 * Entry handling for valid #PF from kernel mode is slightly
140791eeafeaSThomas Gleixner 	 * different: RCU is already watching and rcu_irq_enter() must not
140891eeafeaSThomas Gleixner 	 * be invoked because a kernel fault on a user space address might
140991eeafeaSThomas Gleixner 	 * sleep.
141091eeafeaSThomas Gleixner 	 *
141191eeafeaSThomas Gleixner 	 * In case the fault hit an RCU-idle region, the conditional entry
141291eeafeaSThomas Gleixner 	 * code re-enables RCU to avoid subsequent wreckage, which helps
141391eeafeaSThomas Gleixner 	 * debuggability.
1414ca4c6a98SThomas Gleixner 	 */
141591eeafeaSThomas Gleixner 	rcu_exit = idtentry_enter_cond_rcu(regs);
141691eeafeaSThomas Gleixner 
141791eeafeaSThomas Gleixner 	instrumentation_begin();
141891eeafeaSThomas Gleixner 	handle_page_fault(regs, error_code, address);
141991eeafeaSThomas Gleixner 	instrumentation_end();
142091eeafeaSThomas Gleixner 
142191eeafeaSThomas Gleixner 	idtentry_exit_cond_rcu(regs, rcu_exit);
1422ca4c6a98SThomas Gleixner }
1423