xref: /openbmc/linux/arch/x86/mm/fault.c (revision 5ccd35287edae4107475a141a477a6a4ecbe1cab)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c61e211dSHarvey Harrison /*
3c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
4c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
5f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
6c61e211dSHarvey Harrison  */
7a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
868db0cf1SIngo Molnar #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
9a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
104cdf8dbeSLinus Torvalds #include <linux/extable.h>		/* search_exception_tables	*/
1157c8a661SMike Rapoport #include <linux/memblock.h>		/* max_low_pfn			*/
129326638cSMasami Hiramatsu #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
13a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
14cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
15f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
16268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
193425d934SSai Praneeth #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
2050a7ca3cSSouptick Joarder #include <linux/mm_types.h>
21c61e211dSHarvey Harrison 
22019132ffSDave Hansen #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
23a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
24a2bcd473SIngo Molnar #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
25f40c3300SAndy Lutomirski #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
26f40c3300SAndy Lutomirski #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
27ba3e127eSBrian Gerst #include <asm/vm86.h>			/* struct vm86			*/
28019132ffSDave Hansen #include <asm/mmu_context.h>		/* vma_pkey()			*/
293425d934SSai Praneeth #include <asm/efi.h>			/* efi_recover_from_page_fault()*/
30a1a371c4SAndy Lutomirski #include <asm/desc.h>			/* store_idt(), ...		*/
31c61e211dSHarvey Harrison 
32d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS
33d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h>
34d34603b0SSeiji Aguchi 
35c61e211dSHarvey Harrison /*
36b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
37b319eed0SIngo Molnar  * handled by mmiotrace:
38b814d41fSIngo Molnar  */
399326638cSMasami Hiramatsu static nokprobe_inline int
4062c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
4186069782SPekka Paalanen {
420fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
430fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
440fd0e3daSPekka Paalanen 			return -1;
450fd0e3daSPekka Paalanen 	return 0;
4686069782SPekka Paalanen }
4786069782SPekka Paalanen 
489326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
49c61e211dSHarvey Harrison {
50a980c0efSJann Horn 	if (!kprobes_built_in())
51a980c0efSJann Horn 		return 0;
52a980c0efSJann Horn 	if (user_mode(regs))
53a980c0efSJann Horn 		return 0;
54a980c0efSJann Horn 	/*
55a980c0efSJann Horn 	 * To be potentially processing a kprobe fault and to be allowed to call
56a980c0efSJann Horn 	 * kprobe_running(), we have to be non-preemptible.
57a980c0efSJann Horn 	 */
58a980c0efSJann Horn 	if (preemptible())
59a980c0efSJann Horn 		return 0;
60a980c0efSJann Horn 	if (!kprobe_running())
61a980c0efSJann Horn 		return 0;
62a980c0efSJann Horn 	return kprobe_fault_handler(regs, X86_TRAP_PF);
63c61e211dSHarvey Harrison }
64c61e211dSHarvey Harrison 
65c61e211dSHarvey Harrison /*
662d4a7167SIngo Molnar  * Prefetch quirks:
672d4a7167SIngo Molnar  *
682d4a7167SIngo Molnar  * 32-bit mode:
692d4a7167SIngo Molnar  *
70c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
71c61e211dSHarvey Harrison  *   Check that here and ignore it.
72c61e211dSHarvey Harrison  *
732d4a7167SIngo Molnar  * 64-bit mode:
742d4a7167SIngo Molnar  *
75c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
76c61e211dSHarvey Harrison  *   Check that here and ignore it.
77c61e211dSHarvey Harrison  *
782d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
79c61e211dSHarvey Harrison  */
80107a0367SIngo Molnar static inline int
81107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
82107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
83c61e211dSHarvey Harrison {
84107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
85107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
86c61e211dSHarvey Harrison 
87c61e211dSHarvey Harrison 	switch (instr_hi) {
88c61e211dSHarvey Harrison 	case 0x20:
89c61e211dSHarvey Harrison 	case 0x30:
90c61e211dSHarvey Harrison 		/*
91c61e211dSHarvey Harrison 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
92c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
93c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present so
94c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway
95c61e211dSHarvey Harrison 		 */
96107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
97c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
98c61e211dSHarvey Harrison 	case 0x40:
99c61e211dSHarvey Harrison 		/*
100c61e211dSHarvey Harrison 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
101c61e211dSHarvey Harrison 		 * Need to figure out under what instruction mode the
102c61e211dSHarvey Harrison 		 * instruction was issued. Could check the LDT for lm,
103c61e211dSHarvey Harrison 		 * but for now it's good enough to assume that long
104c61e211dSHarvey Harrison 		 * mode only uses well known segments or kernel.
105c61e211dSHarvey Harrison 		 */
106318f5a2aSAndy Lutomirski 		return (!user_mode(regs) || user_64bit_mode(regs));
107c61e211dSHarvey Harrison #endif
108c61e211dSHarvey Harrison 	case 0x60:
109c61e211dSHarvey Harrison 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
110107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
111c61e211dSHarvey Harrison 	case 0xF0:
112c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
113107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
114c61e211dSHarvey Harrison 	case 0x00:
115c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
116107a0367SIngo Molnar 		if (probe_kernel_address(instr, opcode))
117107a0367SIngo Molnar 			return 0;
118107a0367SIngo Molnar 
119107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
120107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
121107a0367SIngo Molnar 		return 0;
122107a0367SIngo Molnar 	default:
123107a0367SIngo Molnar 		return 0;
124107a0367SIngo Molnar 	}
125107a0367SIngo Molnar }
126107a0367SIngo Molnar 
127107a0367SIngo Molnar static int
128107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
129107a0367SIngo Molnar {
130107a0367SIngo Molnar 	unsigned char *max_instr;
131107a0367SIngo Molnar 	unsigned char *instr;
132107a0367SIngo Molnar 	int prefetch = 0;
133107a0367SIngo Molnar 
134107a0367SIngo Molnar 	/*
135107a0367SIngo Molnar 	 * If it was a exec (instruction fetch) fault on NX page, then
136107a0367SIngo Molnar 	 * do not ignore the fault:
137107a0367SIngo Molnar 	 */
1381067f030SRicardo Neri 	if (error_code & X86_PF_INSTR)
139107a0367SIngo Molnar 		return 0;
140107a0367SIngo Molnar 
141107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
142107a0367SIngo Molnar 	max_instr = instr + 15;
143107a0367SIngo Molnar 
144d31bf07fSAndy Lutomirski 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
145107a0367SIngo Molnar 		return 0;
146107a0367SIngo Molnar 
147107a0367SIngo Molnar 	while (instr < max_instr) {
148107a0367SIngo Molnar 		unsigned char opcode;
149c61e211dSHarvey Harrison 
150c61e211dSHarvey Harrison 		if (probe_kernel_address(instr, opcode))
151c61e211dSHarvey Harrison 			break;
152107a0367SIngo Molnar 
153107a0367SIngo Molnar 		instr++;
154107a0367SIngo Molnar 
155107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
156c61e211dSHarvey Harrison 			break;
157c61e211dSHarvey Harrison 	}
158c61e211dSHarvey Harrison 	return prefetch;
159c61e211dSHarvey Harrison }
160c61e211dSHarvey Harrison 
161f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
162f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1632d4a7167SIngo Molnar 
164f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
165f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
166f2f13a85SIngo Molnar {
167f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
168f2f13a85SIngo Molnar 	pgd_t *pgd_k;
169e0c4f675SKirill A. Shutemov 	p4d_t *p4d, *p4d_k;
170f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
171f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
172f2f13a85SIngo Molnar 
173f2f13a85SIngo Molnar 	pgd += index;
174f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
175f2f13a85SIngo Molnar 
176f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
177f2f13a85SIngo Molnar 		return NULL;
178f2f13a85SIngo Molnar 
179f2f13a85SIngo Molnar 	/*
180f2f13a85SIngo Molnar 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
181f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() on non-PAE. As would
182e0c4f675SKirill A. Shutemov 	 * set_p4d/set_pud.
183f2f13a85SIngo Molnar 	 */
184e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
185e0c4f675SKirill A. Shutemov 	p4d_k = p4d_offset(pgd_k, address);
186e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d_k))
187e0c4f675SKirill A. Shutemov 		return NULL;
188e0c4f675SKirill A. Shutemov 
189e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
190e0c4f675SKirill A. Shutemov 	pud_k = pud_offset(p4d_k, address);
191f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
192f2f13a85SIngo Molnar 		return NULL;
193f2f13a85SIngo Molnar 
194f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
195f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
196f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
197f2f13a85SIngo Molnar 		return NULL;
198f2f13a85SIngo Molnar 
199b8bcfe99SJeremy Fitzhardinge 	if (!pmd_present(*pmd))
200f2f13a85SIngo Molnar 		set_pmd(pmd, *pmd_k);
201b8bcfe99SJeremy Fitzhardinge 	else
202f2f13a85SIngo Molnar 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
203f2f13a85SIngo Molnar 
204f2f13a85SIngo Molnar 	return pmd_k;
205f2f13a85SIngo Molnar }
206f2f13a85SIngo Molnar 
207f2f13a85SIngo Molnar void vmalloc_sync_all(void)
208f2f13a85SIngo Molnar {
209f2f13a85SIngo Molnar 	unsigned long address;
210f2f13a85SIngo Molnar 
211f2f13a85SIngo Molnar 	if (SHARED_KERNEL_PMD)
212f2f13a85SIngo Molnar 		return;
213f2f13a85SIngo Molnar 
214f2f13a85SIngo Molnar 	for (address = VMALLOC_START & PMD_MASK;
215dc4fac84SAndy Lutomirski 	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
216f2f13a85SIngo Molnar 	     address += PMD_SIZE) {
217f2f13a85SIngo Molnar 		struct page *page;
218f2f13a85SIngo Molnar 
219a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
220f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
221617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
222f01f7c56SBorislav Petkov 			pmd_t *ret;
223617d34d9SJeremy Fitzhardinge 
224a79e53d8SAndrea Arcangeli 			/* the pgt_lock only for Xen */
225617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
226617d34d9SJeremy Fitzhardinge 
227617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
228617d34d9SJeremy Fitzhardinge 			ret = vmalloc_sync_one(page_address(page), address);
229617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
230617d34d9SJeremy Fitzhardinge 
231617d34d9SJeremy Fitzhardinge 			if (!ret)
232f2f13a85SIngo Molnar 				break;
233f2f13a85SIngo Molnar 		}
234a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
235f2f13a85SIngo Molnar 	}
236f2f13a85SIngo Molnar }
237f2f13a85SIngo Molnar 
238f2f13a85SIngo Molnar /*
239f2f13a85SIngo Molnar  * 32-bit:
240f2f13a85SIngo Molnar  *
241f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc or module mapping area
242f2f13a85SIngo Molnar  */
2439326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
244f2f13a85SIngo Molnar {
245f2f13a85SIngo Molnar 	unsigned long pgd_paddr;
246f2f13a85SIngo Molnar 	pmd_t *pmd_k;
247f2f13a85SIngo Molnar 	pte_t *pte_k;
248f2f13a85SIngo Molnar 
249f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
250f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
251f2f13a85SIngo Molnar 		return -1;
252f2f13a85SIngo Molnar 
253f2f13a85SIngo Molnar 	/*
254f2f13a85SIngo Molnar 	 * Synchronize this task's top level page-table
255f2f13a85SIngo Molnar 	 * with the 'reference' page table.
256f2f13a85SIngo Molnar 	 *
257f2f13a85SIngo Molnar 	 * Do _not_ use "current" here. We might be inside
258f2f13a85SIngo Molnar 	 * an interrupt in the middle of a task switch..
259f2f13a85SIngo Molnar 	 */
2606c690ee1SAndy Lutomirski 	pgd_paddr = read_cr3_pa();
261f2f13a85SIngo Molnar 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
262f2f13a85SIngo Molnar 	if (!pmd_k)
263f2f13a85SIngo Molnar 		return -1;
264f2f13a85SIngo Molnar 
26518a95521SToshi Kani 	if (pmd_large(*pmd_k))
266f4eafd8bSToshi Kani 		return 0;
267f4eafd8bSToshi Kani 
268f2f13a85SIngo Molnar 	pte_k = pte_offset_kernel(pmd_k, address);
269f2f13a85SIngo Molnar 	if (!pte_present(*pte_k))
270f2f13a85SIngo Molnar 		return -1;
271f2f13a85SIngo Molnar 
272f2f13a85SIngo Molnar 	return 0;
273f2f13a85SIngo Molnar }
2749326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
275f2f13a85SIngo Molnar 
276f2f13a85SIngo Molnar /*
277f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
278f2f13a85SIngo Molnar  */
279f2f13a85SIngo Molnar static inline void
280f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
281f2f13a85SIngo Molnar 		 struct task_struct *tsk)
282f2f13a85SIngo Molnar {
2839fda6a06SBrian Gerst #ifdef CONFIG_VM86
284f2f13a85SIngo Molnar 	unsigned long bit;
285f2f13a85SIngo Molnar 
2869fda6a06SBrian Gerst 	if (!v8086_mode(regs) || !tsk->thread.vm86)
287f2f13a85SIngo Molnar 		return;
288f2f13a85SIngo Molnar 
289f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
290f2f13a85SIngo Molnar 	if (bit < 32)
2919fda6a06SBrian Gerst 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
2929fda6a06SBrian Gerst #endif
293f2f13a85SIngo Molnar }
294c61e211dSHarvey Harrison 
295087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
296087975b0SAkinobu Mita {
297087975b0SAkinobu Mita 	return pfn < max_low_pfn;
298087975b0SAkinobu Mita }
299087975b0SAkinobu Mita 
300cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
301c61e211dSHarvey Harrison {
3026c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
303087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
304e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
305e0c4f675SKirill A. Shutemov 	pud_t *pud;
306087975b0SAkinobu Mita 	pmd_t *pmd;
307087975b0SAkinobu Mita 	pte_t *pte;
3082d4a7167SIngo Molnar 
309c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
31039e48d9bSJan Beulich 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
311087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
312087975b0SAkinobu Mita 		goto out;
31339e48d9bSJan Beulich #define pr_pde pr_cont
31439e48d9bSJan Beulich #else
31539e48d9bSJan Beulich #define pr_pde pr_info
316c61e211dSHarvey Harrison #endif
317e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
318e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
319e0c4f675SKirill A. Shutemov 	pmd = pmd_offset(pud, address);
32039e48d9bSJan Beulich 	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
32139e48d9bSJan Beulich #undef pr_pde
322c61e211dSHarvey Harrison 
323c61e211dSHarvey Harrison 	/*
324c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
325c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
326c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
3272d4a7167SIngo Molnar 	 * it's allocated already:
328c61e211dSHarvey Harrison 	 */
329087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
330087975b0SAkinobu Mita 		goto out;
3312d4a7167SIngo Molnar 
332087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
33339e48d9bSJan Beulich 	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
334087975b0SAkinobu Mita out:
33539e48d9bSJan Beulich 	pr_cont("\n");
336f2f13a85SIngo Molnar }
337f2f13a85SIngo Molnar 
338f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
339f2f13a85SIngo Molnar 
340f2f13a85SIngo Molnar void vmalloc_sync_all(void)
341f2f13a85SIngo Molnar {
3425372e155SKirill A. Shutemov 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
343f2f13a85SIngo Molnar }
344f2f13a85SIngo Molnar 
345f2f13a85SIngo Molnar /*
346f2f13a85SIngo Molnar  * 64-bit:
347f2f13a85SIngo Molnar  *
348f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc area
349f2f13a85SIngo Molnar  */
3509326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
351f2f13a85SIngo Molnar {
352565977a3SToshi Kani 	pgd_t *pgd, *pgd_k;
353565977a3SToshi Kani 	p4d_t *p4d, *p4d_k;
354565977a3SToshi Kani 	pud_t *pud;
355565977a3SToshi Kani 	pmd_t *pmd;
356565977a3SToshi Kani 	pte_t *pte;
357f2f13a85SIngo Molnar 
358f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
359f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
360f2f13a85SIngo Molnar 		return -1;
361f2f13a85SIngo Molnar 
362ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
363ebc8827fSFrederic Weisbecker 
364f2f13a85SIngo Molnar 	/*
365f2f13a85SIngo Molnar 	 * Copy kernel mappings over when needed. This can also
366f2f13a85SIngo Molnar 	 * happen within a race in page table update. In the later
367f2f13a85SIngo Molnar 	 * case just flush:
368f2f13a85SIngo Molnar 	 */
3696c690ee1SAndy Lutomirski 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
370565977a3SToshi Kani 	pgd_k = pgd_offset_k(address);
371565977a3SToshi Kani 	if (pgd_none(*pgd_k))
372f2f13a85SIngo Molnar 		return -1;
373f2f13a85SIngo Molnar 
374ed7588d5SKirill A. Shutemov 	if (pgtable_l5_enabled()) {
3751160c277SSamu Kallio 		if (pgd_none(*pgd)) {
376565977a3SToshi Kani 			set_pgd(pgd, *pgd_k);
3771160c277SSamu Kallio 			arch_flush_lazy_mmu_mode();
37836b3a772SAndy Lutomirski 		} else {
379565977a3SToshi Kani 			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
3801160c277SSamu Kallio 		}
38136b3a772SAndy Lutomirski 	}
382f2f13a85SIngo Molnar 
383b50858ceSKirill A. Shutemov 	/* With 4-level paging, copying happens on the p4d level. */
384b50858ceSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
385565977a3SToshi Kani 	p4d_k = p4d_offset(pgd_k, address);
386565977a3SToshi Kani 	if (p4d_none(*p4d_k))
387b50858ceSKirill A. Shutemov 		return -1;
388b50858ceSKirill A. Shutemov 
389ed7588d5SKirill A. Shutemov 	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
390565977a3SToshi Kani 		set_p4d(p4d, *p4d_k);
391b50858ceSKirill A. Shutemov 		arch_flush_lazy_mmu_mode();
392b50858ceSKirill A. Shutemov 	} else {
393565977a3SToshi Kani 		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
394b50858ceSKirill A. Shutemov 	}
395b50858ceSKirill A. Shutemov 
39636b3a772SAndy Lutomirski 	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
397f2f13a85SIngo Molnar 
398b50858ceSKirill A. Shutemov 	pud = pud_offset(p4d, address);
399565977a3SToshi Kani 	if (pud_none(*pud))
400f2f13a85SIngo Molnar 		return -1;
401f2f13a85SIngo Molnar 
40218a95521SToshi Kani 	if (pud_large(*pud))
403f4eafd8bSToshi Kani 		return 0;
404f4eafd8bSToshi Kani 
405f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
406565977a3SToshi Kani 	if (pmd_none(*pmd))
407f2f13a85SIngo Molnar 		return -1;
408f2f13a85SIngo Molnar 
40918a95521SToshi Kani 	if (pmd_large(*pmd))
410f4eafd8bSToshi Kani 		return 0;
411f4eafd8bSToshi Kani 
412f2f13a85SIngo Molnar 	pte = pte_offset_kernel(pmd, address);
413565977a3SToshi Kani 	if (!pte_present(*pte))
414565977a3SToshi Kani 		return -1;
415f2f13a85SIngo Molnar 
416f2f13a85SIngo Molnar 	return 0;
417f2f13a85SIngo Molnar }
4189326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
419f2f13a85SIngo Molnar 
420e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD
421f2f13a85SIngo Molnar static const char errata93_warning[] =
422ad361c98SJoe Perches KERN_ERR
423ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
424ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
425ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
426ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
427e05139f2SJan Beulich #endif
428f2f13a85SIngo Molnar 
429f2f13a85SIngo Molnar /*
430f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
431f2f13a85SIngo Molnar  */
432f2f13a85SIngo Molnar static inline void
433f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
434f2f13a85SIngo Molnar 		 struct task_struct *tsk)
435f2f13a85SIngo Molnar {
436f2f13a85SIngo Molnar }
437f2f13a85SIngo Molnar 
438f2f13a85SIngo Molnar static int bad_address(void *p)
439f2f13a85SIngo Molnar {
440f2f13a85SIngo Molnar 	unsigned long dummy;
441f2f13a85SIngo Molnar 
442f2f13a85SIngo Molnar 	return probe_kernel_address((unsigned long *)p, dummy);
443f2f13a85SIngo Molnar }
444f2f13a85SIngo Molnar 
445f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
446f2f13a85SIngo Molnar {
4476c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
448087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
449e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
450c61e211dSHarvey Harrison 	pud_t *pud;
451c61e211dSHarvey Harrison 	pmd_t *pmd;
452c61e211dSHarvey Harrison 	pte_t *pte;
453c61e211dSHarvey Harrison 
4542d4a7167SIngo Molnar 	if (bad_address(pgd))
4552d4a7167SIngo Molnar 		goto bad;
4562d4a7167SIngo Molnar 
45739e48d9bSJan Beulich 	pr_info("PGD %lx ", pgd_val(*pgd));
4582d4a7167SIngo Molnar 
4592d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
4602d4a7167SIngo Molnar 		goto out;
461c61e211dSHarvey Harrison 
462e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
463e0c4f675SKirill A. Shutemov 	if (bad_address(p4d))
464e0c4f675SKirill A. Shutemov 		goto bad;
465e0c4f675SKirill A. Shutemov 
46639e48d9bSJan Beulich 	pr_cont("P4D %lx ", p4d_val(*p4d));
467e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d) || p4d_large(*p4d))
468e0c4f675SKirill A. Shutemov 		goto out;
469e0c4f675SKirill A. Shutemov 
470e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
4712d4a7167SIngo Molnar 	if (bad_address(pud))
4722d4a7167SIngo Molnar 		goto bad;
4732d4a7167SIngo Molnar 
47439e48d9bSJan Beulich 	pr_cont("PUD %lx ", pud_val(*pud));
475b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
4762d4a7167SIngo Molnar 		goto out;
477c61e211dSHarvey Harrison 
478c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
4792d4a7167SIngo Molnar 	if (bad_address(pmd))
4802d4a7167SIngo Molnar 		goto bad;
4812d4a7167SIngo Molnar 
48239e48d9bSJan Beulich 	pr_cont("PMD %lx ", pmd_val(*pmd));
4832d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
4842d4a7167SIngo Molnar 		goto out;
485c61e211dSHarvey Harrison 
486c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
4872d4a7167SIngo Molnar 	if (bad_address(pte))
4882d4a7167SIngo Molnar 		goto bad;
4892d4a7167SIngo Molnar 
49039e48d9bSJan Beulich 	pr_cont("PTE %lx", pte_val(*pte));
4912d4a7167SIngo Molnar out:
49239e48d9bSJan Beulich 	pr_cont("\n");
493c61e211dSHarvey Harrison 	return;
494c61e211dSHarvey Harrison bad:
49539e48d9bSJan Beulich 	pr_info("BAD\n");
496c61e211dSHarvey Harrison }
497c61e211dSHarvey Harrison 
498f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
499c61e211dSHarvey Harrison 
5002d4a7167SIngo Molnar /*
5012d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
5022d4a7167SIngo Molnar  *
5032d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
5042d4a7167SIngo Molnar  * to avoid corruption of the 64bit RIP register on C stepping K8.
5052d4a7167SIngo Molnar  *
5062d4a7167SIngo Molnar  * A lot of BIOS that didn't get tested properly miss this.
5072d4a7167SIngo Molnar  *
5082d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32bits of RIP cleared.
5092d4a7167SIngo Molnar  * Try to work around it here.
5102d4a7167SIngo Molnar  *
5112d4a7167SIngo Molnar  * Note we only handle faults in kernel here.
5122d4a7167SIngo Molnar  * Does nothing on 32-bit.
513c61e211dSHarvey Harrison  */
514c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
515c61e211dSHarvey Harrison {
516e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
517e05139f2SJan Beulich 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
518e05139f2SJan Beulich 	    || boot_cpu_data.x86 != 0xf)
519e05139f2SJan Beulich 		return 0;
520e05139f2SJan Beulich 
521c61e211dSHarvey Harrison 	if (address != regs->ip)
522c61e211dSHarvey Harrison 		return 0;
5232d4a7167SIngo Molnar 
524c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
525c61e211dSHarvey Harrison 		return 0;
5262d4a7167SIngo Molnar 
527c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
528c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
529c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
530a454ab31SIngo Molnar 		printk_once(errata93_warning);
531c61e211dSHarvey Harrison 		regs->ip = address;
532c61e211dSHarvey Harrison 		return 1;
533c61e211dSHarvey Harrison 	}
534c61e211dSHarvey Harrison #endif
535c61e211dSHarvey Harrison 	return 0;
536c61e211dSHarvey Harrison }
537c61e211dSHarvey Harrison 
538c61e211dSHarvey Harrison /*
5392d4a7167SIngo Molnar  * Work around K8 erratum #100 K8 in compat mode occasionally jumps
5402d4a7167SIngo Molnar  * to illegal addresses >4GB.
5412d4a7167SIngo Molnar  *
5422d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
5432d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
544c61e211dSHarvey Harrison  * segment in LDT is compatibility mode.
545c61e211dSHarvey Harrison  */
546c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
547c61e211dSHarvey Harrison {
548c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
5492d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
550c61e211dSHarvey Harrison 		return 1;
551c61e211dSHarvey Harrison #endif
552c61e211dSHarvey Harrison 	return 0;
553c61e211dSHarvey Harrison }
554c61e211dSHarvey Harrison 
555c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
556c61e211dSHarvey Harrison {
557c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
558c61e211dSHarvey Harrison 	unsigned long nr;
5592d4a7167SIngo Molnar 
560c61e211dSHarvey Harrison 	/*
5612d4a7167SIngo Molnar 	 * Pentium F0 0F C7 C8 bug workaround:
562c61e211dSHarvey Harrison 	 */
563e2604b49SBorislav Petkov 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
564c61e211dSHarvey Harrison 		nr = (address - idt_descr.address) >> 3;
565c61e211dSHarvey Harrison 
566c61e211dSHarvey Harrison 		if (nr == 6) {
567c61e211dSHarvey Harrison 			do_invalid_op(regs, 0);
568c61e211dSHarvey Harrison 			return 1;
569c61e211dSHarvey Harrison 		}
570c61e211dSHarvey Harrison 	}
571c61e211dSHarvey Harrison #endif
572c61e211dSHarvey Harrison 	return 0;
573c61e211dSHarvey Harrison }
574c61e211dSHarvey Harrison 
575a1a371c4SAndy Lutomirski static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
576a1a371c4SAndy Lutomirski {
577a1a371c4SAndy Lutomirski 	u32 offset = (index >> 3) * sizeof(struct desc_struct);
578a1a371c4SAndy Lutomirski 	unsigned long addr;
579a1a371c4SAndy Lutomirski 	struct ldttss_desc desc;
580a1a371c4SAndy Lutomirski 
581a1a371c4SAndy Lutomirski 	if (index == 0) {
582a1a371c4SAndy Lutomirski 		pr_alert("%s: NULL\n", name);
583a1a371c4SAndy Lutomirski 		return;
584a1a371c4SAndy Lutomirski 	}
585a1a371c4SAndy Lutomirski 
586a1a371c4SAndy Lutomirski 	if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
587a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
588a1a371c4SAndy Lutomirski 		return;
589a1a371c4SAndy Lutomirski 	}
590a1a371c4SAndy Lutomirski 
591a1a371c4SAndy Lutomirski 	if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
592a1a371c4SAndy Lutomirski 			      sizeof(struct ldttss_desc))) {
593a1a371c4SAndy Lutomirski 		pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
594a1a371c4SAndy Lutomirski 			 name, index);
595a1a371c4SAndy Lutomirski 		return;
596a1a371c4SAndy Lutomirski 	}
597a1a371c4SAndy Lutomirski 
598*5ccd3528SColin Ian King 	addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
599a1a371c4SAndy Lutomirski #ifdef CONFIG_X86_64
600a1a371c4SAndy Lutomirski 	addr |= ((u64)desc.base3 << 32);
601a1a371c4SAndy Lutomirski #endif
602a1a371c4SAndy Lutomirski 	pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
603a1a371c4SAndy Lutomirski 		 name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
604a1a371c4SAndy Lutomirski }
605a1a371c4SAndy Lutomirski 
606a2aa52abSIngo Molnar /*
607a2aa52abSIngo Molnar  * This helper function transforms the #PF error_code bits into
608a2aa52abSIngo Molnar  * "[PROT] [USER]" type of descriptive, almost human-readable error strings:
609a2aa52abSIngo Molnar  */
610a2aa52abSIngo Molnar static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt)
611a1a371c4SAndy Lutomirski {
612a2aa52abSIngo Molnar 	if (error_code & mask) {
613a1a371c4SAndy Lutomirski 		if (buf[0])
614a1a371c4SAndy Lutomirski 			strcat(buf, " ");
615a1a371c4SAndy Lutomirski 		strcat(buf, txt);
616a1a371c4SAndy Lutomirski 	}
617a1a371c4SAndy Lutomirski }
618a1a371c4SAndy Lutomirski 
6192d4a7167SIngo Molnar static void
620a2aa52abSIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
621c61e211dSHarvey Harrison {
622a2aa52abSIngo Molnar 	char err_txt[64];
623a1a371c4SAndy Lutomirski 
624c61e211dSHarvey Harrison 	if (!oops_may_print())
625c61e211dSHarvey Harrison 		return;
626c61e211dSHarvey Harrison 
6271067f030SRicardo Neri 	if (error_code & X86_PF_INSTR) {
62893809be8SHarvey Harrison 		unsigned int level;
629426e34ccSMatt Fleming 		pgd_t *pgd;
630426e34ccSMatt Fleming 		pte_t *pte;
6312d4a7167SIngo Molnar 
6326c690ee1SAndy Lutomirski 		pgd = __va(read_cr3_pa());
633426e34ccSMatt Fleming 		pgd += pgd_index(address);
634426e34ccSMatt Fleming 
635426e34ccSMatt Fleming 		pte = lookup_address_in_pgd(pgd, address, &level);
636c61e211dSHarvey Harrison 
6378f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
638d79d0d8aSDmitry Vyukov 			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
639d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
640eff50c34SJiri Kosina 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
641eff50c34SJiri Kosina 				(pgd_flags(*pgd) & _PAGE_USER) &&
6421e02ce4cSAndy Lutomirski 				(__read_cr4() & X86_CR4_SMEP))
643d79d0d8aSDmitry Vyukov 			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
644d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
645c61e211dSHarvey Harrison 	}
646fd40d6e3SHarvey Harrison 
6474188f063SDmitry Vyukov 	pr_alert("BUG: unable to handle kernel %s at %px\n",
6484188f063SDmitry Vyukov 		 address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
6494188f063SDmitry Vyukov 		 (void *)address);
6502d4a7167SIngo Molnar 
651a2aa52abSIngo Molnar 	err_txt[0] = 0;
652a2aa52abSIngo Molnar 
653a2aa52abSIngo Molnar 	/*
654a2aa52abSIngo Molnar 	 * Note: length of these appended strings including the separation space and the
655a2aa52abSIngo Molnar 	 * zero delimiter must fit into err_txt[].
656a2aa52abSIngo Molnar 	 */
657a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_PROT,  "[PROT]" );
658a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]");
659a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_USER,  "[USER]" );
660a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_RSVD,  "[RSVD]" );
661a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]");
662a2aa52abSIngo Molnar 	err_str_append(error_code, err_txt, X86_PF_PK,    "[PK]"   );
663a2aa52abSIngo Molnar 
664a2aa52abSIngo Molnar 	pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]");
665a2aa52abSIngo Molnar 
666a1a371c4SAndy Lutomirski 	if (!(error_code & X86_PF_USER) && user_mode(regs)) {
667a1a371c4SAndy Lutomirski 		struct desc_ptr idt, gdt;
668a1a371c4SAndy Lutomirski 		u16 ldtr, tr;
669a1a371c4SAndy Lutomirski 
670a1a371c4SAndy Lutomirski 		pr_alert("This was a system access from user code\n");
671a1a371c4SAndy Lutomirski 
672a1a371c4SAndy Lutomirski 		/*
673a1a371c4SAndy Lutomirski 		 * This can happen for quite a few reasons.  The more obvious
674a1a371c4SAndy Lutomirski 		 * ones are faults accessing the GDT, or LDT.  Perhaps
675a1a371c4SAndy Lutomirski 		 * surprisingly, if the CPU tries to deliver a benign or
676a1a371c4SAndy Lutomirski 		 * contributory exception from user code and gets a page fault
677a1a371c4SAndy Lutomirski 		 * during delivery, the page fault can be delivered as though
678a1a371c4SAndy Lutomirski 		 * it originated directly from user code.  This could happen
679a1a371c4SAndy Lutomirski 		 * due to wrong permissions on the IDT, GDT, LDT, TSS, or
680a1a371c4SAndy Lutomirski 		 * kernel or IST stack.
681a1a371c4SAndy Lutomirski 		 */
682a1a371c4SAndy Lutomirski 		store_idt(&idt);
683a1a371c4SAndy Lutomirski 
684a1a371c4SAndy Lutomirski 		/* Usable even on Xen PV -- it's just slow. */
685a1a371c4SAndy Lutomirski 		native_store_gdt(&gdt);
686a1a371c4SAndy Lutomirski 
687a1a371c4SAndy Lutomirski 		pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
688a1a371c4SAndy Lutomirski 			 idt.address, idt.size, gdt.address, gdt.size);
689a1a371c4SAndy Lutomirski 
690a1a371c4SAndy Lutomirski 		store_ldt(ldtr);
691a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "LDTR", ldtr);
692a1a371c4SAndy Lutomirski 
693a1a371c4SAndy Lutomirski 		store_tr(tr);
694a1a371c4SAndy Lutomirski 		show_ldttss(&gdt, "TR", tr);
695a1a371c4SAndy Lutomirski 	}
696a1a371c4SAndy Lutomirski 
697c61e211dSHarvey Harrison 	dump_pagetable(address);
698c61e211dSHarvey Harrison }
699c61e211dSHarvey Harrison 
7002d4a7167SIngo Molnar static noinline void
7012d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
7022d4a7167SIngo Molnar 	    unsigned long address)
703c61e211dSHarvey Harrison {
7042d4a7167SIngo Molnar 	struct task_struct *tsk;
7052d4a7167SIngo Molnar 	unsigned long flags;
7062d4a7167SIngo Molnar 	int sig;
7072d4a7167SIngo Molnar 
7082d4a7167SIngo Molnar 	flags = oops_begin();
7092d4a7167SIngo Molnar 	tsk = current;
7102d4a7167SIngo Molnar 	sig = SIGKILL;
711c61e211dSHarvey Harrison 
712c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
71392181f19SNick Piggin 	       tsk->comm, address);
714c61e211dSHarvey Harrison 	dump_pagetable(address);
7152d4a7167SIngo Molnar 
716c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
717874d93d1SAlexander van Heukelum 		sig = 0;
7182d4a7167SIngo Molnar 
719874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
720c61e211dSHarvey Harrison }
721c61e211dSHarvey Harrison 
722e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address,
723e49d3cbeSAndy Lutomirski 				unsigned long error_code)
724e49d3cbeSAndy Lutomirski {
725e49d3cbeSAndy Lutomirski 	struct task_struct *tsk = current;
726e49d3cbeSAndy Lutomirski 
727e49d3cbeSAndy Lutomirski 	/*
728e49d3cbeSAndy Lutomirski 	 * To avoid leaking information about the kernel page
729e49d3cbeSAndy Lutomirski 	 * table layout, pretend that user-mode accesses to
730e49d3cbeSAndy Lutomirski 	 * kernel addresses are always protection faults.
731e49d3cbeSAndy Lutomirski 	 */
732e49d3cbeSAndy Lutomirski 	if (address >= TASK_SIZE_MAX)
733e49d3cbeSAndy Lutomirski 		error_code |= X86_PF_PROT;
734e49d3cbeSAndy Lutomirski 
735e49d3cbeSAndy Lutomirski 	tsk->thread.trap_nr = X86_TRAP_PF;
736e49d3cbeSAndy Lutomirski 	tsk->thread.error_code = error_code | X86_PF_USER;
737e49d3cbeSAndy Lutomirski 	tsk->thread.cr2 = address;
738e49d3cbeSAndy Lutomirski }
739e49d3cbeSAndy Lutomirski 
7402d4a7167SIngo Molnar static noinline void
7412d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
7424fc34901SAndy Lutomirski 	   unsigned long address, int signal, int si_code)
74392181f19SNick Piggin {
74492181f19SNick Piggin 	struct task_struct *tsk = current;
74592181f19SNick Piggin 	unsigned long flags;
74692181f19SNick Piggin 	int sig;
74792181f19SNick Piggin 
748ebb53e25SAndy Lutomirski 	if (user_mode(regs)) {
749ebb53e25SAndy Lutomirski 		/*
750ebb53e25SAndy Lutomirski 		 * This is an implicit supervisor-mode access from user
751ebb53e25SAndy Lutomirski 		 * mode.  Bypass all the kernel-mode recovery code and just
752ebb53e25SAndy Lutomirski 		 * OOPS.
753ebb53e25SAndy Lutomirski 		 */
754ebb53e25SAndy Lutomirski 		goto oops;
755ebb53e25SAndy Lutomirski 	}
756ebb53e25SAndy Lutomirski 
75792181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
75881fd9c18SJann Horn 	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
759c026b359SPeter Zijlstra 		/*
760c026b359SPeter Zijlstra 		 * Any interrupt that takes a fault gets the fixup. This makes
761c026b359SPeter Zijlstra 		 * the below recursive fault logic only apply to a faults from
762c026b359SPeter Zijlstra 		 * task context.
763c026b359SPeter Zijlstra 		 */
764c026b359SPeter Zijlstra 		if (in_interrupt())
765c026b359SPeter Zijlstra 			return;
766c026b359SPeter Zijlstra 
767c026b359SPeter Zijlstra 		/*
768c026b359SPeter Zijlstra 		 * Per the above we're !in_interrupt(), aka. task context.
769c026b359SPeter Zijlstra 		 *
770c026b359SPeter Zijlstra 		 * In this case we need to make sure we're not recursively
771c026b359SPeter Zijlstra 		 * faulting through the emulate_vsyscall() logic.
772c026b359SPeter Zijlstra 		 */
7732a53ccbcSIngo Molnar 		if (current->thread.sig_on_uaccess_err && signal) {
774e49d3cbeSAndy Lutomirski 			set_signal_archinfo(address, error_code);
7754fc34901SAndy Lutomirski 
7764fc34901SAndy Lutomirski 			/* XXX: hwpoison faults will set the wrong code. */
777b4fd52f2SEric W. Biederman 			force_sig_fault(signal, si_code, (void __user *)address,
778b4fd52f2SEric W. Biederman 					tsk);
7794fc34901SAndy Lutomirski 		}
780c026b359SPeter Zijlstra 
781c026b359SPeter Zijlstra 		/*
782c026b359SPeter Zijlstra 		 * Barring that, we can do the fixup and be happy.
783c026b359SPeter Zijlstra 		 */
78492181f19SNick Piggin 		return;
7854fc34901SAndy Lutomirski 	}
78692181f19SNick Piggin 
7876271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK
7886271cfdfSAndy Lutomirski 	/*
7896271cfdfSAndy Lutomirski 	 * Stack overflow?  During boot, we can fault near the initial
7906271cfdfSAndy Lutomirski 	 * stack in the direct map, but that's not an overflow -- check
7916271cfdfSAndy Lutomirski 	 * that we're in vmalloc space to avoid this.
7926271cfdfSAndy Lutomirski 	 */
7936271cfdfSAndy Lutomirski 	if (is_vmalloc_addr((void *)address) &&
7946271cfdfSAndy Lutomirski 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
7956271cfdfSAndy Lutomirski 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
7966271cfdfSAndy Lutomirski 		unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
7976271cfdfSAndy Lutomirski 		/*
7986271cfdfSAndy Lutomirski 		 * We're likely to be running with very little stack space
7996271cfdfSAndy Lutomirski 		 * left.  It's plausible that we'd hit this condition but
8006271cfdfSAndy Lutomirski 		 * double-fault even before we get this far, in which case
8016271cfdfSAndy Lutomirski 		 * we're fine: the double-fault handler will deal with it.
8026271cfdfSAndy Lutomirski 		 *
8036271cfdfSAndy Lutomirski 		 * We don't want to make it all the way into the oops code
8046271cfdfSAndy Lutomirski 		 * and then double-fault, though, because we're likely to
8056271cfdfSAndy Lutomirski 		 * break the console driver and lose most of the stack dump.
8066271cfdfSAndy Lutomirski 		 */
8076271cfdfSAndy Lutomirski 		asm volatile ("movq %[stack], %%rsp\n\t"
8086271cfdfSAndy Lutomirski 			      "call handle_stack_overflow\n\t"
8096271cfdfSAndy Lutomirski 			      "1: jmp 1b"
810f5caf621SJosh Poimboeuf 			      : ASM_CALL_CONSTRAINT
8116271cfdfSAndy Lutomirski 			      : "D" ("kernel stack overflow (page fault)"),
8126271cfdfSAndy Lutomirski 				"S" (regs), "d" (address),
8136271cfdfSAndy Lutomirski 				[stack] "rm" (stack));
8146271cfdfSAndy Lutomirski 		unreachable();
8156271cfdfSAndy Lutomirski 	}
8166271cfdfSAndy Lutomirski #endif
8176271cfdfSAndy Lutomirski 
81892181f19SNick Piggin 	/*
8192d4a7167SIngo Molnar 	 * 32-bit:
8202d4a7167SIngo Molnar 	 *
82192181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
82292181f19SNick Piggin 	 *   had been triggered by is_prefetch fixup_exception would have
82392181f19SNick Piggin 	 *   handled it.
82492181f19SNick Piggin 	 *
8252d4a7167SIngo Molnar 	 * 64-bit:
8262d4a7167SIngo Molnar 	 *
82792181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
82892181f19SNick Piggin 	 */
82992181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
83092181f19SNick Piggin 		return;
83192181f19SNick Piggin 
83292181f19SNick Piggin 	if (is_errata93(regs, address))
83392181f19SNick Piggin 		return;
83492181f19SNick Piggin 
83592181f19SNick Piggin 	/*
8363425d934SSai Praneeth 	 * Buggy firmware could access regions which might page fault, try to
8373425d934SSai Praneeth 	 * recover from such faults.
8383425d934SSai Praneeth 	 */
8393425d934SSai Praneeth 	if (IS_ENABLED(CONFIG_EFI))
8403425d934SSai Praneeth 		efi_recover_from_page_fault(address);
8413425d934SSai Praneeth 
842ebb53e25SAndy Lutomirski oops:
8433425d934SSai Praneeth 	/*
84492181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
8452d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
84692181f19SNick Piggin 	 */
84792181f19SNick Piggin 	flags = oops_begin();
84892181f19SNick Piggin 
84992181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
85092181f19SNick Piggin 
851a70857e4SAaron Tomlin 	if (task_stack_end_corrupted(tsk))
852b0f4c4b3SPrarit Bhargava 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
85319803078SIngo Molnar 
85492181f19SNick Piggin 	sig = SIGKILL;
85592181f19SNick Piggin 	if (__die("Oops", regs, error_code))
85692181f19SNick Piggin 		sig = 0;
8572d4a7167SIngo Molnar 
85892181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
859b0f4c4b3SPrarit Bhargava 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
8602d4a7167SIngo Molnar 
86192181f19SNick Piggin 	oops_end(flags, regs, sig);
86292181f19SNick Piggin }
86392181f19SNick Piggin 
8642d4a7167SIngo Molnar /*
8652d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
8662d4a7167SIngo Molnar  * sysctl is set:
8672d4a7167SIngo Molnar  */
8682d4a7167SIngo Molnar static inline void
8692d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
8702d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
8712d4a7167SIngo Molnar {
872ba54d856SBorislav Petkov 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
873ba54d856SBorislav Petkov 
8742d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
8752d4a7167SIngo Molnar 		return;
8762d4a7167SIngo Molnar 
8772d4a7167SIngo Molnar 	if (!printk_ratelimit())
8782d4a7167SIngo Molnar 		return;
8792d4a7167SIngo Molnar 
88010a7e9d8SKees Cook 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
881ba54d856SBorislav Petkov 		loglvl, tsk->comm, task_pid_nr(tsk), address,
8822d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
8832d4a7167SIngo Molnar 
8842d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
8852d4a7167SIngo Molnar 
8862d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
887ba54d856SBorislav Petkov 
888342db04aSJann Horn 	show_opcodes(regs, loglvl);
8892d4a7167SIngo Molnar }
8902d4a7167SIngo Molnar 
89102e983b7SDave Hansen /*
89202e983b7SDave Hansen  * The (legacy) vsyscall page is the long page in the kernel portion
89302e983b7SDave Hansen  * of the address space that has user-accessible permissions.
89402e983b7SDave Hansen  */
89502e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr)
89602e983b7SDave Hansen {
8973ae0ad92SDave Hansen 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
89802e983b7SDave Hansen }
89902e983b7SDave Hansen 
9002d4a7167SIngo Molnar static void
9012d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
902419ceeb1SEric W. Biederman 		       unsigned long address, u32 pkey, int si_code)
90392181f19SNick Piggin {
90492181f19SNick Piggin 	struct task_struct *tsk = current;
90592181f19SNick Piggin 
90692181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
9076ea59b07SAndy Lutomirski 	if (user_mode(regs) && (error_code & X86_PF_USER)) {
90892181f19SNick Piggin 		/*
9092d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
91092181f19SNick Piggin 		 */
91192181f19SNick Piggin 		local_irq_enable();
91292181f19SNick Piggin 
91392181f19SNick Piggin 		/*
91492181f19SNick Piggin 		 * Valid to do another page fault here because this one came
9152d4a7167SIngo Molnar 		 * from user space:
91692181f19SNick Piggin 		 */
91792181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
91892181f19SNick Piggin 			return;
91992181f19SNick Piggin 
92092181f19SNick Piggin 		if (is_errata100(regs, address))
92192181f19SNick Piggin 			return;
92292181f19SNick Piggin 
923dc4fac84SAndy Lutomirski 		/*
924dc4fac84SAndy Lutomirski 		 * To avoid leaking information about the kernel page table
925dc4fac84SAndy Lutomirski 		 * layout, pretend that user-mode accesses to kernel addresses
926dc4fac84SAndy Lutomirski 		 * are always protection faults.
927dc4fac84SAndy Lutomirski 		 */
928dc4fac84SAndy Lutomirski 		if (address >= TASK_SIZE_MAX)
9291067f030SRicardo Neri 			error_code |= X86_PF_PROT;
9303ae36655SAndy Lutomirski 
931e575a86fSKees Cook 		if (likely(show_unhandled_signals))
9322d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
93392181f19SNick Piggin 
934e49d3cbeSAndy Lutomirski 		set_signal_archinfo(address, error_code);
9352d4a7167SIngo Molnar 
9369db812dbSEric W. Biederman 		if (si_code == SEGV_PKUERR)
937419ceeb1SEric W. Biederman 			force_sig_pkuerr((void __user *)address, pkey);
9389db812dbSEric W. Biederman 
939b4fd52f2SEric W. Biederman 		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
9402d4a7167SIngo Molnar 
94192181f19SNick Piggin 		return;
94292181f19SNick Piggin 	}
94392181f19SNick Piggin 
94492181f19SNick Piggin 	if (is_f00f_bug(regs, address))
94592181f19SNick Piggin 		return;
94692181f19SNick Piggin 
9474fc34901SAndy Lutomirski 	no_context(regs, error_code, address, SIGSEGV, si_code);
94892181f19SNick Piggin }
94992181f19SNick Piggin 
9502d4a7167SIngo Molnar static noinline void
9512d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
952768fd9c6SEric W. Biederman 		     unsigned long address)
95392181f19SNick Piggin {
954419ceeb1SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
95592181f19SNick Piggin }
95692181f19SNick Piggin 
9572d4a7167SIngo Molnar static void
9582d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
959419ceeb1SEric W. Biederman 	   unsigned long address, u32 pkey, int si_code)
96092181f19SNick Piggin {
96192181f19SNick Piggin 	struct mm_struct *mm = current->mm;
96292181f19SNick Piggin 	/*
96392181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map..
96492181f19SNick Piggin 	 * Fix it, but check if it's kernel or user first..
96592181f19SNick Piggin 	 */
96692181f19SNick Piggin 	up_read(&mm->mmap_sem);
96792181f19SNick Piggin 
968aba1ecd3SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
96992181f19SNick Piggin }
97092181f19SNick Piggin 
9712d4a7167SIngo Molnar static noinline void
9722d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
97392181f19SNick Piggin {
974419ceeb1SEric W. Biederman 	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
97592181f19SNick Piggin }
97692181f19SNick Piggin 
97733a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code,
97833a709b2SDave Hansen 		struct vm_area_struct *vma)
97933a709b2SDave Hansen {
98007f146f5SDave Hansen 	/* This code is always called on the current mm */
98107f146f5SDave Hansen 	bool foreign = false;
98207f146f5SDave Hansen 
98333a709b2SDave Hansen 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
98433a709b2SDave Hansen 		return false;
9851067f030SRicardo Neri 	if (error_code & X86_PF_PK)
98633a709b2SDave Hansen 		return true;
98707f146f5SDave Hansen 	/* this checks permission keys on the VMA: */
9881067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
9891067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
99007f146f5SDave Hansen 		return true;
99133a709b2SDave Hansen 	return false;
99292181f19SNick Piggin }
99392181f19SNick Piggin 
9942d4a7167SIngo Molnar static noinline void
9952d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
9967b2d0dbaSDave Hansen 		      unsigned long address, struct vm_area_struct *vma)
99792181f19SNick Piggin {
998019132ffSDave Hansen 	/*
999019132ffSDave Hansen 	 * This OSPKE check is not strictly necessary at runtime.
1000019132ffSDave Hansen 	 * But, doing it this way allows compiler optimizations
1001019132ffSDave Hansen 	 * if pkeys are compiled out.
1002019132ffSDave Hansen 	 */
1003aba1ecd3SEric W. Biederman 	if (bad_area_access_from_pkeys(error_code, vma)) {
10049db812dbSEric W. Biederman 		/*
10059db812dbSEric W. Biederman 		 * A protection key fault means that the PKRU value did not allow
10069db812dbSEric W. Biederman 		 * access to some PTE.  Userspace can figure out what PKRU was
10079db812dbSEric W. Biederman 		 * from the XSAVE state.  This function captures the pkey from
10089db812dbSEric W. Biederman 		 * the vma and passes it to userspace so userspace can discover
10099db812dbSEric W. Biederman 		 * which protection key was set on the PTE.
10109db812dbSEric W. Biederman 		 *
10119db812dbSEric W. Biederman 		 * If we get here, we know that the hardware signaled a X86_PF_PK
10129db812dbSEric W. Biederman 		 * fault and that there was a VMA once we got in the fault
10139db812dbSEric W. Biederman 		 * handler.  It does *not* guarantee that the VMA we find here
10149db812dbSEric W. Biederman 		 * was the one that we faulted on.
10159db812dbSEric W. Biederman 		 *
10169db812dbSEric W. Biederman 		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
10179db812dbSEric W. Biederman 		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
10189db812dbSEric W. Biederman 		 * 3. T1   : faults...
10199db812dbSEric W. Biederman 		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
10209db812dbSEric W. Biederman 		 * 5. T1   : enters fault handler, takes mmap_sem, etc...
10219db812dbSEric W. Biederman 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
10229db812dbSEric W. Biederman 		 *	     faulted on a pte with its pkey=4.
10239db812dbSEric W. Biederman 		 */
1024aba1ecd3SEric W. Biederman 		u32 pkey = vma_pkey(vma);
10259db812dbSEric W. Biederman 
1026419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
1027aba1ecd3SEric W. Biederman 	} else {
1028419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
1029aba1ecd3SEric W. Biederman 	}
103092181f19SNick Piggin }
103192181f19SNick Piggin 
10322d4a7167SIngo Molnar static void
1033a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
103427274f73SEric W. Biederman 	  unsigned int fault)
103592181f19SNick Piggin {
103692181f19SNick Piggin 	struct task_struct *tsk = current;
103792181f19SNick Piggin 
10382d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
10391067f030SRicardo Neri 	if (!(error_code & X86_PF_USER)) {
10404fc34901SAndy Lutomirski 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
104196054569SLinus Torvalds 		return;
104296054569SLinus Torvalds 	}
10432d4a7167SIngo Molnar 
1044cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
104592181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
104692181f19SNick Piggin 		return;
10472d4a7167SIngo Molnar 
1048e49d3cbeSAndy Lutomirski 	set_signal_archinfo(address, error_code);
10492d4a7167SIngo Molnar 
1050a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
1051f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
105240e55394SEric W. Biederman 		unsigned lsb = 0;
105340e55394SEric W. Biederman 
105440e55394SEric W. Biederman 		pr_err(
1055a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
1056a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
105740e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON_LARGE)
105840e55394SEric W. Biederman 			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
105940e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON)
106040e55394SEric W. Biederman 			lsb = PAGE_SHIFT;
106140e55394SEric W. Biederman 		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk);
106240e55394SEric W. Biederman 		return;
1063a6e04aa9SAndi Kleen 	}
1064a6e04aa9SAndi Kleen #endif
1065b4fd52f2SEric W. Biederman 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
106692181f19SNick Piggin }
106792181f19SNick Piggin 
10683a13c4d7SJohannes Weiner static noinline void
10692d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
107025c102d8SEric W. Biederman 	       unsigned long address, vm_fault_t fault)
107192181f19SNick Piggin {
10721067f030SRicardo Neri 	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
10734fc34901SAndy Lutomirski 		no_context(regs, error_code, address, 0, 0);
10743a13c4d7SJohannes Weiner 		return;
1075b80ef10eSKOSAKI Motohiro 	}
1076b80ef10eSKOSAKI Motohiro 
10772d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
1078f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
10791067f030SRicardo Neri 		if (!(error_code & X86_PF_USER)) {
10804fc34901SAndy Lutomirski 			no_context(regs, error_code, address,
10814fc34901SAndy Lutomirski 				   SIGSEGV, SEGV_MAPERR);
10823a13c4d7SJohannes Weiner 			return;
1083f8626854SAndrey Vagin 		}
1084f8626854SAndrey Vagin 
1085c2d23f91SDavid Rientjes 		/*
1086c2d23f91SDavid Rientjes 		 * We ran out of memory, call the OOM killer, and return the
1087c2d23f91SDavid Rientjes 		 * userspace (which will retry the fault, or kill us if we got
1088c2d23f91SDavid Rientjes 		 * oom-killed):
1089c2d23f91SDavid Rientjes 		 */
1090c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
10912d4a7167SIngo Molnar 	} else {
1092f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1093f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
109427274f73SEric W. Biederman 			do_sigbus(regs, error_code, address, fault);
109533692f27SLinus Torvalds 		else if (fault & VM_FAULT_SIGSEGV)
1096768fd9c6SEric W. Biederman 			bad_area_nosemaphore(regs, error_code, address);
109792181f19SNick Piggin 		else
109892181f19SNick Piggin 			BUG();
109992181f19SNick Piggin 	}
11002d4a7167SIngo Molnar }
110192181f19SNick Piggin 
11028fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
1103d8b57bb7SThomas Gleixner {
11041067f030SRicardo Neri 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
1105d8b57bb7SThomas Gleixner 		return 0;
11062d4a7167SIngo Molnar 
11071067f030SRicardo Neri 	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
1108d8b57bb7SThomas Gleixner 		return 0;
1109d8b57bb7SThomas Gleixner 
1110d8b57bb7SThomas Gleixner 	return 1;
1111d8b57bb7SThomas Gleixner }
1112d8b57bb7SThomas Gleixner 
1113c61e211dSHarvey Harrison /*
11142d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
11152d4a7167SIngo Molnar  *
11162d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
11172d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
11182d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
11192d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
11202d4a7167SIngo Molnar  * on other processors.
11212d4a7167SIngo Molnar  *
112231668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
112331668511SDavid Vrabel  * fewer permission than the page table entry.  Non-present (P = 0)
112431668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
112531668511SDavid Vrabel  *
11265b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
11275b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
112831668511SDavid Vrabel  *
112931668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
113031668511SDavid Vrabel  *
113131668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
113231668511SDavid Vrabel  * (Optional Invalidation).
11335b727a3bSJeremy Fitzhardinge  */
11349326638cSMasami Hiramatsu static noinline int
11358fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
11365b727a3bSJeremy Fitzhardinge {
11375b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
1138e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
11395b727a3bSJeremy Fitzhardinge 	pud_t *pud;
11405b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
11415b727a3bSJeremy Fitzhardinge 	pte_t *pte;
11423c3e5694SSteven Rostedt 	int ret;
11435b727a3bSJeremy Fitzhardinge 
114431668511SDavid Vrabel 	/*
114531668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
114631668511SDavid Vrabel 	 * spurious faults.
114731668511SDavid Vrabel 	 *
114831668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
114931668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
115031668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
115131668511SDavid Vrabel 	 * faults.
115231668511SDavid Vrabel 	 */
11531067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
11541067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
11555b727a3bSJeremy Fitzhardinge 		return 0;
11565b727a3bSJeremy Fitzhardinge 
11575b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
11585b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
11595b727a3bSJeremy Fitzhardinge 		return 0;
11605b727a3bSJeremy Fitzhardinge 
1161e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1162e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1163e0c4f675SKirill A. Shutemov 		return 0;
1164e0c4f675SKirill A. Shutemov 
1165e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
11668fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1167e0c4f675SKirill A. Shutemov 
1168e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
11695b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
11705b727a3bSJeremy Fitzhardinge 		return 0;
11715b727a3bSJeremy Fitzhardinge 
1172d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
11738fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1174d8b57bb7SThomas Gleixner 
11755b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
11765b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
11775b727a3bSJeremy Fitzhardinge 		return 0;
11785b727a3bSJeremy Fitzhardinge 
1179d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
11808fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1181d8b57bb7SThomas Gleixner 
11825b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1183954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
11845b727a3bSJeremy Fitzhardinge 		return 0;
11855b727a3bSJeremy Fitzhardinge 
11868fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
11873c3e5694SSteven Rostedt 	if (!ret)
11883c3e5694SSteven Rostedt 		return 0;
11893c3e5694SSteven Rostedt 
11903c3e5694SSteven Rostedt 	/*
11912d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
11922d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
11933c3e5694SSteven Rostedt 	 */
11948fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
11953c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
11962d4a7167SIngo Molnar 
11973c3e5694SSteven Rostedt 	return ret;
11985b727a3bSJeremy Fitzhardinge }
11998fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
12005b727a3bSJeremy Fitzhardinge 
1201c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1202c61e211dSHarvey Harrison 
12032d4a7167SIngo Molnar static inline int
120468da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
120592181f19SNick Piggin {
120607f146f5SDave Hansen 	/* This is only called for the current mm, so: */
120707f146f5SDave Hansen 	bool foreign = false;
1208e8c6226dSDave Hansen 
1209e8c6226dSDave Hansen 	/*
1210e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1211e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1212e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1213e8c6226dSDave Hansen 	 */
12141067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1215e8c6226dSDave Hansen 		return 1;
1216e8c6226dSDave Hansen 
121733a709b2SDave Hansen 	/*
121807f146f5SDave Hansen 	 * Make sure to check the VMA so that we do not perform
12191067f030SRicardo Neri 	 * faults just to hit a X86_PF_PK as soon as we fill in a
122007f146f5SDave Hansen 	 * page.
122107f146f5SDave Hansen 	 */
12221067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
12231067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
122407f146f5SDave Hansen 		return 1;
122533a709b2SDave Hansen 
12261067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
12272d4a7167SIngo Molnar 		/* write, present and write, not present: */
122892181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
122992181f19SNick Piggin 			return 1;
12302d4a7167SIngo Molnar 		return 0;
12312d4a7167SIngo Molnar 	}
12322d4a7167SIngo Molnar 
12332d4a7167SIngo Molnar 	/* read, present: */
12341067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
123592181f19SNick Piggin 		return 1;
12362d4a7167SIngo Molnar 
12372d4a7167SIngo Molnar 	/* read, not present: */
123892181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
123992181f19SNick Piggin 		return 1;
124092181f19SNick Piggin 
124192181f19SNick Piggin 	return 0;
124292181f19SNick Piggin }
124392181f19SNick Piggin 
12440973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
12450973a06cSHiroshi Shimamoto {
12463ae0ad92SDave Hansen 	/*
12473ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
12483ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
12493ae0ad92SDave Hansen 	 * address space.
12503ae0ad92SDave Hansen 	 */
12513ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
12523ae0ad92SDave Hansen 		return false;
12533ae0ad92SDave Hansen 
1254d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
12550973a06cSHiroshi Shimamoto }
12560973a06cSHiroshi Shimamoto 
1257c61e211dSHarvey Harrison /*
12588fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
12598fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
12608fed6200SDave Hansen  * ran in userspace or the kernel.
1261c61e211dSHarvey Harrison  */
12628fed6200SDave Hansen static void
12638fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
12640ac09f9fSJiri Olsa 		   unsigned long address)
1265c61e211dSHarvey Harrison {
12668fed6200SDave Hansen 	/*
1267367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1268367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1269367e3f1dSDave Hansen 	 * space, so do not expect them here.
1270367e3f1dSDave Hansen 	 */
1271367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1272367e3f1dSDave Hansen 
1273367e3f1dSDave Hansen 	/*
12748fed6200SDave Hansen 	 * We can fault-in kernel-space virtual memory on-demand. The
12758fed6200SDave Hansen 	 * 'reference' page table is init_mm.pgd.
12768fed6200SDave Hansen 	 *
12778fed6200SDave Hansen 	 * NOTE! We MUST NOT take any locks for this case. We may
12788fed6200SDave Hansen 	 * be in an interrupt or a critical region, and should
12798fed6200SDave Hansen 	 * only copy the information from the master page table,
12808fed6200SDave Hansen 	 * nothing more.
12818fed6200SDave Hansen 	 *
12828fed6200SDave Hansen 	 * Before doing this on-demand faulting, ensure that the
12838fed6200SDave Hansen 	 * fault is not any of the following:
12848fed6200SDave Hansen 	 * 1. A fault on a PTE with a reserved bit set.
12858fed6200SDave Hansen 	 * 2. A fault caused by a user-mode access.  (Do not demand-
12868fed6200SDave Hansen 	 *    fault kernel memory due to user-mode accesses).
12878fed6200SDave Hansen 	 * 3. A fault caused by a page-level protection violation.
12888fed6200SDave Hansen 	 *    (A demand fault would be on a non-present page which
12898fed6200SDave Hansen 	 *     would have X86_PF_PROT==0).
12908fed6200SDave Hansen 	 */
12918fed6200SDave Hansen 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
12928fed6200SDave Hansen 		if (vmalloc_fault(address) >= 0)
12938fed6200SDave Hansen 			return;
12948fed6200SDave Hansen 	}
12958fed6200SDave Hansen 
12968fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
12978fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
12988fed6200SDave Hansen 		return;
12998fed6200SDave Hansen 
13008fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
13018fed6200SDave Hansen 	if (kprobes_fault(regs))
13028fed6200SDave Hansen 		return;
13038fed6200SDave Hansen 
13048fed6200SDave Hansen 	/*
13058fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
13068fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
13078fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
13088fed6200SDave Hansen 	 *
13098fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fixup a prefetch
13108fed6200SDave Hansen 	 * fault we could otherwise deadlock:
13118fed6200SDave Hansen 	 */
1312ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
13138fed6200SDave Hansen }
13148fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
13158fed6200SDave Hansen 
1316aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1317aa37c51bSDave Hansen static inline
1318aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1319aa37c51bSDave Hansen 			unsigned long hw_error_code,
1320c61e211dSHarvey Harrison 			unsigned long address)
1321c61e211dSHarvey Harrison {
1322c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1323c61e211dSHarvey Harrison 	struct task_struct *tsk;
13242d4a7167SIngo Molnar 	struct mm_struct *mm;
132550a7ca3cSSouptick Joarder 	vm_fault_t fault, major = 0;
1326759496baSJohannes Weiner 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1327c61e211dSHarvey Harrison 
1328c61e211dSHarvey Harrison 	tsk = current;
1329c61e211dSHarvey Harrison 	mm = tsk->mm;
13302d4a7167SIngo Molnar 
13312d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1332e00b12e6SPeter Zijlstra 	if (unlikely(kprobes_fault(regs)))
13339be260a6SMasami Hiramatsu 		return;
1334e00b12e6SPeter Zijlstra 
13355b0c2cacSDave Hansen 	/*
13365b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
13375b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
13385b0c2cacSDave Hansen 	 */
1339164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1340164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1341e00b12e6SPeter Zijlstra 
13425b0c2cacSDave Hansen 	/*
1343e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1344e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1345e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1346e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set so, in all cases, SMAP
1347e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
13485b0c2cacSDave Hansen 	 */
1349a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1350a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1351e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1352a15781b5SAndy Lutomirski 	{
1353ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1354e00b12e6SPeter Zijlstra 		return;
1355e00b12e6SPeter Zijlstra 	}
1356e00b12e6SPeter Zijlstra 
1357e00b12e6SPeter Zijlstra 	/*
1358e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context or are running
135970ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled then we must not take the fault
1360e00b12e6SPeter Zijlstra 	 */
136170ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1362ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1363e00b12e6SPeter Zijlstra 		return;
1364e00b12e6SPeter Zijlstra 	}
1365e00b12e6SPeter Zijlstra 
1366c61e211dSHarvey Harrison 	/*
1367891cffbdSLinus Torvalds 	 * It's safe to allow irq's after cr2 has been saved and the
1368891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1369891cffbdSLinus Torvalds 	 *
1370891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
13712d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1372c61e211dSHarvey Harrison 	 */
1373f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1374891cffbdSLinus Torvalds 		local_irq_enable();
1375759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
13762d4a7167SIngo Molnar 	} else {
13772d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1378c61e211dSHarvey Harrison 			local_irq_enable();
13792d4a7167SIngo Molnar 	}
1380c61e211dSHarvey Harrison 
1381a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
13827dd1fcc2SPeter Zijlstra 
13830ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1384759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
13850ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1386d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1387759496baSJohannes Weiner 
13883ae0ad92SDave Hansen #ifdef CONFIG_X86_64
13893a1dfe6eSIngo Molnar 	/*
13903ae0ad92SDave Hansen 	 * Instruction fetch faults in the vsyscall page might need
13913ae0ad92SDave Hansen 	 * emulation.  The vsyscall page is at a high address
13923ae0ad92SDave Hansen 	 * (>PAGE_OFFSET), but is considered to be part of the user
13933ae0ad92SDave Hansen 	 * address space.
1394c61e211dSHarvey Harrison 	 *
13953ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13963ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
13973ae0ad92SDave Hansen 	 */
13980ed32f1aSAndy Lutomirski 	if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
13993ae0ad92SDave Hansen 		if (emulate_vsyscall(regs, address))
14003ae0ad92SDave Hansen 			return;
14013ae0ad92SDave Hansen 	}
14023ae0ad92SDave Hansen #endif
14033ae0ad92SDave Hansen 
1404c61e211dSHarvey Harrison 	/*
140588259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
140688259744SDave Hansen 	 * on well-defined single instructions listed in the exception
140788259744SDave Hansen 	 * tables.  But, an erroneous kernel fault occurring outside one of
140888259744SDave Hansen 	 * those areas which also holds mmap_sem might deadlock attempting
140988259744SDave Hansen 	 * to validate the fault against the address space.
1410c61e211dSHarvey Harrison 	 *
141188259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
141288259744SDave Hansen 	 * risk of a deadlock.  This happens if we
141388259744SDave Hansen 	 * 1. Failed to acquire mmap_sem, and
14146344be60SAndy Lutomirski 	 * 2. The access did not originate in userspace.
1415c61e211dSHarvey Harrison 	 */
141692181f19SNick Piggin 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
14176344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
141888259744SDave Hansen 			/*
141988259744SDave Hansen 			 * Fault from code in kernel from
142088259744SDave Hansen 			 * which we do not expect faults.
142188259744SDave Hansen 			 */
14220ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
142392181f19SNick Piggin 			return;
142492181f19SNick Piggin 		}
1425d065bd81SMichel Lespinasse retry:
1426c61e211dSHarvey Harrison 		down_read(&mm->mmap_sem);
142701006074SPeter Zijlstra 	} else {
142801006074SPeter Zijlstra 		/*
14292d4a7167SIngo Molnar 		 * The above down_read_trylock() might have succeeded in
14302d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
14312d4a7167SIngo Molnar 		 * down_read():
143201006074SPeter Zijlstra 		 */
143301006074SPeter Zijlstra 		might_sleep();
1434c61e211dSHarvey Harrison 	}
1435c61e211dSHarvey Harrison 
1436c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
143792181f19SNick Piggin 	if (unlikely(!vma)) {
14380ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
143992181f19SNick Piggin 		return;
144092181f19SNick Piggin 	}
144192181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1442c61e211dSHarvey Harrison 		goto good_area;
144392181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
14440ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
144592181f19SNick Piggin 		return;
144692181f19SNick Piggin 	}
144792181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
14480ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
144992181f19SNick Piggin 		return;
145092181f19SNick Piggin 	}
145192181f19SNick Piggin 
1452c61e211dSHarvey Harrison 	/*
1453c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1454c61e211dSHarvey Harrison 	 * we can handle it..
1455c61e211dSHarvey Harrison 	 */
1456c61e211dSHarvey Harrison good_area:
14570ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
14580ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
145992181f19SNick Piggin 		return;
1460c61e211dSHarvey Harrison 	}
1461c61e211dSHarvey Harrison 
1462c61e211dSHarvey Harrison 	/*
1463c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1464c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
14659a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
14669a95f3cfSPaul Cassella 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1467cb0631fdSVlastimil Babka 	 *
1468cb0631fdSVlastimil Babka 	 * Note that handle_userfault() may also release and reacquire mmap_sem
1469cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1470cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1471cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1472cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1473cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1474c61e211dSHarvey Harrison 	 */
1475dcddffd4SKirill A. Shutemov 	fault = handle_mm_fault(vma, address, flags);
147626178ec1SLinus Torvalds 	major |= fault & VM_FAULT_MAJOR;
14772d4a7167SIngo Molnar 
14783a13c4d7SJohannes Weiner 	/*
147926178ec1SLinus Torvalds 	 * If we need to retry the mmap_sem has already been released,
148026178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
148126178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
14823a13c4d7SJohannes Weiner 	 */
148326178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_RETRY)) {
148426178ec1SLinus Torvalds 		/* Retry at most once */
148526178ec1SLinus Torvalds 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
148626178ec1SLinus Torvalds 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
148726178ec1SLinus Torvalds 			flags |= FAULT_FLAG_TRIED;
148826178ec1SLinus Torvalds 			if (!fatal_signal_pending(tsk))
148926178ec1SLinus Torvalds 				goto retry;
149026178ec1SLinus Torvalds 		}
149126178ec1SLinus Torvalds 
149226178ec1SLinus Torvalds 		/* User mode? Just return to handle the fatal exception */
1493cf3c0a15SLinus Torvalds 		if (flags & FAULT_FLAG_USER)
14943a13c4d7SJohannes Weiner 			return;
14953a13c4d7SJohannes Weiner 
149626178ec1SLinus Torvalds 		/* Not returning to user mode? Handle exceptions or die: */
14970ed32f1aSAndy Lutomirski 		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
149826178ec1SLinus Torvalds 		return;
149926178ec1SLinus Torvalds 	}
150026178ec1SLinus Torvalds 
15017fb08ecaSLinus Torvalds 	up_read(&mm->mmap_sem);
150226178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
15030ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
150437b23e05SKOSAKI Motohiro 		return;
150537b23e05SKOSAKI Motohiro 	}
150637b23e05SKOSAKI Motohiro 
150737b23e05SKOSAKI Motohiro 	/*
150826178ec1SLinus Torvalds 	 * Major/minor page fault accounting. If any of the events
150926178ec1SLinus Torvalds 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1510d065bd81SMichel Lespinasse 	 */
151126178ec1SLinus Torvalds 	if (major) {
1512c61e211dSHarvey Harrison 		tsk->maj_flt++;
151326178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1514ac17dc8eSPeter Zijlstra 	} else {
1515c61e211dSHarvey Harrison 		tsk->min_flt++;
151626178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1517d065bd81SMichel Lespinasse 	}
1518c61e211dSHarvey Harrison 
15198c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1520c61e211dSHarvey Harrison }
1521aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1522aa37c51bSDave Hansen 
1523aa37c51bSDave Hansen /*
1524aa37c51bSDave Hansen  * This routine handles page faults.  It determines the address,
1525aa37c51bSDave Hansen  * and the problem, and then passes it off to one of the appropriate
1526aa37c51bSDave Hansen  * routines.
1527aa37c51bSDave Hansen  */
1528aa37c51bSDave Hansen static noinline void
1529aa37c51bSDave Hansen __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
1530aa37c51bSDave Hansen 		unsigned long address)
1531aa37c51bSDave Hansen {
1532aa37c51bSDave Hansen 	prefetchw(&current->mm->mmap_sem);
1533aa37c51bSDave Hansen 
1534aa37c51bSDave Hansen 	if (unlikely(kmmio_fault(regs, address)))
1535aa37c51bSDave Hansen 		return;
1536aa37c51bSDave Hansen 
1537aa37c51bSDave Hansen 	/* Was the fault on kernel-controlled part of the address space? */
1538aa37c51bSDave Hansen 	if (unlikely(fault_in_kernel_space(address)))
1539aa37c51bSDave Hansen 		do_kern_addr_fault(regs, hw_error_code, address);
1540aa37c51bSDave Hansen 	else
1541aa37c51bSDave Hansen 		do_user_addr_fault(regs, hw_error_code, address);
1542aa37c51bSDave Hansen }
15439326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault);
15446ba3c97aSFrederic Weisbecker 
15459326638cSMasami Hiramatsu static nokprobe_inline void
15469326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1547d34603b0SSeiji Aguchi 			 unsigned long error_code)
1548d34603b0SSeiji Aguchi {
1549d34603b0SSeiji Aguchi 	if (user_mode(regs))
1550d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1551d34603b0SSeiji Aguchi 	else
1552d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1553d34603b0SSeiji Aguchi }
1554d34603b0SSeiji Aguchi 
15550ac09f9fSJiri Olsa /*
155611a7ffb0SThomas Gleixner  * We must have this function blacklisted from kprobes, tagged with notrace
155711a7ffb0SThomas Gleixner  * and call read_cr2() before calling anything else. To avoid calling any
155811a7ffb0SThomas Gleixner  * kind of tracing machinery before we've observed the CR2 value.
155911a7ffb0SThomas Gleixner  *
156011a7ffb0SThomas Gleixner  * exception_{enter,exit}() contains all sorts of tracepoints.
15610ac09f9fSJiri Olsa  */
156211a7ffb0SThomas Gleixner dotraplinkage void notrace
156311a7ffb0SThomas Gleixner do_page_fault(struct pt_regs *regs, unsigned long error_code)
156411a7ffb0SThomas Gleixner {
156511a7ffb0SThomas Gleixner 	unsigned long address = read_cr2(); /* Get the faulting address */
1566d4078e23SPeter Zijlstra 	enum ctx_state prev_state;
156725c74b10SSeiji Aguchi 
156825c74b10SSeiji Aguchi 	prev_state = exception_enter();
156980954747SThomas Gleixner 	if (trace_pagefault_enabled())
1570d4078e23SPeter Zijlstra 		trace_page_fault_entries(address, regs, error_code);
157111a7ffb0SThomas Gleixner 
15720ac09f9fSJiri Olsa 	__do_page_fault(regs, error_code, address);
157325c74b10SSeiji Aguchi 	exception_exit(prev_state);
157425c74b10SSeiji Aguchi }
157511a7ffb0SThomas Gleixner NOKPROBE_SYMBOL(do_page_fault);
1576