xref: /openbmc/linux/arch/x86/mm/fault.c (revision ebb53e2597e2dc7637ab213df006e99681b6ee25)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2c61e211dSHarvey Harrison /*
3c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
4c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
5f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
6c61e211dSHarvey Harrison  */
7a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
868db0cf1SIngo Molnar #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
9a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
104cdf8dbeSLinus Torvalds #include <linux/extable.h>		/* search_exception_tables	*/
1157c8a661SMike Rapoport #include <linux/memblock.h>		/* max_low_pfn			*/
129326638cSMasami Hiramatsu #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
13a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
14cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
15f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
16268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
1756dd9470SFrederic Weisbecker #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
1870ffdb93SDavid Hildenbrand #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
193425d934SSai Praneeth #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
2050a7ca3cSSouptick Joarder #include <linux/mm_types.h>
21c61e211dSHarvey Harrison 
22019132ffSDave Hansen #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
23a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
24a2bcd473SIngo Molnar #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
25f40c3300SAndy Lutomirski #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
26f40c3300SAndy Lutomirski #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
27ba3e127eSBrian Gerst #include <asm/vm86.h>			/* struct vm86			*/
28019132ffSDave Hansen #include <asm/mmu_context.h>		/* vma_pkey()			*/
293425d934SSai Praneeth #include <asm/efi.h>			/* efi_recover_from_page_fault()*/
30c61e211dSHarvey Harrison 
31d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS
32d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h>
33d34603b0SSeiji Aguchi 
34c61e211dSHarvey Harrison /*
35b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
36b319eed0SIngo Molnar  * handled by mmiotrace:
37b814d41fSIngo Molnar  */
389326638cSMasami Hiramatsu static nokprobe_inline int
3962c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
4086069782SPekka Paalanen {
410fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
420fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
430fd0e3daSPekka Paalanen 			return -1;
440fd0e3daSPekka Paalanen 	return 0;
4586069782SPekka Paalanen }
4686069782SPekka Paalanen 
479326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
48c61e211dSHarvey Harrison {
49a980c0efSJann Horn 	if (!kprobes_built_in())
50a980c0efSJann Horn 		return 0;
51a980c0efSJann Horn 	if (user_mode(regs))
52a980c0efSJann Horn 		return 0;
53a980c0efSJann Horn 	/*
54a980c0efSJann Horn 	 * To be potentially processing a kprobe fault and to be allowed to call
55a980c0efSJann Horn 	 * kprobe_running(), we have to be non-preemptible.
56a980c0efSJann Horn 	 */
57a980c0efSJann Horn 	if (preemptible())
58a980c0efSJann Horn 		return 0;
59a980c0efSJann Horn 	if (!kprobe_running())
60a980c0efSJann Horn 		return 0;
61a980c0efSJann Horn 	return kprobe_fault_handler(regs, X86_TRAP_PF);
62c61e211dSHarvey Harrison }
63c61e211dSHarvey Harrison 
64c61e211dSHarvey Harrison /*
652d4a7167SIngo Molnar  * Prefetch quirks:
662d4a7167SIngo Molnar  *
672d4a7167SIngo Molnar  * 32-bit mode:
682d4a7167SIngo Molnar  *
69c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
70c61e211dSHarvey Harrison  *   Check that here and ignore it.
71c61e211dSHarvey Harrison  *
722d4a7167SIngo Molnar  * 64-bit mode:
732d4a7167SIngo Molnar  *
74c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
75c61e211dSHarvey Harrison  *   Check that here and ignore it.
76c61e211dSHarvey Harrison  *
772d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
78c61e211dSHarvey Harrison  */
79107a0367SIngo Molnar static inline int
80107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
81107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
82c61e211dSHarvey Harrison {
83107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
84107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
85c61e211dSHarvey Harrison 
86c61e211dSHarvey Harrison 	switch (instr_hi) {
87c61e211dSHarvey Harrison 	case 0x20:
88c61e211dSHarvey Harrison 	case 0x30:
89c61e211dSHarvey Harrison 		/*
90c61e211dSHarvey Harrison 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
91c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
92c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present, so
93c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway.
94c61e211dSHarvey Harrison 		 */
95107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
96c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
97c61e211dSHarvey Harrison 	case 0x40:
98c61e211dSHarvey Harrison 		/*
99c61e211dSHarvey Harrison 		 * In AMD64 long mode, 0x40..0x4F are valid REX prefixes.
100c61e211dSHarvey Harrison 		 * We need to figure out under what mode the instruction
101c61e211dSHarvey Harrison 		 * was issued. We could check the LDT for lm, but for now
102c61e211dSHarvey Harrison 		 * it's good enough to assume that long mode only uses
103c61e211dSHarvey Harrison 		 * well-known segments or the kernel.
104c61e211dSHarvey Harrison 		 */
105318f5a2aSAndy Lutomirski 		return (!user_mode(regs) || user_64bit_mode(regs));
106c61e211dSHarvey Harrison #endif
107c61e211dSHarvey Harrison 	case 0x60:
108c61e211dSHarvey Harrison 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
109107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
110c61e211dSHarvey Harrison 	case 0xF0:
111c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
112107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
113c61e211dSHarvey Harrison 	case 0x00:
114c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
115107a0367SIngo Molnar 		if (probe_kernel_address(instr, opcode))
116107a0367SIngo Molnar 			return 0;
117107a0367SIngo Molnar 
118107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
119107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
120107a0367SIngo Molnar 		return 0;
121107a0367SIngo Molnar 	default:
122107a0367SIngo Molnar 		return 0;
123107a0367SIngo Molnar 	}
124107a0367SIngo Molnar }
125107a0367SIngo Molnar 
126107a0367SIngo Molnar static int
127107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
128107a0367SIngo Molnar {
129107a0367SIngo Molnar 	unsigned char *max_instr;
130107a0367SIngo Molnar 	unsigned char *instr;
131107a0367SIngo Molnar 	int prefetch = 0;
132107a0367SIngo Molnar 
133107a0367SIngo Molnar 	/*
134107a0367SIngo Molnar 	 * If it was an exec (instruction fetch) fault on an NX page, then
135107a0367SIngo Molnar 	 * do not ignore the fault:
136107a0367SIngo Molnar 	 */
1371067f030SRicardo Neri 	if (error_code & X86_PF_INSTR)
138107a0367SIngo Molnar 		return 0;
139107a0367SIngo Molnar 
140107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
141107a0367SIngo Molnar 	max_instr = instr + 15;
142107a0367SIngo Molnar 
143d31bf07fSAndy Lutomirski 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
144107a0367SIngo Molnar 		return 0;
145107a0367SIngo Molnar 
146107a0367SIngo Molnar 	while (instr < max_instr) {
147107a0367SIngo Molnar 		unsigned char opcode;
148c61e211dSHarvey Harrison 
149c61e211dSHarvey Harrison 		if (probe_kernel_address(instr, opcode))
150c61e211dSHarvey Harrison 			break;
151107a0367SIngo Molnar 
152107a0367SIngo Molnar 		instr++;
153107a0367SIngo Molnar 
154107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
155c61e211dSHarvey Harrison 			break;
156c61e211dSHarvey Harrison 	}
157c61e211dSHarvey Harrison 	return prefetch;
158c61e211dSHarvey Harrison }
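
/*
 * Worked example (illustrative bytes, not from a real trace): suppose a
 * 32-bit AMD CPU raises a bogus #PF while executing "prefetchnta (%esi)",
 * which encodes as 0f 18 06.  is_prefetch() walks the bytes at the
 * faulting IP: check_prefetch_opcode() sees 0x0f (instr_hi == 0x00,
 * instr_lo == 0xf), probes the following byte, finds 0x18 and sets
 * *prefetch, so the fault is silently ignored.  Segment overrides
 * (0x26/0x2e/0x36/0x3e) on 32-bit, or REX prefixes (0x40..0x4f) on
 * 64-bit, in front of the 0x0f are skipped by the earlier switch cases.
 */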
159c61e211dSHarvey Harrison 
160f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
161f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1622d4a7167SIngo Molnar 
163f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
164f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
165f2f13a85SIngo Molnar {
166f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
167f2f13a85SIngo Molnar 	pgd_t *pgd_k;
168e0c4f675SKirill A. Shutemov 	p4d_t *p4d, *p4d_k;
169f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
170f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
171f2f13a85SIngo Molnar 
172f2f13a85SIngo Molnar 	pgd += index;
173f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
174f2f13a85SIngo Molnar 
175f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
176f2f13a85SIngo Molnar 		return NULL;
177f2f13a85SIngo Molnar 
178f2f13a85SIngo Molnar 	/*
179f2f13a85SIngo Molnar 	 * A set_pgd(pgd, *pgd_k) here would be useless on PAE
180f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() below on non-PAE, as
181e0c4f675SKirill A. Shutemov 	 * would set_p4d()/set_pud().
182f2f13a85SIngo Molnar 	 */
183e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
184e0c4f675SKirill A. Shutemov 	p4d_k = p4d_offset(pgd_k, address);
185e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d_k))
186e0c4f675SKirill A. Shutemov 		return NULL;
187e0c4f675SKirill A. Shutemov 
188e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
189e0c4f675SKirill A. Shutemov 	pud_k = pud_offset(p4d_k, address);
190f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
191f2f13a85SIngo Molnar 		return NULL;
192f2f13a85SIngo Molnar 
193f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
194f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
195f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
196f2f13a85SIngo Molnar 		return NULL;
197f2f13a85SIngo Molnar 
198b8bcfe99SJeremy Fitzhardinge 	if (!pmd_present(*pmd))
199f2f13a85SIngo Molnar 		set_pmd(pmd, *pmd_k);
200b8bcfe99SJeremy Fitzhardinge 	else
201f2f13a85SIngo Molnar 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
202f2f13a85SIngo Molnar 
203f2f13a85SIngo Molnar 	return pmd_k;
204f2f13a85SIngo Molnar }
205f2f13a85SIngo Molnar 
206f2f13a85SIngo Molnar void vmalloc_sync_all(void)
207f2f13a85SIngo Molnar {
208f2f13a85SIngo Molnar 	unsigned long address;
209f2f13a85SIngo Molnar 
210f2f13a85SIngo Molnar 	if (SHARED_KERNEL_PMD)
211f2f13a85SIngo Molnar 		return;
212f2f13a85SIngo Molnar 
213f2f13a85SIngo Molnar 	for (address = VMALLOC_START & PMD_MASK;
214dc4fac84SAndy Lutomirski 	     address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
215f2f13a85SIngo Molnar 	     address += PMD_SIZE) {
216f2f13a85SIngo Molnar 		struct page *page;
217f2f13a85SIngo Molnar 
218a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
219f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
220617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
221f01f7c56SBorislav Petkov 			pmd_t *ret;
222617d34d9SJeremy Fitzhardinge 
223a79e53d8SAndrea Arcangeli 			/* the pgt_lock is needed only for Xen */
224617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
225617d34d9SJeremy Fitzhardinge 
226617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
227617d34d9SJeremy Fitzhardinge 			ret = vmalloc_sync_one(page_address(page), address);
228617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
229617d34d9SJeremy Fitzhardinge 
230617d34d9SJeremy Fitzhardinge 			if (!ret)
231f2f13a85SIngo Molnar 				break;
232f2f13a85SIngo Molnar 		}
233a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
234f2f13a85SIngo Molnar 	}
235f2f13a85SIngo Molnar }
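
/*
 * Sketch of one loop iteration above (assuming !SHARED_KERNEL_PMD, i.e.
 * every process carries its own copies of the kernel PMD entries): once
 * init_mm has gained a new vmalloc PMD, vmalloc_sync_one() propagates
 * that single entry via set_pmd() into the pgd of each process found on
 * pgd_list, so later accesses from those processes resolve without ever
 * taking the vmalloc_fault() path.
 */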
236f2f13a85SIngo Molnar 
237f2f13a85SIngo Molnar /*
238f2f13a85SIngo Molnar  * 32-bit:
239f2f13a85SIngo Molnar  *
240f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc or module mapping area
241f2f13a85SIngo Molnar  */
2429326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
243f2f13a85SIngo Molnar {
244f2f13a85SIngo Molnar 	unsigned long pgd_paddr;
245f2f13a85SIngo Molnar 	pmd_t *pmd_k;
246f2f13a85SIngo Molnar 	pte_t *pte_k;
247f2f13a85SIngo Molnar 
248f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
249f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
250f2f13a85SIngo Molnar 		return -1;
251f2f13a85SIngo Molnar 
252f2f13a85SIngo Molnar 	/*
253f2f13a85SIngo Molnar 	 * Synchronize this task's top level page-table
254f2f13a85SIngo Molnar 	 * with the 'reference' page table.
255f2f13a85SIngo Molnar 	 *
256f2f13a85SIngo Molnar 	 * Do _not_ use "current" here. We might be inside
257f2f13a85SIngo Molnar 	 * an interrupt in the middle of a task switch..
258f2f13a85SIngo Molnar 	 */
2596c690ee1SAndy Lutomirski 	pgd_paddr = read_cr3_pa();
260f2f13a85SIngo Molnar 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
261f2f13a85SIngo Molnar 	if (!pmd_k)
262f2f13a85SIngo Molnar 		return -1;
263f2f13a85SIngo Molnar 
26418a95521SToshi Kani 	if (pmd_large(*pmd_k))
265f4eafd8bSToshi Kani 		return 0;
266f4eafd8bSToshi Kani 
267f2f13a85SIngo Molnar 	pte_k = pte_offset_kernel(pmd_k, address);
268f2f13a85SIngo Molnar 	if (!pte_present(*pte_k))
269f2f13a85SIngo Molnar 		return -1;
270f2f13a85SIngo Molnar 
271f2f13a85SIngo Molnar 	return 0;
272f2f13a85SIngo Molnar }
2739326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
274f2f13a85SIngo Molnar 
275f2f13a85SIngo Molnar /*
276f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
277f2f13a85SIngo Molnar  */
278f2f13a85SIngo Molnar static inline void
279f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
280f2f13a85SIngo Molnar 		 struct task_struct *tsk)
281f2f13a85SIngo Molnar {
2829fda6a06SBrian Gerst #ifdef CONFIG_VM86
283f2f13a85SIngo Molnar 	unsigned long bit;
284f2f13a85SIngo Molnar 
2859fda6a06SBrian Gerst 	if (!v8086_mode(regs) || !tsk->thread.vm86)
286f2f13a85SIngo Molnar 		return;
287f2f13a85SIngo Molnar 
288f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
289f2f13a85SIngo Molnar 	if (bit < 32)
2909fda6a06SBrian Gerst 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
2919fda6a06SBrian Gerst #endif
292f2f13a85SIngo Molnar }
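
/*
 * Worked example for the bitmap above (hypothetical fault): a vm86 task
 * touches the VGA text buffer at 0xB8000.  Then
 * bit = (0xB8000 - 0xA0000) >> PAGE_SHIFT = 24, which is < 32, so bit 24
 * of thread.vm86->screen_bitmap is set and the vm86 code can later tell
 * which screen pages were accessed.
 */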
293c61e211dSHarvey Harrison 
294087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
295087975b0SAkinobu Mita {
296087975b0SAkinobu Mita 	return pfn < max_low_pfn;
297087975b0SAkinobu Mita }
298087975b0SAkinobu Mita 
299cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
300c61e211dSHarvey Harrison {
3016c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
302087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
303e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
304e0c4f675SKirill A. Shutemov 	pud_t *pud;
305087975b0SAkinobu Mita 	pmd_t *pmd;
306087975b0SAkinobu Mita 	pte_t *pte;
3072d4a7167SIngo Molnar 
308c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
30939e48d9bSJan Beulich 	pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
310087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
311087975b0SAkinobu Mita 		goto out;
31239e48d9bSJan Beulich #define pr_pde pr_cont
31339e48d9bSJan Beulich #else
31439e48d9bSJan Beulich #define pr_pde pr_info
315c61e211dSHarvey Harrison #endif
316e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
317e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
318e0c4f675SKirill A. Shutemov 	pmd = pmd_offset(pud, address);
31939e48d9bSJan Beulich 	pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
32039e48d9bSJan Beulich #undef pr_pde
321c61e211dSHarvey Harrison 
322c61e211dSHarvey Harrison 	/*
323c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
324c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
325c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
3262d4a7167SIngo Molnar 	 * it's allocated already:
327c61e211dSHarvey Harrison 	 */
328087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
329087975b0SAkinobu Mita 		goto out;
3302d4a7167SIngo Molnar 
331087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
33239e48d9bSJan Beulich 	pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
333087975b0SAkinobu Mita out:
33439e48d9bSJan Beulich 	pr_cont("\n");
335f2f13a85SIngo Molnar }
336f2f13a85SIngo Molnar 
337f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
338f2f13a85SIngo Molnar 
339f2f13a85SIngo Molnar void vmalloc_sync_all(void)
340f2f13a85SIngo Molnar {
3415372e155SKirill A. Shutemov 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
342f2f13a85SIngo Molnar }
343f2f13a85SIngo Molnar 
344f2f13a85SIngo Molnar /*
345f2f13a85SIngo Molnar  * 64-bit:
346f2f13a85SIngo Molnar  *
347f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc area
348f2f13a85SIngo Molnar  */
3499326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
350f2f13a85SIngo Molnar {
351565977a3SToshi Kani 	pgd_t *pgd, *pgd_k;
352565977a3SToshi Kani 	p4d_t *p4d, *p4d_k;
353565977a3SToshi Kani 	pud_t *pud;
354565977a3SToshi Kani 	pmd_t *pmd;
355565977a3SToshi Kani 	pte_t *pte;
356f2f13a85SIngo Molnar 
357f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
358f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
359f2f13a85SIngo Molnar 		return -1;
360f2f13a85SIngo Molnar 
361ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
362ebc8827fSFrederic Weisbecker 
363f2f13a85SIngo Molnar 	/*
364f2f13a85SIngo Molnar 	 * Copy kernel mappings over when needed. This can also
365f2f13a85SIngo Molnar 	 * happen due to a race in a page table update. In the latter
366f2f13a85SIngo Molnar 	 * case, just flush:
367f2f13a85SIngo Molnar 	 */
3686c690ee1SAndy Lutomirski 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
369565977a3SToshi Kani 	pgd_k = pgd_offset_k(address);
370565977a3SToshi Kani 	if (pgd_none(*pgd_k))
371f2f13a85SIngo Molnar 		return -1;
372f2f13a85SIngo Molnar 
373ed7588d5SKirill A. Shutemov 	if (pgtable_l5_enabled()) {
3741160c277SSamu Kallio 		if (pgd_none(*pgd)) {
375565977a3SToshi Kani 			set_pgd(pgd, *pgd_k);
3761160c277SSamu Kallio 			arch_flush_lazy_mmu_mode();
37736b3a772SAndy Lutomirski 		} else {
378565977a3SToshi Kani 			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
3791160c277SSamu Kallio 		}
38036b3a772SAndy Lutomirski 	}
381f2f13a85SIngo Molnar 
382b50858ceSKirill A. Shutemov 	/* With 4-level paging, copying happens on the p4d level. */
383b50858ceSKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
384565977a3SToshi Kani 	p4d_k = p4d_offset(pgd_k, address);
385565977a3SToshi Kani 	if (p4d_none(*p4d_k))
386b50858ceSKirill A. Shutemov 		return -1;
387b50858ceSKirill A. Shutemov 
388ed7588d5SKirill A. Shutemov 	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
389565977a3SToshi Kani 		set_p4d(p4d, *p4d_k);
390b50858ceSKirill A. Shutemov 		arch_flush_lazy_mmu_mode();
391b50858ceSKirill A. Shutemov 	} else {
392565977a3SToshi Kani 		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
393b50858ceSKirill A. Shutemov 	}
394b50858ceSKirill A. Shutemov 
39536b3a772SAndy Lutomirski 	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
396f2f13a85SIngo Molnar 
397b50858ceSKirill A. Shutemov 	pud = pud_offset(p4d, address);
398565977a3SToshi Kani 	if (pud_none(*pud))
399f2f13a85SIngo Molnar 		return -1;
400f2f13a85SIngo Molnar 
40118a95521SToshi Kani 	if (pud_large(*pud))
402f4eafd8bSToshi Kani 		return 0;
403f4eafd8bSToshi Kani 
404f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
405565977a3SToshi Kani 	if (pmd_none(*pmd))
406f2f13a85SIngo Molnar 		return -1;
407f2f13a85SIngo Molnar 
40818a95521SToshi Kani 	if (pmd_large(*pmd))
409f4eafd8bSToshi Kani 		return 0;
410f4eafd8bSToshi Kani 
411f2f13a85SIngo Molnar 	pte = pte_offset_kernel(pmd, address);
412565977a3SToshi Kani 	if (!pte_present(*pte))
413565977a3SToshi Kani 		return -1;
414f2f13a85SIngo Molnar 
415f2f13a85SIngo Molnar 	return 0;
416f2f13a85SIngo Molnar }
4179326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
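
/*
 * Sketch of the walk above for a hypothetical vmalloc address with
 * 4-level paging (pgtable_l5_enabled() is false):
 *
 *	pgd - folded into p4d, nothing is copied at this level
 *	p4d - copied from init_mm via set_p4d() if still empty
 *	pud - if pud_large(), a 1GiB mapping already covers the address;
 *	      return 0, the fault is resolved
 *	pmd - likewise for 2MiB mappings
 *	pte - must be present, otherwise the fault is genuine (-1)
 */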
418f2f13a85SIngo Molnar 
419e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD
420f2f13a85SIngo Molnar static const char errata93_warning[] =
421ad361c98SJoe Perches KERN_ERR
422ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
423ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
424ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
425ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
426e05139f2SJan Beulich #endif
427f2f13a85SIngo Molnar 
428f2f13a85SIngo Molnar /*
429f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
430f2f13a85SIngo Molnar  */
431f2f13a85SIngo Molnar static inline void
432f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
433f2f13a85SIngo Molnar 		 struct task_struct *tsk)
434f2f13a85SIngo Molnar {
435f2f13a85SIngo Molnar }
436f2f13a85SIngo Molnar 
437f2f13a85SIngo Molnar static int bad_address(void *p)
438f2f13a85SIngo Molnar {
439f2f13a85SIngo Molnar 	unsigned long dummy;
440f2f13a85SIngo Molnar 
441f2f13a85SIngo Molnar 	return probe_kernel_address((unsigned long *)p, dummy);
442f2f13a85SIngo Molnar }
443f2f13a85SIngo Molnar 
444f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
445f2f13a85SIngo Molnar {
4466c690ee1SAndy Lutomirski 	pgd_t *base = __va(read_cr3_pa());
447087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
448e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
449c61e211dSHarvey Harrison 	pud_t *pud;
450c61e211dSHarvey Harrison 	pmd_t *pmd;
451c61e211dSHarvey Harrison 	pte_t *pte;
452c61e211dSHarvey Harrison 
4532d4a7167SIngo Molnar 	if (bad_address(pgd))
4542d4a7167SIngo Molnar 		goto bad;
4552d4a7167SIngo Molnar 
45639e48d9bSJan Beulich 	pr_info("PGD %lx ", pgd_val(*pgd));
4572d4a7167SIngo Molnar 
4582d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
4592d4a7167SIngo Molnar 		goto out;
460c61e211dSHarvey Harrison 
461e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
462e0c4f675SKirill A. Shutemov 	if (bad_address(p4d))
463e0c4f675SKirill A. Shutemov 		goto bad;
464e0c4f675SKirill A. Shutemov 
46539e48d9bSJan Beulich 	pr_cont("P4D %lx ", p4d_val(*p4d));
466e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d) || p4d_large(*p4d))
467e0c4f675SKirill A. Shutemov 		goto out;
468e0c4f675SKirill A. Shutemov 
469e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
4702d4a7167SIngo Molnar 	if (bad_address(pud))
4712d4a7167SIngo Molnar 		goto bad;
4722d4a7167SIngo Molnar 
47339e48d9bSJan Beulich 	pr_cont("PUD %lx ", pud_val(*pud));
474b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
4752d4a7167SIngo Molnar 		goto out;
476c61e211dSHarvey Harrison 
477c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
4782d4a7167SIngo Molnar 	if (bad_address(pmd))
4792d4a7167SIngo Molnar 		goto bad;
4802d4a7167SIngo Molnar 
48139e48d9bSJan Beulich 	pr_cont("PMD %lx ", pmd_val(*pmd));
4822d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
4832d4a7167SIngo Molnar 		goto out;
484c61e211dSHarvey Harrison 
485c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
4862d4a7167SIngo Molnar 	if (bad_address(pte))
4872d4a7167SIngo Molnar 		goto bad;
4882d4a7167SIngo Molnar 
48939e48d9bSJan Beulich 	pr_cont("PTE %lx", pte_val(*pte));
4902d4a7167SIngo Molnar out:
49139e48d9bSJan Beulich 	pr_cont("\n");
492c61e211dSHarvey Harrison 	return;
493c61e211dSHarvey Harrison bad:
49439e48d9bSJan Beulich 	pr_info("BAD\n");
495c61e211dSHarvey Harrison }
496c61e211dSHarvey Harrison 
497f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
498c61e211dSHarvey Harrison 
4992d4a7167SIngo Molnar /*
5002d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
5012d4a7167SIngo Molnar  *
5022d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
5032d4a7167SIngo Molnar  * to avoid corruption of the 64bit RIP register on C stepping K8.
5042d4a7167SIngo Molnar  * to avoid corruption of the 64-bit RIP register on C-stepping K8.
5052d4a7167SIngo Molnar  *
5062d4a7167SIngo Molnar  * Many BIOSes that weren't tested properly miss this workaround.
5072d4a7167SIngo Molnar  *
5082d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
5092d4a7167SIngo Molnar  * Try to work around it here.
5102d4a7167SIngo Molnar  *
5112d4a7167SIngo Molnar  * Note that we only handle kernel faults here.
5122d4a7167SIngo Molnar  * This does nothing on 32-bit.
513c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
514c61e211dSHarvey Harrison {
515e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
516e05139f2SJan Beulich 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
517e05139f2SJan Beulich 	    || boot_cpu_data.x86 != 0xf)
518e05139f2SJan Beulich 		return 0;
519e05139f2SJan Beulich 
520c61e211dSHarvey Harrison 	if (address != regs->ip)
521c61e211dSHarvey Harrison 		return 0;
5222d4a7167SIngo Molnar 
523c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
524c61e211dSHarvey Harrison 		return 0;
5252d4a7167SIngo Molnar 
526c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
527c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
528c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
529a454ab31SIngo Molnar 		printk_once(errata93_warning);
530c61e211dSHarvey Harrison 		regs->ip = address;
531c61e211dSHarvey Harrison 		return 1;
532c61e211dSHarvey Harrison 	}
533c61e211dSHarvey Harrison #endif
534c61e211dSHarvey Harrison 	return 0;
535c61e211dSHarvey Harrison }
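
/*
 * Worked example with made-up values: a buggy SMM handler drops the
 * upper half of RIP, so the kernel faults at 0x0000000081234567 with
 * regs->ip == address.  OR-ing in 0xffffffff00000000 yields
 * 0xffffffff81234567; if that lands inside _stext.._etext (or the
 * module area), the truncated RIP is restored and execution resumes
 * instead of oopsing.
 */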
536c61e211dSHarvey Harrison 
537c61e211dSHarvey Harrison /*
5382d4a7167SIngo Molnar  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
5392d4a7167SIngo Molnar  * to illegal addresses >4GB.
5402d4a7167SIngo Molnar  *
5412d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
5422d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
543c61e211dSHarvey Harrison  * segment in the LDT is assumed to be in compatibility mode.
544c61e211dSHarvey Harrison  */
545c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
546c61e211dSHarvey Harrison {
547c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
5482d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
549c61e211dSHarvey Harrison 		return 1;
550c61e211dSHarvey Harrison #endif
551c61e211dSHarvey Harrison 	return 0;
552c61e211dSHarvey Harrison }
553c61e211dSHarvey Harrison 
554c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
555c61e211dSHarvey Harrison {
556c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
557c61e211dSHarvey Harrison 	unsigned long nr;
5582d4a7167SIngo Molnar 
559c61e211dSHarvey Harrison 	/*
5602d4a7167SIngo Molnar 	 * Pentium F0 0F C7 C8 bug workaround:
561c61e211dSHarvey Harrison 	 */
562e2604b49SBorislav Petkov 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
563c61e211dSHarvey Harrison 		nr = (address - idt_descr.address) >> 3;
564c61e211dSHarvey Harrison 
565c61e211dSHarvey Harrison 		if (nr == 6) {
566c61e211dSHarvey Harrison 			do_invalid_op(regs, 0);
567c61e211dSHarvey Harrison 			return 1;
568c61e211dSHarvey Harrison 		}
569c61e211dSHarvey Harrison 	}
570c61e211dSHarvey Harrison #endif
571c61e211dSHarvey Harrison 	return 0;
572c61e211dSHarvey Harrison }
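
/*
 * Illustration with a hypothetical IDT base: on CPUs with X86_BUG_F00F
 * the kernel points idt_descr at a read-only alias of the IDT, so the
 * "F0 0F C7 C8" (lock cmpxchg8b %eax) lockup sequence takes a page
 * fault on the IDT instead of hanging the CPU.  If idt_descr.address
 * were 0xffc00000, a fault address of 0xffc00030 gives
 * nr = 0x30 >> 3 = 6, the invalid-opcode vector, and the fault is
 * rerouted to do_invalid_op().
 */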
573c61e211dSHarvey Harrison 
5742d4a7167SIngo Molnar static void
5752d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code,
576c61e211dSHarvey Harrison 		unsigned long address)
577c61e211dSHarvey Harrison {
578c61e211dSHarvey Harrison 	if (!oops_may_print())
579c61e211dSHarvey Harrison 		return;
580c61e211dSHarvey Harrison 
5811067f030SRicardo Neri 	if (error_code & X86_PF_INSTR) {
58293809be8SHarvey Harrison 		unsigned int level;
583426e34ccSMatt Fleming 		pgd_t *pgd;
584426e34ccSMatt Fleming 		pte_t *pte;
5852d4a7167SIngo Molnar 
5866c690ee1SAndy Lutomirski 		pgd = __va(read_cr3_pa());
587426e34ccSMatt Fleming 		pgd += pgd_index(address);
588426e34ccSMatt Fleming 
589426e34ccSMatt Fleming 		pte = lookup_address_in_pgd(pgd, address, &level);
590c61e211dSHarvey Harrison 
5918f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
592d79d0d8aSDmitry Vyukov 			pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
593d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
594eff50c34SJiri Kosina 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
595eff50c34SJiri Kosina 				(pgd_flags(*pgd) & _PAGE_USER) &&
5961e02ce4cSAndy Lutomirski 				(__read_cr4() & X86_CR4_SMEP))
597d79d0d8aSDmitry Vyukov 			pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
598d79d0d8aSDmitry Vyukov 				from_kuid(&init_user_ns, current_uid()));
599c61e211dSHarvey Harrison 	}
600fd40d6e3SHarvey Harrison 
6014188f063SDmitry Vyukov 	pr_alert("BUG: unable to handle kernel %s at %px\n",
6024188f063SDmitry Vyukov 		 address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
6034188f063SDmitry Vyukov 		 (void *)address);
6042d4a7167SIngo Molnar 
605c61e211dSHarvey Harrison 	dump_pagetable(address);
606c61e211dSHarvey Harrison }
607c61e211dSHarvey Harrison 
6082d4a7167SIngo Molnar static noinline void
6092d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
6102d4a7167SIngo Molnar 	    unsigned long address)
611c61e211dSHarvey Harrison {
6122d4a7167SIngo Molnar 	struct task_struct *tsk;
6132d4a7167SIngo Molnar 	unsigned long flags;
6142d4a7167SIngo Molnar 	int sig;
6152d4a7167SIngo Molnar 
6162d4a7167SIngo Molnar 	flags = oops_begin();
6172d4a7167SIngo Molnar 	tsk = current;
6182d4a7167SIngo Molnar 	sig = SIGKILL;
619c61e211dSHarvey Harrison 
620c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
62192181f19SNick Piggin 	       tsk->comm, address);
622c61e211dSHarvey Harrison 	dump_pagetable(address);
6232d4a7167SIngo Molnar 
624c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
625874d93d1SAlexander van Heukelum 		sig = 0;
6262d4a7167SIngo Molnar 
627874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
628c61e211dSHarvey Harrison }
629c61e211dSHarvey Harrison 
630e49d3cbeSAndy Lutomirski static void set_signal_archinfo(unsigned long address,
631e49d3cbeSAndy Lutomirski 				unsigned long error_code)
632e49d3cbeSAndy Lutomirski {
633e49d3cbeSAndy Lutomirski 	struct task_struct *tsk = current;
634e49d3cbeSAndy Lutomirski 
635e49d3cbeSAndy Lutomirski 	/*
636e49d3cbeSAndy Lutomirski 	 * To avoid leaking information about the kernel page
637e49d3cbeSAndy Lutomirski 	 * table layout, pretend that user-mode accesses to
638e49d3cbeSAndy Lutomirski 	 * kernel addresses are always protection faults.
639e49d3cbeSAndy Lutomirski 	 */
640e49d3cbeSAndy Lutomirski 	if (address >= TASK_SIZE_MAX)
641e49d3cbeSAndy Lutomirski 		error_code |= X86_PF_PROT;
642e49d3cbeSAndy Lutomirski 
643e49d3cbeSAndy Lutomirski 	tsk->thread.trap_nr = X86_TRAP_PF;
644e49d3cbeSAndy Lutomirski 	tsk->thread.error_code = error_code | X86_PF_USER;
645e49d3cbeSAndy Lutomirski 	tsk->thread.cr2 = address;
646e49d3cbeSAndy Lutomirski }
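
/*
 * Example of the mangling above (hypothetical access): a user process
 * reads the unmapped kernel address 0xffff888000000000.  The hardware
 * reports a not-present fault (X86_PF_PROT clear), but because the
 * address is >= TASK_SIZE_MAX, X86_PF_PROT is ORed in before the error
 * code is saved, so the resulting SIGSEGV cannot be used to probe which
 * kernel addresses are actually mapped.
 */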
647e49d3cbeSAndy Lutomirski 
6482d4a7167SIngo Molnar static noinline void
6492d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
6504fc34901SAndy Lutomirski 	   unsigned long address, int signal, int si_code)
65192181f19SNick Piggin {
65292181f19SNick Piggin 	struct task_struct *tsk = current;
65392181f19SNick Piggin 	unsigned long flags;
65492181f19SNick Piggin 	int sig;
65592181f19SNick Piggin 
656*ebb53e25SAndy Lutomirski 	if (user_mode(regs)) {
657*ebb53e25SAndy Lutomirski 		/*
658*ebb53e25SAndy Lutomirski 		 * This is an implicit supervisor-mode access from user
659*ebb53e25SAndy Lutomirski 		 * mode.  Bypass all the kernel-mode recovery code and just
660*ebb53e25SAndy Lutomirski 		 * OOPS.
661*ebb53e25SAndy Lutomirski 		 */
662*ebb53e25SAndy Lutomirski 		goto oops;
663*ebb53e25SAndy Lutomirski 	}
664*ebb53e25SAndy Lutomirski 
66592181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
66681fd9c18SJann Horn 	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
667c026b359SPeter Zijlstra 		/*
668c026b359SPeter Zijlstra 		 * Any interrupt that takes a fault gets the fixup. This makes
669c026b359SPeter Zijlstra 		 * the below recursive fault logic only apply to a faults from
670c026b359SPeter Zijlstra 		 * task context.
671c026b359SPeter Zijlstra 		 */
672c026b359SPeter Zijlstra 		if (in_interrupt())
673c026b359SPeter Zijlstra 			return;
674c026b359SPeter Zijlstra 
675c026b359SPeter Zijlstra 		/*
676c026b359SPeter Zijlstra 		 * Per the above we're !in_interrupt(), aka. task context.
677c026b359SPeter Zijlstra 		 *
678c026b359SPeter Zijlstra 		 * In this case we need to make sure we're not recursively
679c026b359SPeter Zijlstra 		 * faulting through the emulate_vsyscall() logic.
680c026b359SPeter Zijlstra 		 */
6812a53ccbcSIngo Molnar 		if (current->thread.sig_on_uaccess_err && signal) {
682e49d3cbeSAndy Lutomirski 			set_signal_archinfo(address, error_code);
6834fc34901SAndy Lutomirski 
6844fc34901SAndy Lutomirski 			/* XXX: hwpoison faults will set the wrong code. */
685b4fd52f2SEric W. Biederman 			force_sig_fault(signal, si_code, (void __user *)address,
686b4fd52f2SEric W. Biederman 					tsk);
6874fc34901SAndy Lutomirski 		}
688c026b359SPeter Zijlstra 
689c026b359SPeter Zijlstra 		/*
690c026b359SPeter Zijlstra 		 * Barring that, we can do the fixup and be happy.
691c026b359SPeter Zijlstra 		 */
69292181f19SNick Piggin 		return;
6934fc34901SAndy Lutomirski 	}
69492181f19SNick Piggin 
6956271cfdfSAndy Lutomirski #ifdef CONFIG_VMAP_STACK
6966271cfdfSAndy Lutomirski 	/*
6976271cfdfSAndy Lutomirski 	 * Stack overflow?  During boot, we can fault near the initial
6986271cfdfSAndy Lutomirski 	 * stack in the direct map, but that's not an overflow -- check
6996271cfdfSAndy Lutomirski 	 * that we're in vmalloc space to avoid this.
7006271cfdfSAndy Lutomirski 	 */
7016271cfdfSAndy Lutomirski 	if (is_vmalloc_addr((void *)address) &&
7026271cfdfSAndy Lutomirski 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
7036271cfdfSAndy Lutomirski 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
7046271cfdfSAndy Lutomirski 		unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
7056271cfdfSAndy Lutomirski 		/*
7066271cfdfSAndy Lutomirski 		 * We're likely to be running with very little stack space
7076271cfdfSAndy Lutomirski 		 * left.  It's plausible that we'd hit this condition but
7086271cfdfSAndy Lutomirski 		 * double-fault even before we get this far, in which case
7096271cfdfSAndy Lutomirski 		 * we're fine: the double-fault handler will deal with it.
7106271cfdfSAndy Lutomirski 		 *
7116271cfdfSAndy Lutomirski 		 * We don't want to make it all the way into the oops code
7126271cfdfSAndy Lutomirski 		 * and then double-fault, though, because we're likely to
7136271cfdfSAndy Lutomirski 		 * break the console driver and lose most of the stack dump.
7146271cfdfSAndy Lutomirski 		 */
7156271cfdfSAndy Lutomirski 		asm volatile ("movq %[stack], %%rsp\n\t"
7166271cfdfSAndy Lutomirski 			      "call handle_stack_overflow\n\t"
7176271cfdfSAndy Lutomirski 			      "1: jmp 1b"
718f5caf621SJosh Poimboeuf 			      : ASM_CALL_CONSTRAINT
7196271cfdfSAndy Lutomirski 			      : "D" ("kernel stack overflow (page fault)"),
7206271cfdfSAndy Lutomirski 				"S" (regs), "d" (address),
7216271cfdfSAndy Lutomirski 				[stack] "rm" (stack));
7226271cfdfSAndy Lutomirski 		unreachable();
7236271cfdfSAndy Lutomirski 	}
7246271cfdfSAndy Lutomirski #endif
7256271cfdfSAndy Lutomirski 
72692181f19SNick Piggin 	/*
7272d4a7167SIngo Molnar 	 * 32-bit:
7282d4a7167SIngo Molnar 	 *
72992181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
73092181f19SNick Piggin 	 *   had been triggered by is_prefetch, fixup_exception would have
73192181f19SNick Piggin 	 *   handled it.
73292181f19SNick Piggin 	 *
7332d4a7167SIngo Molnar 	 * 64-bit:
7342d4a7167SIngo Molnar 	 *
73592181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
73692181f19SNick Piggin 	 */
73792181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
73892181f19SNick Piggin 		return;
73992181f19SNick Piggin 
74092181f19SNick Piggin 	if (is_errata93(regs, address))
74192181f19SNick Piggin 		return;
74292181f19SNick Piggin 
74392181f19SNick Piggin 	/*
7443425d934SSai Praneeth 	 * Buggy firmware could access regions which might page fault; try to
7453425d934SSai Praneeth 	 * recover from such faults.
7463425d934SSai Praneeth 	 */
7473425d934SSai Praneeth 	if (IS_ENABLED(CONFIG_EFI))
7483425d934SSai Praneeth 		efi_recover_from_page_fault(address);
7493425d934SSai Praneeth 
750*ebb53e25SAndy Lutomirski oops:
7513425d934SSai Praneeth 	/*
75292181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
7532d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
75492181f19SNick Piggin 	 */
75592181f19SNick Piggin 	flags = oops_begin();
75692181f19SNick Piggin 
75792181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
75892181f19SNick Piggin 
759a70857e4SAaron Tomlin 	if (task_stack_end_corrupted(tsk))
760b0f4c4b3SPrarit Bhargava 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
76119803078SIngo Molnar 
76292181f19SNick Piggin 	sig = SIGKILL;
76392181f19SNick Piggin 	if (__die("Oops", regs, error_code))
76492181f19SNick Piggin 		sig = 0;
7652d4a7167SIngo Molnar 
76692181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
767b0f4c4b3SPrarit Bhargava 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
7682d4a7167SIngo Molnar 
76992181f19SNick Piggin 	oops_end(flags, regs, sig);
77092181f19SNick Piggin }
77192181f19SNick Piggin 
7722d4a7167SIngo Molnar /*
7732d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
7742d4a7167SIngo Molnar  * sysctl is set:
7752d4a7167SIngo Molnar  */
7762d4a7167SIngo Molnar static inline void
7772d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
7782d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
7792d4a7167SIngo Molnar {
780ba54d856SBorislav Petkov 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
781ba54d856SBorislav Petkov 
7822d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
7832d4a7167SIngo Molnar 		return;
7842d4a7167SIngo Molnar 
7852d4a7167SIngo Molnar 	if (!printk_ratelimit())
7862d4a7167SIngo Molnar 		return;
7872d4a7167SIngo Molnar 
78810a7e9d8SKees Cook 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
789ba54d856SBorislav Petkov 		loglvl, tsk->comm, task_pid_nr(tsk), address,
7902d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
7912d4a7167SIngo Molnar 
7922d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
7932d4a7167SIngo Molnar 
7942d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
795ba54d856SBorislav Petkov 
796342db04aSJann Horn 	show_opcodes(regs, loglvl);
7972d4a7167SIngo Molnar }
7982d4a7167SIngo Molnar 
79902e983b7SDave Hansen /*
80002e983b7SDave Hansen  * The (legacy) vsyscall page is the lone page in the kernel portion
80102e983b7SDave Hansen  * of the address space that has user-accessible permissions.
80202e983b7SDave Hansen  */
80302e983b7SDave Hansen static bool is_vsyscall_vaddr(unsigned long vaddr)
80402e983b7SDave Hansen {
8053ae0ad92SDave Hansen 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
80602e983b7SDave Hansen }
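
/*
 * For instance, with VSYSCALL_ADDR == 0xffffffffff600000, both a call
 * to the legacy gettimeofday entry at 0xffffffffff600000 and a read of
 * 0xffffffffff600800 mask down to the same page and count as vsyscall
 * accesses, while 0xffffffffff601000 (the next page) does not.
 */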
80702e983b7SDave Hansen 
8082d4a7167SIngo Molnar static void
8092d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
810419ceeb1SEric W. Biederman 		       unsigned long address, u32 pkey, int si_code)
81192181f19SNick Piggin {
81292181f19SNick Piggin 	struct task_struct *tsk = current;
81392181f19SNick Piggin 
81492181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
8156ea59b07SAndy Lutomirski 	if (user_mode(regs) && (error_code & X86_PF_USER)) {
81692181f19SNick Piggin 		/*
8172d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
81892181f19SNick Piggin 		 */
81992181f19SNick Piggin 		local_irq_enable();
82092181f19SNick Piggin 
82192181f19SNick Piggin 		/*
82292181f19SNick Piggin 		 * Valid to do another page fault here because this one came
8232d4a7167SIngo Molnar 		 * from user space:
82492181f19SNick Piggin 		 */
82592181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
82692181f19SNick Piggin 			return;
82792181f19SNick Piggin 
82892181f19SNick Piggin 		if (is_errata100(regs, address))
82992181f19SNick Piggin 			return;
83092181f19SNick Piggin 
831dc4fac84SAndy Lutomirski 		/*
832dc4fac84SAndy Lutomirski 		 * To avoid leaking information about the kernel page table
833dc4fac84SAndy Lutomirski 		 * layout, pretend that user-mode accesses to kernel addresses
834dc4fac84SAndy Lutomirski 		 * are always protection faults.
835dc4fac84SAndy Lutomirski 		 */
836dc4fac84SAndy Lutomirski 		if (address >= TASK_SIZE_MAX)
8371067f030SRicardo Neri 			error_code |= X86_PF_PROT;
8383ae36655SAndy Lutomirski 
839e575a86fSKees Cook 		if (likely(show_unhandled_signals))
8402d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
84192181f19SNick Piggin 
842e49d3cbeSAndy Lutomirski 		set_signal_archinfo(address, error_code);
8432d4a7167SIngo Molnar 
8449db812dbSEric W. Biederman 		if (si_code == SEGV_PKUERR)
845419ceeb1SEric W. Biederman 			force_sig_pkuerr((void __user *)address, pkey);
8469db812dbSEric W. Biederman 
847b4fd52f2SEric W. Biederman 		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
8482d4a7167SIngo Molnar 
84992181f19SNick Piggin 		return;
85092181f19SNick Piggin 	}
85192181f19SNick Piggin 
85292181f19SNick Piggin 	if (is_f00f_bug(regs, address))
85392181f19SNick Piggin 		return;
85492181f19SNick Piggin 
8554fc34901SAndy Lutomirski 	no_context(regs, error_code, address, SIGSEGV, si_code);
85692181f19SNick Piggin }
85792181f19SNick Piggin 
8582d4a7167SIngo Molnar static noinline void
8592d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
860768fd9c6SEric W. Biederman 		     unsigned long address)
86192181f19SNick Piggin {
862419ceeb1SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR);
86392181f19SNick Piggin }
86492181f19SNick Piggin 
8652d4a7167SIngo Molnar static void
8662d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
867419ceeb1SEric W. Biederman 	   unsigned long address, u32 pkey, int si_code)
86892181f19SNick Piggin {
86992181f19SNick Piggin 	struct mm_struct *mm = current->mm;
87092181f19SNick Piggin 	/*
87192181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map..
87292181f19SNick Piggin 	 * Fix it, but check if it's kernel or user first..
87392181f19SNick Piggin 	 */
87492181f19SNick Piggin 	up_read(&mm->mmap_sem);
87592181f19SNick Piggin 
876aba1ecd3SEric W. Biederman 	__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
87792181f19SNick Piggin }
87892181f19SNick Piggin 
8792d4a7167SIngo Molnar static noinline void
8802d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
88192181f19SNick Piggin {
882419ceeb1SEric W. Biederman 	__bad_area(regs, error_code, address, 0, SEGV_MAPERR);
88392181f19SNick Piggin }
88492181f19SNick Piggin 
88533a709b2SDave Hansen static inline bool bad_area_access_from_pkeys(unsigned long error_code,
88633a709b2SDave Hansen 		struct vm_area_struct *vma)
88733a709b2SDave Hansen {
88807f146f5SDave Hansen 	/* This code is always called on the current mm */
88907f146f5SDave Hansen 	bool foreign = false;
89007f146f5SDave Hansen 
89133a709b2SDave Hansen 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
89233a709b2SDave Hansen 		return false;
8931067f030SRicardo Neri 	if (error_code & X86_PF_PK)
89433a709b2SDave Hansen 		return true;
89507f146f5SDave Hansen 	/* this checks permission keys on the VMA: */
8961067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
8971067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
89807f146f5SDave Hansen 		return true;
89933a709b2SDave Hansen 	return false;
90092181f19SNick Piggin }
90192181f19SNick Piggin 
9022d4a7167SIngo Molnar static noinline void
9032d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
9047b2d0dbaSDave Hansen 		      unsigned long address, struct vm_area_struct *vma)
90592181f19SNick Piggin {
906019132ffSDave Hansen 	/*
907019132ffSDave Hansen 	 * This OSPKE check is not strictly necessary at runtime.
908019132ffSDave Hansen 	 * But, doing it this way allows compiler optimizations
909019132ffSDave Hansen 	 * if pkeys are compiled out.
910019132ffSDave Hansen 	 */
911aba1ecd3SEric W. Biederman 	if (bad_area_access_from_pkeys(error_code, vma)) {
9129db812dbSEric W. Biederman 		/*
9139db812dbSEric W. Biederman 		 * A protection key fault means that the PKRU value did not allow
9149db812dbSEric W. Biederman 		 * access to some PTE.  Userspace can figure out what PKRU was
9159db812dbSEric W. Biederman 		 * from the XSAVE state.  This function captures the pkey from
9169db812dbSEric W. Biederman 		 * the vma and passes it to userspace so userspace can discover
9179db812dbSEric W. Biederman 		 * which protection key was set on the PTE.
9189db812dbSEric W. Biederman 		 *
9199db812dbSEric W. Biederman 		 * If we get here, we know that the hardware signaled a X86_PF_PK
9209db812dbSEric W. Biederman 		 * fault and that there was a VMA once we got in the fault
9219db812dbSEric W. Biederman 		 * handler.  It does *not* guarantee that the VMA we find here
9229db812dbSEric W. Biederman 		 * was the one that we faulted on.
9239db812dbSEric W. Biederman 		 *
9249db812dbSEric W. Biederman 		 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
9259db812dbSEric W. Biederman 		 * 2. T1   : set PKRU to deny access to pkey=4, touches page
9269db812dbSEric W. Biederman 		 * 3. T1   : faults...
9279db812dbSEric W. Biederman 		 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
9289db812dbSEric W. Biederman 		 * 5. T1   : enters fault handler, takes mmap_sem, etc...
9299db812dbSEric W. Biederman 		 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
9309db812dbSEric W. Biederman 		 *	     faulted on a pte with its pkey=4.
9319db812dbSEric W. Biederman 		 */
932aba1ecd3SEric W. Biederman 		u32 pkey = vma_pkey(vma);
9339db812dbSEric W. Biederman 
934419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, pkey, SEGV_PKUERR);
935aba1ecd3SEric W. Biederman 	} else {
936419ceeb1SEric W. Biederman 		__bad_area(regs, error_code, address, 0, SEGV_ACCERR);
937aba1ecd3SEric W. Biederman 	}
93892181f19SNick Piggin }
93992181f19SNick Piggin 
9402d4a7167SIngo Molnar static void
941a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
94227274f73SEric W. Biederman 	  unsigned int fault)
94392181f19SNick Piggin {
94492181f19SNick Piggin 	struct task_struct *tsk = current;
94592181f19SNick Piggin 
9462d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
9471067f030SRicardo Neri 	if (!(error_code & X86_PF_USER)) {
9484fc34901SAndy Lutomirski 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
94996054569SLinus Torvalds 		return;
95096054569SLinus Torvalds 	}
9512d4a7167SIngo Molnar 
952cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
95392181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
95492181f19SNick Piggin 		return;
9552d4a7167SIngo Molnar 
956e49d3cbeSAndy Lutomirski 	set_signal_archinfo(address, error_code);
9572d4a7167SIngo Molnar 
958a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
959f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
96040e55394SEric W. Biederman 		unsigned lsb = 0;
96140e55394SEric W. Biederman 
96240e55394SEric W. Biederman 		pr_err(
963a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
964a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
96540e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON_LARGE)
96640e55394SEric W. Biederman 			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
96740e55394SEric W. Biederman 		if (fault & VM_FAULT_HWPOISON)
96840e55394SEric W. Biederman 			lsb = PAGE_SHIFT;
96940e55394SEric W. Biederman 		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk);
97040e55394SEric W. Biederman 		return;
971a6e04aa9SAndi Kleen 	}
972a6e04aa9SAndi Kleen #endif
973b4fd52f2SEric W. Biederman 	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
97492181f19SNick Piggin }
97592181f19SNick Piggin 
9763a13c4d7SJohannes Weiner static noinline void
9772d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
97825c102d8SEric W. Biederman 	       unsigned long address, vm_fault_t fault)
97992181f19SNick Piggin {
9801067f030SRicardo Neri 	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
9814fc34901SAndy Lutomirski 		no_context(regs, error_code, address, 0, 0);
9823a13c4d7SJohannes Weiner 		return;
983b80ef10eSKOSAKI Motohiro 	}
984b80ef10eSKOSAKI Motohiro 
9852d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
986f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
9871067f030SRicardo Neri 		if (!(error_code & X86_PF_USER)) {
9884fc34901SAndy Lutomirski 			no_context(regs, error_code, address,
9894fc34901SAndy Lutomirski 				   SIGSEGV, SEGV_MAPERR);
9903a13c4d7SJohannes Weiner 			return;
991f8626854SAndrey Vagin 		}
992f8626854SAndrey Vagin 
993c2d23f91SDavid Rientjes 		/*
994c2d23f91SDavid Rientjes 		 * We ran out of memory, call the OOM killer, and return the
995c2d23f91SDavid Rientjes 		 * userspace (which will retry the fault, or kill us if we got
996c2d23f91SDavid Rientjes 		 * oom-killed):
997c2d23f91SDavid Rientjes 		 */
998c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
9992d4a7167SIngo Molnar 	} else {
1000f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1001f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
100227274f73SEric W. Biederman 			do_sigbus(regs, error_code, address, fault);
100333692f27SLinus Torvalds 		else if (fault & VM_FAULT_SIGSEGV)
1004768fd9c6SEric W. Biederman 			bad_area_nosemaphore(regs, error_code, address);
100592181f19SNick Piggin 		else
100692181f19SNick Piggin 			BUG();
100792181f19SNick Piggin 	}
10082d4a7167SIngo Molnar }
100992181f19SNick Piggin 
10108fed6200SDave Hansen static int spurious_kernel_fault_check(unsigned long error_code, pte_t *pte)
1011d8b57bb7SThomas Gleixner {
10121067f030SRicardo Neri 	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
1013d8b57bb7SThomas Gleixner 		return 0;
10142d4a7167SIngo Molnar 
10151067f030SRicardo Neri 	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
1016d8b57bb7SThomas Gleixner 		return 0;
1017d8b57bb7SThomas Gleixner 
1018d8b57bb7SThomas Gleixner 	return 1;
1019d8b57bb7SThomas Gleixner }
1020d8b57bb7SThomas Gleixner 
1021c61e211dSHarvey Harrison /*
10222d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
10232d4a7167SIngo Molnar  *
10242d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
10252d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
10262d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
10272d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
10282d4a7167SIngo Molnar  * on other processors.
10292d4a7167SIngo Molnar  *
103031668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
103231668511SDavid Vrabel  * fewer permissions than the page table entry.  Non-present (P = 0)
103231668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
103331668511SDavid Vrabel  *
10345b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
10355b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
103631668511SDavid Vrabel  *
103731668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
103831668511SDavid Vrabel  *
103931668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
104031668511SDavid Vrabel  * (Optional Invalidation).
10415b727a3bSJeremy Fitzhardinge  */
10429326638cSMasami Hiramatsu static noinline int
10438fed6200SDave Hansen spurious_kernel_fault(unsigned long error_code, unsigned long address)
10445b727a3bSJeremy Fitzhardinge {
10455b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
1046e0c4f675SKirill A. Shutemov 	p4d_t *p4d;
10475b727a3bSJeremy Fitzhardinge 	pud_t *pud;
10485b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
10495b727a3bSJeremy Fitzhardinge 	pte_t *pte;
10503c3e5694SSteven Rostedt 	int ret;
10515b727a3bSJeremy Fitzhardinge 
105231668511SDavid Vrabel 	/*
105331668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
105431668511SDavid Vrabel 	 * spurious faults.
105531668511SDavid Vrabel 	 *
105631668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
105731668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
105831668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
105931668511SDavid Vrabel 	 * faults.
106031668511SDavid Vrabel 	 */
10611067f030SRicardo Neri 	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
10621067f030SRicardo Neri 	    error_code != (X86_PF_INSTR | X86_PF_PROT))
10635b727a3bSJeremy Fitzhardinge 		return 0;
10645b727a3bSJeremy Fitzhardinge 
10655b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
10665b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
10675b727a3bSJeremy Fitzhardinge 		return 0;
10685b727a3bSJeremy Fitzhardinge 
1069e0c4f675SKirill A. Shutemov 	p4d = p4d_offset(pgd, address);
1070e0c4f675SKirill A. Shutemov 	if (!p4d_present(*p4d))
1071e0c4f675SKirill A. Shutemov 		return 0;
1072e0c4f675SKirill A. Shutemov 
1073e0c4f675SKirill A. Shutemov 	if (p4d_large(*p4d))
10748fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
1075e0c4f675SKirill A. Shutemov 
1076e0c4f675SKirill A. Shutemov 	pud = pud_offset(p4d, address);
10775b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
10785b727a3bSJeremy Fitzhardinge 		return 0;
10795b727a3bSJeremy Fitzhardinge 
1080d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
10818fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
1082d8b57bb7SThomas Gleixner 
10835b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
10845b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
10855b727a3bSJeremy Fitzhardinge 		return 0;
10865b727a3bSJeremy Fitzhardinge 
1087d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
10888fed6200SDave Hansen 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
1089d8b57bb7SThomas Gleixner 
10905b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
1091954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
10925b727a3bSJeremy Fitzhardinge 		return 0;
10935b727a3bSJeremy Fitzhardinge 
10948fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, pte);
10953c3e5694SSteven Rostedt 	if (!ret)
10963c3e5694SSteven Rostedt 		return 0;
10973c3e5694SSteven Rostedt 
10983c3e5694SSteven Rostedt 	/*
10992d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
11002d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
11013c3e5694SSteven Rostedt 	 */
11028fed6200SDave Hansen 	ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd);
11033c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
11042d4a7167SIngo Molnar 
11053c3e5694SSteven Rostedt 	return ret;
11065b727a3bSJeremy Fitzhardinge }
11078fed6200SDave Hansen NOKPROBE_SYMBOL(spurious_kernel_fault);
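
/*
 * Worked example (hypothetical timing): CPU 0 changes a kernel page
 * from RO to RW, e.g. via set_memory_rw(), and per the lazy scheme
 * above sends no TLB-flush IPI.  CPU 1 still holds the stale RO
 * translation, so a write there faults with
 * error_code == (X86_PF_WRITE | X86_PF_PROT).  The walk finds
 * pte_write(*pte) set, spurious_kernel_fault() returns 1, and the
 * access is simply retried after the implicit TLB refill.
 */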
11085b727a3bSJeremy Fitzhardinge 
1109c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1110c61e211dSHarvey Harrison 
11112d4a7167SIngo Molnar static inline int
111268da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
111392181f19SNick Piggin {
111407f146f5SDave Hansen 	/* This is only called for the current mm, so: */
111507f146f5SDave Hansen 	bool foreign = false;
1116e8c6226dSDave Hansen 
1117e8c6226dSDave Hansen 	/*
1118e8c6226dSDave Hansen 	 * Read or write was blocked by protection keys.  This is
1119e8c6226dSDave Hansen 	 * always an unconditional error and can never result in
1120e8c6226dSDave Hansen 	 * a follow-up action to resolve the fault, like a COW.
1121e8c6226dSDave Hansen 	 */
11221067f030SRicardo Neri 	if (error_code & X86_PF_PK)
1123e8c6226dSDave Hansen 		return 1;
1124e8c6226dSDave Hansen 
112533a709b2SDave Hansen 	/*
112607f146f5SDave Hansen 	 * Check the VMA as well, so that we do not take a
11271067f030SRicardo Neri 	 * fault only to hit an X86_PF_PK fault as soon as we
112807f146f5SDave Hansen 	 * fill in a page.
112907f146f5SDave Hansen 	 */
11301067f030SRicardo Neri 	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
11311067f030SRicardo Neri 				       (error_code & X86_PF_INSTR), foreign))
113207f146f5SDave Hansen 		return 1;
113333a709b2SDave Hansen 
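	/*
	 * The remaining checks mirror the hardware error code:
	 *  - a write faults unless the VMA has VM_WRITE;
	 *  - a read of a present page is a protection violation;
	 *  - a read of a non-present page faults only if the VMA
	 *    grants no access at all.
	 */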
11341067f030SRicardo Neri 	if (error_code & X86_PF_WRITE) {
11352d4a7167SIngo Molnar 		/* write, present and write, not present: */
113692181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
113792181f19SNick Piggin 			return 1;
11382d4a7167SIngo Molnar 		return 0;
11392d4a7167SIngo Molnar 	}
11402d4a7167SIngo Molnar 
11412d4a7167SIngo Molnar 	/* read, present: */
11421067f030SRicardo Neri 	if (unlikely(error_code & X86_PF_PROT))
114392181f19SNick Piggin 		return 1;
11442d4a7167SIngo Molnar 
11452d4a7167SIngo Molnar 	/* read, not present: */
114692181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
114792181f19SNick Piggin 		return 1;
114892181f19SNick Piggin 
114992181f19SNick Piggin 	return 0;
115092181f19SNick Piggin }
115192181f19SNick Piggin 
11520973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
11530973a06cSHiroshi Shimamoto {
11543ae0ad92SDave Hansen 	/*
11553ae0ad92SDave Hansen 	 * On 64-bit systems, the vsyscall page is at an address above
11563ae0ad92SDave Hansen 	 * TASK_SIZE_MAX, but is not considered part of the kernel
11573ae0ad92SDave Hansen 	 * address space.
11583ae0ad92SDave Hansen 	 */
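	/* (On 64-bit, that page sits at VSYSCALL_ADDR, 0xffffffffff600000.) */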
11593ae0ad92SDave Hansen 	if (IS_ENABLED(CONFIG_X86_64) && is_vsyscall_vaddr(address))
11603ae0ad92SDave Hansen 		return false;
11613ae0ad92SDave Hansen 
1162d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
11630973a06cSHiroshi Shimamoto }
11640973a06cSHiroshi Shimamoto 
1165c61e211dSHarvey Harrison /*
11668fed6200SDave Hansen  * Called for all faults where 'address' is part of the kernel address
11678fed6200SDave Hansen  * space.  Might get called for faults that originate from *code* that
11688fed6200SDave Hansen  * ran in userspace or the kernel.
1169c61e211dSHarvey Harrison  */
11708fed6200SDave Hansen static void
11718fed6200SDave Hansen do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
11720ac09f9fSJiri Olsa 		   unsigned long address)
1173c61e211dSHarvey Harrison {
11748fed6200SDave Hansen 	/*
1175367e3f1dSDave Hansen 	 * Protection keys exceptions only happen on user pages.  We
1176367e3f1dSDave Hansen 	 * have no user pages in the kernel portion of the address
1177367e3f1dSDave Hansen 	 * space, so do not expect them here.
1178367e3f1dSDave Hansen 	 */
1179367e3f1dSDave Hansen 	WARN_ON_ONCE(hw_error_code & X86_PF_PK);
1180367e3f1dSDave Hansen 
1181367e3f1dSDave Hansen 	/*
11828fed6200SDave Hansen 	 * We can fault-in kernel-space virtual memory on-demand. The
11838fed6200SDave Hansen 	 * 'reference' page table is init_mm.pgd.
11848fed6200SDave Hansen 	 *
11858fed6200SDave Hansen 	 * NOTE! We MUST NOT take any locks for this case. We may
11868fed6200SDave Hansen 	 * be in an interrupt or a critical region, and should
11878fed6200SDave Hansen 	 * only copy the information from the master page table,
11888fed6200SDave Hansen 	 * nothing more.
11898fed6200SDave Hansen 	 *
11908fed6200SDave Hansen 	 * Before doing this on-demand faulting, ensure that the
11918fed6200SDave Hansen 	 * fault is not any of the following:
11928fed6200SDave Hansen 	 * 1. A fault on a PTE with a reserved bit set.
11938fed6200SDave Hansen 	 * 2. A fault caused by a user-mode access.  (Do not demand-
11948fed6200SDave Hansen 	 *    fault kernel memory due to user-mode accesses).
11958fed6200SDave Hansen 	 * 3. A fault caused by a page-level protection violation.
11968fed6200SDave Hansen 	 *    (A demand fault would be on a non-present page which
11978fed6200SDave Hansen 	 *     would have X86_PF_PROT==0).
11988fed6200SDave Hansen 	 */
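	/*
	 * E.g. the first supervisor touch of a fresh vmalloc() mapping
	 * arrives here as a non-present fault and is resolved by
	 * vmalloc_fault() syncing the missing entries from init_mm's
	 * page tables.
	 */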
11998fed6200SDave Hansen 	if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
12008fed6200SDave Hansen 		if (vmalloc_fault(address) >= 0)
12018fed6200SDave Hansen 			return;
12028fed6200SDave Hansen 	}
12038fed6200SDave Hansen 
12048fed6200SDave Hansen 	/* Was the fault spurious, caused by lazy TLB invalidation? */
12058fed6200SDave Hansen 	if (spurious_kernel_fault(hw_error_code, address))
12068fed6200SDave Hansen 		return;
12078fed6200SDave Hansen 
12088fed6200SDave Hansen 	/* kprobes don't want to hook the spurious faults: */
12098fed6200SDave Hansen 	if (kprobes_fault(regs))
12108fed6200SDave Hansen 		return;
12118fed6200SDave Hansen 
12128fed6200SDave Hansen 	/*
12138fed6200SDave Hansen 	 * Note, despite being a "bad area", there are quite a few
12148fed6200SDave Hansen 	 * acceptable reasons to get here, such as erratum fixups
12158fed6200SDave Hansen 	 * and handling kernel code that can fault, like get_user().
12168fed6200SDave Hansen 	 *
12178fed6200SDave Hansen 	 * Don't take the mm semaphore here. If we fix up a prefetch
12188fed6200SDave Hansen 	 * fault we could otherwise deadlock:
12198fed6200SDave Hansen 	 */
1220ba9f6f89SLinus Torvalds 	bad_area_nosemaphore(regs, hw_error_code, address);
12218fed6200SDave Hansen }
12228fed6200SDave Hansen NOKPROBE_SYMBOL(do_kern_addr_fault);
12238fed6200SDave Hansen 
1224aa37c51bSDave Hansen /* Handle faults in the user portion of the address space */
1225aa37c51bSDave Hansen static inline
1226aa37c51bSDave Hansen void do_user_addr_fault(struct pt_regs *regs,
1227aa37c51bSDave Hansen 			unsigned long hw_error_code,
1228c61e211dSHarvey Harrison 			unsigned long address)
1229c61e211dSHarvey Harrison {
1230c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
1231c61e211dSHarvey Harrison 	struct task_struct *tsk;
12322d4a7167SIngo Molnar 	struct mm_struct *mm;
123350a7ca3cSSouptick Joarder 	vm_fault_t fault, major = 0;
1234759496baSJohannes Weiner 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1235c61e211dSHarvey Harrison 
1236c61e211dSHarvey Harrison 	tsk = current;
1237c61e211dSHarvey Harrison 	mm = tsk->mm;
12382d4a7167SIngo Molnar 
12392d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1240e00b12e6SPeter Zijlstra 	if (unlikely(kprobes_fault(regs)))
12419be260a6SMasami Hiramatsu 		return;
1242e00b12e6SPeter Zijlstra 
12435b0c2cacSDave Hansen 	/*
12445b0c2cacSDave Hansen 	 * Reserved bits are never expected to be set on
12455b0c2cacSDave Hansen 	 * entries in the user portion of the page tables.
12465b0c2cacSDave Hansen 	 */
1247164477c2SDave Hansen 	if (unlikely(hw_error_code & X86_PF_RSVD))
1248164477c2SDave Hansen 		pgtable_bad(regs, hw_error_code, address);
1249e00b12e6SPeter Zijlstra 
12505b0c2cacSDave Hansen 	/*
1251e50928d7SAndy Lutomirski 	 * If SMAP is on, check for invalid kernel (supervisor) access to user
1252e50928d7SAndy Lutomirski 	 * pages in the user address space.  The odd case here is WRUSS,
1253e50928d7SAndy Lutomirski 	 * which, according to the preliminary documentation, does not respect
1254e50928d7SAndy Lutomirski 	 * SMAP and will have the USER bit set, so in all cases SMAP
1255e50928d7SAndy Lutomirski 	 * enforcement appears to be consistent with the USER bit.
12565b0c2cacSDave Hansen 	 */
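	/*
	 * That is: a supervisor-mode access (USER clear) with EFLAGS.AC
	 * clear.  Under SMAP, kernel touches of user memory are only
	 * legal inside stac()/clac() sections such as the user-copy
	 * routines.
	 */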
1257a15781b5SAndy Lutomirski 	if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
1258a15781b5SAndy Lutomirski 		     !(hw_error_code & X86_PF_USER) &&
1259e50928d7SAndy Lutomirski 		     !(regs->flags & X86_EFLAGS_AC)))
1260a15781b5SAndy Lutomirski 	{
1261ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1262e00b12e6SPeter Zijlstra 		return;
1263e00b12e6SPeter Zijlstra 	}
1264e00b12e6SPeter Zijlstra 
1265e00b12e6SPeter Zijlstra 	/*
1266e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
126770ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled, then we must not take the fault.
1268e00b12e6SPeter Zijlstra 	 */
126970ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1270ba9f6f89SLinus Torvalds 		bad_area_nosemaphore(regs, hw_error_code, address);
1271e00b12e6SPeter Zijlstra 		return;
1272e00b12e6SPeter Zijlstra 	}
1273e00b12e6SPeter Zijlstra 
1274c61e211dSHarvey Harrison 	/*
1275891cffbdSLinus Torvalds 	 * It's safe to allow IRQs after CR2 has been saved and the
1276891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1277891cffbdSLinus Torvalds 	 *
1278891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
12792d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1280c61e211dSHarvey Harrison 	 */
1281f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1282891cffbdSLinus Torvalds 		local_irq_enable();
1283759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
12842d4a7167SIngo Molnar 	} else {
12852d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1286c61e211dSHarvey Harrison 			local_irq_enable();
12872d4a7167SIngo Molnar 	}
1288c61e211dSHarvey Harrison 
1289a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
12907dd1fcc2SPeter Zijlstra 
12910ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_WRITE)
1292759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
12930ed32f1aSAndy Lutomirski 	if (hw_error_code & X86_PF_INSTR)
1294d61172b4SDave Hansen 		flags |= FAULT_FLAG_INSTRUCTION;
1295759496baSJohannes Weiner 
12963ae0ad92SDave Hansen #ifdef CONFIG_X86_64
12973a1dfe6eSIngo Molnar 	/*
12983ae0ad92SDave Hansen 	 * Instruction fetch faults in the vsyscall page might need
12993ae0ad92SDave Hansen 	 * emulation.  The vsyscall page is at a high address
13003ae0ad92SDave Hansen 	 * (>PAGE_OFFSET), but is considered to be part of the user
13013ae0ad92SDave Hansen 	 * address space.
1302c61e211dSHarvey Harrison 	 *
13033ae0ad92SDave Hansen 	 * The vsyscall page does not have a "real" VMA, so do this
13043ae0ad92SDave Hansen 	 * emulation before we go searching for VMAs.
13053ae0ad92SDave Hansen 	 */
13060ed32f1aSAndy Lutomirski 	if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
13073ae0ad92SDave Hansen 		if (emulate_vsyscall(regs, address))
13083ae0ad92SDave Hansen 			return;
13093ae0ad92SDave Hansen 	}
13103ae0ad92SDave Hansen #endif
13113ae0ad92SDave Hansen 
1312c61e211dSHarvey Harrison 	/*
131388259744SDave Hansen 	 * Kernel-mode access to the user address space should only occur
131488259744SDave Hansen 	 * on well-defined single instructions listed in the exception
131588259744SDave Hansen 	 * tables.  But an erroneous kernel fault occurring outside one of
131688259744SDave Hansen 	 * those areas, in code that already holds mmap_sem, might deadlock
131788259744SDave Hansen 	 * attempting to validate the fault against the address space.
1318c61e211dSHarvey Harrison 	 *
131988259744SDave Hansen 	 * Only do the expensive exception table search when we might be at
132088259744SDave Hansen 	 * risk of a deadlock.  This happens if:
132188259744SDave Hansen 	 * 1. we failed to acquire mmap_sem, and
13226344be60SAndy Lutomirski 	 * 2. the access did not originate in userspace.
1323c61e211dSHarvey Harrison 	 */
132492181f19SNick Piggin 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
13256344be60SAndy Lutomirski 		if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
132688259744SDave Hansen 			/*
132788259744SDave Hansen 			 * Fault from code in kernel from
132888259744SDave Hansen 			 * which we do not expect faults.
132988259744SDave Hansen 			 */
13300ed32f1aSAndy Lutomirski 			bad_area_nosemaphore(regs, hw_error_code, address);
133192181f19SNick Piggin 			return;
133292181f19SNick Piggin 		}
1333d065bd81SMichel Lespinasse retry:
1334c61e211dSHarvey Harrison 		down_read(&mm->mmap_sem);
133501006074SPeter Zijlstra 	} else {
133601006074SPeter Zijlstra 		/*
13372d4a7167SIngo Molnar 		 * The above down_read_trylock() might have succeeded in
13382d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
13392d4a7167SIngo Molnar 		 * down_read():
134001006074SPeter Zijlstra 		 */
134101006074SPeter Zijlstra 		might_sleep();
1342c61e211dSHarvey Harrison 	}
1343c61e211dSHarvey Harrison 
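	/*
	 * Find the VMA covering the faulting address.  An address just
	 * below vma->vm_start can still be valid when the VMA is a
	 * downward-growing stack that expand_stack() can extend.
	 */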
1344c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
134592181f19SNick Piggin 	if (unlikely(!vma)) {
13460ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
134792181f19SNick Piggin 		return;
134892181f19SNick Piggin 	}
134992181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1350c61e211dSHarvey Harrison 		goto good_area;
135192181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
13520ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
135392181f19SNick Piggin 		return;
135492181f19SNick Piggin 	}
135592181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
13560ed32f1aSAndy Lutomirski 		bad_area(regs, hw_error_code, address);
135792181f19SNick Piggin 		return;
135892181f19SNick Piggin 	}
135992181f19SNick Piggin 
1360c61e211dSHarvey Harrison 	/*
1361c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1362c61e211dSHarvey Harrison 	 * we can handle it.
1363c61e211dSHarvey Harrison 	 */
1364c61e211dSHarvey Harrison good_area:
13650ed32f1aSAndy Lutomirski 	if (unlikely(access_error(hw_error_code, vma))) {
13660ed32f1aSAndy Lutomirski 		bad_area_access_error(regs, hw_error_code, address, vma);
136792181f19SNick Piggin 		return;
1368c61e211dSHarvey Harrison 	}
1369c61e211dSHarvey Harrison 
1370c61e211dSHarvey Harrison 	/*
1371c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1372c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
13739a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
13749a95f3cfSPaul Cassella 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1375cb0631fdSVlastimil Babka 	 *
1376cb0631fdSVlastimil Babka 	 * Note that handle_userfault() may also release and reacquire mmap_sem
1377cb0631fdSVlastimil Babka 	 * (and not return with VM_FAULT_RETRY), when returning to userland to
1378cb0631fdSVlastimil Babka 	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1379cb0631fdSVlastimil Babka 	 * (potentially after handling any pending signal during the return to
1380cb0631fdSVlastimil Babka 	 * userland). The return to userland is identified whenever
1381cb0631fdSVlastimil Babka 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1382c61e211dSHarvey Harrison 	 */
1383dcddffd4SKirill A. Shutemov 	fault = handle_mm_fault(vma, address, flags);
138426178ec1SLinus Torvalds 	major |= fault & VM_FAULT_MAJOR;
13852d4a7167SIngo Molnar 
13863a13c4d7SJohannes Weiner 	/*
138726178ec1SLinus Torvalds 	 * If we need to retry, the mmap_sem has already been released,
138826178ec1SLinus Torvalds 	 * and if there is a fatal signal pending, there is no guarantee
138926178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
13903a13c4d7SJohannes Weiner 	 */
139126178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_RETRY)) {
139226178ec1SLinus Torvalds 		/* Retry at most once */
139326178ec1SLinus Torvalds 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
139426178ec1SLinus Torvalds 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
139526178ec1SLinus Torvalds 			flags |= FAULT_FLAG_TRIED;
139626178ec1SLinus Torvalds 			if (!fatal_signal_pending(tsk))
139726178ec1SLinus Torvalds 				goto retry;
139826178ec1SLinus Torvalds 		}
139926178ec1SLinus Torvalds 
140026178ec1SLinus Torvalds 		/* User mode? Just return to handle the fatal exception */
1401cf3c0a15SLinus Torvalds 		if (flags & FAULT_FLAG_USER)
14023a13c4d7SJohannes Weiner 			return;
14033a13c4d7SJohannes Weiner 
140426178ec1SLinus Torvalds 		/* Not returning to user mode? Handle exceptions or die: */
14050ed32f1aSAndy Lutomirski 		no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
140626178ec1SLinus Torvalds 		return;
140726178ec1SLinus Torvalds 	}
140826178ec1SLinus Torvalds 
14097fb08ecaSLinus Torvalds 	up_read(&mm->mmap_sem);
141026178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
14110ed32f1aSAndy Lutomirski 		mm_fault_error(regs, hw_error_code, address, fault);
141237b23e05SKOSAKI Motohiro 		return;
141337b23e05SKOSAKI Motohiro 	}
141437b23e05SKOSAKI Motohiro 
141537b23e05SKOSAKI Motohiro 	/*
141626178ec1SLinus Torvalds 	 * Major/minor page fault accounting. If any of the events
141726178ec1SLinus Torvalds 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1418d065bd81SMichel Lespinasse 	 */
141926178ec1SLinus Torvalds 	if (major) {
1420c61e211dSHarvey Harrison 		tsk->maj_flt++;
142126178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1422ac17dc8eSPeter Zijlstra 	} else {
1423c61e211dSHarvey Harrison 		tsk->min_flt++;
142426178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1425d065bd81SMichel Lespinasse 	}
1426c61e211dSHarvey Harrison 
14278c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1428c61e211dSHarvey Harrison }
1429aa37c51bSDave Hansen NOKPROBE_SYMBOL(do_user_addr_fault);
1430aa37c51bSDave Hansen 
1431aa37c51bSDave Hansen /*
1432aa37c51bSDave Hansen  * This routine handles page faults.  It determines the faulting
1433aa37c51bSDave Hansen  * address and the nature of the problem, then passes it off to the
1434aa37c51bSDave Hansen  * appropriate routine.
1435aa37c51bSDave Hansen  */
1436aa37c51bSDave Hansen static noinline void
1437aa37c51bSDave Hansen __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
1438aa37c51bSDave Hansen 		unsigned long address)
1439aa37c51bSDave Hansen {
1440aa37c51bSDave Hansen 	prefetchw(&current->mm->mmap_sem);
1441aa37c51bSDave Hansen 
1442aa37c51bSDave Hansen 	if (unlikely(kmmio_fault(regs, address)))
1443aa37c51bSDave Hansen 		return;
1444aa37c51bSDave Hansen 
1445aa37c51bSDave Hansen 	/* Was the fault on kernel-controlled part of the address space? */
1446aa37c51bSDave Hansen 	if (unlikely(fault_in_kernel_space(address)))
1447aa37c51bSDave Hansen 		do_kern_addr_fault(regs, hw_error_code, address);
1448aa37c51bSDave Hansen 	else
1449aa37c51bSDave Hansen 		do_user_addr_fault(regs, hw_error_code, address);
1450aa37c51bSDave Hansen }
14519326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault);
14526ba3c97aSFrederic Weisbecker 
14539326638cSMasami Hiramatsu static nokprobe_inline void
14549326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1455d34603b0SSeiji Aguchi 			 unsigned long error_code)
1456d34603b0SSeiji Aguchi {
1457d34603b0SSeiji Aguchi 	if (user_mode(regs))
1458d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1459d34603b0SSeiji Aguchi 	else
1460d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1461d34603b0SSeiji Aguchi }
1462d34603b0SSeiji Aguchi 
14630ac09f9fSJiri Olsa /*
146411a7ffb0SThomas Gleixner  * This function must be blacklisted from kprobes, tagged with notrace,
146511a7ffb0SThomas Gleixner  * and must call read_cr2() before anything else, so that no tracing
146611a7ffb0SThomas Gleixner  * machinery runs before we've observed the CR2 value.
146711a7ffb0SThomas Gleixner  *
146811a7ffb0SThomas Gleixner  * exception_{enter,exit}() contains all sorts of tracepoints.
14690ac09f9fSJiri Olsa  */
147011a7ffb0SThomas Gleixner dotraplinkage void notrace
147111a7ffb0SThomas Gleixner do_page_fault(struct pt_regs *regs, unsigned long error_code)
147211a7ffb0SThomas Gleixner {
147311a7ffb0SThomas Gleixner 	unsigned long address = read_cr2(); /* Get the faulting address */
1474d4078e23SPeter Zijlstra 	enum ctx_state prev_state;
147525c74b10SSeiji Aguchi 
147625c74b10SSeiji Aguchi 	prev_state = exception_enter();
147780954747SThomas Gleixner 	if (trace_pagefault_enabled())
1478d4078e23SPeter Zijlstra 		trace_page_fault_entries(address, regs, error_code);
147911a7ffb0SThomas Gleixner 
14800ac09f9fSJiri Olsa 	__do_page_fault(regs, error_code, address);
148125c74b10SSeiji Aguchi 	exception_exit(prev_state);
148225c74b10SSeiji Aguchi }
148311a7ffb0SThomas Gleixner NOKPROBE_SYMBOL(do_page_fault);
1484