xref: /openbmc/linux/arch/x86/mm/fault.c (revision b3ecd51559ae7a8f40b10443773b9cd0e6a50f5e)
1c61e211dSHarvey Harrison /*
2c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
3c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
5c61e211dSHarvey Harrison  */
6a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
7a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
8a2bcd473SIngo Molnar #include <linux/module.h>		/* search_exception_table	*/
9a2bcd473SIngo Molnar #include <linux/bootmem.h>		/* max_low_pfn			*/
109326638cSMasami Hiramatsu #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
11a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
12cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
13f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
14268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
1556dd9470SFrederic Weisbecker #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
1670ffdb93SDavid Hildenbrand #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
17c61e211dSHarvey Harrison 
18a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
19a2bcd473SIngo Molnar #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
20f8561296SVegard Nossum #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
21f40c3300SAndy Lutomirski #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
22f40c3300SAndy Lutomirski #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
23ba3e127eSBrian Gerst #include <asm/vm86.h>			/* struct vm86			*/
24c61e211dSHarvey Harrison 
25d34603b0SSeiji Aguchi #define CREATE_TRACE_POINTS
26d34603b0SSeiji Aguchi #include <asm/trace/exceptions.h>
27d34603b0SSeiji Aguchi 
28c61e211dSHarvey Harrison /*
292d4a7167SIngo Molnar  * Page fault error code bits:
302d4a7167SIngo Molnar  *
312d4a7167SIngo Molnar  *   bit 0 ==	 0: no page found	1: protection fault
322d4a7167SIngo Molnar  *   bit 1 ==	 0: read access		1: write access
332d4a7167SIngo Molnar  *   bit 2 ==	 0: kernel-mode access	1: user-mode access
342d4a7167SIngo Molnar  *   bit 3 ==				1: use of reserved bit detected
352d4a7167SIngo Molnar  *   bit 4 ==				1: fault was an instruction fetch
36*b3ecd515SDave Hansen  *   bit 5 ==				1: protection keys block access
37c61e211dSHarvey Harrison  */
382d4a7167SIngo Molnar enum x86_pf_error_code {
392d4a7167SIngo Molnar 
402d4a7167SIngo Molnar 	PF_PROT		=		1 << 0,
412d4a7167SIngo Molnar 	PF_WRITE	=		1 << 1,
422d4a7167SIngo Molnar 	PF_USER		=		1 << 2,
432d4a7167SIngo Molnar 	PF_RSVD		=		1 << 3,
442d4a7167SIngo Molnar 	PF_INSTR	=		1 << 4,
45*b3ecd515SDave Hansen 	PF_PK		=		1 << 5,
462d4a7167SIngo Molnar };
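
/*
 * For illustration, two common error_code values decoded with the bits
 * above (example values only, not an exhaustive list):
 *
 *   0x6 (PF_WRITE|PF_USER):	user-mode write to a not-present page,
 *				e.g. the first touch of a freshly
 *				mmap()ed anonymous page.
 *
 *   0x7 (PF_PROT|PF_WRITE|PF_USER): user-mode write to a present but
 *				write-protected page, e.g. a
 *				copy-on-write fault.
 */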
47c61e211dSHarvey Harrison 
48b814d41fSIngo Molnar /*
49b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
50b319eed0SIngo Molnar  * handled by mmiotrace:
51b814d41fSIngo Molnar  */
529326638cSMasami Hiramatsu static nokprobe_inline int
5362c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
5486069782SPekka Paalanen {
550fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
560fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
570fd0e3daSPekka Paalanen 			return -1;
580fd0e3daSPekka Paalanen 	return 0;
5986069782SPekka Paalanen }
6086069782SPekka Paalanen 
619326638cSMasami Hiramatsu static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
62c61e211dSHarvey Harrison {
63c61e211dSHarvey Harrison 	int ret = 0;
64c61e211dSHarvey Harrison 
65c61e211dSHarvey Harrison 	/* kprobe_running() needs smp_processor_id() */
66f39b6f0eSAndy Lutomirski 	if (kprobes_built_in() && !user_mode(regs)) {
67c61e211dSHarvey Harrison 		preempt_disable();
68c61e211dSHarvey Harrison 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
69c61e211dSHarvey Harrison 			ret = 1;
70c61e211dSHarvey Harrison 		preempt_enable();
71c61e211dSHarvey Harrison 	}
72c61e211dSHarvey Harrison 
73c61e211dSHarvey Harrison 	return ret;
74c61e211dSHarvey Harrison }
75c61e211dSHarvey Harrison 
76c61e211dSHarvey Harrison /*
772d4a7167SIngo Molnar  * Prefetch quirks:
782d4a7167SIngo Molnar  *
792d4a7167SIngo Molnar  * 32-bit mode:
802d4a7167SIngo Molnar  *
81c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
82c61e211dSHarvey Harrison  *   Check that here and ignore it.
83c61e211dSHarvey Harrison  *
842d4a7167SIngo Molnar  * 64-bit mode:
852d4a7167SIngo Molnar  *
86c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
87c61e211dSHarvey Harrison  *   Check that here and ignore it.
88c61e211dSHarvey Harrison  *
892d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
90c61e211dSHarvey Harrison  */
91107a0367SIngo Molnar static inline int
92107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
93107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
94c61e211dSHarvey Harrison {
95107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
96107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
97c61e211dSHarvey Harrison 
98c61e211dSHarvey Harrison 	switch (instr_hi) {
99c61e211dSHarvey Harrison 	case 0x20:
100c61e211dSHarvey Harrison 	case 0x30:
101c61e211dSHarvey Harrison 		/*
102c61e211dSHarvey Harrison 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
103c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
104c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present, so
105c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway.
106c61e211dSHarvey Harrison 		 */
107107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
108c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
109c61e211dSHarvey Harrison 	case 0x40:
110c61e211dSHarvey Harrison 		/*
111c61e211dSHarvey Harrison 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
112c61e211dSHarvey Harrison 		 * We need to figure out under what mode the
113c61e211dSHarvey Harrison 		 * instruction was issued. Could check the LDT for lm,
114c61e211dSHarvey Harrison 		 * but for now it's good enough to assume that long
115c61e211dSHarvey Harrison 		 * mode only uses well-known segments or the kernel.
116c61e211dSHarvey Harrison 		 */
117318f5a2aSAndy Lutomirski 		return (!user_mode(regs) || user_64bit_mode(regs));
118c61e211dSHarvey Harrison #endif
119c61e211dSHarvey Harrison 	case 0x60:
120c61e211dSHarvey Harrison 		/* 0x64 through 0x67 are valid prefixes in all modes. */
121107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
122c61e211dSHarvey Harrison 	case 0xF0:
123c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
124107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
125c61e211dSHarvey Harrison 	case 0x00:
126c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
127107a0367SIngo Molnar 		if (probe_kernel_address(instr, opcode))
128107a0367SIngo Molnar 			return 0;
129107a0367SIngo Molnar 
130107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
131107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
132107a0367SIngo Molnar 		return 0;
133107a0367SIngo Molnar 	default:
134107a0367SIngo Molnar 		return 0;
135107a0367SIngo Molnar 	}
136107a0367SIngo Molnar }
137107a0367SIngo Molnar 
138107a0367SIngo Molnar static int
139107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
140107a0367SIngo Molnar {
141107a0367SIngo Molnar 	unsigned char *max_instr;
142107a0367SIngo Molnar 	unsigned char *instr;
143107a0367SIngo Molnar 	int prefetch = 0;
144107a0367SIngo Molnar 
145107a0367SIngo Molnar 	/*
146107a0367SIngo Molnar 	 * If it was an exec (instruction fetch) fault on an NX page, then
147107a0367SIngo Molnar 	 * do not ignore the fault:
148107a0367SIngo Molnar 	 */
149107a0367SIngo Molnar 	if (error_code & PF_INSTR)
150107a0367SIngo Molnar 		return 0;
151107a0367SIngo Molnar 
152107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
153107a0367SIngo Molnar 	max_instr = instr + 15;
154107a0367SIngo Molnar 
155d31bf07fSAndy Lutomirski 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
156107a0367SIngo Molnar 		return 0;
157107a0367SIngo Molnar 
158107a0367SIngo Molnar 	while (instr < max_instr) {
159107a0367SIngo Molnar 		unsigned char opcode;
160c61e211dSHarvey Harrison 
161c61e211dSHarvey Harrison 		if (probe_kernel_address(instr, opcode))
162c61e211dSHarvey Harrison 			break;
163107a0367SIngo Molnar 
164107a0367SIngo Molnar 		instr++;
165107a0367SIngo Molnar 
166107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
167c61e211dSHarvey Harrison 			break;
168c61e211dSHarvey Harrison 	}
169c61e211dSHarvey Harrison 	return prefetch;
170c61e211dSHarvey Harrison }
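
/*
 * For illustration: on an affected CPU a "prefetchnta (%rax)" (opcode
 * 0x0F 0x18) executed with a junk pointer in %rax may raise a bogus #PF.
 * is_prefetch() reads the faulting instruction, skips any legal prefix
 * bytes via check_prefetch_opcode(), recognizes the 0x0F 0x0D/0x18 opcode
 * and reports the fault as one that can be silently ignored.
 */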
171c61e211dSHarvey Harrison 
1722d4a7167SIngo Molnar static void
1732d4a7167SIngo Molnar force_sig_info_fault(int si_signo, int si_code, unsigned long address,
174f672b49bSAndi Kleen 		     struct task_struct *tsk, int fault)
175c61e211dSHarvey Harrison {
176f672b49bSAndi Kleen 	unsigned lsb = 0;
177c61e211dSHarvey Harrison 	siginfo_t info;
178c61e211dSHarvey Harrison 
179c61e211dSHarvey Harrison 	info.si_signo	= si_signo;
180c61e211dSHarvey Harrison 	info.si_errno	= 0;
181c61e211dSHarvey Harrison 	info.si_code	= si_code;
182c61e211dSHarvey Harrison 	info.si_addr	= (void __user *)address;
183f672b49bSAndi Kleen 	if (fault & VM_FAULT_HWPOISON_LARGE)
184f672b49bSAndi Kleen 		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
185f672b49bSAndi Kleen 	if (fault & VM_FAULT_HWPOISON)
186f672b49bSAndi Kleen 		lsb = PAGE_SHIFT;
187f672b49bSAndi Kleen 	info.si_addr_lsb = lsb;
1882d4a7167SIngo Molnar 
189c61e211dSHarvey Harrison 	force_sig_info(si_signo, &info, tsk);
190c61e211dSHarvey Harrison }
191c61e211dSHarvey Harrison 
192f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
193f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1942d4a7167SIngo Molnar 
195f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
196f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
197f2f13a85SIngo Molnar {
198f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
199f2f13a85SIngo Molnar 	pgd_t *pgd_k;
200f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
201f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
202f2f13a85SIngo Molnar 
203f2f13a85SIngo Molnar 	pgd += index;
204f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
205f2f13a85SIngo Molnar 
206f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
207f2f13a85SIngo Molnar 		return NULL;
208f2f13a85SIngo Molnar 
209f2f13a85SIngo Molnar 	/*
210f2f13a85SIngo Molnar 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
211f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() on non-PAE. As would
212f2f13a85SIngo Molnar 	 * set_pud.
213f2f13a85SIngo Molnar 	 */
214f2f13a85SIngo Molnar 	pud = pud_offset(pgd, address);
215f2f13a85SIngo Molnar 	pud_k = pud_offset(pgd_k, address);
216f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
217f2f13a85SIngo Molnar 		return NULL;
218f2f13a85SIngo Molnar 
219f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
220f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
221f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
222f2f13a85SIngo Molnar 		return NULL;
223f2f13a85SIngo Molnar 
224b8bcfe99SJeremy Fitzhardinge 	if (!pmd_present(*pmd))
225f2f13a85SIngo Molnar 		set_pmd(pmd, *pmd_k);
226b8bcfe99SJeremy Fitzhardinge 	else
227f2f13a85SIngo Molnar 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
228f2f13a85SIngo Molnar 
229f2f13a85SIngo Molnar 	return pmd_k;
230f2f13a85SIngo Molnar }
231f2f13a85SIngo Molnar 
232f2f13a85SIngo Molnar void vmalloc_sync_all(void)
233f2f13a85SIngo Molnar {
234f2f13a85SIngo Molnar 	unsigned long address;
235f2f13a85SIngo Molnar 
236f2f13a85SIngo Molnar 	if (SHARED_KERNEL_PMD)
237f2f13a85SIngo Molnar 		return;
238f2f13a85SIngo Molnar 
239f2f13a85SIngo Molnar 	for (address = VMALLOC_START & PMD_MASK;
240f2f13a85SIngo Molnar 	     address >= TASK_SIZE && address < FIXADDR_TOP;
241f2f13a85SIngo Molnar 	     address += PMD_SIZE) {
242f2f13a85SIngo Molnar 		struct page *page;
243f2f13a85SIngo Molnar 
244a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
245f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
246617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
247f01f7c56SBorislav Petkov 			pmd_t *ret;
248617d34d9SJeremy Fitzhardinge 
249a79e53d8SAndrea Arcangeli 			/* the pgt_lock only for Xen */
250617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
251617d34d9SJeremy Fitzhardinge 
252617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
253617d34d9SJeremy Fitzhardinge 			ret = vmalloc_sync_one(page_address(page), address);
254617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
255617d34d9SJeremy Fitzhardinge 
256617d34d9SJeremy Fitzhardinge 			if (!ret)
257f2f13a85SIngo Molnar 				break;
258f2f13a85SIngo Molnar 		}
259a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
260f2f13a85SIngo Molnar 	}
261f2f13a85SIngo Molnar }
262f2f13a85SIngo Molnar 
263f2f13a85SIngo Molnar /*
264f2f13a85SIngo Molnar  * 32-bit:
265f2f13a85SIngo Molnar  *
266f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc or module mapping area
267f2f13a85SIngo Molnar  */
2689326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
269f2f13a85SIngo Molnar {
270f2f13a85SIngo Molnar 	unsigned long pgd_paddr;
271f2f13a85SIngo Molnar 	pmd_t *pmd_k;
272f2f13a85SIngo Molnar 	pte_t *pte_k;
273f2f13a85SIngo Molnar 
274f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
275f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
276f2f13a85SIngo Molnar 		return -1;
277f2f13a85SIngo Molnar 
278ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
279ebc8827fSFrederic Weisbecker 
280f2f13a85SIngo Molnar 	/*
281f2f13a85SIngo Molnar 	 * Synchronize this task's top level page-table
282f2f13a85SIngo Molnar 	 * with the 'reference' page table.
283f2f13a85SIngo Molnar 	 *
284f2f13a85SIngo Molnar 	 * Do _not_ use "current" here. We might be inside
285f2f13a85SIngo Molnar 	 * an interrupt in the middle of a task switch.
286f2f13a85SIngo Molnar 	 */
287f2f13a85SIngo Molnar 	pgd_paddr = read_cr3();
288f2f13a85SIngo Molnar 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
289f2f13a85SIngo Molnar 	if (!pmd_k)
290f2f13a85SIngo Molnar 		return -1;
291f2f13a85SIngo Molnar 
292f2f13a85SIngo Molnar 	pte_k = pte_offset_kernel(pmd_k, address);
293f2f13a85SIngo Molnar 	if (!pte_present(*pte_k))
294f2f13a85SIngo Molnar 		return -1;
295f2f13a85SIngo Molnar 
296f2f13a85SIngo Molnar 	return 0;
297f2f13a85SIngo Molnar }
2989326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
299f2f13a85SIngo Molnar 
300f2f13a85SIngo Molnar /*
301f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
302f2f13a85SIngo Molnar  */
303f2f13a85SIngo Molnar static inline void
304f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
305f2f13a85SIngo Molnar 		 struct task_struct *tsk)
306f2f13a85SIngo Molnar {
3079fda6a06SBrian Gerst #ifdef CONFIG_VM86
308f2f13a85SIngo Molnar 	unsigned long bit;
309f2f13a85SIngo Molnar 
3109fda6a06SBrian Gerst 	if (!v8086_mode(regs) || !tsk->thread.vm86)
311f2f13a85SIngo Molnar 		return;
312f2f13a85SIngo Molnar 
313f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
314f2f13a85SIngo Molnar 	if (bit < 32)
3159fda6a06SBrian Gerst 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
3169fda6a06SBrian Gerst #endif
317f2f13a85SIngo Molnar }
318c61e211dSHarvey Harrison 
319087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
320087975b0SAkinobu Mita {
321087975b0SAkinobu Mita 	return pfn < max_low_pfn;
322087975b0SAkinobu Mita }
323087975b0SAkinobu Mita 
324cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
325c61e211dSHarvey Harrison {
326087975b0SAkinobu Mita 	pgd_t *base = __va(read_cr3());
327087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
328087975b0SAkinobu Mita 	pmd_t *pmd;
329087975b0SAkinobu Mita 	pte_t *pte;
3302d4a7167SIngo Molnar 
331c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
332087975b0SAkinobu Mita 	printk("*pdpt = %016Lx ", pgd_val(*pgd));
333087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
334087975b0SAkinobu Mita 		goto out;
335c61e211dSHarvey Harrison #endif
336087975b0SAkinobu Mita 	pmd = pmd_offset(pud_offset(pgd, address), address);
337087975b0SAkinobu Mita 	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
338c61e211dSHarvey Harrison 
339c61e211dSHarvey Harrison 	/*
340c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
341c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
342c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
3432d4a7167SIngo Molnar 	 * it's allocated already:
344c61e211dSHarvey Harrison 	 */
345087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
346087975b0SAkinobu Mita 		goto out;
3472d4a7167SIngo Molnar 
348087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
349087975b0SAkinobu Mita 	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
350087975b0SAkinobu Mita out:
351c61e211dSHarvey Harrison 	printk("\n");
352f2f13a85SIngo Molnar }
353f2f13a85SIngo Molnar 
354f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
355f2f13a85SIngo Molnar 
356f2f13a85SIngo Molnar void vmalloc_sync_all(void)
357f2f13a85SIngo Molnar {
3589661d5bcSYasuaki Ishimatsu 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
359f2f13a85SIngo Molnar }
360f2f13a85SIngo Molnar 
361f2f13a85SIngo Molnar /*
362f2f13a85SIngo Molnar  * 64-bit:
363f2f13a85SIngo Molnar  *
364f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc area
365f2f13a85SIngo Molnar  *
366f2f13a85SIngo Molnar  * This assumes no large pages in there.
367f2f13a85SIngo Molnar  */
3689326638cSMasami Hiramatsu static noinline int vmalloc_fault(unsigned long address)
369f2f13a85SIngo Molnar {
370f2f13a85SIngo Molnar 	pgd_t *pgd, *pgd_ref;
371f2f13a85SIngo Molnar 	pud_t *pud, *pud_ref;
372f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_ref;
373f2f13a85SIngo Molnar 	pte_t *pte, *pte_ref;
374f2f13a85SIngo Molnar 
375f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
376f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
377f2f13a85SIngo Molnar 		return -1;
378f2f13a85SIngo Molnar 
379ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
380ebc8827fSFrederic Weisbecker 
381f2f13a85SIngo Molnar 	/*
382f2f13a85SIngo Molnar 	 * Copy kernel mappings over when needed. This can also
383f2f13a85SIngo Molnar 	 * happen due to a race during a page table update. In the latter
384f2f13a85SIngo Molnar 	 * case, just flush:
385f2f13a85SIngo Molnar 	 */
386f2f13a85SIngo Molnar 	pgd = pgd_offset(current->active_mm, address);
387f2f13a85SIngo Molnar 	pgd_ref = pgd_offset_k(address);
388f2f13a85SIngo Molnar 	if (pgd_none(*pgd_ref))
389f2f13a85SIngo Molnar 		return -1;
390f2f13a85SIngo Molnar 
3911160c277SSamu Kallio 	if (pgd_none(*pgd)) {
392f2f13a85SIngo Molnar 		set_pgd(pgd, *pgd_ref);
3931160c277SSamu Kallio 		arch_flush_lazy_mmu_mode();
3941160c277SSamu Kallio 	} else {
395f2f13a85SIngo Molnar 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
3961160c277SSamu Kallio 	}
397f2f13a85SIngo Molnar 
398f2f13a85SIngo Molnar 	/*
399f2f13a85SIngo Molnar 	 * Below here mismatches are bugs because these lower tables
400f2f13a85SIngo Molnar 	 * are shared:
401f2f13a85SIngo Molnar 	 */
402f2f13a85SIngo Molnar 
403f2f13a85SIngo Molnar 	pud = pud_offset(pgd, address);
404f2f13a85SIngo Molnar 	pud_ref = pud_offset(pgd_ref, address);
405f2f13a85SIngo Molnar 	if (pud_none(*pud_ref))
406f2f13a85SIngo Molnar 		return -1;
407f2f13a85SIngo Molnar 
408f2f13a85SIngo Molnar 	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
409f2f13a85SIngo Molnar 		BUG();
410f2f13a85SIngo Molnar 
411f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
412f2f13a85SIngo Molnar 	pmd_ref = pmd_offset(pud_ref, address);
413f2f13a85SIngo Molnar 	if (pmd_none(*pmd_ref))
414f2f13a85SIngo Molnar 		return -1;
415f2f13a85SIngo Molnar 
416f2f13a85SIngo Molnar 	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
417f2f13a85SIngo Molnar 		BUG();
418f2f13a85SIngo Molnar 
419f2f13a85SIngo Molnar 	pte_ref = pte_offset_kernel(pmd_ref, address);
420f2f13a85SIngo Molnar 	if (!pte_present(*pte_ref))
421f2f13a85SIngo Molnar 		return -1;
422f2f13a85SIngo Molnar 
423f2f13a85SIngo Molnar 	pte = pte_offset_kernel(pmd, address);
424f2f13a85SIngo Molnar 
425f2f13a85SIngo Molnar 	/*
426f2f13a85SIngo Molnar 	 * Don't use pte_page here, because the mappings can point
427f2f13a85SIngo Molnar 	 * outside mem_map, and the NUMA hash lookup cannot handle
428f2f13a85SIngo Molnar 	 * that:
429f2f13a85SIngo Molnar 	 */
430f2f13a85SIngo Molnar 	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
431f2f13a85SIngo Molnar 		BUG();
432f2f13a85SIngo Molnar 
433f2f13a85SIngo Molnar 	return 0;
434f2f13a85SIngo Molnar }
4359326638cSMasami Hiramatsu NOKPROBE_SYMBOL(vmalloc_fault);
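
/*
 * For illustration: on x86-64 the kernel half of a task's pgd is normally
 * copied from init_mm when the mm's pgd is allocated.  If vmalloc() later
 * instantiates a brand-new PGD entry in init_mm, tasks created earlier are
 * missing it, and the first touch of that vmalloc address faults; the code
 * above simply copies the missing top-level entry from the reference page
 * table and returns.
 */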
436f2f13a85SIngo Molnar 
437e05139f2SJan Beulich #ifdef CONFIG_CPU_SUP_AMD
438f2f13a85SIngo Molnar static const char errata93_warning[] =
439ad361c98SJoe Perches KERN_ERR
440ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
441ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
442ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
443ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
444e05139f2SJan Beulich #endif
445f2f13a85SIngo Molnar 
446f2f13a85SIngo Molnar /*
447f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
448f2f13a85SIngo Molnar  */
449f2f13a85SIngo Molnar static inline void
450f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
451f2f13a85SIngo Molnar 		 struct task_struct *tsk)
452f2f13a85SIngo Molnar {
453f2f13a85SIngo Molnar }
454f2f13a85SIngo Molnar 
455f2f13a85SIngo Molnar static int bad_address(void *p)
456f2f13a85SIngo Molnar {
457f2f13a85SIngo Molnar 	unsigned long dummy;
458f2f13a85SIngo Molnar 
459f2f13a85SIngo Molnar 	return probe_kernel_address((unsigned long *)p, dummy);
460f2f13a85SIngo Molnar }
461f2f13a85SIngo Molnar 
462f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
463f2f13a85SIngo Molnar {
464087975b0SAkinobu Mita 	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
465087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
466c61e211dSHarvey Harrison 	pud_t *pud;
467c61e211dSHarvey Harrison 	pmd_t *pmd;
468c61e211dSHarvey Harrison 	pte_t *pte;
469c61e211dSHarvey Harrison 
4702d4a7167SIngo Molnar 	if (bad_address(pgd))
4712d4a7167SIngo Molnar 		goto bad;
4722d4a7167SIngo Molnar 
473c61e211dSHarvey Harrison 	printk("PGD %lx ", pgd_val(*pgd));
4742d4a7167SIngo Molnar 
4752d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
4762d4a7167SIngo Molnar 		goto out;
477c61e211dSHarvey Harrison 
478c61e211dSHarvey Harrison 	pud = pud_offset(pgd, address);
4792d4a7167SIngo Molnar 	if (bad_address(pud))
4802d4a7167SIngo Molnar 		goto bad;
4812d4a7167SIngo Molnar 
482c61e211dSHarvey Harrison 	printk("PUD %lx ", pud_val(*pud));
483b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
4842d4a7167SIngo Molnar 		goto out;
485c61e211dSHarvey Harrison 
486c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
4872d4a7167SIngo Molnar 	if (bad_address(pmd))
4882d4a7167SIngo Molnar 		goto bad;
4892d4a7167SIngo Molnar 
490c61e211dSHarvey Harrison 	printk("PMD %lx ", pmd_val(*pmd));
4912d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
4922d4a7167SIngo Molnar 		goto out;
493c61e211dSHarvey Harrison 
494c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
4952d4a7167SIngo Molnar 	if (bad_address(pte))
4962d4a7167SIngo Molnar 		goto bad;
4972d4a7167SIngo Molnar 
498c61e211dSHarvey Harrison 	printk("PTE %lx", pte_val(*pte));
4992d4a7167SIngo Molnar out:
500c61e211dSHarvey Harrison 	printk("\n");
501c61e211dSHarvey Harrison 	return;
502c61e211dSHarvey Harrison bad:
503c61e211dSHarvey Harrison 	printk("BAD\n");
504c61e211dSHarvey Harrison }
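
/*
 * For illustration, the output of the dump above looks something like
 *
 *	PGD 1fb4a0067 PUD 1fb375067 PMD 0
 *
 * i.e. the walk stops at the first level that is not present (or maps a
 * large page), and prints "BAD" if a table entry could not even be read.
 */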
505c61e211dSHarvey Harrison 
506f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
507c61e211dSHarvey Harrison 
5082d4a7167SIngo Molnar /*
5092d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
5102d4a7167SIngo Molnar  *
5112d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
5122d4a7167SIngo Molnar  * to avoid corruption of the 64bit RIP register on C stepping K8.
5132d4a7167SIngo Molnar  *
5142d4a7167SIngo Molnar  * A lot of BIOSes that didn't get tested properly miss this.
5152d4a7167SIngo Molnar  *
5162d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32bits of RIP cleared.
5172d4a7167SIngo Molnar  * Try to work around it here.
5182d4a7167SIngo Molnar  *
5192d4a7167SIngo Molnar  * Note we only handle faults in kernel here.
5202d4a7167SIngo Molnar  * Does nothing on 32-bit.
521c61e211dSHarvey Harrison  */
522c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
523c61e211dSHarvey Harrison {
524e05139f2SJan Beulich #if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
525e05139f2SJan Beulich 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
526e05139f2SJan Beulich 	    || boot_cpu_data.x86 != 0xf)
527e05139f2SJan Beulich 		return 0;
528e05139f2SJan Beulich 
529c61e211dSHarvey Harrison 	if (address != regs->ip)
530c61e211dSHarvey Harrison 		return 0;
5312d4a7167SIngo Molnar 
532c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
533c61e211dSHarvey Harrison 		return 0;
5342d4a7167SIngo Molnar 
535c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
536c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
537c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
538a454ab31SIngo Molnar 		printk_once(errata93_warning);
539c61e211dSHarvey Harrison 		regs->ip = address;
540c61e211dSHarvey Harrison 		return 1;
541c61e211dSHarvey Harrison 	}
542c61e211dSHarvey Harrison #endif
543c61e211dSHarvey Harrison 	return 0;
544c61e211dSHarvey Harrison }
545c61e211dSHarvey Harrison 
546c61e211dSHarvey Harrison /*
5472d4a7167SIngo Molnar  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
5482d4a7167SIngo Molnar  * to illegal addresses >4GB.
5492d4a7167SIngo Molnar  *
5502d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
5512d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
552c61e211dSHarvey Harrison  * segment in LDT is compatibility mode.
553c61e211dSHarvey Harrison  */
554c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
555c61e211dSHarvey Harrison {
556c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
5572d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
558c61e211dSHarvey Harrison 		return 1;
559c61e211dSHarvey Harrison #endif
560c61e211dSHarvey Harrison 	return 0;
561c61e211dSHarvey Harrison }
562c61e211dSHarvey Harrison 
563c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
564c61e211dSHarvey Harrison {
565c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
566c61e211dSHarvey Harrison 	unsigned long nr;
5672d4a7167SIngo Molnar 
568c61e211dSHarvey Harrison 	/*
5692d4a7167SIngo Molnar 	 * Pentium F0 0F C7 C8 bug workaround:
570c61e211dSHarvey Harrison 	 */
571e2604b49SBorislav Petkov 	if (boot_cpu_has_bug(X86_BUG_F00F)) {
572c61e211dSHarvey Harrison 		nr = (address - idt_descr.address) >> 3;
573c61e211dSHarvey Harrison 
574c61e211dSHarvey Harrison 		if (nr == 6) {
575c61e211dSHarvey Harrison 			do_invalid_op(regs, 0);
576c61e211dSHarvey Harrison 			return 1;
577c61e211dSHarvey Harrison 		}
578c61e211dSHarvey Harrison 	}
579c61e211dSHarvey Harrison #endif
580c61e211dSHarvey Harrison 	return 0;
581c61e211dSHarvey Harrison }
582c61e211dSHarvey Harrison 
5838f766149SIngo Molnar static const char nx_warning[] = KERN_CRIT
5848f766149SIngo Molnar "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
585eff50c34SJiri Kosina static const char smep_warning[] = KERN_CRIT
586eff50c34SJiri Kosina "unable to execute userspace code (SMEP?) (uid: %d)\n";
5878f766149SIngo Molnar 
5882d4a7167SIngo Molnar static void
5892d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code,
590c61e211dSHarvey Harrison 		unsigned long address)
591c61e211dSHarvey Harrison {
592c61e211dSHarvey Harrison 	if (!oops_may_print())
593c61e211dSHarvey Harrison 		return;
594c61e211dSHarvey Harrison 
595c61e211dSHarvey Harrison 	if (error_code & PF_INSTR) {
59693809be8SHarvey Harrison 		unsigned int level;
597426e34ccSMatt Fleming 		pgd_t *pgd;
598426e34ccSMatt Fleming 		pte_t *pte;
5992d4a7167SIngo Molnar 
600426e34ccSMatt Fleming 		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
601426e34ccSMatt Fleming 		pgd += pgd_index(address);
602426e34ccSMatt Fleming 
603426e34ccSMatt Fleming 		pte = lookup_address_in_pgd(pgd, address, &level);
604c61e211dSHarvey Harrison 
6058f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
606078de5f7SEric W. Biederman 			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
607eff50c34SJiri Kosina 		if (pte && pte_present(*pte) && pte_exec(*pte) &&
608eff50c34SJiri Kosina 				(pgd_flags(*pgd) & _PAGE_USER) &&
6091e02ce4cSAndy Lutomirski 				(__read_cr4() & X86_CR4_SMEP))
610eff50c34SJiri Kosina 			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
611c61e211dSHarvey Harrison 	}
612fd40d6e3SHarvey Harrison 
613c61e211dSHarvey Harrison 	printk(KERN_ALERT "BUG: unable to handle kernel ");
614c61e211dSHarvey Harrison 	if (address < PAGE_SIZE)
615c61e211dSHarvey Harrison 		printk(KERN_CONT "NULL pointer dereference");
616c61e211dSHarvey Harrison 	else
617c61e211dSHarvey Harrison 		printk(KERN_CONT "paging request");
6182d4a7167SIngo Molnar 
619f294a8ceSVegard Nossum 	printk(KERN_CONT " at %p\n", (void *) address);
620c61e211dSHarvey Harrison 	printk(KERN_ALERT "IP:");
6215f01c988SJiri Slaby 	printk_address(regs->ip);
6222d4a7167SIngo Molnar 
623c61e211dSHarvey Harrison 	dump_pagetable(address);
624c61e211dSHarvey Harrison }
625c61e211dSHarvey Harrison 
6262d4a7167SIngo Molnar static noinline void
6272d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
6282d4a7167SIngo Molnar 	    unsigned long address)
629c61e211dSHarvey Harrison {
6302d4a7167SIngo Molnar 	struct task_struct *tsk;
6312d4a7167SIngo Molnar 	unsigned long flags;
6322d4a7167SIngo Molnar 	int sig;
6332d4a7167SIngo Molnar 
6342d4a7167SIngo Molnar 	flags = oops_begin();
6352d4a7167SIngo Molnar 	tsk = current;
6362d4a7167SIngo Molnar 	sig = SIGKILL;
637c61e211dSHarvey Harrison 
638c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
63992181f19SNick Piggin 	       tsk->comm, address);
640c61e211dSHarvey Harrison 	dump_pagetable(address);
6412d4a7167SIngo Molnar 
642c61e211dSHarvey Harrison 	tsk->thread.cr2		= address;
64351e7dc70SSrikar Dronamraju 	tsk->thread.trap_nr	= X86_TRAP_PF;
644c61e211dSHarvey Harrison 	tsk->thread.error_code	= error_code;
6452d4a7167SIngo Molnar 
646c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
647874d93d1SAlexander van Heukelum 		sig = 0;
6482d4a7167SIngo Molnar 
649874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
650c61e211dSHarvey Harrison }
651c61e211dSHarvey Harrison 
6522d4a7167SIngo Molnar static noinline void
6532d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
6544fc34901SAndy Lutomirski 	   unsigned long address, int signal, int si_code)
65592181f19SNick Piggin {
65692181f19SNick Piggin 	struct task_struct *tsk = current;
65792181f19SNick Piggin 	unsigned long flags;
65892181f19SNick Piggin 	int sig;
65992181f19SNick Piggin 
66092181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
6614fc34901SAndy Lutomirski 	if (fixup_exception(regs)) {
662c026b359SPeter Zijlstra 		/*
663c026b359SPeter Zijlstra 		 * Any interrupt that takes a fault gets the fixup. This makes
664c026b359SPeter Zijlstra 		 * the below recursive fault logic only apply to faults from
665c026b359SPeter Zijlstra 		 * task context.
666c026b359SPeter Zijlstra 		 */
667c026b359SPeter Zijlstra 		if (in_interrupt())
668c026b359SPeter Zijlstra 			return;
669c026b359SPeter Zijlstra 
670c026b359SPeter Zijlstra 		/*
671c026b359SPeter Zijlstra 		 * Per the above we're !in_interrupt(), aka. task context.
672c026b359SPeter Zijlstra 		 *
673c026b359SPeter Zijlstra 		 * In this case we need to make sure we're not recursively
674c026b359SPeter Zijlstra 		 * faulting through the emulate_vsyscall() logic.
675c026b359SPeter Zijlstra 		 */
6764fc34901SAndy Lutomirski 		if (current_thread_info()->sig_on_uaccess_error && signal) {
67751e7dc70SSrikar Dronamraju 			tsk->thread.trap_nr = X86_TRAP_PF;
6784fc34901SAndy Lutomirski 			tsk->thread.error_code = error_code | PF_USER;
6794fc34901SAndy Lutomirski 			tsk->thread.cr2 = address;
6804fc34901SAndy Lutomirski 
6814fc34901SAndy Lutomirski 			/* XXX: hwpoison faults will set the wrong code. */
6824fc34901SAndy Lutomirski 			force_sig_info_fault(signal, si_code, address, tsk, 0);
6834fc34901SAndy Lutomirski 		}
684c026b359SPeter Zijlstra 
685c026b359SPeter Zijlstra 		/*
686c026b359SPeter Zijlstra 		 * Barring that, we can do the fixup and be happy.
687c026b359SPeter Zijlstra 		 */
68892181f19SNick Piggin 		return;
6894fc34901SAndy Lutomirski 	}
69092181f19SNick Piggin 
69192181f19SNick Piggin 	/*
6922d4a7167SIngo Molnar 	 * 32-bit:
6932d4a7167SIngo Molnar 	 *
69492181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
69592181f19SNick Piggin 	 *   had been triggered by is_prefetch, fixup_exception would have
69692181f19SNick Piggin 	 *   handled it.
69792181f19SNick Piggin 	 *
6982d4a7167SIngo Molnar 	 * 64-bit:
6992d4a7167SIngo Molnar 	 *
70092181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
70192181f19SNick Piggin 	 */
70292181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
70392181f19SNick Piggin 		return;
70492181f19SNick Piggin 
70592181f19SNick Piggin 	if (is_errata93(regs, address))
70692181f19SNick Piggin 		return;
70792181f19SNick Piggin 
70892181f19SNick Piggin 	/*
70992181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
7102d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
71192181f19SNick Piggin 	 */
71292181f19SNick Piggin 	flags = oops_begin();
71392181f19SNick Piggin 
71492181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
71592181f19SNick Piggin 
716a70857e4SAaron Tomlin 	if (task_stack_end_corrupted(tsk))
717b0f4c4b3SPrarit Bhargava 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
71819803078SIngo Molnar 
71992181f19SNick Piggin 	tsk->thread.cr2		= address;
72051e7dc70SSrikar Dronamraju 	tsk->thread.trap_nr	= X86_TRAP_PF;
72192181f19SNick Piggin 	tsk->thread.error_code	= error_code;
72292181f19SNick Piggin 
72392181f19SNick Piggin 	sig = SIGKILL;
72492181f19SNick Piggin 	if (__die("Oops", regs, error_code))
72592181f19SNick Piggin 		sig = 0;
7262d4a7167SIngo Molnar 
72792181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
728b0f4c4b3SPrarit Bhargava 	printk(KERN_DEFAULT "CR2: %016lx\n", address);
7292d4a7167SIngo Molnar 
73092181f19SNick Piggin 	oops_end(flags, regs, sig);
73192181f19SNick Piggin }
73292181f19SNick Piggin 
7332d4a7167SIngo Molnar /*
7342d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
7352d4a7167SIngo Molnar  * sysctl is set:
7362d4a7167SIngo Molnar  */
7372d4a7167SIngo Molnar static inline void
7382d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
7392d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
7402d4a7167SIngo Molnar {
7412d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
7422d4a7167SIngo Molnar 		return;
7432d4a7167SIngo Molnar 
7442d4a7167SIngo Molnar 	if (!printk_ratelimit())
7452d4a7167SIngo Molnar 		return;
7462d4a7167SIngo Molnar 
747a1a08d1cSRoland Dreier 	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
7482d4a7167SIngo Molnar 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
7492d4a7167SIngo Molnar 		tsk->comm, task_pid_nr(tsk), address,
7502d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
7512d4a7167SIngo Molnar 
7522d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
7532d4a7167SIngo Molnar 
7542d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
7552d4a7167SIngo Molnar }
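
/*
 * For illustration, the message printed above ends up in dmesg looking
 * something like
 *
 *	a.out[1234]: segfault at 8 ip 0000000000400510 sp 00007ffd12345678 error 6 in a.out[400000+1000]
 *
 * where "error 6" is the raw error_code (PF_WRITE|PF_USER in this case) and
 * the trailing " in ..." part comes from print_vma_addr().
 */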
7562d4a7167SIngo Molnar 
7572d4a7167SIngo Molnar static void
7582d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
7592d4a7167SIngo Molnar 		       unsigned long address, int si_code)
76092181f19SNick Piggin {
76192181f19SNick Piggin 	struct task_struct *tsk = current;
76292181f19SNick Piggin 
76392181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
76492181f19SNick Piggin 	if (error_code & PF_USER) {
76592181f19SNick Piggin 		/*
7662d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
76792181f19SNick Piggin 		 */
76892181f19SNick Piggin 		local_irq_enable();
76992181f19SNick Piggin 
77092181f19SNick Piggin 		/*
77192181f19SNick Piggin 		 * Valid to do another page fault here because this one came
7722d4a7167SIngo Molnar 		 * from user space:
77392181f19SNick Piggin 		 */
77492181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
77592181f19SNick Piggin 			return;
77692181f19SNick Piggin 
77792181f19SNick Piggin 		if (is_errata100(regs, address))
77892181f19SNick Piggin 			return;
77992181f19SNick Piggin 
7803ae36655SAndy Lutomirski #ifdef CONFIG_X86_64
7813ae36655SAndy Lutomirski 		/*
7823ae36655SAndy Lutomirski 		 * Instruction fetch faults in the vsyscall page might need
7833ae36655SAndy Lutomirski 		 * emulation.
7843ae36655SAndy Lutomirski 		 */
7853ae36655SAndy Lutomirski 		if (unlikely((error_code & PF_INSTR) &&
786f40c3300SAndy Lutomirski 			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
7873ae36655SAndy Lutomirski 			if (emulate_vsyscall(regs, address))
7883ae36655SAndy Lutomirski 				return;
7893ae36655SAndy Lutomirski 		}
7903ae36655SAndy Lutomirski #endif
791e575a86fSKees Cook 		/* Kernel addresses are always protection faults: */
792e575a86fSKees Cook 		if (address >= TASK_SIZE)
793e575a86fSKees Cook 			error_code |= PF_PROT;
7943ae36655SAndy Lutomirski 
795e575a86fSKees Cook 		if (likely(show_unhandled_signals))
7962d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
79792181f19SNick Piggin 
79892181f19SNick Piggin 		tsk->thread.cr2		= address;
799e575a86fSKees Cook 		tsk->thread.error_code	= error_code;
80051e7dc70SSrikar Dronamraju 		tsk->thread.trap_nr	= X86_TRAP_PF;
8012d4a7167SIngo Molnar 
802f672b49bSAndi Kleen 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
8032d4a7167SIngo Molnar 
80492181f19SNick Piggin 		return;
80592181f19SNick Piggin 	}
80692181f19SNick Piggin 
80792181f19SNick Piggin 	if (is_f00f_bug(regs, address))
80892181f19SNick Piggin 		return;
80992181f19SNick Piggin 
8104fc34901SAndy Lutomirski 	no_context(regs, error_code, address, SIGSEGV, si_code);
81192181f19SNick Piggin }
81292181f19SNick Piggin 
8132d4a7167SIngo Molnar static noinline void
8142d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
8152d4a7167SIngo Molnar 		     unsigned long address)
81692181f19SNick Piggin {
81792181f19SNick Piggin 	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
81892181f19SNick Piggin }
81992181f19SNick Piggin 
8202d4a7167SIngo Molnar static void
8212d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
8222d4a7167SIngo Molnar 	   unsigned long address, int si_code)
82392181f19SNick Piggin {
82492181f19SNick Piggin 	struct mm_struct *mm = current->mm;
82592181f19SNick Piggin 
82692181f19SNick Piggin 	/*
82792181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map.
82892181f19SNick Piggin 	 * Fix it, but check if it's kernel or user first.
82992181f19SNick Piggin 	 */
83092181f19SNick Piggin 	up_read(&mm->mmap_sem);
83192181f19SNick Piggin 
83292181f19SNick Piggin 	__bad_area_nosemaphore(regs, error_code, address, si_code);
83392181f19SNick Piggin }
83492181f19SNick Piggin 
8352d4a7167SIngo Molnar static noinline void
8362d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
83792181f19SNick Piggin {
83892181f19SNick Piggin 	__bad_area(regs, error_code, address, SEGV_MAPERR);
83992181f19SNick Piggin }
84092181f19SNick Piggin 
8412d4a7167SIngo Molnar static noinline void
8422d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
8432d4a7167SIngo Molnar 		      unsigned long address)
84492181f19SNick Piggin {
84592181f19SNick Piggin 	__bad_area(regs, error_code, address, SEGV_ACCERR);
84692181f19SNick Piggin }
84792181f19SNick Piggin 
8482d4a7167SIngo Molnar static void
849a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
850a6e04aa9SAndi Kleen 	  unsigned int fault)
85192181f19SNick Piggin {
85292181f19SNick Piggin 	struct task_struct *tsk = current;
853a6e04aa9SAndi Kleen 	int code = BUS_ADRERR;
85492181f19SNick Piggin 
8552d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
85696054569SLinus Torvalds 	if (!(error_code & PF_USER)) {
8574fc34901SAndy Lutomirski 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
85896054569SLinus Torvalds 		return;
85996054569SLinus Torvalds 	}
8602d4a7167SIngo Molnar 
861cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
86292181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
86392181f19SNick Piggin 		return;
8642d4a7167SIngo Molnar 
86592181f19SNick Piggin 	tsk->thread.cr2		= address;
86692181f19SNick Piggin 	tsk->thread.error_code	= error_code;
86751e7dc70SSrikar Dronamraju 	tsk->thread.trap_nr	= X86_TRAP_PF;
8682d4a7167SIngo Molnar 
869a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
870f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
871a6e04aa9SAndi Kleen 		printk(KERN_ERR
872a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
873a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
874a6e04aa9SAndi Kleen 		code = BUS_MCEERR_AR;
875a6e04aa9SAndi Kleen 	}
876a6e04aa9SAndi Kleen #endif
877f672b49bSAndi Kleen 	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
87892181f19SNick Piggin }
87992181f19SNick Piggin 
8803a13c4d7SJohannes Weiner static noinline void
8812d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
8822d4a7167SIngo Molnar 	       unsigned long address, unsigned int fault)
88392181f19SNick Piggin {
8843a13c4d7SJohannes Weiner 	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
8854fc34901SAndy Lutomirski 		no_context(regs, error_code, address, 0, 0);
8863a13c4d7SJohannes Weiner 		return;
887b80ef10eSKOSAKI Motohiro 	}
888b80ef10eSKOSAKI Motohiro 
8892d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
890f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
891f8626854SAndrey Vagin 		if (!(error_code & PF_USER)) {
8924fc34901SAndy Lutomirski 			no_context(regs, error_code, address,
8934fc34901SAndy Lutomirski 				   SIGSEGV, SEGV_MAPERR);
8943a13c4d7SJohannes Weiner 			return;
895f8626854SAndrey Vagin 		}
896f8626854SAndrey Vagin 
897c2d23f91SDavid Rientjes 		/*
898c2d23f91SDavid Rientjes 		 * We ran out of memory, call the OOM killer, and return to
899c2d23f91SDavid Rientjes 		 * userspace (which will retry the fault, or kill us if we got
900c2d23f91SDavid Rientjes 		 * oom-killed):
901c2d23f91SDavid Rientjes 		 */
902c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
9032d4a7167SIngo Molnar 	} else {
904f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
905f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
906a6e04aa9SAndi Kleen 			do_sigbus(regs, error_code, address, fault);
90733692f27SLinus Torvalds 		else if (fault & VM_FAULT_SIGSEGV)
90833692f27SLinus Torvalds 			bad_area_nosemaphore(regs, error_code, address);
90992181f19SNick Piggin 		else
91092181f19SNick Piggin 			BUG();
91192181f19SNick Piggin 	}
9122d4a7167SIngo Molnar }
91392181f19SNick Piggin 
914d8b57bb7SThomas Gleixner static int spurious_fault_check(unsigned long error_code, pte_t *pte)
915d8b57bb7SThomas Gleixner {
916d8b57bb7SThomas Gleixner 	if ((error_code & PF_WRITE) && !pte_write(*pte))
917d8b57bb7SThomas Gleixner 		return 0;
9182d4a7167SIngo Molnar 
919d8b57bb7SThomas Gleixner 	if ((error_code & PF_INSTR) && !pte_exec(*pte))
920d8b57bb7SThomas Gleixner 		return 0;
921*b3ecd515SDave Hansen 	/*
922*b3ecd515SDave Hansen 	 * Note: We do not do lazy flushing on protection key
923*b3ecd515SDave Hansen 	 * changes, so no spurious fault will ever set PF_PK.
924*b3ecd515SDave Hansen 	 */
925*b3ecd515SDave Hansen 	if ((error_code & PF_PK))
926*b3ecd515SDave Hansen 		return 1;
927d8b57bb7SThomas Gleixner 
928d8b57bb7SThomas Gleixner 	return 1;
929d8b57bb7SThomas Gleixner }
930d8b57bb7SThomas Gleixner 
931c61e211dSHarvey Harrison /*
9322d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
9332d4a7167SIngo Molnar  *
9342d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
9352d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
9362d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
9372d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
9382d4a7167SIngo Molnar  * on other processors.
9392d4a7167SIngo Molnar  *
94031668511SDavid Vrabel  * Spurious faults may only occur if the TLB contains an entry with
94131668511SDavid Vrabel  * fewer permissions than the page table entry.  Non-present (P = 0)
94231668511SDavid Vrabel  * and reserved bit (R = 1) faults are never spurious.
94331668511SDavid Vrabel  *
9445b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
9455b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
94631668511SDavid Vrabel  *
94731668511SDavid Vrabel  * Returns non-zero if a spurious fault was handled, zero otherwise.
94831668511SDavid Vrabel  *
94931668511SDavid Vrabel  * See Intel Developer's Manual Vol 3 Section 4.10.4.3, bullet 3
95031668511SDavid Vrabel  * (Optional Invalidation).
9515b727a3bSJeremy Fitzhardinge  */
9529326638cSMasami Hiramatsu static noinline int
9532d4a7167SIngo Molnar spurious_fault(unsigned long error_code, unsigned long address)
9545b727a3bSJeremy Fitzhardinge {
9555b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
9565b727a3bSJeremy Fitzhardinge 	pud_t *pud;
9575b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
9585b727a3bSJeremy Fitzhardinge 	pte_t *pte;
9593c3e5694SSteven Rostedt 	int ret;
9605b727a3bSJeremy Fitzhardinge 
96131668511SDavid Vrabel 	/*
96231668511SDavid Vrabel 	 * Only writes to RO or instruction fetches from NX may cause
96331668511SDavid Vrabel 	 * spurious faults.
96431668511SDavid Vrabel 	 *
96531668511SDavid Vrabel 	 * These could be from user or supervisor accesses but the TLB
96631668511SDavid Vrabel 	 * is only lazily flushed after a kernel mapping protection
96731668511SDavid Vrabel 	 * change, so user accesses are not expected to cause spurious
96831668511SDavid Vrabel 	 * faults.
96931668511SDavid Vrabel 	 */
97031668511SDavid Vrabel 	if (error_code != (PF_WRITE | PF_PROT)
97131668511SDavid Vrabel 	    && error_code != (PF_INSTR | PF_PROT))
9725b727a3bSJeremy Fitzhardinge 		return 0;
9735b727a3bSJeremy Fitzhardinge 
9745b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
9755b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
9765b727a3bSJeremy Fitzhardinge 		return 0;
9775b727a3bSJeremy Fitzhardinge 
9785b727a3bSJeremy Fitzhardinge 	pud = pud_offset(pgd, address);
9795b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
9805b727a3bSJeremy Fitzhardinge 		return 0;
9815b727a3bSJeremy Fitzhardinge 
982d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
983d8b57bb7SThomas Gleixner 		return spurious_fault_check(error_code, (pte_t *) pud);
984d8b57bb7SThomas Gleixner 
9855b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
9865b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
9875b727a3bSJeremy Fitzhardinge 		return 0;
9885b727a3bSJeremy Fitzhardinge 
989d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
990d8b57bb7SThomas Gleixner 		return spurious_fault_check(error_code, (pte_t *) pmd);
991d8b57bb7SThomas Gleixner 
9925b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
993954f8571SAndrea Arcangeli 	if (!pte_present(*pte))
9945b727a3bSJeremy Fitzhardinge 		return 0;
9955b727a3bSJeremy Fitzhardinge 
9963c3e5694SSteven Rostedt 	ret = spurious_fault_check(error_code, pte);
9973c3e5694SSteven Rostedt 	if (!ret)
9983c3e5694SSteven Rostedt 		return 0;
9993c3e5694SSteven Rostedt 
10003c3e5694SSteven Rostedt 	/*
10012d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
10022d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
10033c3e5694SSteven Rostedt 	 */
10043c3e5694SSteven Rostedt 	ret = spurious_fault_check(error_code, (pte_t *) pmd);
10053c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
10062d4a7167SIngo Molnar 
10073c3e5694SSteven Rostedt 	return ret;
10085b727a3bSJeremy Fitzhardinge }
10099326638cSMasami Hiramatsu NOKPROBE_SYMBOL(spurious_fault);
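
/*
 * For illustration: CPU 0 makes a kernel page writable (e.g. via
 * set_memory_rw()) and, per the lazy policy described above, skips the
 * global TLB flush.  CPU 1 still holds the stale read-only translation,
 * writes to the page and faults with error_code == (PF_PROT | PF_WRITE).
 * spurious_fault() walks init_mm, finds that the page tables do permit the
 * write, returns 1, and the faulting access is simply retried.
 */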
10105b727a3bSJeremy Fitzhardinge 
1011c61e211dSHarvey Harrison int show_unhandled_signals = 1;
1012c61e211dSHarvey Harrison 
10132d4a7167SIngo Molnar static inline int
101468da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
101592181f19SNick Piggin {
101668da336aSMichel Lespinasse 	if (error_code & PF_WRITE) {
10172d4a7167SIngo Molnar 		/* write, present and write, not present: */
101892181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
101992181f19SNick Piggin 			return 1;
10202d4a7167SIngo Molnar 		return 0;
10212d4a7167SIngo Molnar 	}
10222d4a7167SIngo Molnar 
10232d4a7167SIngo Molnar 	/* read, present: */
10242d4a7167SIngo Molnar 	if (unlikely(error_code & PF_PROT))
102592181f19SNick Piggin 		return 1;
10262d4a7167SIngo Molnar 
10272d4a7167SIngo Molnar 	/* read, not present: */
102892181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
102992181f19SNick Piggin 		return 1;
103092181f19SNick Piggin 
103192181f19SNick Piggin 	return 0;
103292181f19SNick Piggin }
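
/*
 * For illustration: a user-mode write (PF_WRITE set) into a VMA that was
 * mmap()ed with PROT_READ only fails the VM_WRITE check above, so
 * access_error() returns 1 and the fault is reported via
 * bad_area_access_error(), i.e. SIGSEGV with si_code SEGV_ACCERR.
 */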
103392181f19SNick Piggin 
10340973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
10350973a06cSHiroshi Shimamoto {
1036d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
10370973a06cSHiroshi Shimamoto }
10380973a06cSHiroshi Shimamoto 
103940d3cd66SH. Peter Anvin static inline bool smap_violation(int error_code, struct pt_regs *regs)
104040d3cd66SH. Peter Anvin {
10414640c7eeSH. Peter Anvin 	if (!IS_ENABLED(CONFIG_X86_SMAP))
10424640c7eeSH. Peter Anvin 		return false;
10434640c7eeSH. Peter Anvin 
10444640c7eeSH. Peter Anvin 	if (!static_cpu_has(X86_FEATURE_SMAP))
10454640c7eeSH. Peter Anvin 		return false;
10464640c7eeSH. Peter Anvin 
104740d3cd66SH. Peter Anvin 	if (error_code & PF_USER)
104840d3cd66SH. Peter Anvin 		return false;
104940d3cd66SH. Peter Anvin 
1050f39b6f0eSAndy Lutomirski 	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
105140d3cd66SH. Peter Anvin 		return false;
105240d3cd66SH. Peter Anvin 
105340d3cd66SH. Peter Anvin 	return true;
105440d3cd66SH. Peter Anvin }
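
/*
 * For illustration: with SMAP enabled, a plain kernel-mode dereference of a
 * user pointer (outside a uaccess region, so EFLAGS.AC is clear) reaches the
 * page fault handler as a supervisor fault on a user address.
 * smap_violation() then returns true and the handler reports the fault
 * instead of falling through to the normal VMA lookup.
 */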
105540d3cd66SH. Peter Anvin 
1056c61e211dSHarvey Harrison /*
1057c61e211dSHarvey Harrison  * This routine handles page faults.  It determines the address,
1058c61e211dSHarvey Harrison  * and the problem, and then passes it off to one of the appropriate
1059c61e211dSHarvey Harrison  * routines.
1060d4078e23SPeter Zijlstra  *
1061d4078e23SPeter Zijlstra  * This function must have noinline because both callers
1062d4078e23SPeter Zijlstra  * {,trace_}do_page_fault() have notrace on. Making this an actual function
1063d4078e23SPeter Zijlstra  * guarantees there's a function trace entry.
1064c61e211dSHarvey Harrison  */
10659326638cSMasami Hiramatsu static noinline void
10660ac09f9fSJiri Olsa __do_page_fault(struct pt_regs *regs, unsigned long error_code,
10670ac09f9fSJiri Olsa 		unsigned long address)
1068c61e211dSHarvey Harrison {
1069c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
10702d4a7167SIngo Molnar 	struct task_struct *tsk;
10712d4a7167SIngo Molnar 	struct mm_struct *mm;
107226178ec1SLinus Torvalds 	int fault, major = 0;
1073759496baSJohannes Weiner 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1074c61e211dSHarvey Harrison 
1075c61e211dSHarvey Harrison 	tsk = current;
1076c61e211dSHarvey Harrison 	mm = tsk->mm;
10772d4a7167SIngo Molnar 
1078f8561296SVegard Nossum 	/*
1079f8561296SVegard Nossum 	 * Detect and handle instructions that would cause a page fault for
1080f8561296SVegard Nossum 	 * both a tracked kernel page and a userspace page.
1081f8561296SVegard Nossum 	 */
1082f8561296SVegard Nossum 	if (kmemcheck_active(regs))
1083f8561296SVegard Nossum 		kmemcheck_hide(regs);
10845dfaf90fSIngo Molnar 	prefetchw(&mm->mmap_sem);
1085f8561296SVegard Nossum 
10860fd0e3daSPekka Paalanen 	if (unlikely(kmmio_fault(regs, address)))
108786069782SPekka Paalanen 		return;
1088c61e211dSHarvey Harrison 
1089c61e211dSHarvey Harrison 	/*
1090c61e211dSHarvey Harrison 	 * We fault-in kernel-space virtual memory on-demand. The
1091c61e211dSHarvey Harrison 	 * 'reference' page table is init_mm.pgd.
1092c61e211dSHarvey Harrison 	 *
1093c61e211dSHarvey Harrison 	 * NOTE! We MUST NOT take any locks for this case. We may
1094c61e211dSHarvey Harrison 	 * be in an interrupt or a critical region, and should
1095c61e211dSHarvey Harrison 	 * only copy the information from the master page table,
1096c61e211dSHarvey Harrison 	 * nothing more.
1097c61e211dSHarvey Harrison 	 *
1098c61e211dSHarvey Harrison 	 * This verifies that the fault happens in kernel space
1099c61e211dSHarvey Harrison 	 * (error_code & 4) == 0, and that the fault was not a
1100c61e211dSHarvey Harrison 	 * protection error (error_code & 9) == 0.
1101c61e211dSHarvey Harrison 	 */
11020973a06cSHiroshi Shimamoto 	if (unlikely(fault_in_kernel_space(address))) {
1103f8561296SVegard Nossum 		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1104f8561296SVegard Nossum 			if (vmalloc_fault(address) >= 0)
1105c61e211dSHarvey Harrison 				return;
11065b727a3bSJeremy Fitzhardinge 
1107f8561296SVegard Nossum 			if (kmemcheck_fault(regs, address, error_code))
1108f8561296SVegard Nossum 				return;
1109f8561296SVegard Nossum 		}
1110f8561296SVegard Nossum 
11112d4a7167SIngo Molnar 		/* Can handle a stale RO->RW TLB: */
111292181f19SNick Piggin 		if (spurious_fault(error_code, address))
11135b727a3bSJeremy Fitzhardinge 			return;
11145b727a3bSJeremy Fitzhardinge 
11152d4a7167SIngo Molnar 		/* kprobes don't want to hook the spurious faults: */
1116e00b12e6SPeter Zijlstra 		if (kprobes_fault(regs))
11179be260a6SMasami Hiramatsu 			return;
1118c61e211dSHarvey Harrison 		/*
1119c61e211dSHarvey Harrison 		 * Don't take the mm semaphore here. If we fixup a prefetch
11202d4a7167SIngo Molnar 		 * fault we could otherwise deadlock:
1121c61e211dSHarvey Harrison 		 */
112292181f19SNick Piggin 		bad_area_nosemaphore(regs, error_code, address);
11232d4a7167SIngo Molnar 
112492181f19SNick Piggin 		return;
1125c61e211dSHarvey Harrison 	}
1126c61e211dSHarvey Harrison 
11272d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1128e00b12e6SPeter Zijlstra 	if (unlikely(kprobes_fault(regs)))
11299be260a6SMasami Hiramatsu 		return;
1130e00b12e6SPeter Zijlstra 
1131e00b12e6SPeter Zijlstra 	if (unlikely(error_code & PF_RSVD))
1132e00b12e6SPeter Zijlstra 		pgtable_bad(regs, error_code, address);
1133e00b12e6SPeter Zijlstra 
1134e00b12e6SPeter Zijlstra 	if (unlikely(smap_violation(error_code, regs))) {
1135e00b12e6SPeter Zijlstra 		bad_area_nosemaphore(regs, error_code, address);
1136e00b12e6SPeter Zijlstra 		return;
1137e00b12e6SPeter Zijlstra 	}
1138e00b12e6SPeter Zijlstra 
1139e00b12e6SPeter Zijlstra 	/*
1140e00b12e6SPeter Zijlstra 	 * If we're in an interrupt, have no user context, or are running
114170ffdb93SDavid Hildenbrand 	 * in a region with pagefaults disabled, then we must not take the fault.
1142e00b12e6SPeter Zijlstra 	 */
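	/*
	 * faulthandler_disabled() covers both pagefault_disable()
	 * sections and atomic context; neither may sleep on mmap_sem,
	 * so the fault is bounced straight to the no-semaphore path.
	 */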
114370ffdb93SDavid Hildenbrand 	if (unlikely(faulthandler_disabled() || !mm)) {
1144e00b12e6SPeter Zijlstra 		bad_area_nosemaphore(regs, error_code, address);
1145e00b12e6SPeter Zijlstra 		return;
1146e00b12e6SPeter Zijlstra 	}
1147e00b12e6SPeter Zijlstra 
1148c61e211dSHarvey Harrison 	/*
1149891cffbdSLinus Torvalds 	 * It's safe to allow irqs after cr2 has been saved and the
1150891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1151891cffbdSLinus Torvalds 	 *
1152891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
11532d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1154c61e211dSHarvey Harrison 	 */
1155f39b6f0eSAndy Lutomirski 	if (user_mode(regs)) {
1156891cffbdSLinus Torvalds 		local_irq_enable();
1157891cffbdSLinus Torvalds 		error_code |= PF_USER;
1158759496baSJohannes Weiner 		flags |= FAULT_FLAG_USER;
11592d4a7167SIngo Molnar 	} else {
11602d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1161c61e211dSHarvey Harrison 			local_irq_enable();
11622d4a7167SIngo Molnar 	}
1163c61e211dSHarvey Harrison 
1164a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
11657dd1fcc2SPeter Zijlstra 
1166759496baSJohannes Weiner 	if (error_code & PF_WRITE)
1167759496baSJohannes Weiner 		flags |= FAULT_FLAG_WRITE;
1168759496baSJohannes Weiner 
11693a1dfe6eSIngo Molnar 	/*
11703a1dfe6eSIngo Molnar 	 * When running in the kernel we expect faults to occur only to
11712d4a7167SIngo Molnar 	 * addresses in user space.  All other faults represent errors in
11722d4a7167SIngo Molnar 	 * the kernel and should generate an OOPS.  Unfortunately, in the
11732d4a7167SIngo Molnar 	 * case of an erroneous fault occurring in a code path which already
11742d4a7167SIngo Molnar 	 * holds mmap_sem we will deadlock attempting to validate the fault
11752d4a7167SIngo Molnar 	 * against the address space.  Luckily the kernel only validly
11762d4a7167SIngo Molnar 	 * references user space from well defined areas of code, which are
11772d4a7167SIngo Molnar 	 * listed in the exceptions table.
1178c61e211dSHarvey Harrison 	 *
1179c61e211dSHarvey Harrison 	 * As the vast majority of faults will be valid, we will only perform
11802d4a7167SIngo Molnar 	 * the source reference check when there is a possibility of a
11812d4a7167SIngo Molnar 	 * deadlock. Attempt to lock the address space, if we cannot we then
11822d4a7167SIngo Molnar 	 * validate the source. If this is invalid we can skip the address
11832d4a7167SIngo Molnar 	 * space check, thus avoiding the deadlock:
1184c61e211dSHarvey Harrison 	 */
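	/*
	 * Concretely: a buggy kernel dereference in a path that already
	 * holds mmap_sem must not block on down_read() here, or it
	 * deadlocks against itself.  Legitimate kernel accesses to user
	 * memory all have exception-table fixups, so when the trylock
	 * fails, a kernel-mode fault whose regs->ip has no fixup can be
	 * rejected immediately without ever taking the lock.
	 */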
118592181f19SNick Piggin 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1186c61e211dSHarvey Harrison 		if ((error_code & PF_USER) == 0 &&
118792181f19SNick Piggin 		    !search_exception_tables(regs->ip)) {
118892181f19SNick Piggin 			bad_area_nosemaphore(regs, error_code, address);
118992181f19SNick Piggin 			return;
119092181f19SNick Piggin 		}
1191d065bd81SMichel Lespinasse retry:
1192c61e211dSHarvey Harrison 		down_read(&mm->mmap_sem);
119301006074SPeter Zijlstra 	} else {
119401006074SPeter Zijlstra 		/*
11952d4a7167SIngo Molnar 		 * The above down_read_trylock() might have succeeded in
11962d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
11972d4a7167SIngo Molnar 		 * down_read():
119801006074SPeter Zijlstra 		 */
119901006074SPeter Zijlstra 		might_sleep();
1200c61e211dSHarvey Harrison 	}
1201c61e211dSHarvey Harrison 
1202c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
120392181f19SNick Piggin 	if (unlikely(!vma)) {
120492181f19SNick Piggin 		bad_area(regs, error_code, address);
120592181f19SNick Piggin 		return;
120692181f19SNick Piggin 	}
120792181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1208c61e211dSHarvey Harrison 		goto good_area;
120992181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
121092181f19SNick Piggin 		bad_area(regs, error_code, address);
121192181f19SNick Piggin 		return;
121292181f19SNick Piggin 	}
1213c61e211dSHarvey Harrison 	if (error_code & PF_USER) {
1214c61e211dSHarvey Harrison 		/*
1215c61e211dSHarvey Harrison 		 * Accessing the stack below %sp is always a bug.
1216c61e211dSHarvey Harrison 		 * The large cushion allows instructions like enter
1217c61e211dSHarvey Harrison 		 * and pusha to work. ("enter $65535, $31" pushes
1218c61e211dSHarvey Harrison 		 * 32 pointers and then decrements %sp by 65535.)
1219c61e211dSHarvey Harrison 		 */
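		/*
		 * That cushion works out to 65536 + 32 * 8 = 65792 bytes
		 * on 64-bit (65536 + 32 * 4 = 65664 on 32-bit); accesses
		 * further below %sp than that are rejected instead of
		 * growing the stack.
		 */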
122092181f19SNick Piggin 		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
122192181f19SNick Piggin 			bad_area(regs, error_code, address);
122292181f19SNick Piggin 			return;
1223c61e211dSHarvey Harrison 		}
122492181f19SNick Piggin 	}
122592181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
122692181f19SNick Piggin 		bad_area(regs, error_code, address);
122792181f19SNick Piggin 		return;
122892181f19SNick Piggin 	}
122992181f19SNick Piggin 
1230c61e211dSHarvey Harrison 	/*
1231c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1232c61e211dSHarvey Harrison 	 * we can handle it.
1233c61e211dSHarvey Harrison 	 */
1234c61e211dSHarvey Harrison good_area:
123568da336aSMichel Lespinasse 	if (unlikely(access_error(error_code, vma))) {
123692181f19SNick Piggin 		bad_area_access_error(regs, error_code, address);
123792181f19SNick Piggin 		return;
1238c61e211dSHarvey Harrison 	}
1239c61e211dSHarvey Harrison 
1240c61e211dSHarvey Harrison 	/*
1241c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1242c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
12439a95f3cfSPaul Cassella 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
12449a95f3cfSPaul Cassella 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1245c61e211dSHarvey Harrison 	 */
1246d065bd81SMichel Lespinasse 	fault = handle_mm_fault(mm, vma, address, flags);
124726178ec1SLinus Torvalds 	major |= fault & VM_FAULT_MAJOR;
12482d4a7167SIngo Molnar 
12493a13c4d7SJohannes Weiner 	/*
125026178ec1SLinus Torvalds 	 * If we need to retry the mmap_sem has already been released,
125126178ec1SLinus Torvalds 	 * and if there is a fatal signal pending there is no guarantee
125226178ec1SLinus Torvalds 	 * that we made any progress. Handle this case first.
12533a13c4d7SJohannes Weiner 	 */
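	/*
	 * The retry happens at most once: ALLOW_RETRY is cleared and
	 * TRIED is set before retaking mmap_sem, so a second
	 * VM_FAULT_RETRY (or a fatal signal pending on the first one)
	 * falls through to the user-return / no_context handling below.
	 */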
125426178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_RETRY)) {
125526178ec1SLinus Torvalds 		/* Retry at most once */
125626178ec1SLinus Torvalds 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
125726178ec1SLinus Torvalds 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
125826178ec1SLinus Torvalds 			flags |= FAULT_FLAG_TRIED;
125926178ec1SLinus Torvalds 			if (!fatal_signal_pending(tsk))
126026178ec1SLinus Torvalds 				goto retry;
126126178ec1SLinus Torvalds 		}
126226178ec1SLinus Torvalds 
126326178ec1SLinus Torvalds 		/* User mode? Just return to handle the fatal exception */
1264cf3c0a15SLinus Torvalds 		if (flags & FAULT_FLAG_USER)
12653a13c4d7SJohannes Weiner 			return;
12663a13c4d7SJohannes Weiner 
126726178ec1SLinus Torvalds 		/* Not returning to user mode? Handle exceptions or die: */
126826178ec1SLinus Torvalds 		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
126926178ec1SLinus Torvalds 		return;
127026178ec1SLinus Torvalds 	}
127126178ec1SLinus Torvalds 
12727fb08ecaSLinus Torvalds 	up_read(&mm->mmap_sem);
127326178ec1SLinus Torvalds 	if (unlikely(fault & VM_FAULT_ERROR)) {
12743a13c4d7SJohannes Weiner 		mm_fault_error(regs, error_code, address, fault);
127537b23e05SKOSAKI Motohiro 		return;
127637b23e05SKOSAKI Motohiro 	}
127737b23e05SKOSAKI Motohiro 
127837b23e05SKOSAKI Motohiro 	/*
127926178ec1SLinus Torvalds 	 * Major/minor page fault accounting. If any of the events
128026178ec1SLinus Torvalds 	 * returned VM_FAULT_MAJOR, we account it as a major fault.
1281d065bd81SMichel Lespinasse 	 */
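	/*
	 * "major" was OR-accumulated across the original attempt and
	 * the retry, so a major fault on either pass is counted exactly
	 * once here.
	 */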
128226178ec1SLinus Torvalds 	if (major) {
1283c61e211dSHarvey Harrison 		tsk->maj_flt++;
128426178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1285ac17dc8eSPeter Zijlstra 	} else {
1286c61e211dSHarvey Harrison 		tsk->min_flt++;
128726178ec1SLinus Torvalds 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1288d065bd81SMichel Lespinasse 	}
1289c61e211dSHarvey Harrison 
12908c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
1291c61e211dSHarvey Harrison }
12929326638cSMasami Hiramatsu NOKPROBE_SYMBOL(__do_page_fault);
12936ba3c97aSFrederic Weisbecker 
12949326638cSMasami Hiramatsu dotraplinkage void notrace
12956ba3c97aSFrederic Weisbecker do_page_fault(struct pt_regs *regs, unsigned long error_code)
12966ba3c97aSFrederic Weisbecker {
1297d4078e23SPeter Zijlstra 	unsigned long address = read_cr2(); /* Get the faulting address */
12986c1e0256SFrederic Weisbecker 	enum ctx_state prev_state;
1299d4078e23SPeter Zijlstra 
1300d4078e23SPeter Zijlstra 	/*
1301d4078e23SPeter Zijlstra 	 * This function must be tagged notrace, be kept off-limits to kprobes
1302d4078e23SPeter Zijlstra 	 * (NOKPROBE_SYMBOL below), and call read_cr2() before anything else,
1303d4078e23SPeter Zijlstra 	 * so that no tracing machinery runs before the CR2 value is observed.
1304d4078e23SPeter Zijlstra 	 *
1305d4078e23SPeter Zijlstra 	 * exception_{enter,exit}() contain all sorts of tracepoints.
1306d4078e23SPeter Zijlstra 	 */
13076c1e0256SFrederic Weisbecker 
13086c1e0256SFrederic Weisbecker 	prev_state = exception_enter();
13090ac09f9fSJiri Olsa 	__do_page_fault(regs, error_code, address);
13106c1e0256SFrederic Weisbecker 	exception_exit(prev_state);
13116ba3c97aSFrederic Weisbecker }
13129326638cSMasami Hiramatsu NOKPROBE_SYMBOL(do_page_fault);
131325c74b10SSeiji Aguchi 
1314d4078e23SPeter Zijlstra #ifdef CONFIG_TRACING
13159326638cSMasami Hiramatsu static nokprobe_inline void
13169326638cSMasami Hiramatsu trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
1317d34603b0SSeiji Aguchi 			 unsigned long error_code)
1318d34603b0SSeiji Aguchi {
1319d34603b0SSeiji Aguchi 	if (user_mode(regs))
1320d4078e23SPeter Zijlstra 		trace_page_fault_user(address, regs, error_code);
1321d34603b0SSeiji Aguchi 	else
1322d4078e23SPeter Zijlstra 		trace_page_fault_kernel(address, regs, error_code);
1323d34603b0SSeiji Aguchi }
1324d34603b0SSeiji Aguchi 
13259326638cSMasami Hiramatsu dotraplinkage void notrace
132625c74b10SSeiji Aguchi trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
132725c74b10SSeiji Aguchi {
13280ac09f9fSJiri Olsa 	/*
13290ac09f9fSJiri Olsa 	 * The exception_enter and tracepoint processing could
13300ac09f9fSJiri Olsa 	 * trigger another page fault (user space callchain
13310ac09f9fSJiri Olsa 	 * reading) and destroy the original cr2 value, so read
13320ac09f9fSJiri Olsa 	 * the faulting address now.
13330ac09f9fSJiri Olsa 	 */
13340ac09f9fSJiri Olsa 	unsigned long address = read_cr2();
1335d4078e23SPeter Zijlstra 	enum ctx_state prev_state;
133625c74b10SSeiji Aguchi 
133725c74b10SSeiji Aguchi 	prev_state = exception_enter();
1338d4078e23SPeter Zijlstra 	trace_page_fault_entries(address, regs, error_code);
13390ac09f9fSJiri Olsa 	__do_page_fault(regs, error_code, address);
134025c74b10SSeiji Aguchi 	exception_exit(prev_state);
134125c74b10SSeiji Aguchi }
13429326638cSMasami Hiramatsu NOKPROBE_SYMBOL(trace_do_page_fault);
1343d4078e23SPeter Zijlstra #endif /* CONFIG_TRACING */
1344