xref: /openbmc/linux/arch/x86/mm/fault.c (revision a8b0ca17b80e92faab46ee7179ba9e99ccb61233)
1c61e211dSHarvey Harrison /*
2c61e211dSHarvey Harrison  *  Copyright (C) 1995  Linus Torvalds
3c61e211dSHarvey Harrison  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
4f8eeb2e6SIngo Molnar  *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
5c61e211dSHarvey Harrison  */
6a2bcd473SIngo Molnar #include <linux/magic.h>		/* STACK_END_MAGIC		*/
7a2bcd473SIngo Molnar #include <linux/sched.h>		/* test_thread_flag(), ...	*/
8a2bcd473SIngo Molnar #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
9a2bcd473SIngo Molnar #include <linux/module.h>		/* search_exception_table	*/
10a2bcd473SIngo Molnar #include <linux/bootmem.h>		/* max_low_pfn			*/
11a2bcd473SIngo Molnar #include <linux/kprobes.h>		/* __kprobes, ...		*/
12a2bcd473SIngo Molnar #include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
13cdd6c482SIngo Molnar #include <linux/perf_event.h>		/* perf_sw_event		*/
14f672b49bSAndi Kleen #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
15268bb0ceSLinus Torvalds #include <linux/prefetch.h>		/* prefetchw			*/
16c61e211dSHarvey Harrison 
17a2bcd473SIngo Molnar #include <asm/traps.h>			/* dotraplinkage, ...		*/
18a2bcd473SIngo Molnar #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
19f8561296SVegard Nossum #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
20c61e211dSHarvey Harrison 
21c61e211dSHarvey Harrison /*
222d4a7167SIngo Molnar  * Page fault error code bits:
232d4a7167SIngo Molnar  *
242d4a7167SIngo Molnar  *   bit 0 ==	 0: no page found	1: protection fault
252d4a7167SIngo Molnar  *   bit 1 ==	 0: read access		1: write access
262d4a7167SIngo Molnar  *   bit 2 ==	 0: kernel-mode access	1: user-mode access
272d4a7167SIngo Molnar  *   bit 3 ==				1: use of reserved bit detected
282d4a7167SIngo Molnar  *   bit 4 ==				1: fault was an instruction fetch
29c61e211dSHarvey Harrison  */
302d4a7167SIngo Molnar enum x86_pf_error_code {
312d4a7167SIngo Molnar 
322d4a7167SIngo Molnar 	PF_PROT		=		1 << 0,
332d4a7167SIngo Molnar 	PF_WRITE	=		1 << 1,
342d4a7167SIngo Molnar 	PF_USER		=		1 << 2,
352d4a7167SIngo Molnar 	PF_RSVD		=		1 << 3,
362d4a7167SIngo Molnar 	PF_INSTR	=		1 << 4,
372d4a7167SIngo Molnar };
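/*
 * Worked example (illustrative): a user-mode write to a present but
 * read-only page arrives with error_code == PF_PROT|PF_WRITE|PF_USER
 * (0x7), while a kernel read of a not-present page arrives with
 * error_code == 0.
 */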
38c61e211dSHarvey Harrison 
39b814d41fSIngo Molnar /*
40b319eed0SIngo Molnar  * Returns 0 if mmiotrace is disabled, or if the fault is not
41b319eed0SIngo Molnar  * handled by mmiotrace:
42b814d41fSIngo Molnar  */
4362c9295fSMasami Hiramatsu static inline int __kprobes
4462c9295fSMasami Hiramatsu kmmio_fault(struct pt_regs *regs, unsigned long addr)
4586069782SPekka Paalanen {
460fd0e3daSPekka Paalanen 	if (unlikely(is_kmmio_active()))
470fd0e3daSPekka Paalanen 		if (kmmio_handler(regs, addr) == 1)
480fd0e3daSPekka Paalanen 			return -1;
490fd0e3daSPekka Paalanen 	return 0;
5086069782SPekka Paalanen }
5186069782SPekka Paalanen 
5262c9295fSMasami Hiramatsu static inline int __kprobes notify_page_fault(struct pt_regs *regs)
53c61e211dSHarvey Harrison {
54c61e211dSHarvey Harrison 	int ret = 0;
55c61e211dSHarvey Harrison 
56c61e211dSHarvey Harrison 	/* kprobe_running() needs smp_processor_id() */
57b1801812SIngo Molnar 	if (kprobes_built_in() && !user_mode_vm(regs)) {
58c61e211dSHarvey Harrison 		preempt_disable();
59c61e211dSHarvey Harrison 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
60c61e211dSHarvey Harrison 			ret = 1;
61c61e211dSHarvey Harrison 		preempt_enable();
62c61e211dSHarvey Harrison 	}
63c61e211dSHarvey Harrison 
64c61e211dSHarvey Harrison 	return ret;
65c61e211dSHarvey Harrison }
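/*
 * Note: 14 is the page-fault exception vector; it is the trap number
 * passed to the kprobe fault handler above and recorded in
 * tsk->thread.trap_no further down.
 */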
66c61e211dSHarvey Harrison 
67c61e211dSHarvey Harrison /*
682d4a7167SIngo Molnar  * Prefetch quirks:
692d4a7167SIngo Molnar  *
702d4a7167SIngo Molnar  * 32-bit mode:
712d4a7167SIngo Molnar  *
72c61e211dSHarvey Harrison  *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
73c61e211dSHarvey Harrison  *   Check that here and ignore it.
74c61e211dSHarvey Harrison  *
752d4a7167SIngo Molnar  * 64-bit mode:
762d4a7167SIngo Molnar  *
77c61e211dSHarvey Harrison  *   Sometimes the CPU reports invalid exceptions on prefetch.
78c61e211dSHarvey Harrison  *   Check that here and ignore it.
79c61e211dSHarvey Harrison  *
802d4a7167SIngo Molnar  * Opcode checker based on code by Richard Brunner.
81c61e211dSHarvey Harrison  */
82107a0367SIngo Molnar static inline int
83107a0367SIngo Molnar check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
84107a0367SIngo Molnar 		      unsigned char opcode, int *prefetch)
85c61e211dSHarvey Harrison {
86107a0367SIngo Molnar 	unsigned char instr_hi = opcode & 0xf0;
87107a0367SIngo Molnar 	unsigned char instr_lo = opcode & 0x0f;
88c61e211dSHarvey Harrison 
89c61e211dSHarvey Harrison 	switch (instr_hi) {
90c61e211dSHarvey Harrison 	case 0x20:
91c61e211dSHarvey Harrison 	case 0x30:
92c61e211dSHarvey Harrison 		/*
93c61e211dSHarvey Harrison 		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
94c61e211dSHarvey Harrison 		 * In X86_64 long mode, the CPU will signal invalid
95c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present so
96c61e211dSHarvey Harrison 		 * opcode if some of these prefixes are present, so
97c61e211dSHarvey Harrison 		 * X86_64 will never get here anyway.
98107a0367SIngo Molnar 		return ((instr_lo & 7) == 0x6);
99c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
100c61e211dSHarvey Harrison 	case 0x40:
101c61e211dSHarvey Harrison 		/*
102c61e211dSHarvey Harrison 		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
103c61e211dSHarvey Harrison 		 * Need to figure out under what instruction mode the
104c61e211dSHarvey Harrison 		 * instruction was issued. Could check the LDT for lm,
105c61e211dSHarvey Harrison 		 * but for now it's good enough to assume that long
106c61e211dSHarvey Harrison 		 * mode only uses well known segments or kernel.
107c61e211dSHarvey Harrison 		 */
108107a0367SIngo Molnar 		return (!user_mode(regs)) || (regs->cs == __USER_CS);
109c61e211dSHarvey Harrison #endif
110c61e211dSHarvey Harrison 	case 0x60:
111c61e211dSHarvey Harrison 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
112107a0367SIngo Molnar 		return (instr_lo & 0xC) == 0x4;
113c61e211dSHarvey Harrison 	case 0xF0:
114c61e211dSHarvey Harrison 		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
115107a0367SIngo Molnar 		return !instr_lo || (instr_lo>>1) == 1;
116c61e211dSHarvey Harrison 	case 0x00:
117c61e211dSHarvey Harrison 		/* Prefetch instruction is 0x0F0D or 0x0F18 */
118107a0367SIngo Molnar 		if (probe_kernel_address(instr, opcode))
119107a0367SIngo Molnar 			return 0;
120107a0367SIngo Molnar 
121107a0367SIngo Molnar 		*prefetch = (instr_lo == 0xF) &&
122107a0367SIngo Molnar 			(opcode == 0x0D || opcode == 0x18);
123107a0367SIngo Molnar 		return 0;
124107a0367SIngo Molnar 	default:
125107a0367SIngo Molnar 		return 0;
126107a0367SIngo Molnar 	}
127107a0367SIngo Molnar }
128107a0367SIngo Molnar 
129107a0367SIngo Molnar static int
130107a0367SIngo Molnar is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
131107a0367SIngo Molnar {
132107a0367SIngo Molnar 	unsigned char *max_instr;
133107a0367SIngo Molnar 	unsigned char *instr;
134107a0367SIngo Molnar 	int prefetch = 0;
135107a0367SIngo Molnar 
136107a0367SIngo Molnar 	/*
137107a0367SIngo Molnar 	 * If it was an exec (instruction fetch) fault on an NX page, then
138107a0367SIngo Molnar 	 * do not ignore the fault:
139107a0367SIngo Molnar 	 */
140107a0367SIngo Molnar 	if (error_code & PF_INSTR)
141107a0367SIngo Molnar 		return 0;
142107a0367SIngo Molnar 
143107a0367SIngo Molnar 	instr = (void *)convert_ip_to_linear(current, regs);
144107a0367SIngo Molnar 	max_instr = instr + 15;
145107a0367SIngo Molnar 
146107a0367SIngo Molnar 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
147107a0367SIngo Molnar 		return 0;
148107a0367SIngo Molnar 
149107a0367SIngo Molnar 	while (instr < max_instr) {
150107a0367SIngo Molnar 		unsigned char opcode;
151c61e211dSHarvey Harrison 
152c61e211dSHarvey Harrison 		if (probe_kernel_address(instr, opcode))
153c61e211dSHarvey Harrison 			break;
154107a0367SIngo Molnar 
155107a0367SIngo Molnar 		instr++;
156107a0367SIngo Molnar 
157107a0367SIngo Molnar 		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
158c61e211dSHarvey Harrison 			break;
159c61e211dSHarvey Harrison 	}
160c61e211dSHarvey Harrison 	return prefetch;
161c61e211dSHarvey Harrison }
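/*
 * Encoding example (illustrative): "prefetchnta (%eax)" assembles to
 * 0f 18 00 and the 3DNow! "prefetch (%eax)" to 0f 0d 00, so a fault
 * raised while executing either is one is_prefetch() ends up ignoring.
 */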
162c61e211dSHarvey Harrison 
1632d4a7167SIngo Molnar static void
1642d4a7167SIngo Molnar force_sig_info_fault(int si_signo, int si_code, unsigned long address,
165f672b49bSAndi Kleen 		     struct task_struct *tsk, int fault)
166c61e211dSHarvey Harrison {
167f672b49bSAndi Kleen 	unsigned lsb = 0;
168c61e211dSHarvey Harrison 	siginfo_t info;
169c61e211dSHarvey Harrison 
170c61e211dSHarvey Harrison 	info.si_signo	= si_signo;
171c61e211dSHarvey Harrison 	info.si_errno	= 0;
172c61e211dSHarvey Harrison 	info.si_code	= si_code;
173c61e211dSHarvey Harrison 	info.si_addr	= (void __user *)address;
174f672b49bSAndi Kleen 	if (fault & VM_FAULT_HWPOISON_LARGE)
175f672b49bSAndi Kleen 		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
176f672b49bSAndi Kleen 	if (fault & VM_FAULT_HWPOISON)
177f672b49bSAndi Kleen 		lsb = PAGE_SHIFT;
178f672b49bSAndi Kleen 	info.si_addr_lsb = lsb;
1792d4a7167SIngo Molnar 
180c61e211dSHarvey Harrison 	force_sig_info(si_signo, &info, tsk);
181c61e211dSHarvey Harrison }
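/*
 * For hardware-poison faults, si_addr_lsb tells the signal handler how
 * much of si_addr is meaningful: PAGE_SHIFT for a single poisoned 4K
 * page, or the huge page shift when a whole huge page was poisoned.
 */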
182c61e211dSHarvey Harrison 
183f2f13a85SIngo Molnar DEFINE_SPINLOCK(pgd_lock);
184f2f13a85SIngo Molnar LIST_HEAD(pgd_list);
1852d4a7167SIngo Molnar 
186f2f13a85SIngo Molnar #ifdef CONFIG_X86_32
187f2f13a85SIngo Molnar static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
188f2f13a85SIngo Molnar {
189f2f13a85SIngo Molnar 	unsigned index = pgd_index(address);
190f2f13a85SIngo Molnar 	pgd_t *pgd_k;
191f2f13a85SIngo Molnar 	pud_t *pud, *pud_k;
192f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_k;
193f2f13a85SIngo Molnar 
194f2f13a85SIngo Molnar 	pgd += index;
195f2f13a85SIngo Molnar 	pgd_k = init_mm.pgd + index;
196f2f13a85SIngo Molnar 
197f2f13a85SIngo Molnar 	if (!pgd_present(*pgd_k))
198f2f13a85SIngo Molnar 		return NULL;
199f2f13a85SIngo Molnar 
200f2f13a85SIngo Molnar 	/*
201f2f13a85SIngo Molnar 	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
202f2f13a85SIngo Molnar 	 * and redundant with the set_pmd() on non-PAE. As would
203f2f13a85SIngo Molnar 	 * set_pud.
204f2f13a85SIngo Molnar 	 */
205f2f13a85SIngo Molnar 	pud = pud_offset(pgd, address);
206f2f13a85SIngo Molnar 	pud_k = pud_offset(pgd_k, address);
207f2f13a85SIngo Molnar 	if (!pud_present(*pud_k))
208f2f13a85SIngo Molnar 		return NULL;
209f2f13a85SIngo Molnar 
210f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
211f2f13a85SIngo Molnar 	pmd_k = pmd_offset(pud_k, address);
212f2f13a85SIngo Molnar 	if (!pmd_present(*pmd_k))
213f2f13a85SIngo Molnar 		return NULL;
214f2f13a85SIngo Molnar 
215b8bcfe99SJeremy Fitzhardinge 	if (!pmd_present(*pmd))
216f2f13a85SIngo Molnar 		set_pmd(pmd, *pmd_k);
217b8bcfe99SJeremy Fitzhardinge 	else
218f2f13a85SIngo Molnar 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
219f2f13a85SIngo Molnar 
220f2f13a85SIngo Molnar 	return pmd_k;
221f2f13a85SIngo Molnar }
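/*
 * vmalloc mappings are installed only in init_mm.pgd; every other page
 * table picks them up lazily, either here or via vmalloc_fault() below,
 * by copying the needed pmd entry from init_mm. The pte tables below
 * that entry are shared.
 */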
222f2f13a85SIngo Molnar 
223f2f13a85SIngo Molnar void vmalloc_sync_all(void)
224f2f13a85SIngo Molnar {
225f2f13a85SIngo Molnar 	unsigned long address;
226f2f13a85SIngo Molnar 
227f2f13a85SIngo Molnar 	if (SHARED_KERNEL_PMD)
228f2f13a85SIngo Molnar 		return;
229f2f13a85SIngo Molnar 
230f2f13a85SIngo Molnar 	for (address = VMALLOC_START & PMD_MASK;
231f2f13a85SIngo Molnar 	     address >= TASK_SIZE && address < FIXADDR_TOP;
232f2f13a85SIngo Molnar 	     address += PMD_SIZE) {
233f2f13a85SIngo Molnar 		struct page *page;
234f2f13a85SIngo Molnar 
235a79e53d8SAndrea Arcangeli 		spin_lock(&pgd_lock);
236f2f13a85SIngo Molnar 		list_for_each_entry(page, &pgd_list, lru) {
237617d34d9SJeremy Fitzhardinge 			spinlock_t *pgt_lock;
238f01f7c56SBorislav Petkov 			pmd_t *ret;
239617d34d9SJeremy Fitzhardinge 
240a79e53d8SAndrea Arcangeli 			/* the pgt_lock only for Xen */
241617d34d9SJeremy Fitzhardinge 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
242617d34d9SJeremy Fitzhardinge 
243617d34d9SJeremy Fitzhardinge 			spin_lock(pgt_lock);
244617d34d9SJeremy Fitzhardinge 			ret = vmalloc_sync_one(page_address(page), address);
245617d34d9SJeremy Fitzhardinge 			spin_unlock(pgt_lock);
246617d34d9SJeremy Fitzhardinge 
247617d34d9SJeremy Fitzhardinge 			if (!ret)
248f2f13a85SIngo Molnar 				break;
249f2f13a85SIngo Molnar 		}
250a79e53d8SAndrea Arcangeli 		spin_unlock(&pgd_lock);
251f2f13a85SIngo Molnar 	}
252f2f13a85SIngo Molnar }
253f2f13a85SIngo Molnar 
254f2f13a85SIngo Molnar /*
255f2f13a85SIngo Molnar  * 32-bit:
256f2f13a85SIngo Molnar  *
257f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc or module mapping area
258f2f13a85SIngo Molnar  */
25962c9295fSMasami Hiramatsu static noinline __kprobes int vmalloc_fault(unsigned long address)
260f2f13a85SIngo Molnar {
261f2f13a85SIngo Molnar 	unsigned long pgd_paddr;
262f2f13a85SIngo Molnar 	pmd_t *pmd_k;
263f2f13a85SIngo Molnar 	pte_t *pte_k;
264f2f13a85SIngo Molnar 
265f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
266f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
267f2f13a85SIngo Molnar 		return -1;
268f2f13a85SIngo Molnar 
269ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
270ebc8827fSFrederic Weisbecker 
271f2f13a85SIngo Molnar 	/*
272f2f13a85SIngo Molnar 	 * Synchronize this task's top level page-table
273f2f13a85SIngo Molnar 	 * with the 'reference' page table.
274f2f13a85SIngo Molnar 	 *
275f2f13a85SIngo Molnar 	 * Do _not_ use "current" here. We might be inside
276f2f13a85SIngo Molnar 	 * an interrupt in the middle of a task switch.
277f2f13a85SIngo Molnar 	 */
278f2f13a85SIngo Molnar 	pgd_paddr = read_cr3();
279f2f13a85SIngo Molnar 	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
280f2f13a85SIngo Molnar 	if (!pmd_k)
281f2f13a85SIngo Molnar 		return -1;
282f2f13a85SIngo Molnar 
283f2f13a85SIngo Molnar 	pte_k = pte_offset_kernel(pmd_k, address);
284f2f13a85SIngo Molnar 	if (!pte_present(*pte_k))
285f2f13a85SIngo Molnar 		return -1;
286f2f13a85SIngo Molnar 
287f2f13a85SIngo Molnar 	return 0;
288f2f13a85SIngo Molnar }
289f2f13a85SIngo Molnar 
290f2f13a85SIngo Molnar /*
291f2f13a85SIngo Molnar  * Did it hit the DOS screen memory VA from vm86 mode?
292f2f13a85SIngo Molnar  */
293f2f13a85SIngo Molnar static inline void
294f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
295f2f13a85SIngo Molnar 		 struct task_struct *tsk)
296f2f13a85SIngo Molnar {
297f2f13a85SIngo Molnar 	unsigned long bit;
298f2f13a85SIngo Molnar 
299f2f13a85SIngo Molnar 	if (!v8086_mode(regs))
300f2f13a85SIngo Molnar 		return;
301f2f13a85SIngo Molnar 
302f2f13a85SIngo Molnar 	bit = (address - 0xA0000) >> PAGE_SHIFT;
303f2f13a85SIngo Molnar 	if (bit < 32)
304f2f13a85SIngo Molnar 		tsk->thread.screen_bitmap |= 1 << bit;
305f2f13a85SIngo Molnar }
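/*
 * Each bit in thread.screen_bitmap covers one 4K page of the legacy VGA
 * window at 0xA0000-0xBFFFF, letting the vm86 caller see which screen
 * pages were touched.
 */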
306c61e211dSHarvey Harrison 
307087975b0SAkinobu Mita static bool low_pfn(unsigned long pfn)
308087975b0SAkinobu Mita {
309087975b0SAkinobu Mita 	return pfn < max_low_pfn;
310087975b0SAkinobu Mita }
311087975b0SAkinobu Mita 
312cae30f82SAdrian Bunk static void dump_pagetable(unsigned long address)
313c61e211dSHarvey Harrison {
314087975b0SAkinobu Mita 	pgd_t *base = __va(read_cr3());
315087975b0SAkinobu Mita 	pgd_t *pgd = &base[pgd_index(address)];
316087975b0SAkinobu Mita 	pmd_t *pmd;
317087975b0SAkinobu Mita 	pte_t *pte;
3182d4a7167SIngo Molnar 
319c61e211dSHarvey Harrison #ifdef CONFIG_X86_PAE
320087975b0SAkinobu Mita 	printk("*pdpt = %016Lx ", pgd_val(*pgd));
321087975b0SAkinobu Mita 	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
322087975b0SAkinobu Mita 		goto out;
323c61e211dSHarvey Harrison #endif
324087975b0SAkinobu Mita 	pmd = pmd_offset(pud_offset(pgd, address), address);
325087975b0SAkinobu Mita 	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
326c61e211dSHarvey Harrison 
327c61e211dSHarvey Harrison 	/*
328c61e211dSHarvey Harrison 	 * We must not directly access the pte in the highpte
329c61e211dSHarvey Harrison 	 * case if the page table is located in highmem.
330c61e211dSHarvey Harrison 	 * And let's rather not kmap-atomic the pte, just in case
3312d4a7167SIngo Molnar 	 * it's allocated already:
332c61e211dSHarvey Harrison 	 */
333087975b0SAkinobu Mita 	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
334087975b0SAkinobu Mita 		goto out;
3352d4a7167SIngo Molnar 
336087975b0SAkinobu Mita 	pte = pte_offset_kernel(pmd, address);
337087975b0SAkinobu Mita 	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
338087975b0SAkinobu Mita out:
339c61e211dSHarvey Harrison 	printk("\n");
340f2f13a85SIngo Molnar }
341f2f13a85SIngo Molnar 
342f2f13a85SIngo Molnar #else /* CONFIG_X86_64: */
343f2f13a85SIngo Molnar 
344f2f13a85SIngo Molnar void vmalloc_sync_all(void)
345f2f13a85SIngo Molnar {
3466afb5157SHaicheng Li 	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
347f2f13a85SIngo Molnar }
348f2f13a85SIngo Molnar 
349f2f13a85SIngo Molnar /*
350f2f13a85SIngo Molnar  * 64-bit:
351f2f13a85SIngo Molnar  *
352f2f13a85SIngo Molnar  *   Handle a fault on the vmalloc area
353f2f13a85SIngo Molnar  *
354f2f13a85SIngo Molnar  * This assumes no large pages in there.
355f2f13a85SIngo Molnar  */
35662c9295fSMasami Hiramatsu static noinline __kprobes int vmalloc_fault(unsigned long address)
357f2f13a85SIngo Molnar {
358f2f13a85SIngo Molnar 	pgd_t *pgd, *pgd_ref;
359f2f13a85SIngo Molnar 	pud_t *pud, *pud_ref;
360f2f13a85SIngo Molnar 	pmd_t *pmd, *pmd_ref;
361f2f13a85SIngo Molnar 	pte_t *pte, *pte_ref;
362f2f13a85SIngo Molnar 
363f2f13a85SIngo Molnar 	/* Make sure we are in vmalloc area: */
364f2f13a85SIngo Molnar 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
365f2f13a85SIngo Molnar 		return -1;
366f2f13a85SIngo Molnar 
367ebc8827fSFrederic Weisbecker 	WARN_ON_ONCE(in_nmi());
368ebc8827fSFrederic Weisbecker 
369f2f13a85SIngo Molnar 	/*
370f2f13a85SIngo Molnar 	 * Copy kernel mappings over when needed. This can also
371f2f13a85SIngo Molnar 	 * happen within a race in page table update. In the later
371f2f13a85SIngo Molnar 	 * happen due to a race with a page table update. In the latter
373f2f13a85SIngo Molnar 	 */
374f2f13a85SIngo Molnar 	pgd = pgd_offset(current->active_mm, address);
375f2f13a85SIngo Molnar 	pgd_ref = pgd_offset_k(address);
376f2f13a85SIngo Molnar 	if (pgd_none(*pgd_ref))
377f2f13a85SIngo Molnar 		return -1;
378f2f13a85SIngo Molnar 
379f2f13a85SIngo Molnar 	if (pgd_none(*pgd))
380f2f13a85SIngo Molnar 		set_pgd(pgd, *pgd_ref);
381f2f13a85SIngo Molnar 	else
382f2f13a85SIngo Molnar 		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
383f2f13a85SIngo Molnar 
384f2f13a85SIngo Molnar 	/*
385f2f13a85SIngo Molnar 	 * Below here mismatches are bugs because these lower tables
386f2f13a85SIngo Molnar 	 * are shared:
387f2f13a85SIngo Molnar 	 */
388f2f13a85SIngo Molnar 
389f2f13a85SIngo Molnar 	pud = pud_offset(pgd, address);
390f2f13a85SIngo Molnar 	pud_ref = pud_offset(pgd_ref, address);
391f2f13a85SIngo Molnar 	if (pud_none(*pud_ref))
392f2f13a85SIngo Molnar 		return -1;
393f2f13a85SIngo Molnar 
394f2f13a85SIngo Molnar 	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
395f2f13a85SIngo Molnar 		BUG();
396f2f13a85SIngo Molnar 
397f2f13a85SIngo Molnar 	pmd = pmd_offset(pud, address);
398f2f13a85SIngo Molnar 	pmd_ref = pmd_offset(pud_ref, address);
399f2f13a85SIngo Molnar 	if (pmd_none(*pmd_ref))
400f2f13a85SIngo Molnar 		return -1;
401f2f13a85SIngo Molnar 
402f2f13a85SIngo Molnar 	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
403f2f13a85SIngo Molnar 		BUG();
404f2f13a85SIngo Molnar 
405f2f13a85SIngo Molnar 	pte_ref = pte_offset_kernel(pmd_ref, address);
406f2f13a85SIngo Molnar 	if (!pte_present(*pte_ref))
407f2f13a85SIngo Molnar 		return -1;
408f2f13a85SIngo Molnar 
409f2f13a85SIngo Molnar 	pte = pte_offset_kernel(pmd, address);
410f2f13a85SIngo Molnar 
411f2f13a85SIngo Molnar 	/*
412f2f13a85SIngo Molnar 	 * Don't use pte_page here, because the mappings can point
413f2f13a85SIngo Molnar 	 * outside mem_map, and the NUMA hash lookup cannot handle
414f2f13a85SIngo Molnar 	 * that:
415f2f13a85SIngo Molnar 	 */
416f2f13a85SIngo Molnar 	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
417f2f13a85SIngo Molnar 		BUG();
418f2f13a85SIngo Molnar 
419f2f13a85SIngo Molnar 	return 0;
420f2f13a85SIngo Molnar }
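/*
 * Example: the first touch of a freshly vmalloc'ed address from a task
 * whose PGD predates the mapping lands here; copying the one top-level
 * entry from the reference table is enough, because every table below
 * that level is shared.
 */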
421f2f13a85SIngo Molnar 
422f2f13a85SIngo Molnar static const char errata93_warning[] =
423ad361c98SJoe Perches KERN_ERR
424ad361c98SJoe Perches "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
425ad361c98SJoe Perches "******* Working around it, but it may cause SEGVs or burn power.\n"
426ad361c98SJoe Perches "******* Please consider a BIOS update.\n"
427ad361c98SJoe Perches "******* Disabling USB legacy in the BIOS may also help.\n";
428f2f13a85SIngo Molnar 
429f2f13a85SIngo Molnar /*
430f2f13a85SIngo Molnar  * No vm86 mode in 64-bit mode:
431f2f13a85SIngo Molnar  */
432f2f13a85SIngo Molnar static inline void
433f2f13a85SIngo Molnar check_v8086_mode(struct pt_regs *regs, unsigned long address,
434f2f13a85SIngo Molnar 		 struct task_struct *tsk)
435f2f13a85SIngo Molnar {
436f2f13a85SIngo Molnar }
437f2f13a85SIngo Molnar 
438f2f13a85SIngo Molnar static int bad_address(void *p)
439f2f13a85SIngo Molnar {
440f2f13a85SIngo Molnar 	unsigned long dummy;
441f2f13a85SIngo Molnar 
442f2f13a85SIngo Molnar 	return probe_kernel_address((unsigned long *)p, dummy);
443f2f13a85SIngo Molnar }
444f2f13a85SIngo Molnar 
445f2f13a85SIngo Molnar static void dump_pagetable(unsigned long address)
446f2f13a85SIngo Molnar {
447087975b0SAkinobu Mita 	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
448087975b0SAkinobu Mita 	pgd_t *pgd = base + pgd_index(address);
449c61e211dSHarvey Harrison 	pud_t *pud;
450c61e211dSHarvey Harrison 	pmd_t *pmd;
451c61e211dSHarvey Harrison 	pte_t *pte;
452c61e211dSHarvey Harrison 
4532d4a7167SIngo Molnar 	if (bad_address(pgd))
4542d4a7167SIngo Molnar 		goto bad;
4552d4a7167SIngo Molnar 
456c61e211dSHarvey Harrison 	printk("PGD %lx ", pgd_val(*pgd));
4572d4a7167SIngo Molnar 
4582d4a7167SIngo Molnar 	if (!pgd_present(*pgd))
4592d4a7167SIngo Molnar 		goto out;
460c61e211dSHarvey Harrison 
461c61e211dSHarvey Harrison 	pud = pud_offset(pgd, address);
4622d4a7167SIngo Molnar 	if (bad_address(pud))
4632d4a7167SIngo Molnar 		goto bad;
4642d4a7167SIngo Molnar 
465c61e211dSHarvey Harrison 	printk("PUD %lx ", pud_val(*pud));
466b5360222SAndi Kleen 	if (!pud_present(*pud) || pud_large(*pud))
4672d4a7167SIngo Molnar 		goto out;
468c61e211dSHarvey Harrison 
469c61e211dSHarvey Harrison 	pmd = pmd_offset(pud, address);
4702d4a7167SIngo Molnar 	if (bad_address(pmd))
4712d4a7167SIngo Molnar 		goto bad;
4722d4a7167SIngo Molnar 
473c61e211dSHarvey Harrison 	printk("PMD %lx ", pmd_val(*pmd));
4742d4a7167SIngo Molnar 	if (!pmd_present(*pmd) || pmd_large(*pmd))
4752d4a7167SIngo Molnar 		goto out;
476c61e211dSHarvey Harrison 
477c61e211dSHarvey Harrison 	pte = pte_offset_kernel(pmd, address);
4782d4a7167SIngo Molnar 	if (bad_address(pte))
4792d4a7167SIngo Molnar 		goto bad;
4802d4a7167SIngo Molnar 
481c61e211dSHarvey Harrison 	printk("PTE %lx", pte_val(*pte));
4822d4a7167SIngo Molnar out:
483c61e211dSHarvey Harrison 	printk("\n");
484c61e211dSHarvey Harrison 	return;
485c61e211dSHarvey Harrison bad:
486c61e211dSHarvey Harrison 	printk("BAD\n");
487c61e211dSHarvey Harrison }
488c61e211dSHarvey Harrison 
489f2f13a85SIngo Molnar #endif /* CONFIG_X86_64 */
490c61e211dSHarvey Harrison 
4912d4a7167SIngo Molnar /*
4922d4a7167SIngo Molnar  * Workaround for K8 erratum #93 & buggy BIOS.
4932d4a7167SIngo Molnar  *
4942d4a7167SIngo Molnar  * BIOS SMM functions are required to use a specific workaround
4952d4a7167SIngo Molnar  * to avoid corruption of the 64-bit RIP register on C-stepping K8.
4962d4a7167SIngo Molnar  *
4972d4a7167SIngo Molnar  * Many BIOSes that weren't tested properly miss this.
4982d4a7167SIngo Molnar  *
4992d4a7167SIngo Molnar  * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
5002d4a7167SIngo Molnar  * Try to work around it here.
5012d4a7167SIngo Molnar  *
5022d4a7167SIngo Molnar  * Note we only handle faults in kernel here.
5032d4a7167SIngo Molnar  * Does nothing on 32-bit.
504c61e211dSHarvey Harrison  */
505c61e211dSHarvey Harrison static int is_errata93(struct pt_regs *regs, unsigned long address)
506c61e211dSHarvey Harrison {
507c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
508c61e211dSHarvey Harrison 	if (address != regs->ip)
509c61e211dSHarvey Harrison 		return 0;
5102d4a7167SIngo Molnar 
511c61e211dSHarvey Harrison 	if ((address >> 32) != 0)
512c61e211dSHarvey Harrison 		return 0;
5132d4a7167SIngo Molnar 
514c61e211dSHarvey Harrison 	address |= 0xffffffffUL << 32;
515c61e211dSHarvey Harrison 	if ((address >= (u64)_stext && address <= (u64)_etext) ||
516c61e211dSHarvey Harrison 	    (address >= MODULES_VADDR && address <= MODULES_END)) {
517a454ab31SIngo Molnar 		printk_once(errata93_warning);
518c61e211dSHarvey Harrison 		regs->ip = address;
519c61e211dSHarvey Harrison 		return 1;
520c61e211dSHarvey Harrison 	}
521c61e211dSHarvey Harrison #endif
522c61e211dSHarvey Harrison 	return 0;
523c61e211dSHarvey Harrison }
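/*
 * Example: if both the fault address and RIP are 0x00000000a0123456 and
 * 0xffffffffa0123456 falls inside kernel text or the module area, the
 * truncated upper bits are restored and execution resumes there.
 */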
524c61e211dSHarvey Harrison 
525c61e211dSHarvey Harrison /*
5262d4a7167SIngo Molnar  * Work around K8 erratum #100: K8 in compat mode occasionally jumps
5272d4a7167SIngo Molnar  * to illegal addresses >4GB.
5282d4a7167SIngo Molnar  *
5292d4a7167SIngo Molnar  * We catch this in the page fault handler because these addresses
5302d4a7167SIngo Molnar  * are not reachable. Just detect this case and return.  Any code
531c61e211dSHarvey Harrison  * segment in LDT is compatibility mode.
532c61e211dSHarvey Harrison  */
533c61e211dSHarvey Harrison static int is_errata100(struct pt_regs *regs, unsigned long address)
534c61e211dSHarvey Harrison {
535c61e211dSHarvey Harrison #ifdef CONFIG_X86_64
5362d4a7167SIngo Molnar 	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
537c61e211dSHarvey Harrison 		return 1;
538c61e211dSHarvey Harrison #endif
539c61e211dSHarvey Harrison 	return 0;
540c61e211dSHarvey Harrison }
541c61e211dSHarvey Harrison 
542c61e211dSHarvey Harrison static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
543c61e211dSHarvey Harrison {
544c61e211dSHarvey Harrison #ifdef CONFIG_X86_F00F_BUG
545c61e211dSHarvey Harrison 	unsigned long nr;
5462d4a7167SIngo Molnar 
547c61e211dSHarvey Harrison 	/*
5482d4a7167SIngo Molnar 	 * Pentium F0 0F C7 C8 bug workaround:
549c61e211dSHarvey Harrison 	 */
550c61e211dSHarvey Harrison 	if (boot_cpu_data.f00f_bug) {
551c61e211dSHarvey Harrison 		nr = (address - idt_descr.address) >> 3;
552c61e211dSHarvey Harrison 
553c61e211dSHarvey Harrison 		if (nr == 6) {
554c61e211dSHarvey Harrison 			do_invalid_op(regs, 0);
555c61e211dSHarvey Harrison 			return 1;
556c61e211dSHarvey Harrison 		}
557c61e211dSHarvey Harrison 	}
558c61e211dSHarvey Harrison #endif
559c61e211dSHarvey Harrison 	return 0;
560c61e211dSHarvey Harrison }
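/*
 * With the f00f workaround active the IDT is mapped read-only, so the
 * CPU lockup turns into a page fault on IDT entry 6 (#UD), which is
 * forwarded to do_invalid_op() as if invalid-opcode had been raised.
 */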
561c61e211dSHarvey Harrison 
5628f766149SIngo Molnar static const char nx_warning[] = KERN_CRIT
5638f766149SIngo Molnar "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
5648f766149SIngo Molnar 
5652d4a7167SIngo Molnar static void
5662d4a7167SIngo Molnar show_fault_oops(struct pt_regs *regs, unsigned long error_code,
567c61e211dSHarvey Harrison 		unsigned long address)
568c61e211dSHarvey Harrison {
569c61e211dSHarvey Harrison 	if (!oops_may_print())
570c61e211dSHarvey Harrison 		return;
571c61e211dSHarvey Harrison 
572c61e211dSHarvey Harrison 	if (error_code & PF_INSTR) {
57393809be8SHarvey Harrison 		unsigned int level;
5742d4a7167SIngo Molnar 
575c61e211dSHarvey Harrison 		pte_t *pte = lookup_address(address, &level);
576c61e211dSHarvey Harrison 
5778f766149SIngo Molnar 		if (pte && pte_present(*pte) && !pte_exec(*pte))
5788f766149SIngo Molnar 			printk(nx_warning, current_uid());
579c61e211dSHarvey Harrison 	}
580fd40d6e3SHarvey Harrison 
581c61e211dSHarvey Harrison 	printk(KERN_ALERT "BUG: unable to handle kernel ");
582c61e211dSHarvey Harrison 	if (address < PAGE_SIZE)
583c61e211dSHarvey Harrison 		printk(KERN_CONT "NULL pointer dereference");
584c61e211dSHarvey Harrison 	else
585c61e211dSHarvey Harrison 		printk(KERN_CONT "paging request");
5862d4a7167SIngo Molnar 
587f294a8ceSVegard Nossum 	printk(KERN_CONT " at %p\n", (void *) address);
588c61e211dSHarvey Harrison 	printk(KERN_ALERT "IP:");
589c61e211dSHarvey Harrison 	printk_address(regs->ip, 1);
5902d4a7167SIngo Molnar 
591c61e211dSHarvey Harrison 	dump_pagetable(address);
592c61e211dSHarvey Harrison }
593c61e211dSHarvey Harrison 
5942d4a7167SIngo Molnar static noinline void
5952d4a7167SIngo Molnar pgtable_bad(struct pt_regs *regs, unsigned long error_code,
5962d4a7167SIngo Molnar 	    unsigned long address)
597c61e211dSHarvey Harrison {
5982d4a7167SIngo Molnar 	struct task_struct *tsk;
5992d4a7167SIngo Molnar 	unsigned long flags;
6002d4a7167SIngo Molnar 	int sig;
6012d4a7167SIngo Molnar 
6022d4a7167SIngo Molnar 	flags = oops_begin();
6032d4a7167SIngo Molnar 	tsk = current;
6042d4a7167SIngo Molnar 	sig = SIGKILL;
605c61e211dSHarvey Harrison 
606c61e211dSHarvey Harrison 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
60792181f19SNick Piggin 	       tsk->comm, address);
608c61e211dSHarvey Harrison 	dump_pagetable(address);
6092d4a7167SIngo Molnar 
610c61e211dSHarvey Harrison 	tsk->thread.cr2		= address;
611c61e211dSHarvey Harrison 	tsk->thread.trap_no	= 14;
612c61e211dSHarvey Harrison 	tsk->thread.error_code	= error_code;
6132d4a7167SIngo Molnar 
614c61e211dSHarvey Harrison 	if (__die("Bad pagetable", regs, error_code))
615874d93d1SAlexander van Heukelum 		sig = 0;
6162d4a7167SIngo Molnar 
617874d93d1SAlexander van Heukelum 	oops_end(flags, regs, sig);
618c61e211dSHarvey Harrison }
619c61e211dSHarvey Harrison 
6202d4a7167SIngo Molnar static noinline void
6212d4a7167SIngo Molnar no_context(struct pt_regs *regs, unsigned long error_code,
6222d4a7167SIngo Molnar 	   unsigned long address)
62392181f19SNick Piggin {
62492181f19SNick Piggin 	struct task_struct *tsk = current;
62519803078SIngo Molnar 	unsigned long *stackend;
62692181f19SNick Piggin 	unsigned long flags;
62792181f19SNick Piggin 	int sig;
62892181f19SNick Piggin 
62992181f19SNick Piggin 	/* Are we prepared to handle this kernel fault? */
63092181f19SNick Piggin 	if (fixup_exception(regs))
63192181f19SNick Piggin 		return;
63292181f19SNick Piggin 
63392181f19SNick Piggin 	/*
6342d4a7167SIngo Molnar 	 * 32-bit:
6352d4a7167SIngo Molnar 	 *
63692181f19SNick Piggin 	 *   Valid to do another page fault here, because if this fault
63792181f19SNick Piggin 	 *   had been triggered by is_prefetch, fixup_exception would have
63892181f19SNick Piggin 	 *   handled it.
63992181f19SNick Piggin 	 *
6402d4a7167SIngo Molnar 	 * 64-bit:
6412d4a7167SIngo Molnar 	 *
64292181f19SNick Piggin 	 *   Hall of shame of CPU/BIOS bugs.
64392181f19SNick Piggin 	 */
64492181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
64592181f19SNick Piggin 		return;
64692181f19SNick Piggin 
64792181f19SNick Piggin 	if (is_errata93(regs, address))
64892181f19SNick Piggin 		return;
64992181f19SNick Piggin 
65092181f19SNick Piggin 	/*
65192181f19SNick Piggin 	 * Oops. The kernel tried to access some bad page. We'll have to
6522d4a7167SIngo Molnar 	 * terminate things with extreme prejudice:
65392181f19SNick Piggin 	 */
65492181f19SNick Piggin 	flags = oops_begin();
65592181f19SNick Piggin 
65692181f19SNick Piggin 	show_fault_oops(regs, error_code, address);
65792181f19SNick Piggin 
65819803078SIngo Molnar 	stackend = end_of_stack(tsk);
6590e7810beSJan Beulich 	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
66019803078SIngo Molnar 		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
66119803078SIngo Molnar 
66292181f19SNick Piggin 	tsk->thread.cr2		= address;
66392181f19SNick Piggin 	tsk->thread.trap_no	= 14;
66492181f19SNick Piggin 	tsk->thread.error_code	= error_code;
66592181f19SNick Piggin 
66692181f19SNick Piggin 	sig = SIGKILL;
66792181f19SNick Piggin 	if (__die("Oops", regs, error_code))
66892181f19SNick Piggin 		sig = 0;
6692d4a7167SIngo Molnar 
67092181f19SNick Piggin 	/* Executive summary in case the body of the oops scrolled away */
67192181f19SNick Piggin 	printk(KERN_EMERG "CR2: %016lx\n", address);
6722d4a7167SIngo Molnar 
67392181f19SNick Piggin 	oops_end(flags, regs, sig);
67492181f19SNick Piggin }
67592181f19SNick Piggin 
6762d4a7167SIngo Molnar /*
6772d4a7167SIngo Molnar  * Print out info about fatal segfaults, if the show_unhandled_signals
6782d4a7167SIngo Molnar  * sysctl is set:
6792d4a7167SIngo Molnar  */
6802d4a7167SIngo Molnar static inline void
6812d4a7167SIngo Molnar show_signal_msg(struct pt_regs *regs, unsigned long error_code,
6822d4a7167SIngo Molnar 		unsigned long address, struct task_struct *tsk)
6832d4a7167SIngo Molnar {
6842d4a7167SIngo Molnar 	if (!unhandled_signal(tsk, SIGSEGV))
6852d4a7167SIngo Molnar 		return;
6862d4a7167SIngo Molnar 
6872d4a7167SIngo Molnar 	if (!printk_ratelimit())
6882d4a7167SIngo Molnar 		return;
6892d4a7167SIngo Molnar 
690a1a08d1cSRoland Dreier 	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
6912d4a7167SIngo Molnar 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
6922d4a7167SIngo Molnar 		tsk->comm, task_pid_nr(tsk), address,
6932d4a7167SIngo Molnar 		(void *)regs->ip, (void *)regs->sp, error_code);
6942d4a7167SIngo Molnar 
6952d4a7167SIngo Molnar 	print_vma_addr(KERN_CONT " in ", regs->ip);
6962d4a7167SIngo Molnar 
6972d4a7167SIngo Molnar 	printk(KERN_CONT "\n");
6982d4a7167SIngo Molnar }
6992d4a7167SIngo Molnar 
7002d4a7167SIngo Molnar static void
7012d4a7167SIngo Molnar __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
7022d4a7167SIngo Molnar 		       unsigned long address, int si_code)
70392181f19SNick Piggin {
70492181f19SNick Piggin 	struct task_struct *tsk = current;
70592181f19SNick Piggin 
70692181f19SNick Piggin 	/* User mode accesses just cause a SIGSEGV */
70792181f19SNick Piggin 	if (error_code & PF_USER) {
70892181f19SNick Piggin 		/*
7092d4a7167SIngo Molnar 		 * It's possible to have interrupts off here:
71092181f19SNick Piggin 		 */
71192181f19SNick Piggin 		local_irq_enable();
71292181f19SNick Piggin 
71392181f19SNick Piggin 		/*
71492181f19SNick Piggin 		 * Valid to do another page fault here because this one came
7152d4a7167SIngo Molnar 		 * from user space:
71692181f19SNick Piggin 		 */
71792181f19SNick Piggin 		if (is_prefetch(regs, error_code, address))
71892181f19SNick Piggin 			return;
71992181f19SNick Piggin 
72092181f19SNick Piggin 		if (is_errata100(regs, address))
72192181f19SNick Piggin 			return;
72292181f19SNick Piggin 
7232d4a7167SIngo Molnar 		if (unlikely(show_unhandled_signals))
7242d4a7167SIngo Molnar 			show_signal_msg(regs, error_code, address, tsk);
72592181f19SNick Piggin 
7262d4a7167SIngo Molnar 		/* Kernel addresses are always protection faults: */
72792181f19SNick Piggin 		tsk->thread.cr2		= address;
72892181f19SNick Piggin 		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
72992181f19SNick Piggin 		tsk->thread.trap_no	= 14;
7302d4a7167SIngo Molnar 
731f672b49bSAndi Kleen 		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
7322d4a7167SIngo Molnar 
73392181f19SNick Piggin 		return;
73492181f19SNick Piggin 	}
73592181f19SNick Piggin 
73692181f19SNick Piggin 	if (is_f00f_bug(regs, address))
73792181f19SNick Piggin 		return;
73892181f19SNick Piggin 
73992181f19SNick Piggin 	no_context(regs, error_code, address);
74092181f19SNick Piggin }
74192181f19SNick Piggin 
7422d4a7167SIngo Molnar static noinline void
7432d4a7167SIngo Molnar bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
7442d4a7167SIngo Molnar 		     unsigned long address)
74592181f19SNick Piggin {
74692181f19SNick Piggin 	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
74792181f19SNick Piggin }
74892181f19SNick Piggin 
7492d4a7167SIngo Molnar static void
7502d4a7167SIngo Molnar __bad_area(struct pt_regs *regs, unsigned long error_code,
7512d4a7167SIngo Molnar 	   unsigned long address, int si_code)
75292181f19SNick Piggin {
75392181f19SNick Piggin 	struct mm_struct *mm = current->mm;
75492181f19SNick Piggin 
75592181f19SNick Piggin 	/*
75692181f19SNick Piggin 	 * Something tried to access memory that isn't in our memory map..
75792181f19SNick Piggin 	 * Fix it, but check if it's kernel or user first..
75892181f19SNick Piggin 	 */
75992181f19SNick Piggin 	up_read(&mm->mmap_sem);
76092181f19SNick Piggin 
76192181f19SNick Piggin 	__bad_area_nosemaphore(regs, error_code, address, si_code);
76292181f19SNick Piggin }
76392181f19SNick Piggin 
7642d4a7167SIngo Molnar static noinline void
7652d4a7167SIngo Molnar bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
76692181f19SNick Piggin {
76792181f19SNick Piggin 	__bad_area(regs, error_code, address, SEGV_MAPERR);
76892181f19SNick Piggin }
76992181f19SNick Piggin 
7702d4a7167SIngo Molnar static noinline void
7712d4a7167SIngo Molnar bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
7722d4a7167SIngo Molnar 		      unsigned long address)
77392181f19SNick Piggin {
77492181f19SNick Piggin 	__bad_area(regs, error_code, address, SEGV_ACCERR);
77592181f19SNick Piggin }
77692181f19SNick Piggin 
77792181f19SNick Piggin /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
7782d4a7167SIngo Molnar static void
7792d4a7167SIngo Molnar out_of_memory(struct pt_regs *regs, unsigned long error_code,
7802d4a7167SIngo Molnar 	      unsigned long address)
78192181f19SNick Piggin {
78292181f19SNick Piggin 	/*
78392181f19SNick Piggin 	 * We ran out of memory, call the OOM killer, and return to userspace
7842d4a7167SIngo Molnar 	 * (which will retry the fault, or kill us if we got oom-killed):
78592181f19SNick Piggin 	 */
78692181f19SNick Piggin 	up_read(&current->mm->mmap_sem);
7872d4a7167SIngo Molnar 
78892181f19SNick Piggin 	pagefault_out_of_memory();
78992181f19SNick Piggin }
79092181f19SNick Piggin 
7912d4a7167SIngo Molnar static void
792a6e04aa9SAndi Kleen do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
793a6e04aa9SAndi Kleen 	  unsigned int fault)
79492181f19SNick Piggin {
79592181f19SNick Piggin 	struct task_struct *tsk = current;
79692181f19SNick Piggin 	struct mm_struct *mm = tsk->mm;
797a6e04aa9SAndi Kleen 	int code = BUS_ADRERR;
79892181f19SNick Piggin 
79992181f19SNick Piggin 	up_read(&mm->mmap_sem);
80092181f19SNick Piggin 
8012d4a7167SIngo Molnar 	/* Kernel mode? Handle exceptions or die: */
80296054569SLinus Torvalds 	if (!(error_code & PF_USER)) {
80392181f19SNick Piggin 		no_context(regs, error_code, address);
80496054569SLinus Torvalds 		return;
80596054569SLinus Torvalds 	}
8062d4a7167SIngo Molnar 
807cd1b68f0SIngo Molnar 	/* User-space => ok to do another page fault: */
80892181f19SNick Piggin 	if (is_prefetch(regs, error_code, address))
80992181f19SNick Piggin 		return;
8102d4a7167SIngo Molnar 
81192181f19SNick Piggin 	tsk->thread.cr2		= address;
81292181f19SNick Piggin 	tsk->thread.error_code	= error_code;
81392181f19SNick Piggin 	tsk->thread.trap_no	= 14;
8142d4a7167SIngo Molnar 
815a6e04aa9SAndi Kleen #ifdef CONFIG_MEMORY_FAILURE
816f672b49bSAndi Kleen 	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
817a6e04aa9SAndi Kleen 		printk(KERN_ERR
818a6e04aa9SAndi Kleen 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
819a6e04aa9SAndi Kleen 			tsk->comm, tsk->pid, address);
820a6e04aa9SAndi Kleen 		code = BUS_MCEERR_AR;
821a6e04aa9SAndi Kleen 	}
822a6e04aa9SAndi Kleen #endif
823f672b49bSAndi Kleen 	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
82492181f19SNick Piggin }
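/*
 * BUS_MCEERR_AR marks this as an action-required hardware memory error:
 * the poisoned data was consumed at si_addr and must not be accessed
 * again by the task.
 */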
82592181f19SNick Piggin 
826b80ef10eSKOSAKI Motohiro static noinline int
8272d4a7167SIngo Molnar mm_fault_error(struct pt_regs *regs, unsigned long error_code,
8282d4a7167SIngo Molnar 	       unsigned long address, unsigned int fault)
82992181f19SNick Piggin {
830b80ef10eSKOSAKI Motohiro 	/*
831b80ef10eSKOSAKI Motohiro 	 * Pagefault was interrupted by SIGKILL. We have no reason to
832b80ef10eSKOSAKI Motohiro 	 * The page fault was interrupted by SIGKILL. We have no reason to
833b80ef10eSKOSAKI Motohiro 	 * continue handling it.
834b80ef10eSKOSAKI Motohiro 	if (fatal_signal_pending(current)) {
835b80ef10eSKOSAKI Motohiro 		if (!(fault & VM_FAULT_RETRY))
836b80ef10eSKOSAKI Motohiro 			up_read(&current->mm->mmap_sem);
837b80ef10eSKOSAKI Motohiro 		if (!(error_code & PF_USER))
838b80ef10eSKOSAKI Motohiro 			no_context(regs, error_code, address);
839b80ef10eSKOSAKI Motohiro 		return 1;
840b80ef10eSKOSAKI Motohiro 	}
841b80ef10eSKOSAKI Motohiro 	if (!(fault & VM_FAULT_ERROR))
842b80ef10eSKOSAKI Motohiro 		return 0;
843b80ef10eSKOSAKI Motohiro 
8442d4a7167SIngo Molnar 	if (fault & VM_FAULT_OOM) {
845f8626854SAndrey Vagin 		/* Kernel mode? Handle exceptions or die: */
846f8626854SAndrey Vagin 		if (!(error_code & PF_USER)) {
847f8626854SAndrey Vagin 			up_read(&current->mm->mmap_sem);
848f8626854SAndrey Vagin 			no_context(regs, error_code, address);
849b80ef10eSKOSAKI Motohiro 			return 1;
850f8626854SAndrey Vagin 		}
851f8626854SAndrey Vagin 
85292181f19SNick Piggin 		out_of_memory(regs, error_code, address);
8532d4a7167SIngo Molnar 	} else {
854f672b49bSAndi Kleen 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
855f672b49bSAndi Kleen 			     VM_FAULT_HWPOISON_LARGE))
856a6e04aa9SAndi Kleen 			do_sigbus(regs, error_code, address, fault);
85792181f19SNick Piggin 		else
85892181f19SNick Piggin 			BUG();
85992181f19SNick Piggin 	}
860b80ef10eSKOSAKI Motohiro 	return 1;
8612d4a7167SIngo Molnar }
86292181f19SNick Piggin 
863d8b57bb7SThomas Gleixner static int spurious_fault_check(unsigned long error_code, pte_t *pte)
864d8b57bb7SThomas Gleixner {
865d8b57bb7SThomas Gleixner 	if ((error_code & PF_WRITE) && !pte_write(*pte))
866d8b57bb7SThomas Gleixner 		return 0;
8672d4a7167SIngo Molnar 
868d8b57bb7SThomas Gleixner 	if ((error_code & PF_INSTR) && !pte_exec(*pte))
869d8b57bb7SThomas Gleixner 		return 0;
870d8b57bb7SThomas Gleixner 
871d8b57bb7SThomas Gleixner 	return 1;
872d8b57bb7SThomas Gleixner }
873d8b57bb7SThomas Gleixner 
874c61e211dSHarvey Harrison /*
8752d4a7167SIngo Molnar  * Handle a spurious fault caused by a stale TLB entry.
8762d4a7167SIngo Molnar  *
8772d4a7167SIngo Molnar  * This allows us to lazily refresh the TLB when increasing the
8782d4a7167SIngo Molnar  * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
8792d4a7167SIngo Molnar  * eagerly is very expensive since that implies doing a full
8802d4a7167SIngo Molnar  * cross-processor TLB flush, even if no stale TLB entries exist
8812d4a7167SIngo Molnar  * on other processors.
8822d4a7167SIngo Molnar  *
8835b727a3bSJeremy Fitzhardinge  * There are no security implications to leaving a stale TLB when
8845b727a3bSJeremy Fitzhardinge  * increasing the permissions on a page.
8855b727a3bSJeremy Fitzhardinge  */
88662c9295fSMasami Hiramatsu static noinline __kprobes int
8872d4a7167SIngo Molnar spurious_fault(unsigned long error_code, unsigned long address)
8885b727a3bSJeremy Fitzhardinge {
8895b727a3bSJeremy Fitzhardinge 	pgd_t *pgd;
8905b727a3bSJeremy Fitzhardinge 	pud_t *pud;
8915b727a3bSJeremy Fitzhardinge 	pmd_t *pmd;
8925b727a3bSJeremy Fitzhardinge 	pte_t *pte;
8933c3e5694SSteven Rostedt 	int ret;
8945b727a3bSJeremy Fitzhardinge 
8955b727a3bSJeremy Fitzhardinge 	/* Reserved-bit violation or user access to kernel space? */
8965b727a3bSJeremy Fitzhardinge 	if (error_code & (PF_USER | PF_RSVD))
8975b727a3bSJeremy Fitzhardinge 		return 0;
8985b727a3bSJeremy Fitzhardinge 
8995b727a3bSJeremy Fitzhardinge 	pgd = init_mm.pgd + pgd_index(address);
9005b727a3bSJeremy Fitzhardinge 	if (!pgd_present(*pgd))
9015b727a3bSJeremy Fitzhardinge 		return 0;
9025b727a3bSJeremy Fitzhardinge 
9035b727a3bSJeremy Fitzhardinge 	pud = pud_offset(pgd, address);
9045b727a3bSJeremy Fitzhardinge 	if (!pud_present(*pud))
9055b727a3bSJeremy Fitzhardinge 		return 0;
9065b727a3bSJeremy Fitzhardinge 
907d8b57bb7SThomas Gleixner 	if (pud_large(*pud))
908d8b57bb7SThomas Gleixner 		return spurious_fault_check(error_code, (pte_t *) pud);
909d8b57bb7SThomas Gleixner 
9105b727a3bSJeremy Fitzhardinge 	pmd = pmd_offset(pud, address);
9115b727a3bSJeremy Fitzhardinge 	if (!pmd_present(*pmd))
9125b727a3bSJeremy Fitzhardinge 		return 0;
9135b727a3bSJeremy Fitzhardinge 
914d8b57bb7SThomas Gleixner 	if (pmd_large(*pmd))
915d8b57bb7SThomas Gleixner 		return spurious_fault_check(error_code, (pte_t *) pmd);
916d8b57bb7SThomas Gleixner 
917660a293eSShaohua Li 	/*
918660a293eSShaohua Li 	 * Note: don't use pte_present() here, since it returns true
919660a293eSShaohua Li 	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
920660a293eSShaohua Li 	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
921660a293eSShaohua Li 	 * when CONFIG_DEBUG_PAGEALLOC is used.
922660a293eSShaohua Li 	 */
9235b727a3bSJeremy Fitzhardinge 	pte = pte_offset_kernel(pmd, address);
924660a293eSShaohua Li 	if (!(pte_flags(*pte) & _PAGE_PRESENT))
9255b727a3bSJeremy Fitzhardinge 		return 0;
9265b727a3bSJeremy Fitzhardinge 
9273c3e5694SSteven Rostedt 	ret = spurious_fault_check(error_code, pte);
9283c3e5694SSteven Rostedt 	if (!ret)
9293c3e5694SSteven Rostedt 		return 0;
9303c3e5694SSteven Rostedt 
9313c3e5694SSteven Rostedt 	/*
9322d4a7167SIngo Molnar 	 * Make sure we have permissions in PMD.
9332d4a7167SIngo Molnar 	 * If not, then there's a bug in the page tables:
9343c3e5694SSteven Rostedt 	 */
9353c3e5694SSteven Rostedt 	ret = spurious_fault_check(error_code, (pte_t *) pmd);
9363c3e5694SSteven Rostedt 	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
9372d4a7167SIngo Molnar 
9383c3e5694SSteven Rostedt 	return ret;
9395b727a3bSJeremy Fitzhardinge }
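/*
 * Example: the kernel upgrades a page from RO to RW but another CPU
 * still holds the old RO translation; its next write faults with
 * PF_WRITE|PF_PROT, spurious_fault() finds the PTE already writable and
 * simply returns, letting the access be retried against the updated PTE.
 */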
9405b727a3bSJeremy Fitzhardinge 
941c61e211dSHarvey Harrison int show_unhandled_signals = 1;
942c61e211dSHarvey Harrison 
9432d4a7167SIngo Molnar static inline int
94468da336aSMichel Lespinasse access_error(unsigned long error_code, struct vm_area_struct *vma)
94592181f19SNick Piggin {
94668da336aSMichel Lespinasse 	if (error_code & PF_WRITE) {
9472d4a7167SIngo Molnar 		/* write, present and write, not present: */
94892181f19SNick Piggin 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
94992181f19SNick Piggin 			return 1;
9502d4a7167SIngo Molnar 		return 0;
9512d4a7167SIngo Molnar 	}
9522d4a7167SIngo Molnar 
9532d4a7167SIngo Molnar 	/* read, present: */
9542d4a7167SIngo Molnar 	if (unlikely(error_code & PF_PROT))
95592181f19SNick Piggin 		return 1;
9562d4a7167SIngo Molnar 
9572d4a7167SIngo Molnar 	/* read, not present: */
95892181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
95992181f19SNick Piggin 		return 1;
96092181f19SNick Piggin 
96192181f19SNick Piggin 	return 0;
96292181f19SNick Piggin }
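/*
 * Example: a store into a PROT_READ-only mapping reaches here with
 * PF_WRITE set and VM_WRITE clear, so access_error() returns 1 and the
 * caller raises SIGSEGV with si_code SEGV_ACCERR.
 */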
96392181f19SNick Piggin 
9640973a06cSHiroshi Shimamoto static int fault_in_kernel_space(unsigned long address)
9650973a06cSHiroshi Shimamoto {
966d9517346SIngo Molnar 	return address >= TASK_SIZE_MAX;
9670973a06cSHiroshi Shimamoto }
9680973a06cSHiroshi Shimamoto 
969c61e211dSHarvey Harrison /*
970c61e211dSHarvey Harrison  * This routine handles page faults.  It determines the address,
971c61e211dSHarvey Harrison  * and the problem, and then passes it off to one of the appropriate
972c61e211dSHarvey Harrison  * routines.
973c61e211dSHarvey Harrison  */
974c3731c68SIngo Molnar dotraplinkage void __kprobes
975c3731c68SIngo Molnar do_page_fault(struct pt_regs *regs, unsigned long error_code)
976c61e211dSHarvey Harrison {
977c61e211dSHarvey Harrison 	struct vm_area_struct *vma;
9782d4a7167SIngo Molnar 	struct task_struct *tsk;
9792d4a7167SIngo Molnar 	unsigned long address;
9802d4a7167SIngo Molnar 	struct mm_struct *mm;
981c61e211dSHarvey Harrison 	int fault;
982d065bd81SMichel Lespinasse 	int write = error_code & PF_WRITE;
98337b23e05SKOSAKI Motohiro 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
984d065bd81SMichel Lespinasse 					(write ? FAULT_FLAG_WRITE : 0);
985c61e211dSHarvey Harrison 
986c61e211dSHarvey Harrison 	tsk = current;
987c61e211dSHarvey Harrison 	mm = tsk->mm;
9882d4a7167SIngo Molnar 
9892d4a7167SIngo Molnar 	/* Get the faulting address: */
990c61e211dSHarvey Harrison 	address = read_cr2();
991c61e211dSHarvey Harrison 
992f8561296SVegard Nossum 	/*
993f8561296SVegard Nossum 	 * Detect and handle instructions that would cause a page fault for
994f8561296SVegard Nossum 	 * both a tracked kernel page and a userspace page.
995f8561296SVegard Nossum 	 */
996f8561296SVegard Nossum 	if (kmemcheck_active(regs))
997f8561296SVegard Nossum 		kmemcheck_hide(regs);
9985dfaf90fSIngo Molnar 	prefetchw(&mm->mmap_sem);
999f8561296SVegard Nossum 
10000fd0e3daSPekka Paalanen 	if (unlikely(kmmio_fault(regs, address)))
100186069782SPekka Paalanen 		return;
1002c61e211dSHarvey Harrison 
1003c61e211dSHarvey Harrison 	/*
1004c61e211dSHarvey Harrison 	 * We fault-in kernel-space virtual memory on-demand. The
1005c61e211dSHarvey Harrison 	 * 'reference' page table is init_mm.pgd.
1006c61e211dSHarvey Harrison 	 *
1007c61e211dSHarvey Harrison 	 * NOTE! We MUST NOT take any locks for this case. We may
1008c61e211dSHarvey Harrison 	 * be in an interrupt or a critical region, and should
1009c61e211dSHarvey Harrison 	 * only copy the information from the master page table,
1010c61e211dSHarvey Harrison 	 * nothing more.
1011c61e211dSHarvey Harrison 	 *
1012c61e211dSHarvey Harrison 	 * This verifies that the fault happens in kernel space
1013c61e211dSHarvey Harrison 	 * (error_code & 4) == 0, and that the fault was not a
1014c61e211dSHarvey Harrison 	 * protection error (error_code & 9) == 0.
1015c61e211dSHarvey Harrison 	 */
10160973a06cSHiroshi Shimamoto 	if (unlikely(fault_in_kernel_space(address))) {
1017f8561296SVegard Nossum 		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
1018f8561296SVegard Nossum 			if (vmalloc_fault(address) >= 0)
1019c61e211dSHarvey Harrison 				return;
10205b727a3bSJeremy Fitzhardinge 
1021f8561296SVegard Nossum 			if (kmemcheck_fault(regs, address, error_code))
1022f8561296SVegard Nossum 				return;
1023f8561296SVegard Nossum 		}
1024f8561296SVegard Nossum 
10252d4a7167SIngo Molnar 		/* Can handle a stale RO->RW TLB: */
102692181f19SNick Piggin 		if (spurious_fault(error_code, address))
10275b727a3bSJeremy Fitzhardinge 			return;
10285b727a3bSJeremy Fitzhardinge 
10292d4a7167SIngo Molnar 		/* kprobes don't want to hook the spurious faults: */
10309be260a6SMasami Hiramatsu 		if (notify_page_fault(regs))
10319be260a6SMasami Hiramatsu 			return;
1032c61e211dSHarvey Harrison 		/*
1033c61e211dSHarvey Harrison 		 * Don't take the mm semaphore here. If we fix up a prefetch
10342d4a7167SIngo Molnar 		 * fault we could otherwise deadlock:
1035c61e211dSHarvey Harrison 		 */
103692181f19SNick Piggin 		bad_area_nosemaphore(regs, error_code, address);
10372d4a7167SIngo Molnar 
103892181f19SNick Piggin 		return;
1039c61e211dSHarvey Harrison 	}
1040c61e211dSHarvey Harrison 
10412d4a7167SIngo Molnar 	/* kprobes don't want to hook the spurious faults: */
1042f8a6b2b9SIngo Molnar 	if (unlikely(notify_page_fault(regs)))
10439be260a6SMasami Hiramatsu 		return;
1044c61e211dSHarvey Harrison 	/*
1045891cffbdSLinus Torvalds 	 * It's safe to allow irq's after cr2 has been saved and the
1046891cffbdSLinus Torvalds 	 * vmalloc fault has been handled.
1047891cffbdSLinus Torvalds 	 *
1048891cffbdSLinus Torvalds 	 * User-mode registers count as a user access even for any
10492d4a7167SIngo Molnar 	 * potential system fault or CPU buglet:
1050c61e211dSHarvey Harrison 	 */
1051891cffbdSLinus Torvalds 	if (user_mode_vm(regs)) {
1052891cffbdSLinus Torvalds 		local_irq_enable();
1053891cffbdSLinus Torvalds 		error_code |= PF_USER;
10542d4a7167SIngo Molnar 	} else {
10552d4a7167SIngo Molnar 		if (regs->flags & X86_EFLAGS_IF)
1056c61e211dSHarvey Harrison 			local_irq_enable();
10572d4a7167SIngo Molnar 	}
1058c61e211dSHarvey Harrison 
1059c61e211dSHarvey Harrison 	if (unlikely(error_code & PF_RSVD))
106092181f19SNick Piggin 		pgtable_bad(regs, error_code, address);
1061c61e211dSHarvey Harrison 
1062*a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
10637dd1fcc2SPeter Zijlstra 
1064c61e211dSHarvey Harrison 	/*
10652d4a7167SIngo Molnar 	 * If we're in an interrupt, have no user context or are running
10662d4a7167SIngo Molnar 	 * in an atomic region then we must not take the fault:
1067c61e211dSHarvey Harrison 	 */
106892181f19SNick Piggin 	if (unlikely(in_atomic() || !mm)) {
106992181f19SNick Piggin 		bad_area_nosemaphore(regs, error_code, address);
107092181f19SNick Piggin 		return;
107192181f19SNick Piggin 	}
1072c61e211dSHarvey Harrison 
10733a1dfe6eSIngo Molnar 	/*
10743a1dfe6eSIngo Molnar 	 * When running in the kernel we expect faults to occur only to
10752d4a7167SIngo Molnar 	 * addresses in user space.  All other faults represent errors in
10762d4a7167SIngo Molnar 	 * the kernel and should generate an OOPS.  Unfortunately, in the
10772d4a7167SIngo Molnar 	 * case of an erroneous fault occurring in a code path which already
10782d4a7167SIngo Molnar 	 * holds mmap_sem we will deadlock attempting to validate the fault
10792d4a7167SIngo Molnar 	 * against the address space.  Luckily the kernel only validly
10802d4a7167SIngo Molnar 	 * references user space from well defined areas of code, which are
10812d4a7167SIngo Molnar 	 * listed in the exceptions table.
1082c61e211dSHarvey Harrison 	 *
1083c61e211dSHarvey Harrison 	 * As the vast majority of faults will be valid we will only perform
10842d4a7167SIngo Molnar 	 * the source reference check when there is a possibility of a
10852d4a7167SIngo Molnar 	 * deadlock. Attempt to lock the address space, if we cannot we then
10862d4a7167SIngo Molnar 	 * validate the source. If this is invalid we can skip the address
10872d4a7167SIngo Molnar 	 * space check, thus avoiding the deadlock:
1088c61e211dSHarvey Harrison 	 */
108992181f19SNick Piggin 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
1090c61e211dSHarvey Harrison 		if ((error_code & PF_USER) == 0 &&
109192181f19SNick Piggin 		    !search_exception_tables(regs->ip)) {
109292181f19SNick Piggin 			bad_area_nosemaphore(regs, error_code, address);
109392181f19SNick Piggin 			return;
109492181f19SNick Piggin 		}
1095d065bd81SMichel Lespinasse retry:
1096c61e211dSHarvey Harrison 		down_read(&mm->mmap_sem);
109701006074SPeter Zijlstra 	} else {
109801006074SPeter Zijlstra 		/*
10992d4a7167SIngo Molnar 		 * The above down_read_trylock() might have succeeded in
11002d4a7167SIngo Molnar 		 * which case we'll have missed the might_sleep() from
11012d4a7167SIngo Molnar 		 * down_read():
110201006074SPeter Zijlstra 		 */
110301006074SPeter Zijlstra 		might_sleep();
1104c61e211dSHarvey Harrison 	}
1105c61e211dSHarvey Harrison 
1106c61e211dSHarvey Harrison 	vma = find_vma(mm, address);
110792181f19SNick Piggin 	if (unlikely(!vma)) {
110892181f19SNick Piggin 		bad_area(regs, error_code, address);
110992181f19SNick Piggin 		return;
111092181f19SNick Piggin 	}
111192181f19SNick Piggin 	if (likely(vma->vm_start <= address))
1112c61e211dSHarvey Harrison 		goto good_area;
111392181f19SNick Piggin 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
111492181f19SNick Piggin 		bad_area(regs, error_code, address);
111592181f19SNick Piggin 		return;
111692181f19SNick Piggin 	}
1117c61e211dSHarvey Harrison 	if (error_code & PF_USER) {
1118c61e211dSHarvey Harrison 		/*
1119c61e211dSHarvey Harrison 		 * Accessing the stack below %sp is always a bug.
1120c61e211dSHarvey Harrison 		 * The large cushion allows instructions like enter
1121c61e211dSHarvey Harrison 		 * and pusha to work. ("enter $65535, $31" pushes
1122c61e211dSHarvey Harrison 		 * 32 pointers and then decrements %sp by 65535.)
1123c61e211dSHarvey Harrison 		 */
112492181f19SNick Piggin 		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
112592181f19SNick Piggin 			bad_area(regs, error_code, address);
112692181f19SNick Piggin 			return;
1127c61e211dSHarvey Harrison 		}
112892181f19SNick Piggin 	}
112992181f19SNick Piggin 	if (unlikely(expand_stack(vma, address))) {
113092181f19SNick Piggin 		bad_area(regs, error_code, address);
113192181f19SNick Piggin 		return;
113292181f19SNick Piggin 	}
113392181f19SNick Piggin 
1134c61e211dSHarvey Harrison 	/*
1135c61e211dSHarvey Harrison 	 * Ok, we have a good vm_area for this memory access, so
1136c61e211dSHarvey Harrison 	 * we can handle it..
1137c61e211dSHarvey Harrison 	 */
1138c61e211dSHarvey Harrison good_area:
113968da336aSMichel Lespinasse 	if (unlikely(access_error(error_code, vma))) {
114092181f19SNick Piggin 		bad_area_access_error(regs, error_code, address);
114192181f19SNick Piggin 		return;
1142c61e211dSHarvey Harrison 	}
1143c61e211dSHarvey Harrison 
1144c61e211dSHarvey Harrison 	/*
1145c61e211dSHarvey Harrison 	 * If for any reason at all we couldn't handle the fault,
1146c61e211dSHarvey Harrison 	 * make sure we exit gracefully rather than endlessly redo
11472d4a7167SIngo Molnar 	 * the fault:
1148c61e211dSHarvey Harrison 	 */
1149d065bd81SMichel Lespinasse 	fault = handle_mm_fault(mm, vma, address, flags);
11502d4a7167SIngo Molnar 
1151b80ef10eSKOSAKI Motohiro 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
1152b80ef10eSKOSAKI Motohiro 		if (mm_fault_error(regs, error_code, address, fault))
115337b23e05SKOSAKI Motohiro 			return;
115437b23e05SKOSAKI Motohiro 	}
115537b23e05SKOSAKI Motohiro 
115637b23e05SKOSAKI Motohiro 	/*
1157d065bd81SMichel Lespinasse 	 * Major/minor page fault accounting is only done on the
1158d065bd81SMichel Lespinasse 	 * initial attempt. If we go through a retry, it is extremely
1159d065bd81SMichel Lespinasse 	 * likely that the page will be found in page cache at that point.
1160d065bd81SMichel Lespinasse 	 */
1161d065bd81SMichel Lespinasse 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
1162ac17dc8eSPeter Zijlstra 		if (fault & VM_FAULT_MAJOR) {
1163c61e211dSHarvey Harrison 			tsk->maj_flt++;
1164*a8b0ca17SPeter Zijlstra 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
116578f13e95SPeter Zijlstra 				      regs, address);
1166ac17dc8eSPeter Zijlstra 		} else {
1167c61e211dSHarvey Harrison 			tsk->min_flt++;
1168*a8b0ca17SPeter Zijlstra 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
116978f13e95SPeter Zijlstra 				      regs, address);
1170ac17dc8eSPeter Zijlstra 		}
1171d065bd81SMichel Lespinasse 		if (fault & VM_FAULT_RETRY) {
1172d065bd81SMichel Lespinasse 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
1173d065bd81SMichel Lespinasse 			 * of starvation. */
1174d065bd81SMichel Lespinasse 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
1175d065bd81SMichel Lespinasse 			goto retry;
1176d065bd81SMichel Lespinasse 		}
1177d065bd81SMichel Lespinasse 	}
1178c61e211dSHarvey Harrison 
11798c938f9fSIngo Molnar 	check_v8086_mode(regs, address, tsk);
11808c938f9fSIngo Molnar 
1181c61e211dSHarvey Harrison 	up_read(&mm->mmap_sem);
1182c61e211dSHarvey Harrison }
1183