xref: /openbmc/linux/arch/x86/mm/fault.c (revision ad361c9884e809340f6daca80d56a9e9c871690a)
/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_counter.h>		/* perf_swcounter_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
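
/*
 * Worked example (illustrative only, not used by the code below):
 * error_code == 0x6 is PF_USER | PF_WRITE, i.e. a user-mode write to a
 * not-present page, while error_code == 0x11 is PF_PROT | PF_INSTR, an
 * instruction fetch that hit a protection violation (e.g. an NX page).
 */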

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

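/*
 * Give a registered kprobe fault handler a chance to claim the fault
 * (e.g. a fault raised while handling a probed instruction).
 * Returns 1 if kprobes handled it, 0 otherwise:
 */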
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
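/*
 * vmalloc_sync_one() copies the kernel's pmd entry covering 'address'
 * from the reference page table (init_mm.pgd) into the given pgd, and
 * returns the kernel pmd, or NULL if the kernel half has no mapping
 * for that address yet:
 */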
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

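	/*
	 * 0xA0000 is the start of the legacy VGA/DOS screen memory; record
	 * which of its first 32 pages the vm86 task touched:
	 */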
	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static void dump_pagetable(unsigned long address)
{
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							& (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {

		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							& (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
}

#else /* CONFIG_X86_64: */

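/*
 * On 64-bit, the lower-level kernel page tables are shared, so it is
 * enough to copy the top-level pgd entries for the vmalloc range from
 * the reference page table into every pgd on pgd_list:
 */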
void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);

	pgd += pgd_index(address);
	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
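	/*
	 * Bit 2 of a segment selector is the Table Indicator: if it is set,
	 * the code segment comes from the LDT and is thus compat mode:
	 */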
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

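	/*
	 * STACK_END_MAGIC is written at the lowest address of the task's
	 * stack when the task is set up; if it has been overwritten, the
	 * stack was overrun:
	 */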
	stackend = end_of_stack(tsk);
	if (*stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		no_context(regs, error_code, address);

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

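	/* Count this fault as a software perf event, for page-fault profiling: */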
	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}