xref: /openbmc/linux/arch/powerpc/mm/fault.c (revision ba12eedee321eeb5baecaada285daeb3462c35f5)
114cf11afSPaul Mackerras /*
214cf11afSPaul Mackerras  *  PowerPC version
314cf11afSPaul Mackerras  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
414cf11afSPaul Mackerras  *
514cf11afSPaul Mackerras  *  Derived from "arch/i386/mm/fault.c"
614cf11afSPaul Mackerras  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
714cf11afSPaul Mackerras  *
814cf11afSPaul Mackerras  *  Modified by Cort Dougan and Paul Mackerras.
914cf11afSPaul Mackerras  *
1014cf11afSPaul Mackerras  *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
1114cf11afSPaul Mackerras  *
1214cf11afSPaul Mackerras  *  This program is free software; you can redistribute it and/or
1314cf11afSPaul Mackerras  *  modify it under the terms of the GNU General Public License
1414cf11afSPaul Mackerras  *  as published by the Free Software Foundation; either version
1514cf11afSPaul Mackerras  *  2 of the License, or (at your option) any later version.
1614cf11afSPaul Mackerras  */
1714cf11afSPaul Mackerras 
1814cf11afSPaul Mackerras #include <linux/signal.h>
1914cf11afSPaul Mackerras #include <linux/sched.h>
2014cf11afSPaul Mackerras #include <linux/kernel.h>
2114cf11afSPaul Mackerras #include <linux/errno.h>
2214cf11afSPaul Mackerras #include <linux/string.h>
2314cf11afSPaul Mackerras #include <linux/types.h>
2414cf11afSPaul Mackerras #include <linux/ptrace.h>
2514cf11afSPaul Mackerras #include <linux/mman.h>
2614cf11afSPaul Mackerras #include <linux/mm.h>
2714cf11afSPaul Mackerras #include <linux/interrupt.h>
2814cf11afSPaul Mackerras #include <linux/highmem.h>
2914cf11afSPaul Mackerras #include <linux/module.h>
3014cf11afSPaul Mackerras #include <linux/kprobes.h>
311eeb66a1SChristoph Hellwig #include <linux/kdebug.h>
32cdd6c482SIngo Molnar #include <linux/perf_event.h>
3328b54990SAnton Blanchard #include <linux/magic.h>
3476462232SChristian Dietrich #include <linux/ratelimit.h>
35*ba12eedeSLi Zhong #include <linux/context_tracking.h>
3614cf11afSPaul Mackerras 
3740900194SBrian King #include <asm/firmware.h>
3814cf11afSPaul Mackerras #include <asm/page.h>
3914cf11afSPaul Mackerras #include <asm/pgtable.h>
4014cf11afSPaul Mackerras #include <asm/mmu.h>
4114cf11afSPaul Mackerras #include <asm/mmu_context.h>
4214cf11afSPaul Mackerras #include <asm/uaccess.h>
4314cf11afSPaul Mackerras #include <asm/tlbflush.h>
4414cf11afSPaul Mackerras #include <asm/siginfo.h>
45ae3a197eSDavid Howells #include <asm/debug.h>
465efab4a0SJoakim Tjernlund #include <mm/mmu_decl.h>
479f90b997SChristoph Hellwig 
48c3dcf53aSJimi Xenidis #include "icswx.h"
49c3dcf53aSJimi Xenidis 
504f9e87c0SAnil S Keshavamurthy #ifdef CONFIG_KPROBES
519f90b997SChristoph Hellwig static inline int notify_page_fault(struct pt_regs *regs)
524f9e87c0SAnil S Keshavamurthy {
539f90b997SChristoph Hellwig 	int ret = 0;
549f90b997SChristoph Hellwig 
559f90b997SChristoph Hellwig 	/* kprobe_running() needs smp_processor_id() */
569f90b997SChristoph Hellwig 	if (!user_mode(regs)) {
579f90b997SChristoph Hellwig 		preempt_disable();
589f90b997SChristoph Hellwig 		if (kprobe_running() && kprobe_fault_handler(regs, 11))
599f90b997SChristoph Hellwig 			ret = 1;
609f90b997SChristoph Hellwig 		preempt_enable();
614f9e87c0SAnil S Keshavamurthy 	}
624f9e87c0SAnil S Keshavamurthy 
639f90b997SChristoph Hellwig 	return ret;
644f9e87c0SAnil S Keshavamurthy }
654f9e87c0SAnil S Keshavamurthy #else
669f90b997SChristoph Hellwig static inline int notify_page_fault(struct pt_regs *regs)
674f9e87c0SAnil S Keshavamurthy {
689f90b997SChristoph Hellwig 	return 0;
694f9e87c0SAnil S Keshavamurthy }
704f9e87c0SAnil S Keshavamurthy #endif
714f9e87c0SAnil S Keshavamurthy 
7214cf11afSPaul Mackerras /*
7314cf11afSPaul Mackerras  * Check whether the instruction at regs->nip is a store using
7414cf11afSPaul Mackerras  * an update addressing form which will update r1.
7514cf11afSPaul Mackerras  */
7614cf11afSPaul Mackerras static int store_updates_sp(struct pt_regs *regs)
7714cf11afSPaul Mackerras {
7814cf11afSPaul Mackerras 	unsigned int inst;
7914cf11afSPaul Mackerras 
8014cf11afSPaul Mackerras 	if (get_user(inst, (unsigned int __user *)regs->nip))
8114cf11afSPaul Mackerras 		return 0;
8214cf11afSPaul Mackerras 	/* check for 1 in the rA field */
8314cf11afSPaul Mackerras 	if (((inst >> 16) & 0x1f) != 1)
8414cf11afSPaul Mackerras 		return 0;
8514cf11afSPaul Mackerras 	/* check major opcode */
8614cf11afSPaul Mackerras 	switch (inst >> 26) {
8714cf11afSPaul Mackerras 	case 37:	/* stwu */
8814cf11afSPaul Mackerras 	case 39:	/* stbu */
8914cf11afSPaul Mackerras 	case 45:	/* sthu */
9014cf11afSPaul Mackerras 	case 53:	/* stfsu */
9114cf11afSPaul Mackerras 	case 55:	/* stfdu */
9214cf11afSPaul Mackerras 		return 1;
9314cf11afSPaul Mackerras 	case 62:	/* std or stdu */
9414cf11afSPaul Mackerras 		return (inst & 3) == 1;
9514cf11afSPaul Mackerras 	case 31:
9614cf11afSPaul Mackerras 		/* check minor opcode */
9714cf11afSPaul Mackerras 		switch ((inst >> 1) & 0x3ff) {
9814cf11afSPaul Mackerras 		case 181:	/* stdux */
9914cf11afSPaul Mackerras 		case 183:	/* stwux */
10014cf11afSPaul Mackerras 		case 247:	/* stbux */
10114cf11afSPaul Mackerras 		case 439:	/* sthux */
10214cf11afSPaul Mackerras 		case 695:	/* stfsux */
10314cf11afSPaul Mackerras 		case 759:	/* stfdux */
10414cf11afSPaul Mackerras 			return 1;
10514cf11afSPaul Mackerras 		}
10614cf11afSPaul Mackerras 	}
10714cf11afSPaul Mackerras 	return 0;
10814cf11afSPaul Mackerras }
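/*
 * For example, the common prologue instruction "stwu r1,-16(r1)"
 * encodes as 0x9421fff0: major opcode 37 in the top six bits and
 * rA = 1 in bits 11-15, so the checks above recognise it as a store
 * that updates r1.
 */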
1099be72573SBenjamin Herrenschmidt /*
1109be72573SBenjamin Herrenschmidt  * do_page_fault error handling helpers
1119be72573SBenjamin Herrenschmidt  */
1129be72573SBenjamin Herrenschmidt 
1139be72573SBenjamin Herrenschmidt #define MM_FAULT_RETURN		0
1149be72573SBenjamin Herrenschmidt #define MM_FAULT_CONTINUE	-1
1159be72573SBenjamin Herrenschmidt #define MM_FAULT_ERR(sig)	(sig)
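/*
 * How the caller (do_page_fault() below) interprets these:
 * MM_FAULT_CONTINUE (-1) means carry on processing the fault,
 * MM_FAULT_RETURN (0) means the fault has been fully dealt with and
 * do_page_fault() should return 0, and MM_FAULT_ERR(sig) (> 0) is a
 * signal number to return for a kernel fault that can't be fixed up.
 */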
1169be72573SBenjamin Herrenschmidt 
1179be72573SBenjamin Herrenschmidt static int do_sigbus(struct pt_regs *regs, unsigned long address)
1189be72573SBenjamin Herrenschmidt {
1199be72573SBenjamin Herrenschmidt 	siginfo_t info;
1209be72573SBenjamin Herrenschmidt 
1219be72573SBenjamin Herrenschmidt 	up_read(&current->mm->mmap_sem);
1229be72573SBenjamin Herrenschmidt 
1239be72573SBenjamin Herrenschmidt 	if (user_mode(regs)) {
12441ab5266SAnanth N Mavinakayanahalli 		current->thread.trap_nr = BUS_ADRERR;
1259be72573SBenjamin Herrenschmidt 		info.si_signo = SIGBUS;
1269be72573SBenjamin Herrenschmidt 		info.si_errno = 0;
1279be72573SBenjamin Herrenschmidt 		info.si_code = BUS_ADRERR;
1289be72573SBenjamin Herrenschmidt 		info.si_addr = (void __user *)address;
1299be72573SBenjamin Herrenschmidt 		force_sig_info(SIGBUS, &info, current);
1309be72573SBenjamin Herrenschmidt 		return MM_FAULT_RETURN;
1319be72573SBenjamin Herrenschmidt 	}
1329be72573SBenjamin Herrenschmidt 	return MM_FAULT_ERR(SIGBUS);
1339be72573SBenjamin Herrenschmidt }
1349be72573SBenjamin Herrenschmidt 
1359be72573SBenjamin Herrenschmidt static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
1369be72573SBenjamin Herrenschmidt {
1379be72573SBenjamin Herrenschmidt 	/*
1389be72573SBenjamin Herrenschmidt 	 * Pagefault was interrupted by SIGKILL. We have no reason to
1399be72573SBenjamin Herrenschmidt 	 * continue the pagefault.
1409be72573SBenjamin Herrenschmidt 	 */
1419be72573SBenjamin Herrenschmidt 	if (fatal_signal_pending(current)) {
1429be72573SBenjamin Herrenschmidt 		/*
1439be72573SBenjamin Herrenschmidt 		 * If we have retry set, the mmap semaphore will have
1449be72573SBenjamin Herrenschmidt 	 * already been released in __lock_page_or_retry(). Else
1459be72573SBenjamin Herrenschmidt 		 * we release it now.
1469be72573SBenjamin Herrenschmidt 		 */
1479be72573SBenjamin Herrenschmidt 		if (!(fault & VM_FAULT_RETRY))
1489be72573SBenjamin Herrenschmidt 			up_read(&current->mm->mmap_sem);
1499be72573SBenjamin Herrenschmidt 		/* Coming from kernel, we need to deal with uaccess fixups */
1509be72573SBenjamin Herrenschmidt 		if (user_mode(regs))
1519be72573SBenjamin Herrenschmidt 			return MM_FAULT_RETURN;
1529be72573SBenjamin Herrenschmidt 		return MM_FAULT_ERR(SIGKILL);
1539be72573SBenjamin Herrenschmidt 	}
1549be72573SBenjamin Herrenschmidt 
1559be72573SBenjamin Herrenschmidt 	/* No fault: be happy */
1569be72573SBenjamin Herrenschmidt 	if (!(fault & VM_FAULT_ERROR))
1579be72573SBenjamin Herrenschmidt 		return MM_FAULT_CONTINUE;
1589be72573SBenjamin Herrenschmidt 
1599be72573SBenjamin Herrenschmidt 	/* Out of memory */
160c2d23f91SDavid Rientjes 	if (fault & VM_FAULT_OOM) {
161c2d23f91SDavid Rientjes 		up_read(&current->mm->mmap_sem);
162c2d23f91SDavid Rientjes 
163c2d23f91SDavid Rientjes 		/*
164c2d23f91SDavid Rientjes 		 * We ran out of memory, or some other thing happened to us that
165c2d23f91SDavid Rientjes 		 * made us unable to handle the page fault gracefully.
166c2d23f91SDavid Rientjes 		 */
167c2d23f91SDavid Rientjes 		if (!user_mode(regs))
168c2d23f91SDavid Rientjes 			return MM_FAULT_ERR(SIGKILL);
169c2d23f91SDavid Rientjes 		pagefault_out_of_memory();
170c2d23f91SDavid Rientjes 		return MM_FAULT_RETURN;
171c2d23f91SDavid Rientjes 	}
1729be72573SBenjamin Herrenschmidt 
1739be72573SBenjamin Herrenschmidt 	/* Bus error. x86 handles HWPOISON here; we'll add this if/when
1749be72573SBenjamin Herrenschmidt 	 * we support the feature in HW.
1759be72573SBenjamin Herrenschmidt 	 */
1769be72573SBenjamin Herrenschmidt 	if (fault & VM_FAULT_SIGBUS)
1779be72573SBenjamin Herrenschmidt 		return do_sigbus(regs, addr);
1789be72573SBenjamin Herrenschmidt 
1799be72573SBenjamin Herrenschmidt 	/* We don't understand the fault code, this is fatal */
1809be72573SBenjamin Herrenschmidt 	BUG();
1819be72573SBenjamin Herrenschmidt 	return MM_FAULT_CONTINUE;
1829be72573SBenjamin Herrenschmidt }
18314cf11afSPaul Mackerras 
18414cf11afSPaul Mackerras /*
18514cf11afSPaul Mackerras  * For 600- and 800-family processors, the error_code parameter is DSISR
18614cf11afSPaul Mackerras  * for a data fault, SRR1 for an instruction fault. For 400-family processors
18714cf11afSPaul Mackerras  * the error_code parameter is ESR for a data fault, 0 for an instruction
18814cf11afSPaul Mackerras  * fault.
18914cf11afSPaul Mackerras  * For 64-bit processors, the error_code parameter is
19014cf11afSPaul Mackerras  *  - DSISR for a non-SLB data access fault,
19114cf11afSPaul Mackerras  *  - SRR1 & 0x08000000 for a non-SLB instruction access fault,
19214cf11afSPaul Mackerras  *  - 0 for any SLB fault.
19314cf11afSPaul Mackerras  *
19414cf11afSPaul Mackerras  * The return value is 0 if the fault was handled, or the signal
19514cf11afSPaul Mackerras  * number if this is a kernel fault that can't be handled here.
19614cf11afSPaul Mackerras  */
19714cf11afSPaul Mackerras int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
19814cf11afSPaul Mackerras 			    unsigned long error_code)
19914cf11afSPaul Mackerras {
200*ba12eedeSLi Zhong 	enum ctx_state prev_state = exception_enter();
20114cf11afSPaul Mackerras 	struct vm_area_struct * vma;
20214cf11afSPaul Mackerras 	struct mm_struct *mm = current->mm;
2039be72573SBenjamin Herrenschmidt 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
20414cf11afSPaul Mackerras 	int code = SEGV_MAPERR;
2059be72573SBenjamin Herrenschmidt 	int is_write = 0;
20614cf11afSPaul Mackerras 	int trap = TRAP(regs);
20714cf11afSPaul Mackerras  	int is_exec = trap == 0x400;
2089be72573SBenjamin Herrenschmidt 	int fault;
209*ba12eedeSLi Zhong 	int rc = 0;
21014cf11afSPaul Mackerras 
21114cf11afSPaul Mackerras #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
21214cf11afSPaul Mackerras 	/*
21314cf11afSPaul Mackerras 	 * Fortunately the bit assignments in SRR1 for an instruction
21414cf11afSPaul Mackerras 	 * fault and DSISR for a data fault are mostly the same for the
21514cf11afSPaul Mackerras 	 * bits we are interested in.  But there are some bits which
21614cf11afSPaul Mackerras 	 * indicate errors in DSISR but can validly be set in SRR1.
21714cf11afSPaul Mackerras 	 */
21814cf11afSPaul Mackerras 	if (trap == 0x400)
21914cf11afSPaul Mackerras 		error_code &= 0x48200000;
22014cf11afSPaul Mackerras 	else
22114cf11afSPaul Mackerras 		is_write = error_code & DSISR_ISSTORE;
22214cf11afSPaul Mackerras #else
22314cf11afSPaul Mackerras 	is_write = error_code & ESR_DST;
22414cf11afSPaul Mackerras #endif /* CONFIG_4xx || CONFIG_BOOKE */
22514cf11afSPaul Mackerras 
2269be72573SBenjamin Herrenschmidt 	if (is_write)
2279be72573SBenjamin Herrenschmidt 		flags |= FAULT_FLAG_WRITE;
2289be72573SBenjamin Herrenschmidt 
229c3dcf53aSJimi Xenidis #ifdef CONFIG_PPC_ICSWX
230c3dcf53aSJimi Xenidis 	/*
231c3dcf53aSJimi Xenidis 	 * we need to do this early because this "data storage
232c3dcf53aSJimi Xenidis 	 * interrupt" does not update the DAR/DEAR so we don't want to
233c3dcf53aSJimi Xenidis 	 * look at it
234c3dcf53aSJimi Xenidis 	 */
235c3dcf53aSJimi Xenidis 	if (error_code & ICSWX_DSI_UCT) {
236*ba12eedeSLi Zhong 		rc = acop_handle_fault(regs, address, error_code);
2379be72573SBenjamin Herrenschmidt 		if (rc)
238*ba12eedeSLi Zhong 			goto bail;
239c3dcf53aSJimi Xenidis 	}
2409be72573SBenjamin Herrenschmidt #endif /* CONFIG_PPC_ICSWX */
241c3dcf53aSJimi Xenidis 
2429f90b997SChristoph Hellwig 	if (notify_page_fault(regs))
243*ba12eedeSLi Zhong 		goto bail;
24414cf11afSPaul Mackerras 
245c3b75bd7SMichael Neuling 	if (unlikely(debugger_fault_handler(regs)))
246*ba12eedeSLi Zhong 		goto bail;
24714cf11afSPaul Mackerras 
24814cf11afSPaul Mackerras 	/* On a kernel SLB miss we can only check for a valid exception entry */
249*ba12eedeSLi Zhong 	if (!user_mode(regs) && (address >= TASK_SIZE)) {
250*ba12eedeSLi Zhong 		rc = SIGSEGV;
251*ba12eedeSLi Zhong 		goto bail;
252*ba12eedeSLi Zhong 	}
25314cf11afSPaul Mackerras 
2549c7cc234SK.Prasad #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
2559c7cc234SK.Prasad 			     defined(CONFIG_PPC_BOOK3S_64))
25614cf11afSPaul Mackerras   	if (error_code & DSISR_DABRMATCH) {
2579422de3eSMichael Neuling 		/* breakpoint match */
2589422de3eSMichael Neuling 		do_break(regs, address, error_code);
259*ba12eedeSLi Zhong 		goto bail;
26014cf11afSPaul Mackerras 	}
2619c7cc234SK.Prasad #endif
26214cf11afSPaul Mackerras 
263a546498fSBenjamin Herrenschmidt 	/* We restore the interrupt state now */
264a546498fSBenjamin Herrenschmidt 	if (!arch_irq_disabled_regs(regs))
265a546498fSBenjamin Herrenschmidt 		local_irq_enable();
266a546498fSBenjamin Herrenschmidt 
26714cf11afSPaul Mackerras 	if (in_atomic() || mm == NULL) {
268*ba12eedeSLi Zhong 		if (!user_mode(regs)) {
269*ba12eedeSLi Zhong 			rc = SIGSEGV;
270*ba12eedeSLi Zhong 			goto bail;
271*ba12eedeSLi Zhong 		}
27214cf11afSPaul Mackerras 		/* in_atomic() in user mode is really bad,
27314cf11afSPaul Mackerras 		   as is current->mm == NULL. */
27414cf11afSPaul Mackerras 		printk(KERN_EMERG "Page fault in user mode with "
27514cf11afSPaul Mackerras 		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
27614cf11afSPaul Mackerras 		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
27714cf11afSPaul Mackerras 		       regs->nip, regs->msr);
27814cf11afSPaul Mackerras 		die("Weird page fault", regs, SIGSEGV);
27914cf11afSPaul Mackerras 	}
28014cf11afSPaul Mackerras 
281a8b0ca17SPeter Zijlstra 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
2827dd1fcc2SPeter Zijlstra 
28314cf11afSPaul Mackerras 	/* When running in the kernel we expect faults to occur only to
28414cf11afSPaul Mackerras 	 * addresses in user space.  All other faults represent errors in the
285fc5266eaSAnton Blanchard 	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
286fc5266eaSAnton Blanchard 	 * erroneous fault occurring in a code path which already holds mmap_sem
28714cf11afSPaul Mackerras 	 * we will deadlock attempting to validate the fault against the
28814cf11afSPaul Mackerras 	 * address space.  Luckily the kernel only validly references user
28914cf11afSPaul Mackerras 	 * space from well defined areas of code, which are listed in the
29014cf11afSPaul Mackerras 	 * exceptions table.
29114cf11afSPaul Mackerras 	 *
29214cf11afSPaul Mackerras 	 * As the vast majority of faults will be valid we will only perform
293fc5266eaSAnton Blanchard 	 * the source reference check when there is a possibility of a deadlock.
29414cf11afSPaul Mackerras 	 * Attempt to lock the address space, if we cannot we then validate the
29514cf11afSPaul Mackerras 	 * source.  If this is invalid we can skip the address space check,
29614cf11afSPaul Mackerras 	 * thus avoiding the deadlock.
29714cf11afSPaul Mackerras 	 */
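	/*
	 * For example, a fault raised by copy_from_user() comes from an
	 * instruction with a fixup entry in the exception table, whereas a
	 * wild kernel dereference does not, so search_exception_tables()
	 * below lets us tell the two apart without taking mmap_sem.
	 */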
29814cf11afSPaul Mackerras 	if (!down_read_trylock(&mm->mmap_sem)) {
29914cf11afSPaul Mackerras 		if (!user_mode(regs) && !search_exception_tables(regs->nip))
30014cf11afSPaul Mackerras 			goto bad_area_nosemaphore;
30114cf11afSPaul Mackerras 
3029be72573SBenjamin Herrenschmidt retry:
30314cf11afSPaul Mackerras 		down_read(&mm->mmap_sem);
304a546498fSBenjamin Herrenschmidt 	} else {
305a546498fSBenjamin Herrenschmidt 		/*
306a546498fSBenjamin Herrenschmidt 		 * The above down_read_trylock() might have succeeded in
307a546498fSBenjamin Herrenschmidt 		 * which case we'll have missed the might_sleep() from
308a546498fSBenjamin Herrenschmidt 		 * down_read():
309a546498fSBenjamin Herrenschmidt 		 */
310a546498fSBenjamin Herrenschmidt 		might_sleep();
31114cf11afSPaul Mackerras 	}
31214cf11afSPaul Mackerras 
31314cf11afSPaul Mackerras 	vma = find_vma(mm, address);
31414cf11afSPaul Mackerras 	if (!vma)
31514cf11afSPaul Mackerras 		goto bad_area;
31614cf11afSPaul Mackerras 	if (vma->vm_start <= address)
31714cf11afSPaul Mackerras 		goto good_area;
31814cf11afSPaul Mackerras 	if (!(vma->vm_flags & VM_GROWSDOWN))
31914cf11afSPaul Mackerras 		goto bad_area;
32014cf11afSPaul Mackerras 
32114cf11afSPaul Mackerras 	/*
32214cf11afSPaul Mackerras 	 * N.B. The POWER/Open ABI allows programs to access up to
32314cf11afSPaul Mackerras 	 * 288 bytes below the stack pointer.
32414cf11afSPaul Mackerras 	 * The kernel signal delivery code writes up to about 1.5kB
32514cf11afSPaul Mackerras 	 * below the stack pointer (r1) before decrementing it.
32614cf11afSPaul Mackerras 	 * The exec code can write slightly over 640kB to the stack
32714cf11afSPaul Mackerras 	 * before setting the user r1.  Thus we allow the stack to
32814cf11afSPaul Mackerras 	 * expand to 1MB without further checks.
32914cf11afSPaul Mackerras 	 */
33014cf11afSPaul Mackerras 	if (address + 0x100000 < vma->vm_end) {
33114cf11afSPaul Mackerras 		/* get user regs even if this fault is in kernel mode */
33214cf11afSPaul Mackerras 		struct pt_regs *uregs = current->thread.regs;
33314cf11afSPaul Mackerras 		if (uregs == NULL)
33414cf11afSPaul Mackerras 			goto bad_area;
33514cf11afSPaul Mackerras 
33614cf11afSPaul Mackerras 		/*
33714cf11afSPaul Mackerras 		 * A user-mode access to an address a long way below
33814cf11afSPaul Mackerras 		 * the stack pointer is only valid if the instruction
33914cf11afSPaul Mackerras 		 * is one which would update the stack pointer to the
34014cf11afSPaul Mackerras 		 * address accessed if the instruction completed,
34114cf11afSPaul Mackerras 		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
34214cf11afSPaul Mackerras 		 * (or the byte, halfword, float or double forms).
34314cf11afSPaul Mackerras 		 *
34414cf11afSPaul Mackerras 		 * If we don't check this then any write to the area
34514cf11afSPaul Mackerras 		 * between the last mapped region and the stack will
34614cf11afSPaul Mackerras 		 * expand the stack rather than segfaulting.
34714cf11afSPaul Mackerras 		 */
34814cf11afSPaul Mackerras 		if (address + 2048 < uregs->gpr[1]
34914cf11afSPaul Mackerras 		    && (!user_mode(regs) || !store_updates_sp(regs)))
35014cf11afSPaul Mackerras 			goto bad_area;
35114cf11afSPaul Mackerras 	}
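	/*
	 * In other words, when the check above applies, a prologue store
	 * such as "stwu r1,-4096(r1)" faulting on a new page is allowed to
	 * grow the stack because it updates r1 to the faulting address,
	 * while a stray store the same distance below r1 from any other
	 * instruction takes the bad_area path instead.
	 */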
35214cf11afSPaul Mackerras 	if (expand_stack(vma, address))
35314cf11afSPaul Mackerras 		goto bad_area;
35414cf11afSPaul Mackerras 
35514cf11afSPaul Mackerras good_area:
35614cf11afSPaul Mackerras 	code = SEGV_ACCERR;
35714cf11afSPaul Mackerras #if defined(CONFIG_6xx)
35814cf11afSPaul Mackerras 	if (error_code & 0x95700000)
35914cf11afSPaul Mackerras 		/* an error such as lwarx to I/O controller space,
36014cf11afSPaul Mackerras 		   address matching DABR, eciwx, etc. */
36114cf11afSPaul Mackerras 		goto bad_area;
36214cf11afSPaul Mackerras #endif /* CONFIG_6xx */
36314cf11afSPaul Mackerras #if defined(CONFIG_8xx)
3645efab4a0SJoakim Tjernlund 	/* The 8xx sometimes needs to load invalid/non-present TLB entries.
3655efab4a0SJoakim Tjernlund 	 * These must be invalidated separately as the Linux mm doesn't do it.
3665efab4a0SJoakim Tjernlund 	 */
3675efab4a0SJoakim Tjernlund 	if (error_code & 0x40000000) /* no translation? */
3685efab4a0SJoakim Tjernlund 		_tlbil_va(address, 0, 0, 0);
3695efab4a0SJoakim Tjernlund 
37014cf11afSPaul Mackerras         /* The MPC8xx seems to always set 0x80000000, which is
37114cf11afSPaul Mackerras          * "undefined".  Of those that can be set, this is the only
37214cf11afSPaul Mackerras          * one which seems bad.
37314cf11afSPaul Mackerras          */
37414cf11afSPaul Mackerras 	if (error_code & 0x10000000)
37514cf11afSPaul Mackerras                 /* Guarded storage error. */
37614cf11afSPaul Mackerras 		goto bad_area;
37714cf11afSPaul Mackerras #endif /* CONFIG_8xx */
37814cf11afSPaul Mackerras 
37914cf11afSPaul Mackerras 	if (is_exec) {
3808d30c14cSBenjamin Herrenschmidt #ifdef CONFIG_PPC_STD_MMU
3818d30c14cSBenjamin Herrenschmidt 		/* A protection fault on exec goes straight to failure on
3828d30c14cSBenjamin Herrenschmidt 		 * Hash based MMUs as they either don't support per-page
3838d30c14cSBenjamin Herrenschmidt 		 * execute permission, or if they do, it's handled already
3848d30c14cSBenjamin Herrenschmidt 		 * at the hash level. This test would probably have to
3858d30c14cSBenjamin Herrenschmidt 		 * be removed if we change the way this works to make hash
3868d30c14cSBenjamin Herrenschmidt 		 * processors use the same I/D cache coherency mechanism
3878d30c14cSBenjamin Herrenschmidt 		 * as embedded.
3888d30c14cSBenjamin Herrenschmidt 		 */
38914cf11afSPaul Mackerras 		if (error_code & DSISR_PROTFAULT)
39014cf11afSPaul Mackerras 			goto bad_area;
3918d30c14cSBenjamin Herrenschmidt #endif /* CONFIG_PPC_STD_MMU */
3928d30c14cSBenjamin Herrenschmidt 
39308ae6cc1SPaul Mackerras 		/*
39408ae6cc1SPaul Mackerras 		 * Allow execution from readable areas if the MMU does not
39508ae6cc1SPaul Mackerras 		 * provide separate controls over reading and executing.
3968d30c14cSBenjamin Herrenschmidt 		 *
3978d30c14cSBenjamin Herrenschmidt 		 * Note: This code was not previously enabled for 4xx/BookE.
3988d30c14cSBenjamin Herrenschmidt 		 * It is now, as I/D cache coherency for these is done at
3998d30c14cSBenjamin Herrenschmidt 		 * set_pte_at() time and I see no reason why the test
4008d30c14cSBenjamin Herrenschmidt 		 * below wouldn't be valid on those processors. This -may-
4018d30c14cSBenjamin Herrenschmidt 		 * break programs compiled with a really old ABI though.
40208ae6cc1SPaul Mackerras 		 */
40308ae6cc1SPaul Mackerras 		if (!(vma->vm_flags & VM_EXEC) &&
40408ae6cc1SPaul Mackerras 		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
40508ae6cc1SPaul Mackerras 		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
40614cf11afSPaul Mackerras 			goto bad_area;
40714cf11afSPaul Mackerras 	/* a write */
40814cf11afSPaul Mackerras 	} else if (is_write) {
40914cf11afSPaul Mackerras 		if (!(vma->vm_flags & VM_WRITE))
41014cf11afSPaul Mackerras 			goto bad_area;
41114cf11afSPaul Mackerras 	/* a read */
41214cf11afSPaul Mackerras 	} else {
41314cf11afSPaul Mackerras 		/* protection fault */
41414cf11afSPaul Mackerras 		if (error_code & 0x08000000)
41514cf11afSPaul Mackerras 			goto bad_area;
416df67b3daSJason Baron 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
41714cf11afSPaul Mackerras 			goto bad_area;
41814cf11afSPaul Mackerras 	}
41914cf11afSPaul Mackerras 
42014cf11afSPaul Mackerras 	/*
42114cf11afSPaul Mackerras 	 * If for any reason at all we couldn't handle the fault,
42214cf11afSPaul Mackerras 	 * make sure we exit gracefully rather than endlessly redo
42314cf11afSPaul Mackerras 	 * the fault.
42414cf11afSPaul Mackerras 	 */
4259be72573SBenjamin Herrenschmidt 	fault = handle_mm_fault(mm, vma, address, flags);
4269be72573SBenjamin Herrenschmidt 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
427*ba12eedeSLi Zhong 		rc = mm_fault_error(regs, address, fault);
4289be72573SBenjamin Herrenschmidt 		if (rc >= MM_FAULT_RETURN)
429*ba12eedeSLi Zhong 			goto bail;
430*ba12eedeSLi Zhong 		else
431*ba12eedeSLi Zhong 			rc = 0;
43214cf11afSPaul Mackerras 	}
4339be72573SBenjamin Herrenschmidt 
4349be72573SBenjamin Herrenschmidt 	/*
4359be72573SBenjamin Herrenschmidt 	 * Major/minor page fault accounting is only done on the
4369be72573SBenjamin Herrenschmidt 	 * initial attempt. If we go through a retry, it is extremely
4379be72573SBenjamin Herrenschmidt 	 * likely that the page will be found in page cache at that point.
4389be72573SBenjamin Herrenschmidt 	 */
4399be72573SBenjamin Herrenschmidt 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
4409be72573SBenjamin Herrenschmidt 		if (fault & VM_FAULT_MAJOR) {
44183c54070SNick Piggin 			current->maj_flt++;
442a8b0ca17SPeter Zijlstra 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
44378f13e95SPeter Zijlstra 				      regs, address);
44440900194SBrian King #ifdef CONFIG_PPC_SMLPAR
44540900194SBrian King 			if (firmware_has_feature(FW_FEATURE_CMO)) {
44640900194SBrian King 				preempt_disable();
447a6326e98SRobert Jennings 				get_lppaca()->page_ins += (1 << PAGE_FACTOR);
44840900194SBrian King 				preempt_enable();
44940900194SBrian King 			}
4509be72573SBenjamin Herrenschmidt #endif /* CONFIG_PPC_SMLPAR */
451ac17dc8eSPeter Zijlstra 		} else {
45283c54070SNick Piggin 			current->min_flt++;
453a8b0ca17SPeter Zijlstra 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
45478f13e95SPeter Zijlstra 				      regs, address);
455ac17dc8eSPeter Zijlstra 		}
4569be72573SBenjamin Herrenschmidt 		if (fault & VM_FAULT_RETRY) {
4579be72573SBenjamin Herrenschmidt 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
4589be72573SBenjamin Herrenschmidt 			 * of starvation. */
4599be72573SBenjamin Herrenschmidt 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
46045cac65bSShaohua Li 			flags |= FAULT_FLAG_TRIED;
4619be72573SBenjamin Herrenschmidt 			goto retry;
4629be72573SBenjamin Herrenschmidt 		}
4639be72573SBenjamin Herrenschmidt 	}
4649be72573SBenjamin Herrenschmidt 
46514cf11afSPaul Mackerras 	up_read(&mm->mmap_sem);
466*ba12eedeSLi Zhong 	goto bail;
46714cf11afSPaul Mackerras 
46814cf11afSPaul Mackerras bad_area:
46914cf11afSPaul Mackerras 	up_read(&mm->mmap_sem);
47014cf11afSPaul Mackerras 
47114cf11afSPaul Mackerras bad_area_nosemaphore:
47214cf11afSPaul Mackerras 	/* User mode accesses cause a SIGSEGV */
47314cf11afSPaul Mackerras 	if (user_mode(regs)) {
47414cf11afSPaul Mackerras 		_exception(SIGSEGV, regs, code, address);
475*ba12eedeSLi Zhong 		goto bail;
47614cf11afSPaul Mackerras 	}
47714cf11afSPaul Mackerras 
47876462232SChristian Dietrich 	if (is_exec && (error_code & DSISR_PROTFAULT))
47976462232SChristian Dietrich 		printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
48014cf11afSPaul Mackerras 				   " page (%lx) - exploit attempt? (uid: %d)\n",
4819e184e0aSEric W. Biederman 				   address, from_kuid(&init_user_ns, current_uid()));
48214cf11afSPaul Mackerras 
483*ba12eedeSLi Zhong 	rc = SIGSEGV;
484*ba12eedeSLi Zhong 
485*ba12eedeSLi Zhong bail:
486*ba12eedeSLi Zhong 	exception_exit(prev_state);
487*ba12eedeSLi Zhong 	return rc;
48814cf11afSPaul Mackerras 
48914cf11afSPaul Mackerras }
49014cf11afSPaul Mackerras 
49114cf11afSPaul Mackerras /*
49214cf11afSPaul Mackerras  * bad_page_fault is called when we have a bad access from the kernel.
49314cf11afSPaul Mackerras  * It is called from the DSI and ISI handlers in head.S and from some
49414cf11afSPaul Mackerras  * of the procedures in traps.c.
49514cf11afSPaul Mackerras  */
49614cf11afSPaul Mackerras void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
49714cf11afSPaul Mackerras {
49814cf11afSPaul Mackerras 	const struct exception_table_entry *entry;
49928b54990SAnton Blanchard 	unsigned long *stackend;
50014cf11afSPaul Mackerras 
50114cf11afSPaul Mackerras 	/* Are we prepared to handle this fault?  */
50214cf11afSPaul Mackerras 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
50314cf11afSPaul Mackerras 		regs->nip = entry->fixup;
50414cf11afSPaul Mackerras 		return;
50514cf11afSPaul Mackerras 	}
50614cf11afSPaul Mackerras 
50714cf11afSPaul Mackerras 	/* kernel has accessed a bad area */
508723925b7SOlof Johansson 
509723925b7SOlof Johansson 	switch (regs->trap) {
510723925b7SOlof Johansson 	case 0x300:
511723925b7SOlof Johansson 	case 0x380:
512a416dd8dSMichael Ellerman 		printk(KERN_ALERT "Unable to handle kernel paging request for "
513a416dd8dSMichael Ellerman 			"data at address 0x%08lx\n", regs->dar);
514723925b7SOlof Johansson 		break;
515723925b7SOlof Johansson 	case 0x400:
516723925b7SOlof Johansson 	case 0x480:
517a416dd8dSMichael Ellerman 		printk(KERN_ALERT "Unable to handle kernel paging request for "
518a416dd8dSMichael Ellerman 			"instruction fetch\n");
519723925b7SOlof Johansson 		break;
520723925b7SOlof Johansson 	default:
521a416dd8dSMichael Ellerman 		printk(KERN_ALERT "Unable to handle kernel paging request for "
522a416dd8dSMichael Ellerman 			"unknown fault\n");
523a416dd8dSMichael Ellerman 		break;
524723925b7SOlof Johansson 	}
525723925b7SOlof Johansson 	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
526723925b7SOlof Johansson 		regs->nip);
527723925b7SOlof Johansson 
52828b54990SAnton Blanchard 	stackend = end_of_stack(current);
52928b54990SAnton Blanchard 	if (current != &init_task && *stackend != STACK_END_MAGIC)
53028b54990SAnton Blanchard 		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
53128b54990SAnton Blanchard 
53214cf11afSPaul Mackerras 	die("Kernel access of bad area", regs, sig);
53314cf11afSPaul Mackerras }
534