// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/traps.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

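	/* Copy only the top-level entry; the lower-level tables it
	 * points to belong to the kernel and are shared with init_mm,
	 * so the walk below merely verifies that the kernel mapping
	 * actually exists.
	 */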
	pgd_val(*pgd) = pgd_val(*pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

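	/* Start out assuming a missing mapping; this is switched to
	 * SEGV_ACCERR once a VMA covering the address has been found.
	 */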
	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

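	/* Classify the access from the exception cause: a store-side
	 * cache-attribute exception is a write; the instruction-TLB
	 * and fetch-cache-attribute causes are instruction fetches.
	 */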
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

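	/* The address is covered by a VMA, so any failure from here on
	 * is a permission problem rather than a missing mapping.
	 */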
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

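	/* The core MM dropped the mmap lock and asked for a retry;
	 * note that we have already tried once and redo the VMA
	 * lookup from scratch.
	 */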
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}
2563f65ce4dSChris Zankel }
257