xref: /openbmc/linux/arch/xtensa/mm/fault.c (revision e0bf6c5c)
// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

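/* Per-CPU ASID allocator state used by the MMU context code; it starts at
 * the first ASID available to user processes.
 */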
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT

/*
 * This routine handles page faults.  It determines the faulting address
 * and the kind of fault, and then hands the work off to the appropriate
 * routine.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

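	/* Classify the fault from the exception cause: a store
	 * cache-attribute exception means the access was a write; an ITLB
	 * miss/privilege or fetch cache-attribute exception means an
	 * instruction fetch.  Anything else is treated as a read.
	 */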
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
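	/* The address lies below the VMA; only a VMA that can grow down
	 * (i.e. a stack) may be expanded to cover it.
	 */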
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

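	/* If handle_mm_fault() returned VM_FAULT_RETRY it has already
	 * dropped mmap_sem for us, so it is safe to bail out here without
	 * an up_read() when a fatal signal is pending.
	 */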
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

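		/* The PTE itself lives in the shared kernel page table, so
		 * once the top-level entry has been copied a present PTE is
		 * all that is needed for the access to be replayed.
		 */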
		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
				current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
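		/* Resume at the fixup address so the faulting access is
		 * reported back to the caller (typically as -EFAULT) instead
		 * of taking the kernel down.
		 */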
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
261