// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

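/* Next ASID to hand out to a user address space.  The MMU context
 * code in asm/mmu_context.h advances this when it assigns a fresh
 * address-space ID (the exact consumer is assumed, not shown here).
 */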
unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

#undef DEBUG_PAGE_FAULT

/*
 * This routine handles page faults.  It determines the faulting
 * address and the kind of access that failed, then dispatches to
 * the appropriate handler.
 *
 * Note: does not handle the TLB Miss and MultiHit exceptions.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;
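	/* Let the fault handler drop mmap_sem and retry once, and let a
	 * fatal signal interrupt a killable wait while the fault is
	 * being serviced.
	 */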
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

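	/* Classify the fault from EXCCAUSE: a store to a page that is
	 * mapped without write permission raises STORE_CACHE_ATTRIBUTE;
	 * the ITLB and FETCH causes cover instruction fetches.  Anything
	 * else is treated as a read.
	 */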
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
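	/* The address lies below this vma, but the vma is marked as a
	 * downward-growing stack; try to extend it to cover the
	 * faulting address.
	 */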
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

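	/* A VM_FAULT_RETRY result means handle_mm_fault() has already
	 * dropped mmap_sem, so returning here without up_read() when a
	 * fatal signal is pending does not leak the lock.
	 */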
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

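			/* With FAULT_FLAG_ALLOW_RETRY now cleared, a second
			 * attempt blocks on the page lock instead of
			 * returning VM_FAULT_RETRY again, so we retry at
			 * most once.
			 */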
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

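		/* Copy the kernel's top-level entry into the active mm so
		 * later walks of this page table see the vmalloc mapping.
		 */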
		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
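		/* The kernel PTE is present and the page tables are now in
		 * sync; resuming the faulting instruction should let the
		 * TLB refill succeed.
		 */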
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
				current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}