// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/pgalloc.h>

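/* Per-CPU ASID allocator state, seeded with the first ASID that is
 * handed out to user address spaces.
 */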
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;
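	/* A user-mode access to a kernel address is not fixed up here:
	 * it falls through, fails the VMA lookup below and ends in
	 * SIGSEGV.
	 */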

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

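	/* Classify the access from the hardware exception cause: a store
	 * cache-attribute exception is a write; ITLB miss/privilege and
	 * fetch cache-attribute exceptions are instruction fetches.
	 */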
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
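
	/* If the first attempt returns VM_FAULT_RETRY the mmap lock has
	 * already been dropped; we come back here to re-take it and
	 * repeat the VMA lookup.
	 */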
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

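	/* find_vma() returns the first VMA that ends above the address;
	 * an address below vma->vm_start is acceptable only for a stack
	 * VMA that can still grow down to cover it.
	 */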
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

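	/* Check the access type against the VMA protections. Reads are
	 * also allowed from write-only mappings, so plain loads fail
	 * only if the VMA permits neither reading nor writing.
	 */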
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs)) {
		/* In kernel mode, fix up the fault instead of retrying
		 * the access forever.
		 */
		if (!user_mode(regs))
			goto bad_page_fault;
		return;
	}

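	/* Hard failure: pick the matching exit path. Any other error
	 * bit here would be a bug in the core mm code.
	 */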
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
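
	/* The fault was handled; account it to the perf software events. */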
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (fault & VM_FAULT_MAJOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

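		/* Without an active mm there is no page table to sync. */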
		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

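		/* Copy the kernel's top-level entry into this mm's page
		 * table so later walks of this vmalloc range resolve
		 * without faulting again.
		 */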
		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

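		/* Finally check the kernel's own PTE: if it is not
		 * present either, the address is genuinely bad rather
		 * than merely unsynchronized.
		 */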
		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
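		/* Resume at the fixup handler rather than re-executing
		 * the faulting instruction.
		 */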
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}