xref: /openbmc/linux/arch/sh/mm/fault.c (revision 63dc02bd)
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

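/*
 * Give kprobes a chance to claim the fault first. Only kernel-mode
 * faults are candidates (e.g. a fault taken while single-stepping a
 * probed instruction); preemption is disabled around kprobe_running()
 * since it inspects per-CPU state. Returns 1 if kprobes consumed the
 * fault and the normal fault path should be skipped.
 */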
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

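/*
 * Fill out a siginfo with the fault details and deliver the signal
 * (SIGSEGV or SIGBUS) to the given task.
 */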
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm)
		pgd = mm->pgd;
	else
		pgd = get_TTB();

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

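/*
 * Copy the kernel mappings for 'address' from the reference page table
 * (init_mm.pgd) into 'pgd'. Returns the synchronised pmd on success, or
 * NULL if the reference tables have no mapping either (a genuine fault)
 * or if the tables were already in sync, in which case the fault has
 * some other cause.
 */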
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

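/*
 * With store queues enabled, pages for the SH-4 store queue space are
 * demand-faulted through this path as well, so the acceptable fault
 * window is widened past the normal vmalloc end to cover them.
 */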
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

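/*
 * Print the oops banner for an unhandled kernel-mode fault, including
 * the faulting PC and a dump of the kernel page table walk for the
 * address.
 */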
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

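/*
 * A kernel-mode fault that cannot be handled: try the exception fixup
 * tables and trapped I/O emulation first, and oops if neither claims
 * the fault.
 */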
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

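/*
 * Report a bad area while mmap_sem is not held (hence "nosemaphore"):
 * user-mode accesses get a SIGSEGV, kernel-mode accesses fall through
 * to no_context().
 */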
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void out_of_memory(void)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

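/*
 * The fault could not be serviced (VM_FAULT_SIGBUS, e.g. an access
 * beyond the end of a mapped file): deliver SIGBUS for user mode,
 * otherwise take the kernel exception path.
 */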
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/*
	 * Kernel mode? Handle exceptions or die. no_context() only
	 * returns if an exception fixup handled the fault, in which
	 * case no signal must be sent.
	 */
	if (!user_mode(regs)) {
		no_context(regs, error_code, address);
		return;
	}

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

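/*
 * Triage the error/retry bits coming back from handle_mm_fault().
 * Returns 1 if the fault has been fully dealt with here, 0 if the
 * normal accounting/retry path should continue.
 */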
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The page fault was interrupted by SIGKILL; there is no
	 * reason to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}

		out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

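/*
 * Check the fault against the permissions of the vma: a write fault
 * needs VM_WRITE, an instruction fetch (ITLB miss) needs VM_EXEC, and
 * anything else needs the vma to permit some form of access at all.
 */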
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

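/*
 * Everything at or above TASK_SIZE belongs to the kernel portion of
 * the address space.
 */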
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
	int write = error_code & FAULT_CODE_WRITE;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

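	/*
	 * The first pass takes the fault with FAULT_FLAG_ALLOW_RETRY
	 * set; handle_mm_fault() may then drop mmap_sem (in
	 * __lock_page_or_retry()) and come back with VM_FAULT_RETRY,
	 * in which case we retry exactly once with the retry flag
	 * cleared.
	 */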
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}