xref: /openbmc/linux/arch/sh/mm/fault.c (revision eaabf98b0932a540f3c772e4243e140ec239302c)
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

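/*
 * Send a fault signal to the current task with the faulting address
 * filled in as si_addr.
 */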
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * Dump out the page table hierarchy associated with 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	pr_alert("pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
		 (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
				(u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
				(u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
			(u64)pte_val(*pte));
	} while (0);

	pr_cont("\n");
}

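/*
 * Copy the kernel mapping for 'address' from the reference page table
 * (init_mm.pgd) into 'pgd'.  Returns the synchronised pmd, or NULL if
 * the reference entry is missing or nothing needed syncing.
 */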
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

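/*
 * With store queues enabled the window of kernel addresses that may
 * legitimately fault extends beyond VMALLOC_END.
 */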
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
		 address < PAGE_SIZE ? "NULL pointer dereference"
				     : "paging request",
		 address);
	pr_alert("PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

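/*
 * Fault taken in kernel context with no way to recover via the usual
 * user-mode paths: try exception fixups and trapped I/O emulation
 * before oopsing.
 */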
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

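/*
 * Report a bad access with mmap_sem not held: SIGSEGV for user mode,
 * otherwise defer to the kernel fault path in no_context().
 */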
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

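/*
 * The fault came back VM_FAULT_SIGBUS: drop mmap_sem and deliver
 * SIGBUS, or treat it as a kernel-mode fault.
 */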
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

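/*
 * Sort out the error cases of handle_mm_fault(): pending fatal signals,
 * OOM, SIGBUS and SIGSEGV.  Returns 1 if the fault has been dealt with
 * here, 0 if the caller should continue.
 */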
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_sem first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		up_read(&current->mm->mmap_sem);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

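/*
 * Check the fault described by error_code against the vma protections.
 * Non-zero means the access is not permitted.
 */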
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}

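/* Everything at or above TASK_SIZE lies outside the user mapping. */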
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the kind of fault
 * taken and then passes it off to the appropriate routine.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

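/*
 * The first attempt carries FAULT_FLAG_ALLOW_RETRY, so handle_mm_fault()
 * may drop mmap_sem while waiting on a page and ask us to come back
 * here with FAULT_FLAG_TRIED set.
 */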
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

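	/* Account the fault as major or minor, then honour a retry request. */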
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}