// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

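/*
 * Print an "Unable to handle ..." report for a fault the kernel
 * cannot recover from, then die via die_if_kernel().
 */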
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

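/*
 * Called from assembly when a fault hits one of the low-level
 * copy/clear routines.  search_extables_range() classifies the
 * faulting range; the return value (1, 2 or 3) tells the caller
 * which fixup strategy applies.  The bit-21 tests below appear to
 * pick out the store bit of the instruction's op3 field, i.e. they
 * distinguish loads from stores (an interpretation of the SPARC
 * instruction encoding, not spelled out in the original comments).
 */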
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

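/*
 * Print a rate-limited message for an unhandled user-space fault
 * (the show_unhandled_signals gate is applied at the call site).
 */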
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

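/* Optionally log the fault, then deliver the signal with siginfo. */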
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0, current);
}

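/*
 * Work out the address to report in si_addr: the PC itself for a
 * text fault, otherwise the effective address of the faulting
 * load/store, decoded from the instruction at the PC.
 */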
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

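/* Convenience wrapper: compute si_addr, then deliver the signal. */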
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

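/*
 * Main page-fault entry point, called from the trap handlers.
 * @text_fault: the fault was on an instruction fetch, so the
 *              faulting address is taken from the PC.
 * @write:      the access was a store.
 * @address:    the faulting virtual address (ignored for text faults).
 */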
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
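			/*
			 * The memset/csum_partial_copy fixup handlers
			 * apparently expect the fault address in %i4 and
			 * the faulting PC in %i5 (inferred from the
			 * register usage here, not documented nearby).
			 */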
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

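/*
 * Simplified fault path used by the register-window fault handlers
 * below.  Unlike do_sparc_fault() there is no exception-table
 * fallback here: an access that cannot be resolved just signals
 * the task.
 */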
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

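/*
 * SPARC stack pointers must be doubleword (8-byte) aligned; an
 * unaligned %sp after a window fault gets the task a SIGILL.
 */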
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

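/*
 * Handlers for faults taken while spilling or filling a register
 * window to/from the user stack.  A window save area is 16 words
 * (0x40 bytes), so sp + 0x38 would be the start of its last
 * doubleword store; if sp and sp + 0x38 land on different pages,
 * both pages must be faulted in before the spill/fill can succeed.
 * (This reading of the 0x38 constant is an interpretation; it is
 * not explained in the original source.)
 */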
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
462