xref: /openbmc/linux/arch/sparc/mm/fault_32.c (revision b58c6630)
// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

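/* When non-zero, otherwise-unhandled user faults are logged (rate-limited)
 * before the signal is delivered. */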
int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

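/*
 * lookup_fault() is entered when a fault hits one of the hand-written
 * copy/clear routines (assumption: it is reached via the low-level trap
 * assembly rather than from C).  search_extables_range() classifies the
 * faulting range, and the return value tells the caller which fixup
 * applies: 3 - loads and stores are both covered by a fixup; 1 - only
 * stores are fixed up (copy_to_user-style helpers); 2 - only loads are
 * fixed up (copy_from_user-style helpers).  Anything else is treated as
 * an unhandled kernel fault.
 */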
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
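		/* Bit 21 of a SPARC format-3 memory instruction is set for
		 * stores (an assumption about the encoding, consistent with
		 * the comments above): only a store may take the fixup here. */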
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
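		/* Accept loads (bit 21 clear) and op3 == 0x0f (presumably
		 * SWAP, which reads memory as well as writing it). */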
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

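/*
 * Rate-limited dump of the faulting user context (PC, return PC, stack
 * pointer and the VMA the PC falls in) before an unhandled signal is
 * delivered.
 */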
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0);
}

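/*
 * Work out the address to report in si_addr: the PC for an instruction
 * fault, otherwise the effective address of the faulting load/store,
 * decoded from the instruction at the PC.
 */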
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

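/*
 * Main page fault entry point.  @text_fault is non-zero for instruction
 * access faults, @write for faulting stores, and @address is the fault
 * address passed down from the trap entry code (ignored for text faults,
 * where the PC is used instead).
 */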
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

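	/* For an instruction-access fault the supplied address is presumably
	 * not meaningful; the faulting address is simply the PC. */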
	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
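	/*
	 * Kernel-mode fault: see whether the faulting PC has an exception
	 * table fixup (e.g. a user access in copy_{to,from}_user()).  Fixup
	 * values of 10 and below are the reserved codes consumed by
	 * lookup_fault() above, so only larger values are real fixup
	 * addresses.
	 */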
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
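			/*
			 * Faults inside the hand-coded memset and
			 * csum_partial_copy routines additionally get the
			 * fault address and the faulting PC handed to their
			 * fixup code in %i4/%i5.
			 */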
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

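		/*
		 * p4d and pud are folded on sparc32, so only the pgd and pmd
		 * levels hold real entries; copying the pmd entry from the
		 * reference table is enough to make the kernel mapping
		 * visible in this context.
		 */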
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
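/*
 * Used by the register-window fault helpers below to fault the user's
 * window save area back in.  Failures are reported with SIGSEGV/SIGBUS
 * against tsk->thread.kregs (presumably the saved user register frame).
 */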
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

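/* The register window save area must be doubleword aligned; a misaligned
 * %sp draws SIGILL. */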
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

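/*
 * Window overflow/underflow traps spill or fill a 64-byte register window
 * save area at %sp.  If that area straddles a page boundary both pages
 * must be faulted in, hence the second fault at %sp + 0x38 (presumably the
 * last doubleword of the %l0-%l7/%i0-%i7 save area).
 */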
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
469