// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

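/*
 * Non-zero: print a rate-limited message when a user task takes an
 * unhandled fault signal (see show_signal_msg() below).
 */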
int show_unhandled_signals = 1;

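/*
 * Report a fault the kernel could not handle: log the faulting address
 * and the current task's context/pgd, then die via die_if_kernel().
 */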
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr);
}

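/*
 * Work out the address to report in the siginfo.  For a text fault it
 * is simply the faulting PC; for a data fault, fetch the instruction
 * at the PC and recompute the effective address it was accessing.
 */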
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

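/*
 * Main page fault entry point, called from the trap handler with the
 * faulting address, whether the fault was on an instruction fetch
 * (text_fault) and whether the access was a write.
 */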
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	if (!from_user) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (entry) {
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
				regs->pc, entry->fixup);
#endif
			regs->pc = entry->fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
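/* Used by the register window fault handlers below to fault user stack pages into place. */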
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

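/*
 * The 32-bit SPARC ABI requires the stack pointer to be doubleword
 * (8-byte) aligned; an unaligned %sp gets the task a SIGILL.
 */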
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

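/*
 * A register window must be spilled to the user stack but the stack
 * page(s) are not present: fault in (for writing) the pages spanned by
 * the window save area so the spill can be retried.
 */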
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

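/*
 * A register window must be reloaded from the user stack but the stack
 * page(s) are not present: fault them in for reading.
 */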
void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

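/*
 * Returning to user space needs a window restored from the user stack;
 * fault the saved window area at the user's frame pointer in for reading.
 */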
void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}