xref: /openbmc/linux/arch/sparc/mm/fault_64.c (revision 565d76cb)
1 /*
2  * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
3  *
4  * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
5  * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6  */
7 
8 #include <asm/head.h>
9 
10 #include <linux/string.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
13 #include <linux/ptrace.h>
14 #include <linux/mman.h>
15 #include <linux/signal.h>
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/perf_event.h>
20 #include <linux/interrupt.h>
21 #include <linux/kprobes.h>
22 #include <linux/kdebug.h>
23 #include <linux/percpu.h>
24 
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/openprom.h>
28 #include <asm/oplib.h>
29 #include <asm/uaccess.h>
30 #include <asm/asi.h>
31 #include <asm/lsu.h>
32 #include <asm/sections.h>
33 #include <asm/mmu_context.h>
34 
35 int show_unhandled_signals = 1;
36 
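/* Give kprobes a chance to claim the fault first.  Returns non-zero
 * when a registered kprobe fault handler has dealt with the fault, in
 * which case the normal page fault path below is skipped entirely.
 */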
37 static inline __kprobes int notify_page_fault(struct pt_regs *regs)
38 {
39 	int ret = 0;
40 
41 	/* kprobe_running() needs smp_processor_id() */
42 	if (kprobes_built_in() && !user_mode(regs)) {
43 		preempt_disable();
44 		if (kprobe_running() && kprobe_fault_handler(regs, 0))
45 			ret = 1;
46 		preempt_enable();
47 	}
48 	return ret;
49 }
50 
51 static void __kprobes unhandled_fault(unsigned long address,
52 				      struct task_struct *tsk,
53 				      struct pt_regs *regs)
54 {
55 	if ((unsigned long) address < PAGE_SIZE) {
56 		printk(KERN_ALERT "Unable to handle kernel NULL "
57 		       "pointer dereference\n");
58 	} else {
59 		printk(KERN_ALERT "Unable to handle kernel paging request "
60 		       "at virtual address %016lx\n", (unsigned long)address);
61 	}
62 	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
63 	       (tsk->mm ?
64 		CTX_HWBITS(tsk->mm->context) :
65 		CTX_HWBITS(tsk->active_mm->context)));
66 	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
67 	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
68 		          (unsigned long) tsk->active_mm->pgd));
69 	die_if_kernel("Oops", regs);
70 }
71 
72 static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
73 {
74 	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
75 	       regs->tpc);
76 	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
77 	printk(KERN_CRIT "OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
78 	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
79 	dump_stack();
80 	unhandled_fault(regs->tpc, current, regs);
81 }
82 
83 /*
84  * We now make sure that mmap_sem is held in all paths that call
85  * this. Additionally, to prevent kswapd from ripping ptes out from
86  * under us, we disable interrupts while we look at the pte; kswapd
87  * then has to wait for its SMP IPI response from us before it can
88  * proceed. vmtruncate likewise. This saves us taking the pte lock.
89  */
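/* Read the user instruction at 'tpc' by walking the page tables by
 * hand and loading through the physical address, so that the access
 * cannot itself fault and the user's D-TLB and D-cache are left
 * untouched.  Returns 0 if the instruction could not be read.
 */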
90 static unsigned int get_user_insn(unsigned long tpc)
91 {
92 	pgd_t *pgdp = pgd_offset(current->mm, tpc);
93 	pud_t *pudp;
94 	pmd_t *pmdp;
95 	pte_t *ptep, pte;
96 	unsigned long pa;
97 	u32 insn = 0;
98 	unsigned long pstate;
99 
100 	if (pgd_none(*pgdp))
101 		goto outret;
102 	pudp = pud_offset(pgdp, tpc);
103 	if (pud_none(*pudp))
104 		goto outret;
105 	pmdp = pmd_offset(pudp, tpc);
106 	if (pmd_none(*pmdp))
107 		goto outret;
108 
109 	/* This disables preemption for us as well. */
110 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
111 	__asm__ __volatile__("wrpr %0, %1, %%pstate"
112 				: : "r" (pstate), "i" (PSTATE_IE));
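	/* wrpr writes (rs1 XOR imm) to the register, so this clears the
	 * PSTATE_IE bit and masks interrupts; they are re-enabled when
	 * the saved %pstate value is written back on the way out below.
	 */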
113 	ptep = pte_offset_map(pmdp, tpc);
114 	pte = *ptep;
115 	if (!pte_present(pte))
116 		goto out;
117 
118 	pa  = (pte_pfn(pte) << PAGE_SHIFT);
119 	pa += (tpc & ~PAGE_MASK);
120 
121 	/* Use phys bypass so we don't pollute dtlb/dcache. */
122 	__asm__ __volatile__("lduwa [%1] %2, %0"
123 			     : "=r" (insn)
124 			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
125 
126 out:
127 	pte_unmap(ptep);
128 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
129 outret:
130 	return insn;
131 }
132 
133 static inline void
134 show_signal_msg(struct pt_regs *regs, int sig, int code,
135 		unsigned long address, struct task_struct *tsk)
136 {
137 	if (!unhandled_signal(tsk, sig))
138 		return;
139 
140 	if (!printk_ratelimit())
141 		return;
142 
143 	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
144 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
145 	       tsk->comm, task_pid_nr(tsk), address,
146 	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
147 	       (void *)regs->u_regs[UREG_FP], code);
148 
149 	print_vma_addr(KERN_CONT " in ", regs->tpc);
150 
151 	printk(KERN_CONT "\n");
152 }
153 
154 extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
155 
156 static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
157 			     unsigned int insn, int fault_code)
158 {
159 	unsigned long addr;
160 	siginfo_t info;
161 
162 	info.si_code = code;
163 	info.si_signo = sig;
164 	info.si_errno = 0;
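	/* For an instruction-access (ITLB) fault the faulting address is
	 * the PC itself; for data faults we decode the effective address
	 * of the memory access from the faulting instruction.
	 */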
165 	if (fault_code & FAULT_CODE_ITLB)
166 		addr = regs->tpc;
167 	else
168 		addr = compute_effective_address(regs, insn, 0);
169 	info.si_addr = (void __user *) addr;
170 	info.si_trapno = 0;
171 
172 	if (unlikely(show_unhandled_signals))
173 		show_signal_msg(regs, sig, code, addr, current);
174 
175 	force_sig_info(sig, &info, current);
176 }
177 
178 extern int handle_ldf_stq(u32, struct pt_regs *);
179 extern int handle_ld_nf(u32, struct pt_regs *);
180 
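/* Fetch the instruction at the fault PC if the trap handler did not
 * hand us one already: directly for faults taken in privileged mode
 * (kernel text is always mapped), via get_user_insn() otherwise.
 * Returns 0 if the PC is zero, misaligned, or unreadable.
 */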
181 static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
182 {
183 	if (!insn) {
184 		if (!regs->tpc || (regs->tpc & 0x3))
185 			return 0;
186 		if (regs->tstate & TSTATE_PRIV) {
187 			insn = *(unsigned int *) regs->tpc;
188 		} else {
189 			insn = get_user_insn(regs->tpc);
190 		}
191 	}
192 	return insn;
193 }
194 
195 static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
196 				      int fault_code, unsigned int insn,
197 				      unsigned long address)
198 {
199 	unsigned char asi = ASI_P;
200 
201 	if ((!insn) && (regs->tstate & TSTATE_PRIV))
202 		goto cannot_handle;
203 
204 	/* If the user insn could not be read (thus insn is zero), that
205 	 * is fine.  We will just gun down the process with a signal
206 	 * in that case.
207 	 */
208 
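	/* The test below picks out alternate-space loads/stores: op (insn
	 * bits 31:30) == 3 selects memory ops, and insn bit 23 is the op3
	 * bit marking the "alternate space" forms.  The ASI comes from the
	 * saved %tstate ASI field (bits 31:24) when the i bit (insn bit 13)
	 * is set, or from the immediate asi field in insn bits 12:5
	 * otherwise.  (asi & 0xf2) == 0x82 then matches the V9 non-faulting
	 * ASIs (ASI_PNF/ASI_SNF and their little-endian forms), which have
	 * to be emulated here rather than signalled.
	 */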
209 	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
210 	    (insn & 0xc0800000) == 0xc0800000) {
211 		if (insn & 0x2000)
212 			asi = (regs->tstate >> 24);
213 		else
214 			asi = (insn >> 5);
215 		if ((asi & 0xf2) == 0x82) {
216 			if (insn & 0x1000000) {
217 				handle_ldf_stq(insn, regs);
218 			} else {
219 				/* This was a non-faulting load. Just clear the
220 				 * destination register(s) and continue with the next
221 				 * instruction. -jj
222 				 */
223 				handle_ld_nf(insn, regs);
224 			}
225 			return;
226 		}
227 	}
228 
229 	/* Is this in ex_table? */
230 	if (regs->tstate & TSTATE_PRIV) {
231 		const struct exception_table_entry *entry;
232 
233 		entry = search_exception_tables(regs->tpc);
234 		if (entry) {
235 			regs->tpc = entry->fixup;
236 			regs->tnpc = regs->tpc + 4;
237 			return;
238 		}
239 	} else {
240 		/* The si_code was set to make clear whether
241 		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
242 		 */
243 		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
244 		return;
245 	}
246 
247 cannot_handle:
248 	unhandled_fault(address, current, regs);
249 }
250 
251 static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
252 {
253 	static int times;
254 
255 	if (times++ < 10)
256 		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
257 		       "64-bit TPC [%lx]\n",
258 		       current->comm, current->pid,
259 		       regs->tpc);
260 	show_regs(regs);
261 }
262 
263 static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
264 							 unsigned long addr)
265 {
266 	static int times;
267 
268 	if (times++ < 10)
269 		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
270 		       "reports 64-bit fault address [%lx]\n",
271 		       current->comm, current->pid, addr);
272 	show_regs(regs);
273 }
274 
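/* Main C entry point for sparc64 page faults.  The low-level trap
 * handlers (for both I-TLB and D-TLB misses and protection faults)
 * stash the fault address and a FAULT_CODE_* mask in thread state
 * before calling in here.
 */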
275 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
276 {
277 	struct mm_struct *mm = current->mm;
278 	struct vm_area_struct *vma;
279 	unsigned int insn = 0;
280 	int si_code, fault_code, fault;
281 	unsigned long address, mm_rss;
282 
283 	fault_code = get_thread_fault_code();
284 
285 	if (notify_page_fault(regs))
286 		return;
287 
288 	si_code = SEGV_MAPERR;
289 	address = current_thread_info()->fault_address;
290 
291 	if ((fault_code & FAULT_CODE_ITLB) &&
292 	    (fault_code & FAULT_CODE_DTLB))
293 		BUG();
294 
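	/* A 32-bit (TIF_32BIT) task should never present a user TPC or a
	 * fault address above 4GB.  If it does, the state is bogus; log it
	 * and punt to the kernel-fault path, which will signal the task or
	 * apply an exception-table fixup as appropriate.
	 */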
295 	if (test_thread_flag(TIF_32BIT)) {
296 		if (!(regs->tstate & TSTATE_PRIV)) {
297 			if (unlikely((regs->tpc >> 32) != 0)) {
298 				bogus_32bit_fault_tpc(regs);
299 				goto intr_or_no_mm;
300 			}
301 		}
302 		if (unlikely((address >> 32) != 0)) {
303 			bogus_32bit_fault_address(regs, address);
304 			goto intr_or_no_mm;
305 		}
306 	}
307 
308 	if (regs->tstate & TSTATE_PRIV) {
309 		unsigned long tpc = regs->tpc;
310 
311 		/* Sanity check the PC. */
312 		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
313 		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
314 			/* Valid, no problems... */
315 		} else {
316 			bad_kernel_pc(regs, address);
317 			return;
318 		}
319 	}
320 
321 	/*
322 	 * If we're in an interrupt or have no user
323 	 * context, we must not take the fault..
324 	 */
325 	if (in_atomic() || !mm)
326 		goto intr_or_no_mm;
327 
328 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
329 
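	/* Try the lock first: if the kernel faulted while this task already
	 * holds mmap_sem, a plain down_read() here could deadlock.  Only
	 * fall back to sleeping on the semaphore when the faulting kernel
	 * PC has an exception-table fixup (i.e. the access was allowed to
	 * fault) or the fault came from user mode.
	 */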
330 	if (!down_read_trylock(&mm->mmap_sem)) {
331 		if ((regs->tstate & TSTATE_PRIV) &&
332 		    !search_exception_tables(regs->tpc)) {
333 			insn = get_fault_insn(regs, insn);
334 			goto handle_kernel_fault;
335 		}
336 		down_read(&mm->mmap_sem);
337 	}
338 
339 	vma = find_vma(mm, address);
340 	if (!vma)
341 		goto bad_area;
342 
343 	/* Pure DTLB misses do not tell us whether the faulting
344 	 * load/store/atomic was a write or not; they only say that there
345 	 * was no match.  So in such a case we (carefully) read the
346 	 * instruction to try and figure this out.  It's an optimization,
347 	 * so it's OK if we can't do this.
348 	 *
349 	 * Special hack: window spill/fill knows the exact fault type.
350 	 */
351 	if (((fault_code &
352 	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
353 	    (vma->vm_flags & VM_WRITE) != 0) {
354 		insn = get_fault_insn(regs, 0);
355 		if (!insn)
356 			goto continue_fault;
357 		/* All loads, stores and atomics have bits 30 and 31 both set
358 		 * in the instruction.  Bit 21 is set in all stores, but we
359 		 * have to avoid prefetches which also have bit 21 set.
360 		 */
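		/* Concretely (encoding sketch, not used by the code):
		 *   op  = (insn >> 30) & 0x3;   op == 3  -> memory op
		 *   op3 = (insn >> 19) & 0x3f;  op3 bit 2 -> store/atomic
		 * and op3 values 0x2d/0x3d are PREFETCH/PREFETCHA, which
		 * the second mask below rejects.
		 */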
361 		if ((insn & 0xc0200000) == 0xc0200000 &&
362 		    (insn & 0x01780000) != 0x01680000) {
363 			/* Don't bother updating thread struct value,
364 			 * because update_mmu_cache only cares which tlb
365 			 * the access came from.
366 			 */
367 			fault_code |= FAULT_CODE_WRITE;
368 		}
369 	}
370 continue_fault:
371 
372 	if (vma->vm_start <= address)
373 		goto good_area;
374 	if (!(vma->vm_flags & VM_GROWSDOWN))
375 		goto bad_area;
376 	if (!(fault_code & FAULT_CODE_WRITE)) {
377 		/* Non-faulting loads shouldn't expand stack. */
378 		insn = get_fault_insn(regs, insn);
379 		if ((insn & 0xc0800000) == 0xc0800000) {
380 			unsigned char asi;
381 
382 			if (insn & 0x2000)
383 				asi = (regs->tstate >> 24);
384 			else
385 				asi = (insn >> 5);
386 			if ((asi & 0xf2) == 0x82)
387 				goto bad_area;
388 		}
389 	}
390 	if (expand_stack(vma, address))
391 		goto bad_area;
392 	/*
393 	 * Ok, we have a good vm_area for this memory access, so
394 	 * we can handle it..
395 	 */
396 good_area:
397 	si_code = SEGV_ACCERR;
398 
399 	/* If we took an ITLB miss on a non-executable page, catch
400 	 * that here.
401 	 */
402 	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
403 		BUG_ON(address != regs->tpc);
404 		BUG_ON(regs->tstate & TSTATE_PRIV);
405 		goto bad_area;
406 	}
407 
408 	if (fault_code & FAULT_CODE_WRITE) {
409 		if (!(vma->vm_flags & VM_WRITE))
410 			goto bad_area;
411 
412 		/* Spitfire has an icache which does not snoop
413 		 * processor stores.  Later processors do...
414 		 */
415 		if (tlb_type == spitfire &&
416 		    (vma->vm_flags & VM_EXEC) != 0 &&
417 		    vma->vm_file != NULL)
418 			set_thread_fault_code(fault_code |
419 					      FAULT_CODE_BLKCOMMIT);
420 	} else {
421 		/* Allow reads even for write-only mappings */
422 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
423 			goto bad_area;
424 	}
425 
426 	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
427 	if (unlikely(fault & VM_FAULT_ERROR)) {
428 		if (fault & VM_FAULT_OOM)
429 			goto out_of_memory;
430 		else if (fault & VM_FAULT_SIGBUS)
431 			goto do_sigbus;
432 		BUG();
433 	}
434 	if (fault & VM_FAULT_MAJOR) {
435 		current->maj_flt++;
436 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
437 			      regs, address);
438 	} else {
439 		current->min_flt++;
440 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
441 			      regs, address);
442 	}
443 	up_read(&mm->mmap_sem);
444 
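	/* The fault has been handled; now make sure the TSB (Translation
	 * Storage Buffer, the software TLB-miss cache) is still large
	 * enough.  Huge pages live in their own TSB, so their contribution
	 * is pulled out of the base-page RSS before the comparison, and
	 * each TSB is grown independently.
	 */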
445 	mm_rss = get_mm_rss(mm);
446 #ifdef CONFIG_HUGETLB_PAGE
447 	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
448 #endif
449 	if (unlikely(mm_rss >
450 		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
451 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
452 #ifdef CONFIG_HUGETLB_PAGE
453 	mm_rss = mm->context.huge_pte_count;
454 	if (unlikely(mm_rss >
455 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
456 		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
457 #endif
458 	return;
459 
460 	/*
461 	 * Something tried to access memory that isn't in our memory map..
462 	 * Fix it, but check if it's kernel or user first..
463 	 */
464 bad_area:
465 	insn = get_fault_insn(regs, insn);
466 	up_read(&mm->mmap_sem);
467 
468 handle_kernel_fault:
469 	do_kernel_fault(regs, si_code, fault_code, insn, address);
470 	return;
471 
472 /*
473  * We ran out of memory, or some other thing happened to us that made
474  * us unable to handle the page fault gracefully.
475  */
476 out_of_memory:
477 	insn = get_fault_insn(regs, insn);
478 	up_read(&mm->mmap_sem);
479 	if (!(regs->tstate & TSTATE_PRIV)) {
480 		pagefault_out_of_memory();
481 		return;
482 	}
483 	goto handle_kernel_fault;
484 
485 intr_or_no_mm:
486 	insn = get_fault_insn(regs, 0);
487 	goto handle_kernel_fault;
488 
489 do_sigbus:
490 	insn = get_fault_insn(regs, insn);
491 	up_read(&mm->mmap_sem);
492 
493 	/*
494 	 * Send a sigbus, regardless of whether we were in kernel
495 	 * or user mode.
496 	 */
497 	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
498 
499 	/* Kernel mode? Handle exceptions or die */
500 	if (regs->tstate & TSTATE_PRIV)
501 		goto handle_kernel_fault;
502 }
503