/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
int invalid_segment;

/* Various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}
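/*
 * Illustrative sketch (not part of the kernel build): probe_memory() above
 * walks the zero-terminated sp_banks[] table filled in at boot.  The
 * user-space analogue below shows the same summation; the two-bank layout
 * and its addresses/sizes are made up for the demo.
 */
#if 0
#include <stdio.h>

struct phys_bank {
	unsigned long base_addr;
	unsigned long num_bytes;
};

/* Hypothetical layout; a bank with num_bytes == 0 terminates the table. */
static struct phys_bank banks[] = {
	{ 0x00000000UL, 32UL * 1024 * 1024 },
	{ 0x02000000UL, 32UL * 1024 * 1024 },
	{ 0UL, 0UL },
};

int main(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; banks[i].num_bytes; i++)
		total += banks[i].num_bytes;
	printf("total memory: %lu MB\n", total >> 20);
	return 0;
}
#endif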
extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
			  (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out
		 * for _to_ macros
		 */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out
		 * for _from_ macros
		 */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}
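/*
 * Illustrative sketch (not part of the kernel build): the bit tests in
 * lookup_fault() above rely on the SPARC v8 format-3 encoding, where op3
 * occupies bits 24:19 of a load/store instruction.  Bit 21 (op3 bit 2) is
 * set for the store variants, and op3 == 15 (0x0f) is SWAP, which both
 * reads and writes memory, hence the extra check in case 2.
 */
#if 0
static int sparc_insn_is_store(unsigned int insn)
{
	return (insn >> 21) & 1;		/* op3 bit 2 => store variant */
}

static int sparc_insn_is_swap(unsigned int insn)
{
	return ((insn >> 19) & 0x3f) == 0x0f;	/* op3 == SWAP */
}
#endif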
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
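/*
 * Illustrative sketch (not part of the kernel build): __do_fault_siginfo()
 * above fills in the siginfo that a user process can inspect via a
 * SA_SIGINFO handler.  A minimal user-space program observing si_addr and
 * si_code might look like this:
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* fprintf() is not async-signal-safe; acceptable for a demo only. */
	fprintf(stderr, "SIGSEGV at %p, si_code %d\n",
		info->si_addr, info->si_code);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)8 = 42;	/* fault near NULL => SEGV_MAPERR */
	return 0;
}
#endif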
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE!  We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) {	/* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
	/* Don't fall through into the vmalloc_fault path for a user fault. */
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
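/*
 * Illustrative sketch (not part of the kernel build): the good_area checks
 * in do_sparc_fault() above reduce to a small predicate over vm_flags.
 * Note the asymmetry: writes demand VM_WRITE, while reads are also allowed
 * on exec-only mappings.
 */
#if 0
static int vma_access_permitted_sketch(unsigned long vm_flags, int write)
{
	if (write)
		return (vm_flags & VM_WRITE) != 0;
	return (vm_flags & (VM_READ | VM_EXEC)) != 0;
}
#endif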
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3: we already Oopsed above; this must not return */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
		if (write) {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
			    == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_MODIFIED |
					      _SUN4C_PAGE_VALID |
					      _SUN4C_PAGE_DIRTY);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		} else {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
			    == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_VALID);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		}
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 * we know that update_mmu_cache() will not sleep for
		 * any reason (at least not in the current implementation)
		 * and therefore there is no danger of another thread getting
		 * on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
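/*
 * Illustrative sketch (not part of the kernel build): the opcode test in
 * do_sun4c_fault() above, (insn & 0xc1680000) == 0xc0680000, requires
 * op == 3 (bits 31:30) and an op3 (bits 24:19) matching the pattern 0?11?1,
 * i.e. one of 0x0d (LDSTUB), 0x0f (SWAP), 0x1d (LDSTUBA) or 0x1f (SWAPA).
 * These atomic read-modify-write instructions must be retried as writes
 * even though the hardware reported a read fault.
 */
#if 0
static int sun4c_insn_is_atomic_rmw(unsigned int insn)
{
	return (insn & 0xc1680000) == 0xc0680000;
}
#endif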
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
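/*
 * Illustrative sketch (not part of the kernel build): a register window
 * save area is sixteen 32-bit words (%l0-%l7 then %i0-%i7) spilled with
 * STD instructions at %sp + 0x00 through %sp + 0x38.  Since the 64-byte
 * area can straddle at most two pages, faulting in sp and sp + 0x38 covers
 * every page a spill or fill will touch, which is what the handlers above
 * do.  The 4 KB page size below is an assumption for the demo.
 */
#if 0
#include <stdio.h>

#define LAST_STD_OFFSET	0x38UL		/* final STD of a window spill */
#define DEMO_PAGE_MASK	(~0xfffUL)	/* assume 4 KB pages */

static int window_spans_two_pages(unsigned long sp)
{
	return ((sp + LAST_STD_OFFSET) & DEMO_PAGE_MASK) !=
	       (sp & DEMO_PAGE_MASK);
}

int main(void)
{
	/* A window starting 8 bytes before a page boundary crosses it. */
	printf("%d\n", window_spans_two_pages(0x1000 - 8));	/* prints 1 */
	return 0;
}
#endif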