/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
int invalid_segment;

/* Various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
			  (unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}
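/*
 * lookup_fault() is entered from the assembler fixup stubs when a fault
 * hits inside one of the hand-coded copy/clear routines.  The return
 * value is the exception-table range code found for ret_pc:
 *
 *   3 -- both the load and the store side are covered by a fixup;
 *   1 -- only the store side is covered (the _to_ user macros), so a
 *        faulting load must fall through to unhandled_fault();
 *   2 -- only the load side is covered (the _from_ user macros), so a
 *        faulting store must fall through, except for swap-style
 *        instructions, which read as well as write.
 *
 * The load/store distinction comes from decoding the faulting
 * instruction: in the SPARC load/store opcode space, instruction bit 21
 * (bit 2 of the op3 field) is set for stores, and op3 == 0x0f is swap.
 * This is a reading of the checks below, not a citation of the manual.
 */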
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out
		 * for the _to_ macros.
		 */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out
		 * for the _from_ macros.
		 */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}
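/*
 * Main page fault entry point.  The low-level trap code hands us the
 * saved registers plus two flags decoded from the fault status:
 * text_fault (instruction access -- the faulting address is then simply
 * regs->pc) and write (the access was a store).  Kernel-mode faults are
 * told apart from user-mode ones by PSR_PS in the saved %psr, as the
 * from_user test below reads.
 */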
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}
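	/*
	 * Kernel-mode faults land here.  search_extables_range() hands
	 * back the fixup PC recorded for the faulting instruction (plus a
	 * possibly updated %g2); values of 10 and below appear to be
	 * reserved -- presumably for the small range codes seen in
	 * lookup_fault() above -- hence the "fixup > 10" test that
	 * follows.  For faults inside the memset and csum_partial_copy
	 * ranges, the fault address and original PC are additionally
	 * passed to the fixup handler in %i4/%i5, as the code below
	 * arranges.
	 */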
	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;
	/* Do not fall through into the vmalloc handling below. */
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}
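/*
 * The sun4c variant of the fault handler.  One subtlety below: a fault
 * reported as a read may really come from an atomic read-modify-write
 * instruction, which needs write permission.  The decode
 *
 *	(insn & 0xc1680000) == 0xc0680000
 *
 * masks the op field and part of op3; to our reading it matches the
 * SPARC ldstub/swap family (op3 0x0d/0x0f and their alternate-space
 * forms 0x1d/0x1f), so "write" is forced to 1 for those.  This is an
 * interpretation of the constant, not a citation of the architecture
 * manual.
 */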
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long, pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
			/* Atomic read-modify-write (ldstub/swap family)
			 * shows up as a read fault but needs write access.
			 */
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3: we already Oops'd above, this must not return */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
		if (write) {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
					   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_MODIFIED |
					      _SUN4C_PAGE_VALID |
					      _SUN4C_PAGE_DIRTY);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		} else {
			if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
					   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
				unsigned long flags;

				*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
					      _SUN4C_PAGE_VALID);

				local_irq_save(flags);
				if (sun4c_get_segmap(address) != invalid_segment) {
					sun4c_put_pte(address, pte_val(*ptep));
					local_irq_restore(flags);
					return;
				}
				local_irq_restore(flags);
			}
		}
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 * we know that update_mmu_cache() will not sleep for
		 * any reason (at least not in the current implementation)
		 * and therefore there is no danger of another thread getting
		 * on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache(find_vma(current->mm, address), address,
				       ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}
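/*
 * The helper below is used by the register-window fault handlers at the
 * bottom of this file to pre-fault the user stack pages a window spill
 * or fill is about to touch.  Unlike the nosemaphore paths above, it
 * runs in process context, so taking mmap_sem is safe here.
 */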
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	/* The 16-word register window save area starts at %sp; if its
	 * tail (%sp + 0x38) lands on a different page, fault in both.
	 */
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
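/*
 * Worked example of the page-straddle test above, with illustrative
 * numbers only (4K pages, so PAGE_MASK == 0xfffff000):
 *
 *	sp          =            0x0fff0fd0
 *	 sp         & PAGE_MASK: 0x0fff0000
 *	(sp + 0x38) & PAGE_MASK: 0x0fff1000
 *
 * The two differ, so the window save area crosses a page boundary and
 * both pages are pre-faulted before the trap handler replays the spill.
 */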