/*
 * linux/arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "fault.h"


#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        printk(KERN_ALERT "pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

        do {
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pgd, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%08lx", pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

#ifndef CONFIG_HIGHMEM
                /* We must not map this if we have highmem enabled */
                pte = pte_offset_map(pmd, addr);
                printk(", *pte=%08lx", pte_val(*pte));
                printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
                pte_unmap(pte);
#endif
        } while(0);

        printk("\n");
}

/*
 * Oops. The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                  struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        printk(KERN_ALERT
                "Unable to handle kernel %s at virtual address %08lx\n",
                (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, fsr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}

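/*
 * Note on the fixup_exception() path above: uaccess helpers such as
 * __get_user()/__put_user() record the address of each instruction that
 * may fault, together with a fixup address, in the kernel exception table.
 * fixup_exception() (arch/arm/mm/extable.c) looks up regs->ARM_pc in that
 * table and, on a match, rewrites the pc so the access fails cleanly
 * (typically with -EFAULT) instead of oopsing the kernel.
 */
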
/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SEGV) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (user_mode(regs))
                __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
        else
                __do_kernel_fault(mm, addr, fsr, regs);
}

#define VM_FAULT_BADMAP         0x010000
#define VM_FAULT_BADACCESS      0x020000

static int
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault, mask;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (!vma)
                goto out;
        if (vma->vm_start > addr)
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (fsr & (1 << 11))    /* write? */
                mask = VM_WRITE;
        else
                mask = VM_READ|VM_EXEC|VM_WRITE;

        fault = VM_FAULT_BADACCESS;
        if (!(vma->vm_flags & mask))
                goto out;

        /*
         * If for any reason at all we couldn't handle
         * the fault, make sure we exit gracefully rather
         * than endlessly redo the fault.
         */
survive:
        fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        return fault;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        return fault;

out_of_memory:
        if (!is_global_init(tsk))
                goto out;

        /*
         * If we are out of memory for pid1, sleep for a while and retry
         */
        up_read(&mm->mmap_sem);
        yield();
        down_read(&mm->mmap_sem);
        goto survive;

check_stack:
        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;

        if (notify_page_fault(regs, fsr))
                return 0;

        tsk = current;
        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

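        /*
         * Note: kernel threads have no mm of their own (tsk->mm is NULL;
         * they only borrow an active_mm), so any fault they take must be
         * fixed up via the exception tables in __do_kernel_fault() or the
         * kernel oopses.
         */
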
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
                down_read(&mm->mmap_sem);
        }

        fault = __do_page_fault(mm, addr, fsr, tsk);
        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, or some other thing
                 * happened to us that made us unable to handle
                 * the page fault gracefully.
                 */
                printk("VM: killing process %s\n", tsk->comm);
                do_group_exit(SIGKILL);
                return 0;
        }
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                 */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map..
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }

        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;

no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page table contains the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        unsigned int index;
        pgd_t *pgd, *pgd_k;
        pmd_t *pmd, *pmd_k;

        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);

        index = pgd_index(addr);

        /*
         * FIXME: CP15 C1 is write only on ARMv3 architectures.
         */
        pgd = cpu_get_pgd() + index;
        pgd_k = init_mm.pgd + index;

        if (pgd_none(*pgd_k))
                goto bad_area;

        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        pmd_k = pmd_offset(pgd_k, addr);
        pmd = pmd_offset(pgd, addr);

        if (pmd_none(*pmd_k))
                goto bad_area;

        copy_pmd(pmd, pmd_k);
        return 0;

bad_area:
        do_bad_area(addr, fsr, regs);
        return 0;
}

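/*
 * Note on do_translation_fault() above: first level entries for kernel
 * mappings (e.g. the vmalloc() area) are created only in the master page
 * table, init_mm.pgd.  A task whose page tables were allocated before such
 * a mapping appeared picks it up lazily: the first access faults here and
 * the missing entry is copied across from the master table.
 */
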
/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        do_bad_area(addr, fsr, regs);
        return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 1;
}

static struct fsr_info {
        int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int sig;
        int code;
        const char *name;
} fsr_info[] = {
        /*
         * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
         * defines these to be "precise" aborts.
         */
        { do_bad, SIGSEGV, 0, "vector exception" },
        { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
        { do_bad, SIGKILL, 0, "terminal exception" },
        { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
        { do_bad, SIGBUS, 0, "external abort on linefetch" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
        { do_bad, SIGBUS, 0, "external abort on linefetch" },
        { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
        { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
        { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
        { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
        { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
        { do_bad, SIGBUS, 0, "external abort on translation" },
        { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
        { do_bad, SIGBUS, 0, "external abort on translation" },
        { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
        /*
         * The following are "imprecise" aborts, which are signalled by bit
         * 10 of the FSR, and may not be recoverable.  These are only
         * supported if the CPU abort handler supports bit 10.
         */
        { do_bad, SIGBUS, 0, "unknown 16" },
        { do_bad, SIGBUS, 0, "unknown 17" },
        { do_bad, SIGBUS, 0, "unknown 18" },
        { do_bad, SIGBUS, 0, "unknown 19" },
        { do_bad, SIGBUS, 0, "lock abort" },                    /* xscale */
        { do_bad, SIGBUS, 0, "unknown 21" },
        { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" },     /* xscale */
        { do_bad, SIGBUS, 0, "unknown 23" },
        { do_bad, SIGBUS, 0, "dcache parity error" },           /* xscale */
        { do_bad, SIGBUS, 0, "unknown 25" },
        { do_bad, SIGBUS, 0, "unknown 26" },
        { do_bad, SIGBUS, 0, "unknown 27" },
        { do_bad, SIGBUS, 0, "unknown 28" },
        { do_bad, SIGBUS, 0, "unknown 29" },
        { do_bad, SIGBUS, 0, "unknown 30" },
        { do_bad, SIGBUS, 0, "unknown 31" }
};

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, const char *name)
{
        if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
                fsr_info[nr].fn = fn;
                fsr_info[nr].sig = sig;
                fsr_info[nr].name = name;
        }
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
        struct siginfo info;

        if (!inf->fn(addr, fsr, regs))
                return;

        printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code = inf->code;
        info.si_addr = (void __user *)addr;
        arm_notify_die("", regs, &info, fsr, 0);
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
        do_translation_fault(addr, 0, regs);
}
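
/*
 * Usage note (illustrative sketch only, not part of the original file):
 * platform code may override an fsr_info[] entry from its init code via
 * hook_fault_code().  A hypothetical handler for fault status 22
 * ("imprecise external abort") could be installed like this; returning 0
 * tells do_DataAbort() that the fault was handled and no signal is raised:
 *
 *      static int my_imprecise_abort(unsigned long addr, unsigned int fsr,
 *                                    struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *
 *      hook_fault_code(22, my_imprecise_abort, SIGBUS,
 *                      "imprecise external abort");
 */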