/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

#define EXC_SUPERV_INSN_ACCESS	9	/* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS	11	/* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT	13	/* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT	14	/* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT	15	/* TLB permission violation (w) */

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	int fault;
	unsigned int flags = 0;

	cause >>= 2;

	/* Restart the instruction */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
			"cause %ld\n", current->comm, SIGSEGV, address, cause);
		show_regs(regs);
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		flush_tlb_one(address);
		return;
	}
}