/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

#define EXC_SUPERV_INSN_ACCESS	9	/* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS	11	/* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT	13	/* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT	14	/* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT	15	/* TLB permission violation (w) */

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	int fault;
	unsigned int flags = 0;

	/* Recover the exception cause code (passed in shifted left by 2) */
	cause >>= 2;

	/* Restart the instruction */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags = FAULT_FLAG_WRITE;
		break;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
			"cause %ld\n", current->comm, SIGSEGV, address, cause);
		show_regs(regs);
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		flush_tlb_one(address);
		return;
	}
}