// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
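
	/* A note on the read case above: a VMA with VM_WRITE but not
	 * VM_READ is still considered readable here. As on most
	 * architectures, the hardware cannot express a write-only page,
	 * so write permission is taken to imply read permission; only a
	 * VMA with neither VM_READ nor VM_WRITE rejects a plain load.
	 */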

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (fault & VM_FAULT_MAJOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
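
	/* Kernel virtual mappings (vmalloc, ioremap) are entered only into
	 * the init_mm 'reference' page table when they are created. A task
	 * that touches such an address before its own top-level table has
	 * picked up the entries faults here, and the block below copies the
	 * relevant entries from init_mm on demand instead of updating every
	 * mm in the system at mapping time.
	 */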
vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}
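
/*
 * Illustrative sketch (not part of the original file): the fixup path in
 * bad_page_fault() relies on __ex_table entries that pair a potentially
 * faulting instruction with a recovery address. A hand-written uaccess-style
 * helper would register its load roughly like this; the labels and register
 * choices below are hypothetical:
 *
 *	1:	l32i	a2, a3, 0	// user load that may fault
 *		...
 *	2:	movi	a2, -EFAULT	// fixup: report the failure
 *		...
 *		.section __ex_table, "a"
 *		.long	1b, 2b		// (faulting insn, fixup address)
 *		.previous
 *
 * search_exception_tables(regs->pc) locates the entry whose first word
 * matches the faulting pc, and bad_page_fault() resumes at entry->fixup
 * instead of oopsing.
 */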