/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "arch.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"

/*
 * Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
 * segv().
 */
int handle_page_fault(unsigned long address, unsigned long ip,
		      int is_write, int is_user, int *code_out)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int err = -EFAULT;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(is_write ? FAULT_FLAG_WRITE : 0);

	*code_out = SEGV_MAPERR;

	/*
	 * If the fault was during an atomic operation, don't take the fault,
	 * just fail.
	 */
	if (in_atomic())
		goto out_nosemaphore;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto out;
	else if (vma->vm_start <= address)
		goto good_area;
	else if (!(vma->vm_flags & VM_GROWSDOWN))
		goto out;
	else if (is_user && !ARCH_IS_STACKGROW(address))
		goto out;
	else if (expand_stack(vma, address))
		goto out;

good_area:
	*code_out = SEGV_ACCERR;
	if (is_write && !(vma->vm_flags & VM_WRITE))
		goto out;

	/* Don't require VM_READ|VM_EXEC for write faults! */
	if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto out;

	do {
		int fault;

		fault = handle_mm_fault(mm, vma, address, flags);

		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			goto out_nosemaphore;

		if (unlikely(fault & VM_FAULT_ERROR)) {
			if (fault & VM_FAULT_OOM) {
				goto out_of_memory;
			} else if (fault & VM_FAULT_SIGBUS) {
				err = -EACCES;
				goto out;
			}
			BUG();
		}
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;

				goto retry;
			}
		}

		pgd = pgd_offset(mm, address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
	} while (!pte_present(*pte));
	err = 0;
	/*
	 * The below warning was added in place of
	 *	pte_mkyoung(); if (is_write) pte_mkdirty();
	 * If it's triggered, we'd normally see a hang here (a clean pte is
	 * marked read-only to emulate the dirty bit).
	 * However, the generic code can mark a PTE writable but clean on a
	 * concurrent read fault, triggering this harmlessly. So comment it out.
	 */
#if 0
	WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
	flush_tlb_page(vma, address);
out:
	up_read(&mm->mmap_sem);
out_nosemaphore:
	return err;

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return 0;
}
EXPORT_SYMBOL(handle_page_fault);

static void show_segv_info(struct uml_pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct faultinfo *fi = UPT_FAULTINFO(regs);

	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
		(void *)UPT_IP(regs), (void *)UPT_SP(regs),
		fi->error_code);

	print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
	printk(KERN_CONT "\n");
}

static void bad_segv(struct faultinfo fi, unsigned long ip)
{
	struct siginfo si;

	si.si_signo = SIGSEGV;
	si.si_code = SEGV_ACCERR;
	si.si_addr = (void __user *) FAULT_ADDRESS(fi);
	current->thread.arch.faultinfo = fi;
	force_sig_info(SIGSEGV, &si, current);
}

void fatal_sigsegv(void)
{
	force_sigsegv(SIGSEGV, current);
	do_signal();
	/*
	 * This is to tell gcc that we're not returning - do_signal
	 * can, in general, return, but in this case it won't, since
	 * we just got a fatal SIGSEGV queued.
	 */
	os_dump_core();
}

void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct faultinfo *fi = UPT_FAULTINFO(regs);

	if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
		show_segv_info(regs);
		bad_segv(*fi, UPT_IP(regs));
		return;
	}
	segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
}

/*
 * We give a *copy* of the faultinfo in the regs to segv.
 * This must be done, since nesting SEGVs could overwrite
 * the info in the regs. A pointer to the info then would
 * give us bad data!
 */
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
		   struct uml_pt_regs *regs)
{
	struct siginfo si;
	jmp_buf *catcher;
	int err;
	int is_write = FAULT_WRITE(fi);
	unsigned long address = FAULT_ADDRESS(fi);

	if (!is_user && (address >= start_vm) && (address < end_vm)) {
		flush_tlb_kernel_vm();
		return 0;
	}
	else if (current->mm == NULL) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Segfault with no mm");
	}

	if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi))
		err = handle_page_fault(address, ip, is_write, is_user,
					&si.si_code);
	else {
		err = -EFAULT;
		/*
		 * A thread accessed NULL; we get a fault, but CR2 is invalid.
		 * This code is used in __do_copy_from_user() of TT mode.
		 * XXX tt mode is gone, so maybe this isn't needed any more
		 */
		address = 0;
	}

	catcher = current->thread.fault_catcher;
	if (!err)
		return 0;
	else if (catcher != NULL) {
		current->thread.fault_addr = (void *) address;
		UML_LONGJMP(catcher, 1);
	}
	else if (current->thread.fault_addr != NULL)
		panic("fault_addr set but no fault catcher");
	else if (!is_user && arch_fixup(ip, regs))
		return 0;

	if (!is_user) {
		show_regs(container_of(regs, struct pt_regs, regs));
		panic("Kernel mode fault at addr 0x%lx, ip 0x%lx",
		      address, ip);
	}

	show_segv_info(regs);

	if (err == -EACCES) {
		si.si_signo = SIGBUS;
		si.si_errno = 0;
		si.si_code = BUS_ADRERR;
		si.si_addr = (void __user *)address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGBUS, &si, current);
	} else {
		BUG_ON(err != -EFAULT);
		si.si_signo = SIGSEGV;
		si.si_addr = (void __user *) address;
		current->thread.arch.faultinfo = fi;
		force_sig_info(SIGSEGV, &si, current);
	}
	return 0;
}

void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
	struct faultinfo *fi;
	struct siginfo clean_si;

	if (!UPT_IS_USER(regs)) {
		if (sig == SIGBUS)
			printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
			       "mount likely just ran out of space\n");
		panic("Kernel mode signal %d", sig);
	}

	arch_examine_signal(sig, regs);

	memset(&clean_si, 0, sizeof(clean_si));
	clean_si.si_signo = si->si_signo;
	clean_si.si_errno = si->si_errno;
	clean_si.si_code = si->si_code;
	switch (sig) {
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	case SIGTRAP:
		fi = UPT_FAULTINFO(regs);
		clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi);
		current->thread.arch.faultinfo = *fi;
#ifdef __ARCH_SI_TRAPNO
		clean_si.si_trapno = si->si_trapno;
#endif
		break;
	default:
		printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n",
			sig, si->si_code);
	}

	force_sig_info(sig, &clean_si, current);
}

void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
	if (current->thread.fault_catcher != NULL)
		UML_LONGJMP(current->thread.fault_catcher, 1);
	else
		relay_signal(sig, si, regs);
}

void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	do_IRQ(WINCH_IRQ, regs);
}

void trap_init(void)
{
}