/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/hardirq.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1


/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int si_code = SEGV_MAPERR;
	int fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags);

	/*
	 * If a fatal signal arrived while retrying, just bail out;
	 * handle_mm_fault() has already dropped mmap_sem when it
	 * returns VM_FAULT_RETRY.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
		return;
	}

	up_read(&mm->mmap_sem);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		info.si_signo = SIGBUS;
		info.si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		info.si_signo = SIGSEGV;
		info.si_code = SEGV_ACCERR;
	}
	info.si_errno = 0;
	info.si_addr = (void __user *)address;
	force_sig_info(info.si_signo, &info, current);
	return;

bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void __user *)address;
		force_sig_info(info.si_signo, &info, current);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}


void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}