// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2
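/*
 * A worked example of the ISR-to-vm_flags translation done in
 * ia64_do_page_fault() below (the numbers are illustrative, not from the
 * original source): for a fault raised by a store, the ISR W bit is set, so
 *
 *	mask = ((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT
 *	     = 1UL << 1 = VM_WRITE
 *
 * and the "(vma->vm_flags & mask) != mask" access check reduces to testing
 * that the faulting vma is writable. An instruction fetch sets the X bit
 * instead, yielding mask == VM_EXEC. The "#if" sanity check in the function
 * guarantees that these bit positions stay in sync with <linux/mm.h>.
 */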
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/* Translate the ISR fault bits into the VM_* permission bits checked below. */
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * It may find no vma even though the access is valid: the last vm area
	 * may be the register backing store, which needs to expand upwards. In
	 * that case vma will be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

 good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

 check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;
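/*
 * Illustration of the backing-store growth limit above (example numbers
 * only, assuming 16KB pages): with vma->vm_end == 0x6000000000004000 and
 * PAGE_SIZE == 0x4000, the check
 *
 *	address > vma->vm_end + PAGE_SIZE - sizeof(long)
 *
 * rejects anything beyond 0x6000000000007ff8, i.e. the register backing
 * store may fault in at most one more page, up to that page's last
 * 8-byte slot.
 */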
 bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
 bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; an lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		struct siginfo si;

		clear_siginfo(&si);
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

 no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; an lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

 out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}
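/*
 * Note on the psr.ed handling above (illustrative, not from the original
 * source): a compiler-scheduled control-speculative load and its check
 * typically look like
 *
 *	ld8.s	r2 = [r3]	// speculative load, may fault
 *	...
 *	chk.s	r2, recovery	// branch to recovery code if r2 is NaT
 *
 * Setting psr.ed makes the re-executed ld8.s complete with a NaT in r2
 * (and cancels a faulting lfetch) instead of faulting again, which is the
 * forward progress that the two "ed" branches above guarantee.
 */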