// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

/*
 * Bit positions of VM_READ, VM_WRITE, and VM_EXEC within vm_flags; the
 * compile-time check in ia64_do_page_fault() verifies that these stay in
 * sync with <linux/mm.h>.
 */
# define VM_READ_BIT	0
# define VM_WRITE_BIT	1
# define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * Translate the access type recorded in the ISR (execute/write bits)
	 * into the corresponding vm_flags bits.
	 */
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This handles kprobes on user space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL.
	 *
	 * It may find no vma, but it could be that the last vm area is the
	 * register backing store that needs to expand upwards; in this case
	 * vma will be null, but prev_vma will be non-null.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	|| (1 << VM_EXEC_BIT) != VM_EXEC)
#  error File is out of sync with <linux/mm.h>.  Please update.
# endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So we check whether the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}