// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * Map the execute/write bits of the ISR onto the matching
	 * VM_EXEC/VM_WRITE bits so the access can be checked against
	 * vma->vm_flags below.
	 */
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards, in
	 * this case vma will be null, but prev_vma will be non-null
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale. If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		make_task_dead(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}