// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/setup.h>
#include <asm/traps.h>

extern void die_if_kernel(char *, struct pt_regs *, long);

int send_fault_sig(struct pt_regs *regs)
{
        int signo, si_code;
        void __user *addr;

        signo = current->thread.signo;
        si_code = current->thread.code;
        addr = (void __user *)current->thread.faddr;
        pr_debug("send_fault_sig: %p,%d,%d\n", addr, signo, si_code);

        if (user_mode(regs)) {
                force_sig_fault(signo, si_code, addr);
        } else {
                if (fixup_exception(regs))
                        return -1;

                //if (signo == SIGBUS)
                //      force_sig_fault(signo, si_code, addr);

                /*
                 * Oops. The kernel tried to access some bad page. We'll have to
                 * terminate things with extreme prejudice.
                 */
                if ((unsigned long)addr < PAGE_SIZE)
                        pr_alert("Unable to handle kernel NULL pointer dereference");
                else
                        pr_alert("Unable to handle kernel access");
                pr_cont(" at virtual address %p\n", addr);
                die_if_kernel("Oops", regs, 0 /*error_code*/);
                do_exit(SIGKILL);
        }

        return 1;
}

/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
                 regs->sr, regs->pc, address, error_code, mm ? mm->pgd : NULL);

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
retry:
        mmap_read_lock(mm);

        vma = find_vma(mm, address);
        if (!vma)
                goto map_err;
        if (vma->vm_flags & VM_IO)
                goto acc_err;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto map_err;
        if (user_mode(regs)) {
                /* Accessing the stack below usp is always a bug.  The
                   "+ 256" is there due to some instructions doing
                   pre-decrement on the stack and that doesn't show up
                   until later.  */
                if (address + 256 < rdusp())
                        goto map_err;
        }
        if (expand_stack(vma, address))
                goto map_err;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
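        /*
         * The switch below decodes the low two bits of error_code as
         * documented above: bit 1 set means a write access, and bit 0
         * set means the page was present, i.e. a protection fault.
         */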
good_area:
        pr_debug("do_page_fault: good_area\n");
        switch (error_code & 3) {
        default:        /* 3: write, present */
                /* fall through */
        case 2:         /* write, not present */
                if (!(vma->vm_flags & VM_WRITE))
                        goto acc_err;
                flags |= FAULT_FLAG_WRITE;
                break;
        case 1:         /* read, present */
                goto acc_err;
        case 0:         /* read, not present */
                if (unlikely(!vma_is_accessible(vma)))
                        goto acc_err;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

        fault = handle_mm_fault(vma, address, flags);
        pr_debug("handle_mm_fault returns %x\n", fault);

        if (fault_signal_pending(fault, regs))
                return 0;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto map_err;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return 0;

no_context:
        current->thread.signo = SIGBUS;
        current->thread.faddr = address;
        return send_fault_sig(regs);

bus_err:
        current->thread.signo = SIGBUS;
        current->thread.code = BUS_ADRERR;
        current->thread.faddr = address;
        goto send_sig;

map_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_MAPERR;
        current->thread.faddr = address;
        goto send_sig;

acc_err:
        current->thread.signo = SIGSEGV;
        current->thread.code = SEGV_ACCERR;
        current->thread.faddr = address;

send_sig:
        mmap_read_unlock(mm);
        return send_fault_sig(regs);
}
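
/*
 * A minimal user-space sketch (not part of the kernel build) of the
 * error_code decoding that the good_area switch above performs; the
 * decode_fault() helper is a hypothetical name used for illustration
 * only.  Extracted into its own file, it compiles as an ordinary C
 * program and prints the meaning of each of the four bit patterns:
 *
 *      #include <stdio.h>
 *
 *      static const char *decode_fault(unsigned long error_code)
 *      {
 *              switch (error_code & 3) {
 *              case 0:  return "read, page not present";
 *              case 1:  return "read, protection fault";
 *              case 2:  return "write, page not present";
 *              default: return "write, protection fault";
 *              }
 *      }
 *
 *      int main(void)
 *      {
 *              unsigned long ec;
 *
 *              for (ec = 0; ec < 4; ec++)
 *                      printf("error_code %lu: %s\n", ec, decode_fault(ec));
 *              return 0;
 *      }
 */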