// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code = SEGV_MAPERR;
	int ret;
	vm_fault_t fault;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		ret = handle_kernel_vaddr_fault(address);
		if (unlikely(ret))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
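	/*
	 * A mapping exists for this address, so any failure from here on
	 * is a permission problem (SEGV_ACCERR) rather than the missing
	 * mapping (SEGV_MAPERR) that si_code defaulted to above.
	 */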
	si_code = SEGV_ACCERR;

	/* Handle protection violation, execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fatal_signal_pending(current)) {

		/*
		 * if fault retry, mmap_sem already relinquished by core mm
		 * so OK to return to user mode (with signal handled first)
		 */
		if (fault & VM_FAULT_RETRY) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}