// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code = SEGV_MAPERR;
	int ret;
	vm_fault_t fault;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		ret = handle_kernel_vaddr_fault(address);
		if (unlikely(ret))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
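	 *
	 * The address is now known to lie inside a valid VMA, so from
	 * here on a SIGSEGV denotes a permission problem rather than a
	 * bad address, hence si_code switches to SEGV_ACCERR below.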
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* Handle protection violation, execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fatal_signal_pending(current)) {

		/*
		 * if fault retry, mmap_sem already relinquished by core mm
		 * so OK to return to user mode (with signal handled first)
		 */
		if (fault & VM_FAULT_RETRY) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}