// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * Handling faults on kernel virtual addresses is required to implement
 * vmalloc/pkmap/fixmap. Refer to asm/processor.h for the System Memory Map.
 *
 * This simply copies the PMD entry (pointer to a 2nd level page table or a
 * hugepage) from the swapper pgdir into the task pgdir, so the 2nd level
 * table/page ends up shared between the two. An illustrative sketch of the
 * scenario follows the function.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
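/*
 * Illustrative sketch (not part of the fault path; the APIs named are the
 * generic kernel ones): vmalloc() installs its page tables in init_mm (the
 * swapper pgdir) only. A task whose pgdir predates that mapping faults on
 * first touch, handle_kernel_vaddr_fault() copies the shared PMD over, and
 * the faulting access is simply retried:
 *
 *	void *p = vmalloc(PAGE_SIZE);	// page tables set up in init_mm only
 *	memset(p, 0, PAGE_SIZE);	// may fault -> PMD copied -> retried
 */
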
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	si_code = SEGV_ACCERR;

	/* Handle protection violation, e.g. execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (fatal_signal_pending(current)) {

		/*
		 * if fault retry, mmap_sem already relinquished by core mm
		 * so OK to return to user mode (with signal handled first)
		 */
		if (fault & VM_FAULT_RETRY) {
			if (!user_mode(regs))
				goto no_context;
			return;
		}
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault handled gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code.)
	 *
	 * See the illustrative sketch at the end of this file.
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
}
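/*
 * Illustrative sketch (caller-side view, not part of this file): the
 * "valid exception-points" referenced at no_context above are the uaccess
 * helpers, whose user-memory accesses are recorded in the kernel exception
 * table. fixup_exception() looks up the faulting instruction there and, on
 * a match, redirects execution to a landing pad that makes the helper fail
 * cleanly instead of letting the kernel Oops:
 *
 *	u32 val;
 *	if (get_user(val, uptr))	// bad uptr: the fault is taken here ...
 *		return -EFAULT;		// ... and execution resumes via the fixup
 */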