// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * Handling faults on kernel virtual addresses is required to implement
 * vmalloc/pkmap/fixmap. Refer to asm/processor.h for the System Memory Map.
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}
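/*
 * Top-level fault handler, entered from the low-level TLB Miss / ProtV
 * exception paths (per the file header) with the faulting address and the
 * saved register file.
 *
 * Kernel faults on vmalloc space only synchronize page tables via the fast
 * path above. Everything else goes through the generic flow: classify the
 * access, find the vma under mmap_sem, let handle_mm_fault() do the real
 * work, honour signal/retry semantics, account the fault, and finally
 * deliver a signal to userspace or take the kernel exception-fixup path.
 */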
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * Classify the access from the Exception Cause Register:
	 * a store/exchange needs VM_WRITE, an instruction fetch fault
	 * (ProtV) needs VM_EXEC; anything else is treated as a read.
	 */
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Fault retry nuances: mmap_sem was already relinquished by core mm
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	up_read(&mm->mmap_sem);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}

		/* Normal return path: fault handled gracefully */
		return;
	}

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}
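/*
 * From userspace, the si_code chosen above is observable in the siginfo
 * delivered with the signal: touching an unmapped address yields
 * SIGSEGV/SEGV_MAPERR, an access that violates the vma permissions yields
 * SIGSEGV/SEGV_ACCERR, and a SIGBUS fault carries BUS_ADRERR.
 */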