// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handling faults on kernel virtual addresses is required to implement
 * vmalloc/pkmap/fixmap; refer to asm/processor.h for the System Memory Map.
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from the swapper pgdir to the task pgdir. The 2nd level table/page is thus
 * shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	/* p4d is folded into pgd here, so the set_pgd() above covers it */

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

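/*
 * Top-level page fault handler: entered from the low level TLB miss and
 * protection violation (ProtV) exception handlers with the faulting
 * virtual address and the saved register file.
 */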
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/* Decode the access type from the Exception Cause Register */
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good: now check permissions for this memory access
	 * (exactly one of read/write/exec is validated per fault)
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Fault retry nuances: mmap_lock was already relinquished by core mm
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);

	/*
	 * Major/minor page fault accounting is done by core mm itself
	 * (via the regs argument to handle_mm_fault() above);
	 * in case of retry we only land here once.
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}
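
/*
 * For illustration only (not part of this file's build): a minimal,
 * hypothetical userspace sketch of what the bad_area path above delivers.
 * force_sig_fault() surfaces as a SIGSEGV whose siginfo carries the fault
 * address in si_addr and SEGV_MAPERR/SEGV_ACCERR in si_code:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void segv_handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// si->si_addr is the address passed to force_sig_fault()
 *		fprintf(stderr, "SIGSEGV at %p, si_code=%d\n",
 *			si->si_addr, si->si_code);
 *		_exit(1);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = segv_handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigaction(SIGSEGV, &sa, NULL);
 *
 *		*(volatile int *)1 = 0;	// unmapped address -> SEGV_MAPERR
 *		return 0;
 *	}
 */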