// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

/*
 * If the faulting instruction has an entry in the kernel exception
 * table, redirect execution to its recorded fixup address.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}
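
/*
 * Illustrative sketch (not part of the original file): fixup_exception()
 * consumes entries that user-access helpers emit into the __ex_table
 * section.  On csky, an exception_table_entry pairs the address of a
 * potentially-faulting instruction ("insn") with the address to resume
 * at ("nextinsn").  A get_user()-style helper might record such a pair
 * roughly as follows; the exact mnemonics and operands below are
 * assumptions, not the real asm/uaccess.h implementation:
 *
 *	1:	ldw	a0, (a1)	// user load, may fault
 *	2:	...			// fixup: execution resumes here
 *
 *	.section __ex_table, "a"
 *	.align	2
 *	.long	1b, 2b			// { insn = 1b, nextinsn = 2b }
 *	.previous
 *
 * search_exception_tables() finds the entry whose insn matches regs->pc,
 * and fixup_exception() rewrites regs->pc to nextinsn so the trap
 * returns to the recovery path instead of oopsing.
 */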

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to one of the appropriate routines.
 *
 * mmu_meh carries the faulting virtual address (read from the MMU
 * entry-high register); only its page-aligned part is used below.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	vm_fault_t fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	/*
	 * If we're in an interrupt, have pagefaults disabled or have no
	 * user context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
			      address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
			      address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/* The vector number lives in the status register. */
		tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die_if_kernel("Oops", regs, write);

out_of_memory:
	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;

	/* Drop mmap_sem before invoking the OOM killer. */
	up_read(&mm->mmap_sem);

	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	pagefault_out_of_memory();
	return;

do_sigbus:
	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
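
/*
 * Illustrative sketch (not part of the original file): the si_code chosen
 * above (SEGV_MAPERR for an unmapped address, SEGV_ACCERR for a protection
 * violation) reaches userspace through force_sig_fault() and can be
 * observed with an SA_SIGINFO handler.  A minimal userspace demo, under
 * the assumption that address 0 is unmapped (fprintf() is not
 * async-signal-safe; acceptable for a demo only):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void segv_handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		// si_addr is the address passed to force_sig_fault();
 *		// si_code is SEGV_MAPERR or SEGV_ACCERR as set above.
 *		fprintf(stderr, "SIGSEGV at %p, si_code=%d\n",
 *			info->si_addr, info->si_code);
 *		_exit(1);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = segv_handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigaction(SIGSEGV, &sa, NULL);
 *
 *		*(volatile int *)0 = 0;	// unmapped -> SEGV_MAPERR
 *		return 0;
 *	}
 */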