// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;
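	/*
	 * regs->cause and regs->badaddr mirror the scause and stval CSRs:
	 * the hardware sets them when the fault is taken, and the low-level
	 * trap entry code saved them into pt_regs before calling us.
	 */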

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
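
	/*
	 * The EXC_*_PAGE_FAULT values above are the scause encodings from
	 * the RISC-V privileged specification: 12 (instruction), 13 (load)
	 * and 15 (store/AMO).
	 */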

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's a kernel or user fault first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}
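
/*
 * For context on the fixup_exception() call under no_context above: uaccess
 * helpers register (faulting instruction, landing pad) pairs in the kernel
 * exception table, so a kernel-mode fault at a registered instruction is
 * resolved by redirecting execution instead of oopsing. A minimal sketch of
 * the idea, assuming the absolute extable layout of this kernel generation:
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(regs->epc);
 *		if (fixup) {
 *			regs->epc = fixup->fixup;  // resume at the landing pad
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 * (Sketch only: the real implementation lives in arch/riscv/mm/extable.c,
 * and the entry layout may differ across kernel versions.)
 */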