xref: /openbmc/linux/arch/riscv/mm/fault.c (revision afba8b0a)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
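	/*
	 * satp points at the root page table this hart is actually using,
	 * so derive the faulting pgd entry from the CSR instead.
	 */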
	index = pgd_index(addr);
	pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
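	/* Copy the kernel's top-level entry into the active page table. */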
	set_pgd(pgd, *pgd_k);

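	/*
	 * On configurations with fewer page-table levels the p4d (and
	 * possibly pud) level is folded, and these offset helpers simply
	 * pass the entry through.
	 */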
	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

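/*
 * Returns true when the faulting access conflicts with the permissions of
 * the VMA it landed in (execute, read or write, keyed off the trap cause).
 */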
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

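	/* The cause and badaddr CSRs saved at trap entry identify the fault. */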
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/*
	 * Enable interrupts if they were enabled in the parent context
	 * (the status register's PIE bit records whether interrupts
	 * were on before the trap).
	 */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

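	/* Account the fault to the perf software-event counters. */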
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
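	/*
	 * The address lies below the vma; only a stack-like vma
	 * (VM_GROWSDOWN) may legitimately be grown down to cover it.
	 */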
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
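	/* A mapping exists, so any failure from here on is a permission error. */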
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
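	/* With regs passed in, handle_mm_fault() also does the fault accounting. */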
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}