// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

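	/*
	 * The trap entry code saved scause and stval into the register
	 * frame: the trap cause and the faulting virtual address.
	 */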
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

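	/*
	 * Flag user-mode faults so the core mm code can apply
	 * user-specific policy (e.g. memcg OOM handling).
	 */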
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

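	/* Count every fault once; major/minor accounting happens below. */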
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
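	/*
	 * addr lies below the start of the VMA: only a stack mapping
	 * (VM_GROWSDOWN) may legitimately be expanded down to cover it.
	 */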
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

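	/*
	 * Check the access type against the VMA permissions. The cause
	 * values come from the privileged spec: instruction (12),
	 * load (13) and store/AMO (15) page faults.
	 */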
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
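	/*
	 * fixup_exception() searches the kernel exception table for an
	 * entry covering the faulting instruction; the uaccess helpers
	 * register such fixups so a faulting user access resumes at the
	 * fixup instead of oopsing.
	 */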
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
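		/*
		 * satp holds MODE, ASID and the PPN of the root page
		 * table; mask out everything but the PPN to locate the
		 * table currently in use.
		 */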
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP) & SATP_PPN) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

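		/*
		 * Walk both tables in parallel. The p4d and pud levels
		 * are folded on the sv32/sv39 layouts, so these offset
		 * helpers pass straight through; the present checks
		 * still validate the reference table at each level.
		 */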
		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs.
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}