// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points. At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH	-1
#define FLT_LOAD	0
#define FLT_STORE	1

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
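	/*
	 * lock_mm_and_find_vma() takes the mmap read lock and looks up
	 * the VMA covering the faulting address, expanding the stack if
	 * appropriate; on failure it returns NULL with the lock already
	 * dropped, hence bad_area_nosemaphore below.
	 */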
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		goto bad_area_nosemaphore;

	/* Address space is OK. Now check access rights. */
	si_code = SEGV_ACCERR;

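	/*
	 * Check the access attempted against the permissions the VMA
	 * actually grants; a store additionally sets FAULT_FLAG_WRITE
	 * so the core fault path knows to break COW if needed.
	 */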
	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

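	/*
	 * A pending signal can abort the fault before it completes; in
	 * that case the mmap lock has already been dropped. User mode
	 * returns to deliver the signal; kernel mode needs a fixup.
	 */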
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
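		/*
		 * VM_FAULT_RETRY means the mmap lock was dropped while we
		 * waited (e.g. on I/O); note FAULT_FLAG_TRIED so the core
		 * code knows this is a repeat attempt, and retry the lookup.
		 */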
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

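	/*
	 * Out of memory while in user mode: hand off to the common
	 * pagefault_out_of_memory() path rather than failing here.
	 */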
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
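	/*
	 * Kernel-mode faults from accessors such as copy_to/from_user()
	 * have a fixup entry keyed on the faulting instruction address
	 * (ELR); redirect execution there instead of dying.
	 */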
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}

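/*
 * Entry points from the exception decode: each translates the
 * architectural "bad virtual address" register (badva) into a
 * canonical do_page_fault() call with the matching cause code.
 */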
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}