xref: /openbmc/linux/arch/hexagon/mm/vm_fault.c (revision 9fb29c73)
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions; see the example after the FLT_* codes below.
 */

#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1
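
/*
 * Example: a minimal sketch of the decode described above.  Each
 * hardware exception entry point supplies one of these canonical
 * cause codes, and do_page_fault() checks them against the VMA
 * permissions (FLT_IFETCH -> VM_EXEC, FLT_LOAD -> VM_READ,
 * FLT_STORE -> VM_WRITE plus FAULT_FLAG_WRITE).  The entry name
 * below is hypothetical; the real entry points are at the end of
 * this file.
 */
#if 0
static void example_ifetch_fault_entry(struct pt_regs *regs)
{
	/* pt_badva() holds the faulting virtual address. */
	do_page_fault(pt_badva(regs), FLT_IFETCH, regs);
}
#endif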

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	/*
	 * find_vma() returned the first VMA ending above the address;
	 * it only covers the fault if it is a stack VMA that can be
	 * grown downward to include it.
	 */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags);

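	/*
	 * If the fault stalled for a retry but a fatal signal arrived,
	 * handle_mm_fault() has already dropped mmap_sem for us, so we
	 * can simply bail out.
	 */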
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				/*
				 * Retry only once: drop ALLOW_RETRY and
				 * mark the fault TRIED so the next attempt
				 * waits for the page instead of stalling
				 * again.
				 */
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
		return;
	}

	up_read(&mm->mmap_sem);

	/* Kernel-mode faults (e.g. copyin/copyout) go to the fixup path */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/*
	 * User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Any other unrecoverable fault is reported as an access error */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address, current);
	return;

bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
		return;
	}
	/* Kernel-mode fault falls through to no_context */

no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
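
/*
 * Example: a minimal sketch of the fixup mechanism behind the
 * no_context path above.  Kernel code that may legitimately fault on
 * a user address (e.g. copy_to_user()) records each potentially
 * faulting instruction address together with a recovery address in
 * the exception table; when the fault cannot be resolved, the saved
 * ELR is rewritten so execution resumes at the fixup stub instead of
 * dying.  The helper name is hypothetical.
 */
#if 0
static int example_try_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Look up the faulting kernel PC in the exception table. */
	fixup = search_exception_tables(pt_elr(regs));
	if (!fixup)
		return 0;	/* no entry: the fault is fatal */

	/* Resume at the recorded recovery address. */
	pt_set_elr(regs, fixup->fixup);
	return 1;
}
#endif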

void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}
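
/*
 * Example: a minimal sketch of how a native port emulating the HVM
 * exceptions (see the header comment) might route its decoded traps
 * to the entry points above.  The trap numbers are hypothetical; a
 * real port would use its own exception encoding.
 */
#if 0
static void example_trap_dispatch(struct pt_regs *regs, int trapno)
{
	switch (trapno) {
	case 1:		/* hypothetical read-miss trap */
		read_protection_fault(regs);
		break;
	case 2:		/* hypothetical write-miss trap */
		write_protection_fault(regs);
		break;
	case 3:		/* hypothetical execute-miss trap */
		execute_protection_fault(regs);
		break;
	}
}
#endif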
198