xref: /openbmc/linux/arch/ia64/mm/fault.c (revision eaa163ca)
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

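	/*
	 * Walk the kernel page tables one level at a time (pgd, p4d, pud,
	 * pmd, pte), bailing out at the first missing or bad entry.
	 */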
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

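/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC within vma->vm_flags,
 * verified against <linux/mm.h> by the preprocessor check in
 * ia64_do_page_fault() below.
 */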
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

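/*
 * Handle a faulting memory access.  ADDRESS is the faulting virtual
 * address, ISR is the interruption status register value describing the
 * access (read/write/execute bits, speculative-load information, etc.),
 * and REGS is the interrupted register state.
 */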
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

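	/*
	 * Build a mask of the access types attempted, taken from the
	 * "execute" and "write" bits of the ISR and shifted into the
	 * VM_EXEC/VM_WRITE bit positions so that it can be compared
	 * against vma->vm_flags directly.
	 */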
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_lock (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif


	/*
	 * This is to handle kprobes on user space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end,
	 * or NULL if there is no such vma.
	 *
	 * We may find no vma but still have a prev_vma: the last vm area
	 * can be the register backing store, which needs to expand upwards;
	 * in that case vma will be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

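	/*
	 * A read fault is allowed on any mapping that is readable or
	 * writable (ia64 has no write-only page protection, so a writable
	 * mapping is implicitly readable as well).
	 */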
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

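	/*
	 * If a fatal signal arrived while the fault was being handled, bail
	 * out now; the mmap lock has already been dropped on this path.
	 */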
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or something else went wrong that
		 * prevented us from handling the page fault gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

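	/*
	 * Account the fault as major or minor and, if handle_mm_fault()
	 * dropped the mmap lock to wait for I/O, go around again with
	 * FAULT_FLAG_TRIED set.
	 */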
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
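	/*
	 * Two ways to expand: a normal stack vma above the address grows
	 * down (VM_GROWSDOWN), while a register backing store vma ending
	 * at the address grows up (VM_GROWSUP).  In both cases the address
	 * must stay in the vma's region and below RGN_MAP_LIMIT.
	 */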
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

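	/*
	 * Kernel-mode fault with no usable user context: check for a
	 * speculative access, a stale VHPT translation and an exception
	 * table fixup before declaring an oops.
	 */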
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
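	/*
	 * die() returns nonzero when the oops was absorbed (for example by
	 * a registered debug notifier); in that case, skip killing the task.
	 */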
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
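	/*
	 * Out of memory: release the mmap lock first.  Kernel-mode faults
	 * fall back to the no_context fixup path; user-mode faults defer
	 * to the core OOM handling in pagefault_out_of_memory().
	 */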
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}