xref: /openbmc/linux/arch/ia64/mm/fault.c (revision 31b90347)
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

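/*
 * die() is implemented in arch/ia64/kernel/traps.c; a nonzero return
 * (presumably when a notify_die() consumer such as a debugger handles
 * the fault) tells the caller below to skip the final do_exit().
 */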
extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

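	/*
	 * Walk the kernel page table by hand, from the top-level pgd down
	 * to the pte, bailing out as soon as any level is absent or bad.
	 */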
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

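/*
 * Bit positions of VM_READ, VM_WRITE and VM_EXEC in vm_flags; the
 * preprocessor check in ia64_do_page_fault() verifies that these stay
 * in sync with <linux/mm.h>.
 */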
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

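	/*
	 * Fold the execute (X) and write (W) bits of the ISR into the bit
	 * positions that VM_EXEC and VM_WRITE occupy in vma->vm_flags, so
	 * that mask can be compared against vm_flags directly below.
	 */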
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * It may find no vma covering the address: the last vm area may be
	 * the register backing store, which needs to expand upwards. In
	 * that case vma will be NULL, but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

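	/*
	 * A read fault is rejected only if the vma is neither readable nor
	 * writable; a write-only mapping still tolerates reads, presumably
	 * because ia64 page access rights cannot express write-without-read.
	 */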
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

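	/*
	 * If a fatal signal interrupted the fault, return without touching
	 * mmap_sem: on a VM_FAULT_RETRY the core fault code has already
	 * dropped it for us (see __lock_page_or_retry() in mm/filemap.c).
	 */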
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

  check_expansion:
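	/*
	 * Two expansion cases: the ordinary memory stack grows downwards
	 * (VM_GROWSDOWN, handled first), while the register backing store
	 * grows upwards off the end of prev_vma (VM_GROWSUP, the else
	 * branch).
	 */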
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
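	/*
	 * Deliver the signal with the ia64-specific siginfo extensions
	 * (si_isr plus the __ISR_VALID flag) filled in, so user space can
	 * inspect the interruption status register if it wants to.
	 */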
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

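	/*
	 * ia64_done_with_exception() consults the exception table; if the
	 * faulting instruction has a fixup entry (e.g. a user-space access
	 * in uaccess code), control resumes at the fixup and we are done.
	 */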
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

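	/*
	 * A fault in the first page is almost certainly a NULL pointer
	 * dereference (possibly through a small structure-member offset).
	 */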
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
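	/*
	 * Drop mmap_sem before invoking the OOM machinery, which may block.
	 * Kernel-mode faults fall back to the no_context path instead.
	 */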
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}
307