xref: /openbmc/linux/arch/ia64/mm/fault.c (revision 87c2ce3b)
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

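	/*
	 * Walk the kernel page table by hand (pgd -> pud -> pmd -> pte),
	 * bailing out at the first level that is missing or corrupt.
	 */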
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

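	/* Read the PTE once into a local copy before testing its present bit. */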
	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This lets kprobes handle page faults taken on user-space access
	 * instructions.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
					SIGSEGV) == NOTIFY_STOP)
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns a vma such that address < vma->vm_end, or NULL */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

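	/*
	 * Build the required-permission mask from the ISR: the X, W, and R
	 * bits indicate whether the faulting access was an execute, write,
	 * or read, and the VM_*_BIT #defines above align them with the
	 * corresponding VM_EXEC/VM_WRITE/VM_READ flags.
	 */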
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	      case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	      case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	      case VM_FAULT_SIGBUS:
		/*
		 * Something happened that made us unable to handle the
		 * page fault gracefully; report it as a bus error.
		 */
		signal = SIGBUS;
		goto bad_area;
	      case VM_FAULT_OOM:
		goto out_of_memory;
	      default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
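	/*
	 * The faulting address fell in a hole between vmas.  It is only
	 * valid if a neighboring vma may grow to cover it: either the
	 * following vma grows down (an ordinary stack), or the previous
	 * vma grows up (the ia64 register backing store) and ends exactly
	 * at the address.
	 */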
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
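		/*
		 * Send the signal to the faulting process, including the
		 * ISR value so user space can decode the kind of access
		 * that faulted.
		 */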
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a not-present translation that becomes
	 * stale.  If that happens, the not-present fault handler has already purged the
	 * stale translation, which fixed the problem.  So we check whether the
	 * translation is valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

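	/*
	 * A kernel fault covered by an exception-table entry (e.g. from the
	 * user-access helpers) is fixed up by ia64_done_with_exception(),
	 * in which case we are done.
	 */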
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
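	/*
	 * Never kill init: let it yield the CPU and then retry the fault
	 * in the hope that memory has been freed meanwhile.
	 */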
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}