xref: /openbmc/linux/arch/sh/mm/fault.c (revision 87c2ce3b)
/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
 *
 *  linux/arch/sh/mm/fault.c
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;

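	/*
	 * Notify KGDB's bus error hook if KGDB is expecting a fault
	 * from one of its own memory accesses.
	 */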
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
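	/*
	 * The address lies below this VMA; the access is only valid if
	 * the VMA is a stack that may be grown down to cover it.
	 */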
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
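	/* Check that the access type is permitted by the VMA's protections. */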
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check whether it's a kernel or user fault first.
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
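	/*
	 * Dump the page table entries for the faulting address: read the
	 * page table base from MMU_TTB and walk the two-level table by hand.
	 */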
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
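	/* Never OOM-kill init (pid 1); yield and retry the fault instead. */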
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a SIGBUS, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Called with interrupts disabled.
 */
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	unsigned long addrmax = P4SEG;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm;
	spinlock_t *ptl;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

#ifdef CONFIG_SH_STORE_QUEUES
	addrmax = P4SEG_STORE_QUE + 0x04000000;
#endif

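	/*
	 * P3 addresses (and, with store queues enabled, the store queue
	 * area) are looked up in the kernel page tables.  Anything else
	 * above TASK_SIZE, or a fault without a user mm, cannot be
	 * handled by this fast path.
	 */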
	if (address >= P3SEG && address < addrmax) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else if (address >= TASK_SIZE)
		return 1;
	else if (!(mm = current->mm))
		return 1;
	else
		pgd = pgd_offset(mm, address);

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

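	/*
	 * Only a present PTE that is already writable (for a write access)
	 * can be handled here; everything else needs the full fault handler.
	 */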
	entry = *pte;
	if (pte_none(entry) || pte_not_present(entry)
	    || (writeaccess && !pte_write(entry)))
		goto unlock;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we have to flush the entry ourselves.
	 */
	{
		unsigned long flags;
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), address & PAGE_MASK);
		local_irq_restore(flags);
	}
#endif

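	/* Install the updated PTE; ret == 0 means the fault was handled here. */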
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}

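/*
 * Flush the TLB entry for a single user page, temporarily switching to
 * the owning mm's ASID when it is not the current one.
 */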
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

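/*
 * Flush the TLB entries covering [start, end) in the given VMA's mm.
 * For large ranges it is cheaper to drop the mm's MMU context and pick
 * up a fresh ASID than to flush page by page.
 */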
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

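/*
 * Flush kernel TLB entries for [start, end), falling back to a full
 * TLB flush when the range covers too many entries to flush one by one.
 */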
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process.  Instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush the entire TLB by writing the flush bit in the MMU
	 * control register: the TF bit on SH-3, the TI bit on SH-4.
	 * It's the same position, bit #2.
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	local_irq_restore(flags);
}