xref: /openbmc/linux/arch/sh/mm/tlbex_32.c (revision 28080329ede3e4110bb14306b4529a5b9a2ce163)
/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

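	/*
	 * Walk the software page tables. This fast path only refills
	 * the TLB from an already-populated PTE; a non-zero return
	 * value hands the access back so that the generic page fault
	 * code can do the real work.
	 */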
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
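	/*
	 * Only a present PTE can be refilled here; a missing or
	 * non-present entry (demand paging) and a write to a read-only
	 * entry (copy-on-write, protection fault) are punted to the
	 * full fault handler.
	 */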
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

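	/*
	 * Update the software young/dirty bits: any access makes the
	 * page young, while a write access (non-zero error_code) also
	 * marks it dirty before the PTE is written back.
	 */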
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

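	/*
	 * Stash the fault code for this thread and load the updated PTE
	 * into the TLB; the SH implementation of update_mmu_cache()
	 * copes with a NULL vma here.
	 */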
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}