/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled. Returns 0 if the miss was handled
 * by refilling the TLB, non-zero if the generic page fault handler
 * must take over.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether due to legacy behaviour in 29-bit
	 * mode or to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
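		/*
		 * User-space address: it must lie below TASK_SIZE and
		 * the task must have an mm (i.e. not be a kernel
		 * thread), otherwise punt to the slow path.
		 */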
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

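	/*
	 * Walk the page tables; if any level is missing, the fast path
	 * cannot resolve the miss and the generic fault code takes over.
	 */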
	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
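	/*
	 * A write (non-zero error_code) through a non-writable PTE has
	 * to be resolved by the generic fault handler.
	 */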
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

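	/*
	 * Mark the page young, and dirty on a write, then write the
	 * updated PTE back before it is loaded into the TLB.
	 */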
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

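	/*
	 * Record the fault code for this thread and load the updated
	 * entry into the TLB; no vma is looked up on this fast path,
	 * hence the NULL.
	 */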
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}