xref: /openbmc/linux/arch/sh/mm/tlbex_32.c (revision 874e2cc1)
/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled.
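 *
 * Returns 0 if the TLB miss was handled here; a non-zero return means
 * the caller has to fall back to the full page fault path.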
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether due to legacy behaviour in 29-bit
	 * mode or to PMB configuration in 32-bit mode.
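	 *
	 * P3 addresses are kernel mappings and are resolved against the
	 * kernel page tables; anything else must be a user address
	 * backed by current->mm.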
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

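	/*
	 * Walk the page tables in software: pgd -> p4d -> pud -> pmd.
	 * If any level is missing there is no mapping to load, so bail
	 * out to the slow path.
	 */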
	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
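	/*
	 * Fetch the PTE itself. It must be present, and for a write
	 * access (non-zero error_code) it must also be writable;
	 * anything else is left to the generic fault handler.
	 */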
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

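	/*
	 * The access is permitted: mark the entry young (referenced),
	 * and dirty as well for a write, then write it back.
	 */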
	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

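	/*
	 * Record the fault code for this thread and load the updated
	 * PTE into the TLB.
	 */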
	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}