/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	unsigned long		range_start;
	unsigned long		range_end;
};
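
/*
 * range_start/range_end record the span of user addresses whose ptes
 * have been removed since tlb_start_vma(), so that tlb_end_vma() can
 * issue a single flush_tlb_range() over just that span instead of
 * flushing page by page.  For a full MM teardown (fullmm) the range is
 * not tracked and tlb_finish_mmu() does flush_tlb_mm() instead.
 */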

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
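
/*
 * Rough sketch of how the generic mm code is expected to drive this
 * interface when unmapping a range (illustrative only; the real
 * callers live in mm/memory.c):
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);
 *	for each page in [start, end):
 *		tlb_remove_tlb_entry(tlb, ptep, addr);
 *		tlb_remove_page(tlb, page);
 *	tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 *
 * For a full address space teardown (exit), full_mm_flush is non-zero,
 * the per-page range tracking is skipped and tlb_finish_mmu() issues a
 * single flush_tlb_mm().
 */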

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
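
/*
 * Example (illustrative, assuming 4K pages): removing the ptes at
 * 0x8000, 0x9000 and 0xa000 in turn leaves range_start = 0x8000 and
 * range_end = 0xb000, so the subsequent tlb_end_vma() flushes that
 * whole span with one flush_tlb_range() call.
 */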

/*
 * For the tlb vma handling, we can optimise these away when we're
 * doing a full MM flush.  When we're doing a munmap, the vmas are
 * adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->range_end > 0)
		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
}

#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep)		pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp)		pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)
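
/*
 * No page batching is done here: tlb_remove_page() hands the page
 * straight to free_page_and_swap_cache(), and the pte/pmd variants
 * release page table pages immediately via pte_free()/pmd_free().
 * tlb_migrate_finish() is a no-op.
 */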

#endif /* CONFIG_MMU */
#endif