/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

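/*
 * With CONFIG_HAVE_RCU_TABLE_FREE, freed page-table pages are queued on
 * a per-gather mmu_table_batch and only handed to __tlb_remove_table()
 * after an RCU grace period has elapsed, so lockless page-table walkers
 * can never dereference a table page that has already been reused.
 */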
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
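/*
 * With 4K pages and 32-bit pointers this works out to roughly a
 * thousand table pointers per batch.
 */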

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
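 *
 * Pages to be freed are gathered in pages[] (initially the small
 * on-stack local[] array; nr/max track how full it is), while
 * range_start/range_end accumulate the virtual span that still
 * needs a TLB flush.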
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

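/*
 * Grow the pending flush range to cover the page at addr.  For example,
 * tlb_add_flush(tlb, 0x8000) followed by tlb_add_flush(tlb, 0xa000)
 * leaves the pending range as [0x8000, 0xb000) (assuming 4K pages).
 */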
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

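/*
 * Try to upgrade from the small on-stack local[] array to a full page
 * of page pointers.  GFP_NOWAIT keeps this safe to call from the
 * teardown path; on failure we simply carry on batching
 * MMU_GATHER_BUNDLE pages at a time.
 */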
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

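/*
 * The flush is split in two halves: tlb_flush_mmu_tlbonly() performs
 * the hardware TLB flush (plus any pending RCU table flush), while
 * tlb_flush_mmu_free() frees the gathered pages and tries to regain a
 * full-size batch array.  tlb_flush_mmu() does both.
 */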
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
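	/*
	 * A full-mm teardown is signalled by start == 0 and end == -1UL:
	 * end + 1 then wraps to 0, so both halves of the OR are zero.
	 */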
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
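
/*
 * A minimal sketch of the calling sequence (the real call sites are in
 * the generic mm code, e.g. unmap_region() and exit_mmap()):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... walk the page tables, calling tlb_remove_tlb_entry() for
 *	    each cleared pte and tlb_remove_page() for each page ...
 *	tlb_finish_mmu(&tlb, start, end);
 */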

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap, the vmas
 * are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
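	 *
	 * Rounding addr down to PMD_MASK and queueing a flush one page
	 * either side of the 1MB mark ensures the eventual
	 * flush_tlb_range() touches both sections.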
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}

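/*
 * On the classic ARM MMU the pmd is folded into the pgd, so there is no
 * separate pmd page to free; only LPAE has a real third level.
 */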
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif