/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)

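/*
 * Illustrative sketch (not part of this header): how __tlbi() expands
 * for the zero- and one-argument forms. 'vmalle1is' and 'aside1is' are
 * real TLBI operations; 'asid_arg' is a hypothetical, pre-formatted
 * operand (see __TLBI_VADDR() below).
 *
 *	__tlbi(vmalle1is);		// __TLBI_0: "tlbi vmalle1is"
 *	__tlbi(aside1is, asid_arg);	// __TLBI_1: "tlbi aside1is, %0"
 */
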
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})

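/*
 * Worked example (illustrative, with assumed values): for user address
 * 0x0000ffff8000a000 and ASID 42, the operand keeps VA[55:12] in bits
 * [43:0] and places the ASID in bits [63:48]:
 *
 *	unsigned long ta = 0x0000ffff8000a000UL >> 12;	// 0x0000000ffff8000a
 *	ta &= GENMASK_ULL(43, 0);			// unchanged, fits in 44 bits
 *	ta |= 42UL << 48;				// 0x002a000ffff8000a
 */
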
/*
 * Get the translation granule of the system, which is decided by
 * PAGE_SIZE.  Used by TTL.
 *  - 4KB	: 1
 *  - 16KB	: 2
 *  - 64KB	: 3
 */
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3

static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}

/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL is implemented, TLBI operations take an additional
 * hint for the level at which the invalidation must take place. If the
 * level is wrong, no invalidation may take place. In the case where the
 * level cannot be easily determined, a 0 value for the level parameter
 * will perform a non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
	    level) {							\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)

#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)

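/*
 * Illustrative sketch (assumed 'addr' and 'asid'): invalidating a
 * last-level user mapping when the entry is known to live at level 3,
 * so the TTL hint can be encoded into bits [47:44] of the operand:
 *
 *	unsigned long arg = __TLBI_VADDR(addr, asid);
 *
 *	__tlbi_level(vale1is, arg, 3);
 *	__tlbi_user_level(vale1is, arg, 3);
 */
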
/*
 * This macro creates a properly formatted VA operand for the TLB RANGE.
 * The value bit assignments are:
 *
 * +----------+------+-------+-------+-------+----------------------+
 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 * +----------+------+-------+-------+-------+----------------------+
 * |63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by the formula below:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
 *
 */
#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
	({							\
		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
		__ta &= GENMASK_ULL(36, 0);			\
		__ta |= (unsigned long)(ttl) << 37;		\
		__ta |= (unsigned long)(num) << 39;		\
		__ta |= (unsigned long)(scale) << 44;		\
		__ta |= get_trans_granule() << 46;		\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})

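/*
 * Illustrative sketch (assumed 'addr', ASID 42): encoding a range
 * operand for scale 1, num 7 and a last-level (TTL = 3) hint, i.e.
 * (7 + 1) * 2^(5*1 + 1) = 512 pages starting at 'addr':
 *
 *	unsigned long ta = __TLBI_VADDR_RANGE(addr, 42, 1, 7, 3);
 *
 *	__tlbi(rvale1is, ta);
 */
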
/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)

/*
 * Generate 'num' values from -1 to 31 with -1 rejected by the
 * __flush_tlb_range() loop below. Its return value is only
 * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
 * 'pages' is more than that, you must iterate over the overall
 * range.
 */
#define __TLBI_RANGE_NUM(pages, scale)					\
	({								\
		int __pages = min((pages),				\
				  __TLBI_RANGE_PAGES(31, (scale)));	\
		(__pages >> (5 * (scale) + 1)) - 1;			\
	})

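/*
 * Worked example (illustrative): for pages = 512, scales 3 and 2 are
 * rejected since (512 >> 16) - 1 and (512 >> 11) - 1 both evaluate to
 * -1, but scale 1 gives (512 >> 6) - 1 = 7, so a single range operation
 * with num = 7 covers all (7 + 1) << 6 = 512 pages.
 */
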
/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect walk-cache entries
 *		if 'last_level' is false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}

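/*
 * Illustrative sketch (assumed 'vma' and 'addr'): flush_tlb_page()
 * above follows the documented template for a single user page; leaving
 * aside the KPTI and mmu_notifier hooks, it is roughly equivalent to:
 *
 *	dsb(ishst);					// publish the PTE update
 *	__tlbi(vale1is, __TLBI_VADDR(addr, ASID(vma->vm_mm)));
 *	dsb(ish);					// wait for completion
 */
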
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	/*
	 * TLB flush deferral is not required on systems which are affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as the __tlbi()/__tlbi_user()
	 * implementation will have two consecutive TLBI instructions with a
	 * dsb(ish) in between, defeating the purpose (i.e. saving the overall
	 * 'dsb ish' cost).
	 */
	if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI)))
		return false;
#endif
	return true;
}

static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}

/*
 * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
 * synchronise all the TLBIs issued with a DSB, to avoid the race mentioned
 * in flush_tlb_batched_pending().
 */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	dsb(ish);
}

/*
 * To support TLB batched flush for multiple pages unmapping, we only send
 * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
 * completion at the end in arch_tlbbatch_flush(). Since we've already issued
 * a TLBI for each page, only a DSB is needed to synchronise its effect on
 * the other CPUs.
 *
 * This saves time otherwise spent waiting on the DSB, compared with issuing
 * a TLBI;DSB sequence for each page.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}

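/*
 * Illustrative batching sketch (hypothetical 'for_each_target_page' and
 * 'page_uaddr' helpers): one TLBI is issued per page up front, then a
 * single DSB at the end covers them all:
 *
 *	struct arch_tlbflush_unmap_batch batch;
 *
 *	for_each_target_page(page)
 *		arch_tlbbatch_add_pending(&batch, mm, page_uaddr(page));
 *	arch_tlbbatch_flush(&batch);	// one dsb(ish) for all the TLBIs
 */
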
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE

/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *              (typically for user ASIDs). 'false' for IPA instructions
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. The minimum range granularity is decided by 'scale', so multiple range
 *    TLBI operations may be required. Start from scale = 3, flush the largest
 *    possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
 *    requested range, then decrement scale and continue until one or zero pages
 *    are left.
 *
 * 2. If there is 1 page remaining, flush it through non-range operations. Range
 *    operations can only span an even number of pages.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user)		\
do {									\
	int num = 0;							\
	int scale = 3;							\
	unsigned long addr;						\
									\
	while (pages > 0) {						\
		if (!system_supports_tlb_range() ||			\
		    pages == 1) {					\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
						  num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale--;						\
	}								\
} while (0)

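/*
 * Illustrative sketch (assumed 'start', 'pages' and 'asid'): issuing a
 * last-level, walk-cache-preserving invalidation over a range of user
 * pages at PAGE_SIZE stride, with no level hint available:
 *
 *	__flush_tlb_range_op(vale1is, start, pages, PAGE_SIZE, asid, 0, true);
 */
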
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * When not using TLB range ops, we can handle up to
	 * (MAX_TLBI_OPS - 1) pages;
	 * when using TLB range ops, we can handle up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);

	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to 0 because we cannot get enough information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
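
/*
 * Illustrative sketch (assumed 'pmdp' and 'addr'): after tearing down an
 * intermediate kernel table entry, drop any cached walk entries for it:
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_kernel_pgtable(addr);
 */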
#endif

#endif