12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
247d99948SChristophe Leroy /*
347d99948SChristophe Leroy  * TLB flush routines for radix kernels.
447d99948SChristophe Leroy  *
547d99948SChristophe Leroy  * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
647d99948SChristophe Leroy  */
747d99948SChristophe Leroy 
847d99948SChristophe Leroy #include <linux/mm.h>
947d99948SChristophe Leroy #include <linux/hugetlb.h>
1047d99948SChristophe Leroy #include <linux/memblock.h>
1147d99948SChristophe Leroy #include <linux/mmu_context.h>
1247d99948SChristophe Leroy #include <linux/sched/mm.h>
1347d99948SChristophe Leroy 
1447d99948SChristophe Leroy #include <asm/ppc-opcode.h>
1547d99948SChristophe Leroy #include <asm/tlb.h>
1647d99948SChristophe Leroy #include <asm/tlbflush.h>
1747d99948SChristophe Leroy #include <asm/trace.h>
1847d99948SChristophe Leroy #include <asm/cputhreads.h>
19dd3d9aa5SNicholas Piggin #include <asm/plpar_wrappers.h>
2047d99948SChristophe Leroy 
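/*
 * Values for the RIC (Radix Invalidation Control) field of tlbie(l):
 * invalidate TLB entries only, the Page Walk Cache only, or both ("all").
 */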
2147d99948SChristophe Leroy #define RIC_FLUSH_TLB 0
2247d99948SChristophe Leroy #define RIC_FLUSH_PWC 1
2347d99948SChristophe Leroy #define RIC_FLUSH_ALL 2
2447d99948SChristophe Leroy 
2547d99948SChristophe Leroy /*
2647d99948SChristophe Leroy  * tlbiel instruction for radix, set invalidation
2747d99948SChristophe Leroy  * i.e., r=1 and is=01 or is=10 or is=11
2847d99948SChristophe Leroy  */
296d3ca7e7SMasahiro Yamada static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
3047d99948SChristophe Leroy 					unsigned int pid,
3147d99948SChristophe Leroy 					unsigned int ric, unsigned int prs)
3247d99948SChristophe Leroy {
3347d99948SChristophe Leroy 	unsigned long rb;
3447d99948SChristophe Leroy 	unsigned long rs;
3547d99948SChristophe Leroy 
3647d99948SChristophe Leroy 	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
3747d99948SChristophe Leroy 	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
3847d99948SChristophe Leroy 
3947d99948SChristophe Leroy 	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
4047d99948SChristophe Leroy 		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
4147d99948SChristophe Leroy 		     : "memory");
4247d99948SChristophe Leroy }
4347d99948SChristophe Leroy 
4447d99948SChristophe Leroy static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
4547d99948SChristophe Leroy {
4647d99948SChristophe Leroy 	unsigned int set;
4747d99948SChristophe Leroy 
4847d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
4947d99948SChristophe Leroy 
5047d99948SChristophe Leroy 	/*
5147d99948SChristophe Leroy 	 * Flush the first set of the TLB, and the entire Page Walk Cache
5247d99948SChristophe Leroy 	 * and partition table entries. Then flush the remaining sets of the
5347d99948SChristophe Leroy 	 * TLB.
5447d99948SChristophe Leroy 	 */
557e71c428SNicholas Piggin 
567e71c428SNicholas Piggin 	if (early_cpu_has_feature(CPU_FTR_HVMODE)) {
577e71c428SNicholas Piggin 		/* In hypervisor mode (MSR[HV]=1), flush partition scoped translations first. */
5847d99948SChristophe Leroy 		tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
59e8063940SAneesh Kumar K.V 
60e8063940SAneesh Kumar K.V 		if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
6147d99948SChristophe Leroy 			for (set = 1; set < num_sets; set++)
62e8063940SAneesh Kumar K.V 				tlbiel_radix_set_isa300(set, is, 0,
63e8063940SAneesh Kumar K.V 							RIC_FLUSH_TLB, 0);
64e8063940SAneesh Kumar K.V 		}
657e71c428SNicholas Piggin 	}
6647d99948SChristophe Leroy 
677e71c428SNicholas Piggin 	/* Flush process scoped entries. */
6847d99948SChristophe Leroy 	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
69e8063940SAneesh Kumar K.V 
70e8063940SAneesh Kumar K.V 	if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
7147d99948SChristophe Leroy 		for (set = 1; set < num_sets; set++)
7247d99948SChristophe Leroy 			tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
73e8063940SAneesh Kumar K.V 	}
7447d99948SChristophe Leroy 
7505504b42SNicholas Piggin 	ppc_after_tlbiel_barrier();
7647d99948SChristophe Leroy }
7747d99948SChristophe Leroy 
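/*
 * Flush all translations from this CPU's TLB with tlbiel. The action selects
 * the invalidation scope encoded in the IS field: 3 for a global flush,
 * 2 to flush only the current partition (LPID).
 */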
7847d99948SChristophe Leroy void radix__tlbiel_all(unsigned int action)
7947d99948SChristophe Leroy {
8047d99948SChristophe Leroy 	unsigned int is;
8147d99948SChristophe Leroy 
8247d99948SChristophe Leroy 	switch (action) {
8347d99948SChristophe Leroy 	case TLB_INVAL_SCOPE_GLOBAL:
8447d99948SChristophe Leroy 		is = 3;
8547d99948SChristophe Leroy 		break;
8647d99948SChristophe Leroy 	case TLB_INVAL_SCOPE_LPID:
8747d99948SChristophe Leroy 		is = 2;
8847d99948SChristophe Leroy 		break;
8947d99948SChristophe Leroy 	default:
9047d99948SChristophe Leroy 		BUG();
9147d99948SChristophe Leroy 	}
9247d99948SChristophe Leroy 
9347d99948SChristophe Leroy 	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
9447d99948SChristophe Leroy 		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
9547d99948SChristophe Leroy 	else
9647d99948SChristophe Leroy 		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
9747d99948SChristophe Leroy 
98fe7946ceSNicholas Piggin 	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
9947d99948SChristophe Leroy }
10047d99948SChristophe Leroy 
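/*
 * Local (tlbiel) invalidation of one TLB set's process scoped entries for
 * the given PID.
 */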
101efc344c5SMasahiro Yamada static __always_inline void __tlbiel_pid(unsigned long pid, int set,
10247d99948SChristophe Leroy 				unsigned long ric)
10347d99948SChristophe Leroy {
10447d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
10547d99948SChristophe Leroy 
10647d99948SChristophe Leroy 	rb = PPC_BIT(53); /* IS = 1 */
10747d99948SChristophe Leroy 	rb |= set << PPC_BITLSHIFT(51);
10847d99948SChristophe Leroy 	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
10947d99948SChristophe Leroy 	prs = 1; /* process scoped */
11047d99948SChristophe Leroy 	r = 1;   /* radix format */
11147d99948SChristophe Leroy 
11247d99948SChristophe Leroy 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
11347d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
11447d99948SChristophe Leroy 	trace_tlbie(0, 1, rb, rs, ric, prs, r);
11547d99948SChristophe Leroy }
11647d99948SChristophe Leroy 
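/* Broadcast (tlbie) invalidation of all process scoped entries for a PID. */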
117efc344c5SMasahiro Yamada static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
11847d99948SChristophe Leroy {
11947d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
12047d99948SChristophe Leroy 
12147d99948SChristophe Leroy 	rb = PPC_BIT(53); /* IS = 1 */
12247d99948SChristophe Leroy 	rs = pid << PPC_BITLSHIFT(31);
12347d99948SChristophe Leroy 	prs = 1; /* process scoped */
12447d99948SChristophe Leroy 	r = 1;   /* radix format */
12547d99948SChristophe Leroy 
12647d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
12747d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
12847d99948SChristophe Leroy 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
12947d99948SChristophe Leroy }
13047d99948SChristophe Leroy 
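/* Broadcast invalidation of all partition scoped entries for an LPID. */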
131efc344c5SMasahiro Yamada static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
13247d99948SChristophe Leroy {
13347d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
13447d99948SChristophe Leroy 
13547d99948SChristophe Leroy 	rb = PPC_BIT(52); /* IS = 2 */
13647d99948SChristophe Leroy 	rs = lpid;
13747d99948SChristophe Leroy 	prs = 0; /* partition scoped */
13847d99948SChristophe Leroy 	r = 1;   /* radix format */
13947d99948SChristophe Leroy 
14047d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
14147d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
14247d99948SChristophe Leroy 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
14347d99948SChristophe Leroy }
14447d99948SChristophe Leroy 
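/* Broadcast invalidation of a guest's process scoped entries for an LPID. */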
14599161de3SNicholas Piggin static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
14647d99948SChristophe Leroy {
14747d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
14847d99948SChristophe Leroy 
14947d99948SChristophe Leroy 	rb = PPC_BIT(52); /* IS = 2 */
15099161de3SNicholas Piggin 	rs = lpid;
15147d99948SChristophe Leroy 	prs = 1; /* process scoped */
15247d99948SChristophe Leroy 	r = 1;   /* radix format */
15347d99948SChristophe Leroy 
15499161de3SNicholas Piggin 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
15547d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
15699161de3SNicholas Piggin 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
15747d99948SChristophe Leroy }
15847d99948SChristophe Leroy 
1596d3ca7e7SMasahiro Yamada static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
16047d99948SChristophe Leroy 					unsigned long ap, unsigned long ric)
16147d99948SChristophe Leroy {
16247d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
16347d99948SChristophe Leroy 
16447d99948SChristophe Leroy 	rb = va & ~(PPC_BITMASK(52, 63));
16547d99948SChristophe Leroy 	rb |= ap << PPC_BITLSHIFT(58);
16647d99948SChristophe Leroy 	rs = pid << PPC_BITLSHIFT(31);
16747d99948SChristophe Leroy 	prs = 1; /* process scoped */
16847d99948SChristophe Leroy 	r = 1;   /* radix format */
16947d99948SChristophe Leroy 
17047d99948SChristophe Leroy 	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
17147d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
17247d99948SChristophe Leroy 	trace_tlbie(0, 1, rb, rs, ric, prs, r);
17347d99948SChristophe Leroy }
17447d99948SChristophe Leroy 
1756d3ca7e7SMasahiro Yamada static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
17647d99948SChristophe Leroy 				       unsigned long ap, unsigned long ric)
17747d99948SChristophe Leroy {
17847d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
17947d99948SChristophe Leroy 
18047d99948SChristophe Leroy 	rb = va & ~(PPC_BITMASK(52, 63));
18147d99948SChristophe Leroy 	rb |= ap << PPC_BITLSHIFT(58);
18247d99948SChristophe Leroy 	rs = pid << PPC_BITLSHIFT(31);
18347d99948SChristophe Leroy 	prs = 1; /* process scoped */
18447d99948SChristophe Leroy 	r = 1;   /* radix format */
18547d99948SChristophe Leroy 
18647d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
18747d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
18847d99948SChristophe Leroy 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
18947d99948SChristophe Leroy }
19047d99948SChristophe Leroy 
1916d3ca7e7SMasahiro Yamada static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
19247d99948SChristophe Leroy 					    unsigned long ap, unsigned long ric)
19347d99948SChristophe Leroy {
19447d99948SChristophe Leroy 	unsigned long rb,rs,prs,r;
19547d99948SChristophe Leroy 
19647d99948SChristophe Leroy 	rb = va & ~(PPC_BITMASK(52, 63));
19747d99948SChristophe Leroy 	rb |= ap << PPC_BITLSHIFT(58);
19847d99948SChristophe Leroy 	rs = lpid;
19947d99948SChristophe Leroy 	prs = 0; /* partition scoped */
20047d99948SChristophe Leroy 	r = 1;   /* radix format */
20147d99948SChristophe Leroy 
20247d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
20347d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
20447d99948SChristophe Leroy 	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
20547d99948SChristophe Leroy }
20647d99948SChristophe Leroy 
207047e6575SAneesh Kumar K.V 
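/*
 * POWER9 tlbie errata workarounds: when CPU_FTR_P9_TLBIE_ERAT_BUG or
 * CPU_FTR_P9_TLBIE_STQ_BUG is set, the real invalidation is followed by a
 * ptesync and an additional tlbie as a hardware workaround.
 */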
208047e6575SAneesh Kumar K.V static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
209047e6575SAneesh Kumar K.V 				  unsigned long ap)
21047d99948SChristophe Leroy {
211047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
212047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
213047e6575SAneesh Kumar K.V 		__tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
214047e6575SAneesh Kumar K.V 	}
215047e6575SAneesh Kumar K.V 
216047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
217047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
218047e6575SAneesh Kumar K.V 		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
219047e6575SAneesh Kumar K.V 	}
220047e6575SAneesh Kumar K.V }
221047e6575SAneesh Kumar K.V 
222047e6575SAneesh Kumar K.V static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
223047e6575SAneesh Kumar K.V 					unsigned long ap)
224047e6575SAneesh Kumar K.V {
225047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
226047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
227047e6575SAneesh Kumar K.V 		__tlbie_pid(0, RIC_FLUSH_TLB);
228047e6575SAneesh Kumar K.V 	}
229047e6575SAneesh Kumar K.V 
230047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
231047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
232047e6575SAneesh Kumar K.V 		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
233047e6575SAneesh Kumar K.V 	}
234047e6575SAneesh Kumar K.V }
235047e6575SAneesh Kumar K.V 
236047e6575SAneesh Kumar K.V static inline void fixup_tlbie_pid(unsigned long pid)
237047e6575SAneesh Kumar K.V {
238047e6575SAneesh Kumar K.V 	/*
239047e6575SAneesh Kumar K.V 	 * We can use any address for the invalidation, pick one which is
240047e6575SAneesh Kumar K.V 	 * probably unused as an optimisation.
241047e6575SAneesh Kumar K.V 	 */
24247d99948SChristophe Leroy 	unsigned long va = ((1UL << 52) - 1);
24347d99948SChristophe Leroy 
244047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
245047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
246047e6575SAneesh Kumar K.V 		__tlbie_pid(0, RIC_FLUSH_TLB);
247047e6575SAneesh Kumar K.V 	}
248047e6575SAneesh Kumar K.V 
24909ce98caSAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
25047d99948SChristophe Leroy 		asm volatile("ptesync": : :"memory");
25147d99948SChristophe Leroy 		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
25247d99948SChristophe Leroy 	}
25347d99948SChristophe Leroy }
25447d99948SChristophe Leroy 
255047e6575SAneesh Kumar K.V 
256047e6575SAneesh Kumar K.V static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
257047e6575SAneesh Kumar K.V 				       unsigned long ap)
258047e6575SAneesh Kumar K.V {
259047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
260047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
261047e6575SAneesh Kumar K.V 		__tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
262047e6575SAneesh Kumar K.V 	}
263047e6575SAneesh Kumar K.V 
264047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
265047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
266047e6575SAneesh Kumar K.V 		__tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
267047e6575SAneesh Kumar K.V 	}
268047e6575SAneesh Kumar K.V }
269047e6575SAneesh Kumar K.V 
27047d99948SChristophe Leroy static inline void fixup_tlbie_lpid(unsigned long lpid)
27147d99948SChristophe Leroy {
272047e6575SAneesh Kumar K.V 	/*
273047e6575SAneesh Kumar K.V 	 * We can use any address for the invalidation, pick one which is
274047e6575SAneesh Kumar K.V 	 * probably unused as an optimisation.
275047e6575SAneesh Kumar K.V 	 */
27647d99948SChristophe Leroy 	unsigned long va = ((1UL << 52) - 1);
27747d99948SChristophe Leroy 
278047e6575SAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
279047e6575SAneesh Kumar K.V 		asm volatile("ptesync": : :"memory");
280047e6575SAneesh Kumar K.V 		__tlbie_lpid(0, RIC_FLUSH_TLB);
281047e6575SAneesh Kumar K.V 	}
282047e6575SAneesh Kumar K.V 
28309ce98caSAneesh Kumar K.V 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
28447d99948SChristophe Leroy 		asm volatile("ptesync": : :"memory");
28547d99948SChristophe Leroy 		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
28647d99948SChristophe Leroy 	}
28747d99948SChristophe Leroy }
28847d99948SChristophe Leroy 
28947d99948SChristophe Leroy /*
29047d99948SChristophe Leroy  * We use 128 sets in radix mode and 256 sets in hpt mode.
29147d99948SChristophe Leroy  */
2926d3ca7e7SMasahiro Yamada static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
29347d99948SChristophe Leroy {
29447d99948SChristophe Leroy 	int set;
29547d99948SChristophe Leroy 
29647d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
29747d99948SChristophe Leroy 
29847d99948SChristophe Leroy 	/*
29947d99948SChristophe Leroy 	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
30047d99948SChristophe Leroy 	 * also flush the entire Page Walk Cache.
30147d99948SChristophe Leroy 	 */
30247d99948SChristophe Leroy 	__tlbiel_pid(pid, 0, ric);
30347d99948SChristophe Leroy 
30447d99948SChristophe Leroy 	/* For PWC, only one flush is needed */
30547d99948SChristophe Leroy 	if (ric == RIC_FLUSH_PWC) {
30605504b42SNicholas Piggin 		ppc_after_tlbiel_barrier();
30747d99948SChristophe Leroy 		return;
30847d99948SChristophe Leroy 	}
30947d99948SChristophe Leroy 
310e8063940SAneesh Kumar K.V 	if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
31147d99948SChristophe Leroy 		/* For the remaining sets, just flush the TLB */
31247d99948SChristophe Leroy 		for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
31347d99948SChristophe Leroy 			__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
314e8063940SAneesh Kumar K.V 	}
31547d99948SChristophe Leroy 
31605504b42SNicholas Piggin 	ppc_after_tlbiel_barrier();
3176c46fcceSNicholas Piggin 	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
31847d99948SChristophe Leroy }
31947d99948SChristophe Leroy 
32047d99948SChristophe Leroy static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
32147d99948SChristophe Leroy {
32247d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
32347d99948SChristophe Leroy 
32447d99948SChristophe Leroy 	/*
32547d99948SChristophe Leroy 	 * Workaround the fact that the "ric" argument to __tlbie_pid
32647d99948SChristophe Leroy 	 * must be a compile-time constant to match the "i" constraint
32747d99948SChristophe Leroy 	 * in the asm statement.
32847d99948SChristophe Leroy 	 */
32947d99948SChristophe Leroy 	switch (ric) {
33047d99948SChristophe Leroy 	case RIC_FLUSH_TLB:
33147d99948SChristophe Leroy 		__tlbie_pid(pid, RIC_FLUSH_TLB);
332047e6575SAneesh Kumar K.V 		fixup_tlbie_pid(pid);
33347d99948SChristophe Leroy 		break;
33447d99948SChristophe Leroy 	case RIC_FLUSH_PWC:
33547d99948SChristophe Leroy 		__tlbie_pid(pid, RIC_FLUSH_PWC);
33647d99948SChristophe Leroy 		break;
33747d99948SChristophe Leroy 	case RIC_FLUSH_ALL:
33847d99948SChristophe Leroy 	default:
33947d99948SChristophe Leroy 		__tlbie_pid(pid, RIC_FLUSH_ALL);
340047e6575SAneesh Kumar K.V 		fixup_tlbie_pid(pid);
34147d99948SChristophe Leroy 	}
34247d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
34347d99948SChristophe Leroy }
34447d99948SChristophe Leroy 
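/*
 * IPI-based flush helpers: instead of a global tlbie, run a local tlbiel on
 * every CPU in the mm's cpumask. Used on the !cputlb_use_tlbie() paths.
 */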
3452275d7b5SNicholas Piggin struct tlbiel_pid {
3462275d7b5SNicholas Piggin 	unsigned long pid;
3472275d7b5SNicholas Piggin 	unsigned long ric;
3482275d7b5SNicholas Piggin };
3492275d7b5SNicholas Piggin 
3502275d7b5SNicholas Piggin static void do_tlbiel_pid(void *info)
3512275d7b5SNicholas Piggin {
3522275d7b5SNicholas Piggin 	struct tlbiel_pid *t = info;
3532275d7b5SNicholas Piggin 
3542275d7b5SNicholas Piggin 	if (t->ric == RIC_FLUSH_TLB)
3552275d7b5SNicholas Piggin 		_tlbiel_pid(t->pid, RIC_FLUSH_TLB);
3562275d7b5SNicholas Piggin 	else if (t->ric == RIC_FLUSH_PWC)
3572275d7b5SNicholas Piggin 		_tlbiel_pid(t->pid, RIC_FLUSH_PWC);
3582275d7b5SNicholas Piggin 	else
3592275d7b5SNicholas Piggin 		_tlbiel_pid(t->pid, RIC_FLUSH_ALL);
3602275d7b5SNicholas Piggin }
3612275d7b5SNicholas Piggin 
3622275d7b5SNicholas Piggin static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
3632275d7b5SNicholas Piggin 				unsigned long pid, unsigned long ric)
3642275d7b5SNicholas Piggin {
3652275d7b5SNicholas Piggin 	struct cpumask *cpus = mm_cpumask(mm);
3662275d7b5SNicholas Piggin 	struct tlbiel_pid t = { .pid = pid, .ric = ric };
3672275d7b5SNicholas Piggin 
3682275d7b5SNicholas Piggin 	on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1);
3692275d7b5SNicholas Piggin 	/*
3702275d7b5SNicholas Piggin 	 * These paths always want the CPU's own translations invalidated with
3712275d7b5SNicholas Piggin 	 * tlbiel, so even though coprocessors require tlbie, the tlbiel
3722275d7b5SNicholas Piggin 	 * component cannot be optimised away.
3732275d7b5SNicholas Piggin 	 */
3742275d7b5SNicholas Piggin 	if (atomic_read(&mm->context.copros) > 0)
3752275d7b5SNicholas Piggin 		_tlbie_pid(pid, RIC_FLUSH_ALL);
3762275d7b5SNicholas Piggin }
3772275d7b5SNicholas Piggin 
37847d99948SChristophe Leroy static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
37947d99948SChristophe Leroy {
38047d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
38147d99948SChristophe Leroy 
38247d99948SChristophe Leroy 	/*
38347d99948SChristophe Leroy 	 * Workaround the fact that the "ric" argument to __tlbie_lpid
38447d99948SChristophe Leroy 	 * must be a compile-time constant to match the "i" constraint
38547d99948SChristophe Leroy 	 * in the asm statement.
38647d99948SChristophe Leroy 	 */
38747d99948SChristophe Leroy 	switch (ric) {
38847d99948SChristophe Leroy 	case RIC_FLUSH_TLB:
38947d99948SChristophe Leroy 		__tlbie_lpid(lpid, RIC_FLUSH_TLB);
390047e6575SAneesh Kumar K.V 		fixup_tlbie_lpid(lpid);
39147d99948SChristophe Leroy 		break;
39247d99948SChristophe Leroy 	case RIC_FLUSH_PWC:
39347d99948SChristophe Leroy 		__tlbie_lpid(lpid, RIC_FLUSH_PWC);
39447d99948SChristophe Leroy 		break;
39547d99948SChristophe Leroy 	case RIC_FLUSH_ALL:
39647d99948SChristophe Leroy 	default:
39747d99948SChristophe Leroy 		__tlbie_lpid(lpid, RIC_FLUSH_ALL);
39847d99948SChristophe Leroy 		fixup_tlbie_lpid(lpid);
399047e6575SAneesh Kumar K.V 	}
40047d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
40147d99948SChristophe Leroy }
40247d99948SChristophe Leroy 
40399161de3SNicholas Piggin static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
40447d99948SChristophe Leroy {
40547d99948SChristophe Leroy 	/*
40699161de3SNicholas Piggin 	 * Workaround the fact that the "ric" argument to __tlbie_lpid_guest
40799161de3SNicholas Piggin 	 * must be a compile-time constant to match the "i" constraint
40899161de3SNicholas Piggin 	 * in the asm statement.
40947d99948SChristophe Leroy 	 */
41099161de3SNicholas Piggin 	switch (ric) {
41199161de3SNicholas Piggin 	case RIC_FLUSH_TLB:
41299161de3SNicholas Piggin 		__tlbie_lpid_guest(lpid, RIC_FLUSH_TLB);
41399161de3SNicholas Piggin 		break;
41499161de3SNicholas Piggin 	case RIC_FLUSH_PWC:
41599161de3SNicholas Piggin 		__tlbie_lpid_guest(lpid, RIC_FLUSH_PWC);
41699161de3SNicholas Piggin 		break;
41799161de3SNicholas Piggin 	case RIC_FLUSH_ALL:
41899161de3SNicholas Piggin 	default:
41999161de3SNicholas Piggin 		__tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
42047d99948SChristophe Leroy 	}
42199161de3SNicholas Piggin 	fixup_tlbie_lpid(lpid);
42299161de3SNicholas Piggin 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
42347d99948SChristophe Leroy }
42447d99948SChristophe Leroy 
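/* Local flush of a range of process scoped addresses, one tlbiel per page. */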
42547d99948SChristophe Leroy static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
42647d99948SChristophe Leroy 				    unsigned long pid, unsigned long page_size,
42747d99948SChristophe Leroy 				    unsigned long psize)
42847d99948SChristophe Leroy {
42947d99948SChristophe Leroy 	unsigned long addr;
43047d99948SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
43147d99948SChristophe Leroy 
43247d99948SChristophe Leroy 	for (addr = start; addr < end; addr += page_size)
43347d99948SChristophe Leroy 		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
43447d99948SChristophe Leroy }
43547d99948SChristophe Leroy 
4366d3ca7e7SMasahiro Yamada static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
43747d99948SChristophe Leroy 				       unsigned long psize, unsigned long ric)
43847d99948SChristophe Leroy {
43947d99948SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
44047d99948SChristophe Leroy 
44147d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
44247d99948SChristophe Leroy 	__tlbiel_va(va, pid, ap, ric);
44305504b42SNicholas Piggin 	ppc_after_tlbiel_barrier();
44447d99948SChristophe Leroy }
44547d99948SChristophe Leroy 
44647d99948SChristophe Leroy static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
44747d99948SChristophe Leroy 				    unsigned long pid, unsigned long page_size,
44847d99948SChristophe Leroy 				    unsigned long psize, bool also_pwc)
44947d99948SChristophe Leroy {
45047d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
45147d99948SChristophe Leroy 	if (also_pwc)
45247d99948SChristophe Leroy 		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
45347d99948SChristophe Leroy 	__tlbiel_va_range(start, end, pid, page_size, psize);
45405504b42SNicholas Piggin 	ppc_after_tlbiel_barrier();
45547d99948SChristophe Leroy }
45647d99948SChristophe Leroy 
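/* Broadcast flush of a range of process scoped addresses, one tlbie per page. */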
45747d99948SChristophe Leroy static inline void __tlbie_va_range(unsigned long start, unsigned long end,
45847d99948SChristophe Leroy 				    unsigned long pid, unsigned long page_size,
45947d99948SChristophe Leroy 				    unsigned long psize)
46047d99948SChristophe Leroy {
46147d99948SChristophe Leroy 	unsigned long addr;
46247d99948SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
46347d99948SChristophe Leroy 
46447d99948SChristophe Leroy 	for (addr = start; addr < end; addr += page_size)
46547d99948SChristophe Leroy 		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
466047e6575SAneesh Kumar K.V 
467047e6575SAneesh Kumar K.V 	fixup_tlbie_va_range(addr - page_size, pid, ap);
46847d99948SChristophe Leroy }
46947d99948SChristophe Leroy 
4706d3ca7e7SMasahiro Yamada static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
47147d99948SChristophe Leroy 				      unsigned long psize, unsigned long ric)
47247d99948SChristophe Leroy {
47347d99948SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
47447d99948SChristophe Leroy 
47547d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
47647d99948SChristophe Leroy 	__tlbie_va(va, pid, ap, ric);
477047e6575SAneesh Kumar K.V 	fixup_tlbie_va(va, pid, ap);
47847d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
47947d99948SChristophe Leroy }
48047d99948SChristophe Leroy 
4812275d7b5SNicholas Piggin struct tlbiel_va {
4822275d7b5SNicholas Piggin 	unsigned long pid;
4832275d7b5SNicholas Piggin 	unsigned long va;
4842275d7b5SNicholas Piggin 	unsigned long psize;
4852275d7b5SNicholas Piggin 	unsigned long ric;
4862275d7b5SNicholas Piggin };
4872275d7b5SNicholas Piggin 
4882275d7b5SNicholas Piggin static void do_tlbiel_va(void *info)
4892275d7b5SNicholas Piggin {
4902275d7b5SNicholas Piggin 	struct tlbiel_va *t = info;
4912275d7b5SNicholas Piggin 
4922275d7b5SNicholas Piggin 	if (t->ric == RIC_FLUSH_TLB)
4932275d7b5SNicholas Piggin 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB);
4942275d7b5SNicholas Piggin 	else if (t->ric == RIC_FLUSH_PWC)
4952275d7b5SNicholas Piggin 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC);
4962275d7b5SNicholas Piggin 	else
4972275d7b5SNicholas Piggin 		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL);
4982275d7b5SNicholas Piggin }
4992275d7b5SNicholas Piggin 
5002275d7b5SNicholas Piggin static inline void _tlbiel_va_multicast(struct mm_struct *mm,
5012275d7b5SNicholas Piggin 				unsigned long va, unsigned long pid,
5022275d7b5SNicholas Piggin 				unsigned long psize, unsigned long ric)
5032275d7b5SNicholas Piggin {
5042275d7b5SNicholas Piggin 	struct cpumask *cpus = mm_cpumask(mm);
5052275d7b5SNicholas Piggin 	struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };
5062275d7b5SNicholas Piggin 	on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
5072275d7b5SNicholas Piggin 	if (atomic_read(&mm->context.copros) > 0)
5082275d7b5SNicholas Piggin 		_tlbie_va(va, pid, psize, RIC_FLUSH_TLB);
5092275d7b5SNicholas Piggin }
5102275d7b5SNicholas Piggin 
5112275d7b5SNicholas Piggin struct tlbiel_va_range {
5122275d7b5SNicholas Piggin 	unsigned long pid;
5132275d7b5SNicholas Piggin 	unsigned long start;
5142275d7b5SNicholas Piggin 	unsigned long end;
5152275d7b5SNicholas Piggin 	unsigned long page_size;
5162275d7b5SNicholas Piggin 	unsigned long psize;
5172275d7b5SNicholas Piggin 	bool also_pwc;
5182275d7b5SNicholas Piggin };
5192275d7b5SNicholas Piggin 
5202275d7b5SNicholas Piggin static void do_tlbiel_va_range(void *info)
5212275d7b5SNicholas Piggin {
5222275d7b5SNicholas Piggin 	struct tlbiel_va_range *t = info;
5232275d7b5SNicholas Piggin 
5242275d7b5SNicholas Piggin 	_tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
5252275d7b5SNicholas Piggin 				    t->psize, t->also_pwc);
5262275d7b5SNicholas Piggin }
5272275d7b5SNicholas Piggin 
5286d3ca7e7SMasahiro Yamada static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
52947d99948SChristophe Leroy 			      unsigned long psize, unsigned long ric)
53047d99948SChristophe Leroy {
53147d99948SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
53247d99948SChristophe Leroy 
53347d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
53447d99948SChristophe Leroy 	__tlbie_lpid_va(va, lpid, ap, ric);
535047e6575SAneesh Kumar K.V 	fixup_tlbie_lpid_va(va, lpid, ap);
53647d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
53747d99948SChristophe Leroy }
53847d99948SChristophe Leroy 
53947d99948SChristophe Leroy static inline void _tlbie_va_range(unsigned long start, unsigned long end,
54047d99948SChristophe Leroy 				    unsigned long pid, unsigned long page_size,
54147d99948SChristophe Leroy 				    unsigned long psize, bool also_pwc)
54247d99948SChristophe Leroy {
54347d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
54447d99948SChristophe Leroy 	if (also_pwc)
54547d99948SChristophe Leroy 		__tlbie_pid(pid, RIC_FLUSH_PWC);
54647d99948SChristophe Leroy 	__tlbie_va_range(start, end, pid, page_size, psize);
54747d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
54847d99948SChristophe Leroy }
54947d99948SChristophe Leroy 
5502275d7b5SNicholas Piggin static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
5512275d7b5SNicholas Piggin 				unsigned long start, unsigned long end,
5522275d7b5SNicholas Piggin 				unsigned long pid, unsigned long page_size,
5532275d7b5SNicholas Piggin 				unsigned long psize, bool also_pwc)
5542275d7b5SNicholas Piggin {
5552275d7b5SNicholas Piggin 	struct cpumask *cpus = mm_cpumask(mm);
5562275d7b5SNicholas Piggin 	struct tlbiel_va_range t = { .start = start, .end = end,
5572275d7b5SNicholas Piggin 				.pid = pid, .page_size = page_size,
5582275d7b5SNicholas Piggin 				.psize = psize, .also_pwc = also_pwc };
5592275d7b5SNicholas Piggin 
5602275d7b5SNicholas Piggin 	on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
5612275d7b5SNicholas Piggin 	if (atomic_read(&mm->context.copros) > 0)
5622275d7b5SNicholas Piggin 		_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
5632275d7b5SNicholas Piggin }
5642275d7b5SNicholas Piggin 
56547d99948SChristophe Leroy /*
56647d99948SChristophe Leroy  * Base TLB flushing operations:
56747d99948SChristophe Leroy  *
56847d99948SChristophe Leroy  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
56947d99948SChristophe Leroy  *  - flush_tlb_page(vma, vmaddr) flushes one page
57047d99948SChristophe Leroy  *  - flush_tlb_range(vma, start, end) flushes a range of pages
57147d99948SChristophe Leroy  *  - flush_tlb_kernel_range(start, end) flushes kernel pages
57247d99948SChristophe Leroy  *
57347d99948SChristophe Leroy  *  - local_* variants of page and mm only apply to the current
57447d99948SChristophe Leroy  *    processor
57547d99948SChristophe Leroy  */
57647d99948SChristophe Leroy void radix__local_flush_tlb_mm(struct mm_struct *mm)
57747d99948SChristophe Leroy {
57847d99948SChristophe Leroy 	unsigned long pid;
57947d99948SChristophe Leroy 
58047d99948SChristophe Leroy 	preempt_disable();
58147d99948SChristophe Leroy 	pid = mm->context.id;
58247d99948SChristophe Leroy 	if (pid != MMU_NO_CONTEXT)
58347d99948SChristophe Leroy 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
58447d99948SChristophe Leroy 	preempt_enable();
58547d99948SChristophe Leroy }
58647d99948SChristophe Leroy EXPORT_SYMBOL(radix__local_flush_tlb_mm);
58747d99948SChristophe Leroy 
58847d99948SChristophe Leroy #ifndef CONFIG_SMP
58947d99948SChristophe Leroy void radix__local_flush_all_mm(struct mm_struct *mm)
59047d99948SChristophe Leroy {
59147d99948SChristophe Leroy 	unsigned long pid;
59247d99948SChristophe Leroy 
59347d99948SChristophe Leroy 	preempt_disable();
59447d99948SChristophe Leroy 	pid = mm->context.id;
59547d99948SChristophe Leroy 	if (pid != MMU_NO_CONTEXT)
59647d99948SChristophe Leroy 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
59747d99948SChristophe Leroy 	preempt_enable();
59847d99948SChristophe Leroy }
59947d99948SChristophe Leroy EXPORT_SYMBOL(radix__local_flush_all_mm);
600993cfeccSNicholas Piggin 
601993cfeccSNicholas Piggin static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
602993cfeccSNicholas Piggin {
603993cfeccSNicholas Piggin 	radix__local_flush_all_mm(mm);
604993cfeccSNicholas Piggin }
60547d99948SChristophe Leroy #endif /* CONFIG_SMP */
60647d99948SChristophe Leroy 
60747d99948SChristophe Leroy void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
60847d99948SChristophe Leroy 				       int psize)
60947d99948SChristophe Leroy {
61047d99948SChristophe Leroy 	unsigned long pid;
61147d99948SChristophe Leroy 
61247d99948SChristophe Leroy 	preempt_disable();
61347d99948SChristophe Leroy 	pid = mm->context.id;
61447d99948SChristophe Leroy 	if (pid != MMU_NO_CONTEXT)
61547d99948SChristophe Leroy 		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
61647d99948SChristophe Leroy 	preempt_enable();
61747d99948SChristophe Leroy }
61847d99948SChristophe Leroy 
61947d99948SChristophe Leroy void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
62047d99948SChristophe Leroy {
62147d99948SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
62247d99948SChristophe Leroy 	/* need the return fix for nohash.c */
62347d99948SChristophe Leroy 	if (is_vm_hugetlb_page(vma))
62447d99948SChristophe Leroy 		return radix__local_flush_hugetlb_page(vma, vmaddr);
62547d99948SChristophe Leroy #endif
62647d99948SChristophe Leroy 	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
62747d99948SChristophe Leroy }
62847d99948SChristophe Leroy EXPORT_SYMBOL(radix__local_flush_tlb_page);
62947d99948SChristophe Leroy 
63047d99948SChristophe Leroy static bool mm_needs_flush_escalation(struct mm_struct *mm)
63147d99948SChristophe Leroy {
63247d99948SChristophe Leroy 	/*
63347d99948SChristophe Leroy 	 * P9 nest MMU has issues with the page walk cache
63447d99948SChristophe Leroy 	 * caching PTEs and not flushing them properly when
63547d99948SChristophe Leroy 	 * RIC = 0 for a PID/LPID invalidate
63647d99948SChristophe Leroy 	 */
63747d99948SChristophe Leroy 	if (atomic_read(&mm->context.copros) > 0)
63847d99948SChristophe Leroy 		return true;
63947d99948SChristophe Leroy 	return false;
64047d99948SChristophe Leroy }
64147d99948SChristophe Leroy 
642*032b7f08SNicholas Piggin /*
643*032b7f08SNicholas Piggin  * If always_flush is true, then flush even if this CPU can't be removed
644*032b7f08SNicholas Piggin  * from mm_cpumask.
645*032b7f08SNicholas Piggin  */
646*032b7f08SNicholas Piggin void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
64747d99948SChristophe Leroy {
64847d99948SChristophe Leroy 	unsigned long pid = mm->context.id;
649a2496049SNicholas Piggin 	int cpu = smp_processor_id();
65047d99948SChristophe Leroy 
651a665eec0SNicholas Piggin 	/*
652a665eec0SNicholas Piggin 	 * A kthread could have done a mmget_not_zero() after the flushing CPU
653a2496049SNicholas Piggin 	 * checked mm_cpumask, and be in the process of kthread_use_mm when
654a2496049SNicholas Piggin 	 * interrupted here. In that case, current->mm will be set to mm,
655a2496049SNicholas Piggin 	 * because kthread_use_mm() sets ->mm and switches to the mm with
656a2496049SNicholas Piggin 	 * interrupts disabled.
657a665eec0SNicholas Piggin 	 */
65847d99948SChristophe Leroy 	if (current->mm == mm)
659*032b7f08SNicholas Piggin 		goto out;
66047d99948SChristophe Leroy 
66147d99948SChristophe Leroy 	if (current->active_mm == mm) {
662a665eec0SNicholas Piggin 		WARN_ON_ONCE(current->mm != NULL);
663a665eec0SNicholas Piggin 		/* Is a kernel thread and is using mm as the lazy tlb */
66447d99948SChristophe Leroy 		mmgrab(&init_mm);
66547d99948SChristophe Leroy 		current->active_mm = &init_mm;
666a665eec0SNicholas Piggin 		switch_mm_irqs_off(mm, &init_mm, current);
66747d99948SChristophe Leroy 		mmdrop(mm);
66847d99948SChristophe Leroy 	}
669a665eec0SNicholas Piggin 
670a2496049SNicholas Piggin 	/*
671780de406SNicholas Piggin 	 * This IPI may be initiated from any source including those not
672780de406SNicholas Piggin 	 * running the mm, so there may be a racing IPI that comes after
673780de406SNicholas Piggin 	 * this one which finds the cpumask already clear. Check and avoid
674780de406SNicholas Piggin 	 * underflowing the active_cpus count in that case. The race should
675780de406SNicholas Piggin 	 * not otherwise be a problem, but the TLB must be flushed because
676780de406SNicholas Piggin 	 * that's what the caller expects.
677a2496049SNicholas Piggin 	 */
678a2496049SNicholas Piggin 	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
679a665eec0SNicholas Piggin 		atomic_dec(&mm->context.active_cpus);
680a2496049SNicholas Piggin 		cpumask_clear_cpu(cpu, mm_cpumask(mm));
681*032b7f08SNicholas Piggin 		always_flush = true;
682a2496049SNicholas Piggin 	}
683a665eec0SNicholas Piggin 
684*032b7f08SNicholas Piggin out:
685*032b7f08SNicholas Piggin 	if (always_flush)
68647d99948SChristophe Leroy 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
68747d99948SChristophe Leroy }
68847d99948SChristophe Leroy 
68993935448SNicholas Piggin #ifdef CONFIG_SMP
69093935448SNicholas Piggin static void do_exit_flush_lazy_tlb(void *arg)
69193935448SNicholas Piggin {
69293935448SNicholas Piggin 	struct mm_struct *mm = arg;
693*032b7f08SNicholas Piggin 	exit_lazy_flush_tlb(mm, true);
69493935448SNicholas Piggin }
69593935448SNicholas Piggin 
69647d99948SChristophe Leroy static void exit_flush_lazy_tlbs(struct mm_struct *mm)
69747d99948SChristophe Leroy {
69847d99948SChristophe Leroy 	/*
69947d99948SChristophe Leroy 	 * Would be nice if this was async so it could be run in
70047d99948SChristophe Leroy 	 * parallel with our local flush, but generic code does not
70147d99948SChristophe Leroy 	 * give a good API for it. Could extend the generic code or
70247d99948SChristophe Leroy 	 * make a special powerpc IPI for flushing TLBs.
70347d99948SChristophe Leroy 	 * For now it's not too performance critical.
70447d99948SChristophe Leroy 	 */
70547d99948SChristophe Leroy 	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
70647d99948SChristophe Leroy 				(void *)mm, 1);
70747d99948SChristophe Leroy }
70893935448SNicholas Piggin 
70926418b36SNicholas Piggin #else /* CONFIG_SMP */
71026418b36SNicholas Piggin static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
71126418b36SNicholas Piggin #endif /* CONFIG_SMP */
71247d99948SChristophe Leroy 
71393935448SNicholas Piggin static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock);
71493935448SNicholas Piggin 
71593935448SNicholas Piggin /*
71693935448SNicholas Piggin  * Interval between flushes at which we send out IPIs to check whether the
71793935448SNicholas Piggin  * mm_cpumask can be trimmed for the case where it's not a single-threaded
71893935448SNicholas Piggin  * process flushing its own mm. The intent is to reduce the cost of later
71993935448SNicholas Piggin  * flushes. Don't want this to be so low that it adds noticeable cost to TLB
72093935448SNicholas Piggin  * flushing, or so high that it doesn't help reduce global TLBIEs.
72193935448SNicholas Piggin  */
72293935448SNicholas Piggin static unsigned long tlb_mm_cpumask_trim_timer = 1073;
72393935448SNicholas Piggin 
72493935448SNicholas Piggin static bool tick_and_test_trim_clock(void)
72593935448SNicholas Piggin {
72693935448SNicholas Piggin 	if (__this_cpu_inc_return(mm_cpumask_trim_clock) ==
72793935448SNicholas Piggin 			tlb_mm_cpumask_trim_timer) {
72893935448SNicholas Piggin 		__this_cpu_write(mm_cpumask_trim_clock, 0);
72993935448SNicholas Piggin 		return true;
73093935448SNicholas Piggin 	}
73193935448SNicholas Piggin 	return false;
73293935448SNicholas Piggin }
73393935448SNicholas Piggin 
73426418b36SNicholas Piggin enum tlb_flush_type {
73554bb5033SNicholas Piggin 	FLUSH_TYPE_NONE,
73626418b36SNicholas Piggin 	FLUSH_TYPE_LOCAL,
73726418b36SNicholas Piggin 	FLUSH_TYPE_GLOBAL,
73826418b36SNicholas Piggin };
73926418b36SNicholas Piggin 
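/*
 * Work out how this mm must be flushed: not at all (no active CPUs), with a
 * local tlbiel only (this CPU is the sole user), or globally. Opportunistically
 * tries to trim mm_cpumask so later flushes can stay local.
 */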
74026418b36SNicholas Piggin static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
74126418b36SNicholas Piggin {
74254bb5033SNicholas Piggin 	int active_cpus = atomic_read(&mm->context.active_cpus);
74354bb5033SNicholas Piggin 	int cpu = smp_processor_id();
74454bb5033SNicholas Piggin 
74554bb5033SNicholas Piggin 	if (active_cpus == 0)
74654bb5033SNicholas Piggin 		return FLUSH_TYPE_NONE;
74793935448SNicholas Piggin 	if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
74893935448SNicholas Piggin 		if (current->mm != mm) {
74993935448SNicholas Piggin 			/*
75093935448SNicholas Piggin 			 * Asynchronous flush sources may trim down to nothing
75193935448SNicholas Piggin 			 * if the process is not running, so occasionally try
75293935448SNicholas Piggin 			 * to trim.
75393935448SNicholas Piggin 			 */
75493935448SNicholas Piggin 			if (tick_and_test_trim_clock()) {
755*032b7f08SNicholas Piggin 				exit_lazy_flush_tlb(mm, true);
75693935448SNicholas Piggin 				return FLUSH_TYPE_NONE;
75793935448SNicholas Piggin 			}
75893935448SNicholas Piggin 		}
75926418b36SNicholas Piggin 		return FLUSH_TYPE_LOCAL;
76093935448SNicholas Piggin 	}
76126418b36SNicholas Piggin 
76226418b36SNicholas Piggin 	/* Coprocessors require TLBIE to invalidate nMMU. */
76326418b36SNicholas Piggin 	if (atomic_read(&mm->context.copros) > 0)
76426418b36SNicholas Piggin 		return FLUSH_TYPE_GLOBAL;
76526418b36SNicholas Piggin 
76626418b36SNicholas Piggin 	/*
76726418b36SNicholas Piggin 	 * In the fullmm case there's no point doing the exit_flush_lazy_tlbs
76826418b36SNicholas Piggin 	 * because the mm is being taken down anyway, and a TLBIE tends to
76926418b36SNicholas Piggin 	 * be faster than an IPI+TLBIEL.
77026418b36SNicholas Piggin 	 */
77126418b36SNicholas Piggin 	if (fullmm)
77226418b36SNicholas Piggin 		return FLUSH_TYPE_GLOBAL;
77326418b36SNicholas Piggin 
77426418b36SNicholas Piggin 	/*
77526418b36SNicholas Piggin 	 * If we are running the only thread of a single-threaded process,
77626418b36SNicholas Piggin 	 * then we should almost always be able to trim off the rest of the
77726418b36SNicholas Piggin 	 * CPU mask (except in the case of use_mm() races), so always try
77826418b36SNicholas Piggin 	 * trimming the mask.
77926418b36SNicholas Piggin 	 */
78026418b36SNicholas Piggin 	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) {
78126418b36SNicholas Piggin 		exit_flush_lazy_tlbs(mm);
78226418b36SNicholas Piggin 		/*
78326418b36SNicholas Piggin 		 * use_mm() race could prevent IPIs from being able to clear
78426418b36SNicholas Piggin 		 * the cpumask here, however those users are established
78526418b36SNicholas Piggin 		 * after our first check (and so after the PTEs are removed),
78626418b36SNicholas Piggin 		 * and the TLB still gets flushed by the IPI, so this CPU
78726418b36SNicholas Piggin 		 * will only require a local flush.
78826418b36SNicholas Piggin 		 */
78926418b36SNicholas Piggin 		return FLUSH_TYPE_LOCAL;
79026418b36SNicholas Piggin 	}
79126418b36SNicholas Piggin 
79293935448SNicholas Piggin 	/*
79393935448SNicholas Piggin 	 * Occasionally try to trim down the cpumask. It's possible this can
79493935448SNicholas Piggin 	 * bring the mask to zero, which results in no flush.
79593935448SNicholas Piggin 	 */
79693935448SNicholas Piggin 	if (tick_and_test_trim_clock()) {
79793935448SNicholas Piggin 		exit_flush_lazy_tlbs(mm);
79893935448SNicholas Piggin 		if (current->mm == mm)
79993935448SNicholas Piggin 			return FLUSH_TYPE_LOCAL;
80093935448SNicholas Piggin 		if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
801*032b7f08SNicholas Piggin 			exit_lazy_flush_tlb(mm, true);
80293935448SNicholas Piggin 		return FLUSH_TYPE_NONE;
80393935448SNicholas Piggin 	}
80493935448SNicholas Piggin 
80526418b36SNicholas Piggin 	return FLUSH_TYPE_GLOBAL;
80626418b36SNicholas Piggin }
80726418b36SNicholas Piggin 
80826418b36SNicholas Piggin #ifdef CONFIG_SMP
80947d99948SChristophe Leroy void radix__flush_tlb_mm(struct mm_struct *mm)
81047d99948SChristophe Leroy {
81147d99948SChristophe Leroy 	unsigned long pid;
81226418b36SNicholas Piggin 	enum tlb_flush_type type;
81347d99948SChristophe Leroy 
81447d99948SChristophe Leroy 	pid = mm->context.id;
81547d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
81647d99948SChristophe Leroy 		return;
81747d99948SChristophe Leroy 
81847d99948SChristophe Leroy 	preempt_disable();
81947d99948SChristophe Leroy 	/*
82026418b36SNicholas Piggin 	 * Order loads of mm_cpumask (in flush_type_needed) vs previous
82126418b36SNicholas Piggin 	 * stores to clear ptes before the invalidate. See barrier in
82226418b36SNicholas Piggin 	 * switch_mm_irqs_off
82347d99948SChristophe Leroy 	 */
82447d99948SChristophe Leroy 	smp_mb();
82526418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
82654bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
82754bb5033SNicholas Piggin 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
82854bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
829dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
830dd3d9aa5SNicholas Piggin 			unsigned long tgt = H_RPTI_TARGET_CMMU;
831dd3d9aa5SNicholas Piggin 
832dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
833dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
834dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
835dd3d9aa5SNicholas Piggin 					       H_RPTI_PAGE_ALL, 0, -1UL);
836dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie()) {
83747d99948SChristophe Leroy 			if (mm_needs_flush_escalation(mm))
83847d99948SChristophe Leroy 				_tlbie_pid(pid, RIC_FLUSH_ALL);
83947d99948SChristophe Leroy 			else
84047d99948SChristophe Leroy 				_tlbie_pid(pid, RIC_FLUSH_TLB);
84147d99948SChristophe Leroy 		} else {
8422275d7b5SNicholas Piggin 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
8432275d7b5SNicholas Piggin 		}
84447d99948SChristophe Leroy 	}
84547d99948SChristophe Leroy 	preempt_enable();
84647d99948SChristophe Leroy }
84747d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_mm);
84847d99948SChristophe Leroy 
84947d99948SChristophe Leroy static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
85047d99948SChristophe Leroy {
85147d99948SChristophe Leroy 	unsigned long pid;
85226418b36SNicholas Piggin 	enum tlb_flush_type type;
85347d99948SChristophe Leroy 
85447d99948SChristophe Leroy 	pid = mm->context.id;
85547d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
85647d99948SChristophe Leroy 		return;
85747d99948SChristophe Leroy 
85847d99948SChristophe Leroy 	preempt_disable();
85947d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
86026418b36SNicholas Piggin 	type = flush_type_needed(mm, fullmm);
86154bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
86254bb5033SNicholas Piggin 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
86354bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
864dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
865dd3d9aa5SNicholas Piggin 			unsigned long tgt = H_RPTI_TARGET_CMMU;
866dd3d9aa5SNicholas Piggin 			unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
867dd3d9aa5SNicholas Piggin 					     H_RPTI_TYPE_PRT;
868dd3d9aa5SNicholas Piggin 
869dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
870dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
871dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, type,
872dd3d9aa5SNicholas Piggin 					       H_RPTI_PAGE_ALL, 0, -1UL);
873dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
87447d99948SChristophe Leroy 			_tlbie_pid(pid, RIC_FLUSH_ALL);
8752275d7b5SNicholas Piggin 		else
8762275d7b5SNicholas Piggin 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
87747d99948SChristophe Leroy 	}
87847d99948SChristophe Leroy 	preempt_enable();
87947d99948SChristophe Leroy }
88052162ec7SAneesh Kumar K.V 
88147d99948SChristophe Leroy void radix__flush_all_mm(struct mm_struct *mm)
88247d99948SChristophe Leroy {
88347d99948SChristophe Leroy 	__flush_all_mm(mm, false);
88447d99948SChristophe Leroy }
88547d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_all_mm);
88647d99948SChristophe Leroy 
88747d99948SChristophe Leroy void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
88847d99948SChristophe Leroy 				 int psize)
88947d99948SChristophe Leroy {
89047d99948SChristophe Leroy 	unsigned long pid;
89126418b36SNicholas Piggin 	enum tlb_flush_type type;
89247d99948SChristophe Leroy 
89347d99948SChristophe Leroy 	pid = mm->context.id;
89447d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
89547d99948SChristophe Leroy 		return;
89647d99948SChristophe Leroy 
89747d99948SChristophe Leroy 	preempt_disable();
89847d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
89926418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
90054bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
90154bb5033SNicholas Piggin 		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
90254bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
903dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
904dd3d9aa5SNicholas Piggin 			unsigned long tgt, pg_sizes, size;
905dd3d9aa5SNicholas Piggin 
906dd3d9aa5SNicholas Piggin 			tgt = H_RPTI_TARGET_CMMU;
907dd3d9aa5SNicholas Piggin 			pg_sizes = psize_to_rpti_pgsize(psize);
908dd3d9aa5SNicholas Piggin 			size = 1UL << mmu_psize_to_shift(psize);
909dd3d9aa5SNicholas Piggin 
910dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
911dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
912dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
913dd3d9aa5SNicholas Piggin 					       pg_sizes, vmaddr,
914dd3d9aa5SNicholas Piggin 					       vmaddr + size);
915dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
91647d99948SChristophe Leroy 			_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
9172275d7b5SNicholas Piggin 		else
9182275d7b5SNicholas Piggin 			_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
91947d99948SChristophe Leroy 	}
92047d99948SChristophe Leroy 	preempt_enable();
92147d99948SChristophe Leroy }
92247d99948SChristophe Leroy 
92347d99948SChristophe Leroy void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
92447d99948SChristophe Leroy {
92547d99948SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
92647d99948SChristophe Leroy 	if (is_vm_hugetlb_page(vma))
92747d99948SChristophe Leroy 		return radix__flush_hugetlb_page(vma, vmaddr);
92847d99948SChristophe Leroy #endif
92947d99948SChristophe Leroy 	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
93047d99948SChristophe Leroy }
93147d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_page);
93247d99948SChristophe Leroy 
93347d99948SChristophe Leroy #endif /* CONFIG_SMP */
93447d99948SChristophe Leroy 
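/* Kernel translations are invalidated by flushing PID 0. */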
9352275d7b5SNicholas Piggin static void do_tlbiel_kernel(void *info)
9362275d7b5SNicholas Piggin {
9372275d7b5SNicholas Piggin 	_tlbiel_pid(0, RIC_FLUSH_ALL);
9382275d7b5SNicholas Piggin }
9392275d7b5SNicholas Piggin 
9402275d7b5SNicholas Piggin static inline void _tlbiel_kernel_broadcast(void)
9412275d7b5SNicholas Piggin {
9422275d7b5SNicholas Piggin 	on_each_cpu(do_tlbiel_kernel, NULL, 1);
9432275d7b5SNicholas Piggin 	if (tlbie_capable) {
9442275d7b5SNicholas Piggin 		/*
9452275d7b5SNicholas Piggin 		 * Coherent accelerators don't refcount kernel memory mappings,
9462275d7b5SNicholas Piggin 		 * so we always have to issue a tlbie for them. This is quite a
9472275d7b5SNicholas Piggin 		 * slow path anyway.
9482275d7b5SNicholas Piggin 		 */
9492275d7b5SNicholas Piggin 		_tlbie_pid(0, RIC_FLUSH_ALL);
9502275d7b5SNicholas Piggin 	}
9512275d7b5SNicholas Piggin }
9522275d7b5SNicholas Piggin 
95360e8523eSAlastair D'Silva /*
95460e8523eSAlastair D'Silva  * If kernel TLBIs ever become local rather than global, then
95560e8523eSAlastair D'Silva  * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it
95660e8523eSAlastair D'Silva  * assumes kernel TLBIs are global.
95760e8523eSAlastair D'Silva  */
95847d99948SChristophe Leroy void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
95947d99948SChristophe Leroy {
960dd3d9aa5SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE)) {
961dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU;
962dd3d9aa5SNicholas Piggin 		unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
963dd3d9aa5SNicholas Piggin 				     H_RPTI_TYPE_PRT;
964dd3d9aa5SNicholas Piggin 
965dd3d9aa5SNicholas Piggin 		pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL,
966dd3d9aa5SNicholas Piggin 				       start, end);
967dd3d9aa5SNicholas Piggin 	} else if (cputlb_use_tlbie())
96847d99948SChristophe Leroy 		_tlbie_pid(0, RIC_FLUSH_ALL);
9692275d7b5SNicholas Piggin 	else
9702275d7b5SNicholas Piggin 		_tlbiel_kernel_broadcast();
97147d99948SChristophe Leroy }
97247d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
97347d99948SChristophe Leroy 
97447d99948SChristophe Leroy #define TLB_FLUSH_ALL -1UL
97547d99948SChristophe Leroy 
97647d99948SChristophe Leroy /*
97747d99948SChristophe Leroy  * Number of pages above which we invalidate the entire PID rather than
97847d99948SChristophe Leroy  * flush individual pages, for local and global flushes respectively.
97947d99948SChristophe Leroy  *
98047d99948SChristophe Leroy  * tlbie goes out to the interconnect and individual ops are more costly.
98147d99948SChristophe Leroy  * It also does not iterate over sets like the local tlbiel variant when
98247d99948SChristophe Leroy  * invalidating a full PID, so it has a far lower threshold to change from
98347d99948SChristophe Leroy  * individual page flushes to full-pid flushes.
98447d99948SChristophe Leroy  */
98547d99948SChristophe Leroy static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
98647d99948SChristophe Leroy static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
98747d99948SChristophe Leroy 
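/*
 * Flush a range of user addresses: depending on the flush type and range
 * size, this either flushes the whole PID or flushes page by page (with a
 * separate pass over the PMD-aligned subrange at 2M size when THP is enabled).
 */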
98847d99948SChristophe Leroy static inline void __radix__flush_tlb_range(struct mm_struct *mm,
989a42d6ba8SAneesh Kumar K.V 					    unsigned long start, unsigned long end)
99047d99948SChristophe Leroy 
99147d99948SChristophe Leroy {
99247d99948SChristophe Leroy 	unsigned long pid;
99347d99948SChristophe Leroy 	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
99447d99948SChristophe Leroy 	unsigned long page_size = 1UL << page_shift;
99547d99948SChristophe Leroy 	unsigned long nr_pages = (end - start) >> page_shift;
99626418b36SNicholas Piggin 	bool fullmm = (end == TLB_FLUSH_ALL);
99726418b36SNicholas Piggin 	bool flush_pid;
99826418b36SNicholas Piggin 	enum tlb_flush_type type;
99947d99948SChristophe Leroy 
100047d99948SChristophe Leroy 	pid = mm->context.id;
100147d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
100247d99948SChristophe Leroy 		return;
100347d99948SChristophe Leroy 
100447d99948SChristophe Leroy 	preempt_disable();
100547d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
100626418b36SNicholas Piggin 	type = flush_type_needed(mm, fullmm);
100754bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_NONE)
100854bb5033SNicholas Piggin 		goto out;
100947d99948SChristophe Leroy 
101026418b36SNicholas Piggin 	if (fullmm)
101126418b36SNicholas Piggin 		flush_pid = true;
101226418b36SNicholas Piggin 	else if (type == FLUSH_TYPE_GLOBAL)
101326418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
101426418b36SNicholas Piggin 	else
101526418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
101626418b36SNicholas Piggin 
101726418b36SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
1018dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU;
1019dd3d9aa5SNicholas Piggin 		unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
1020dd3d9aa5SNicholas Piggin 
1021dd3d9aa5SNicholas Piggin 		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1022dd3d9aa5SNicholas Piggin 			pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
1023dd3d9aa5SNicholas Piggin 		if (atomic_read(&mm->context.copros) > 0)
1024dd3d9aa5SNicholas Piggin 			tgt |= H_RPTI_TARGET_NMMU;
1025dd3d9aa5SNicholas Piggin 		pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB, pg_sizes,
1026dd3d9aa5SNicholas Piggin 				       start, end);
102726418b36SNicholas Piggin 	} else if (flush_pid) {
102826418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
102947d99948SChristophe Leroy 			_tlbiel_pid(pid, RIC_FLUSH_TLB);
103047d99948SChristophe Leroy 		} else {
10312275d7b5SNicholas Piggin 			if (cputlb_use_tlbie()) {
103247d99948SChristophe Leroy 				if (mm_needs_flush_escalation(mm))
103347d99948SChristophe Leroy 					_tlbie_pid(pid, RIC_FLUSH_ALL);
103447d99948SChristophe Leroy 				else
103547d99948SChristophe Leroy 					_tlbie_pid(pid, RIC_FLUSH_TLB);
10362275d7b5SNicholas Piggin 			} else {
10372275d7b5SNicholas Piggin 				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
10382275d7b5SNicholas Piggin 			}
103947d99948SChristophe Leroy 		}
104047d99948SChristophe Leroy 	} else {
1041a42d6ba8SAneesh Kumar K.V 		bool hflush = false;
104247d99948SChristophe Leroy 		unsigned long hstart, hend;
104347d99948SChristophe Leroy 
1044a42d6ba8SAneesh Kumar K.V 		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
104547d99948SChristophe Leroy 			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
104647d99948SChristophe Leroy 			hend = end & PMD_MASK;
10478f53f9c0SAneesh Kumar K.V 			if (hstart < hend)
1048a42d6ba8SAneesh Kumar K.V 				hflush = true;
104947d99948SChristophe Leroy 		}
105047d99948SChristophe Leroy 
105126418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
10522275d7b5SNicholas Piggin 			asm volatile("ptesync": : :"memory");
105347d99948SChristophe Leroy 			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
105447d99948SChristophe Leroy 			if (hflush)
105547d99948SChristophe Leroy 				__tlbiel_va_range(hstart, hend, pid,
105647d99948SChristophe Leroy 						PMD_SIZE, MMU_PAGE_2M);
105705504b42SNicholas Piggin 			ppc_after_tlbiel_barrier();
10582275d7b5SNicholas Piggin 		} else if (cputlb_use_tlbie()) {
10592275d7b5SNicholas Piggin 			asm volatile("ptesync": : :"memory");
106047d99948SChristophe Leroy 			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
106147d99948SChristophe Leroy 			if (hflush)
106247d99948SChristophe Leroy 				__tlbie_va_range(hstart, hend, pid,
106347d99948SChristophe Leroy 						PMD_SIZE, MMU_PAGE_2M);
106447d99948SChristophe Leroy 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
10652275d7b5SNicholas Piggin 		} else {
10662275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
10672275d7b5SNicholas Piggin 					start, end, pid, page_size, mmu_virtual_psize, false);
10682275d7b5SNicholas Piggin 			if (hflush)
10692275d7b5SNicholas Piggin 				_tlbiel_va_range_multicast(mm,
10702275d7b5SNicholas Piggin 					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
107147d99948SChristophe Leroy 		}
107247d99948SChristophe Leroy 	}
107354bb5033SNicholas Piggin out:
107447d99948SChristophe Leroy 	preempt_enable();
107547d99948SChristophe Leroy }
107647d99948SChristophe Leroy 
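/*
 * Flush a user virtual address range; hugetlb VMAs take the hugetlb
 * specific path, everything else goes through __radix__flush_tlb_range().
 */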
107747d99948SChristophe Leroy void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
107847d99948SChristophe Leroy 		     unsigned long end)
108047d99948SChristophe Leroy {
108147d99948SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
108247d99948SChristophe Leroy 	if (is_vm_hugetlb_page(vma))
108347d99948SChristophe Leroy 		return radix__flush_hugetlb_tlb_range(vma, start, end);
108447d99948SChristophe Leroy #endif
108547d99948SChristophe Leroy 
1086a42d6ba8SAneesh Kumar K.V 	__radix__flush_tlb_range(vma->vm_mm, start, end);
108747d99948SChristophe Leroy }
108847d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_range);
108947d99948SChristophe Leroy 
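/*
 * Translate a page size in bytes into the MMU page size index used by
 * the flush routines.  Only the base page size, 2M and 1G are handled;
 * anything else (e.g. 16M) returns -1.
 */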
109047d99948SChristophe Leroy static int radix_get_mmu_psize(int page_size)
109147d99948SChristophe Leroy {
109247d99948SChristophe Leroy 	int psize;
109347d99948SChristophe Leroy 
109447d99948SChristophe Leroy 	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
109547d99948SChristophe Leroy 		psize = mmu_virtual_psize;
109647d99948SChristophe Leroy 	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
109747d99948SChristophe Leroy 		psize = MMU_PAGE_2M;
109847d99948SChristophe Leroy 	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
109947d99948SChristophe Leroy 		psize = MMU_PAGE_1G;
110047d99948SChristophe Leroy 	else
110147d99948SChristophe Leroy 		return -1;
110247d99948SChristophe Leroy 	return psize;
110347d99948SChristophe Leroy }
110447d99948SChristophe Leroy 
110547d99948SChristophe Leroy /*
110647d99948SChristophe Leroy  * Flush partition scoped LPID address translation for all CPUs.
110747d99948SChristophe Leroy  */
110847d99948SChristophe Leroy void radix__flush_tlb_lpid_page(unsigned int lpid,
110947d99948SChristophe Leroy 					unsigned long addr,
111047d99948SChristophe Leroy 					unsigned long page_size)
111147d99948SChristophe Leroy {
111247d99948SChristophe Leroy 	int psize = radix_get_mmu_psize(page_size);
111347d99948SChristophe Leroy 
111447d99948SChristophe Leroy 	_tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
111547d99948SChristophe Leroy }
111647d99948SChristophe Leroy EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);
111747d99948SChristophe Leroy 
111847d99948SChristophe Leroy /*
111947d99948SChristophe Leroy  * Flush partition scoped PWC from LPID for all CPUs.
112047d99948SChristophe Leroy  */
112147d99948SChristophe Leroy void radix__flush_pwc_lpid(unsigned int lpid)
112247d99948SChristophe Leroy {
112347d99948SChristophe Leroy 	_tlbie_lpid(lpid, RIC_FLUSH_PWC);
112447d99948SChristophe Leroy }
112547d99948SChristophe Leroy EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
112647d99948SChristophe Leroy 
112747d99948SChristophe Leroy /*
112847d99948SChristophe Leroy  * Flush partition scoped translations from LPID (=LPIDR)
112947d99948SChristophe Leroy  */
113099161de3SNicholas Piggin void radix__flush_all_lpid(unsigned int lpid)
113147d99948SChristophe Leroy {
113247d99948SChristophe Leroy 	_tlbie_lpid(lpid, RIC_FLUSH_ALL);
113347d99948SChristophe Leroy }
113499161de3SNicholas Piggin EXPORT_SYMBOL_GPL(radix__flush_all_lpid);
113547d99948SChristophe Leroy 
113647d99948SChristophe Leroy /*
113799161de3SNicholas Piggin  * Flush process scoped translations from LPID (=LPIDR)
113847d99948SChristophe Leroy  */
113999161de3SNicholas Piggin void radix__flush_all_lpid_guest(unsigned int lpid)
114047d99948SChristophe Leroy {
114199161de3SNicholas Piggin 	_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
114247d99948SChristophe Leroy }
114347d99948SChristophe Leroy 
114447d99948SChristophe Leroy static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
114547d99948SChristophe Leroy 				  unsigned long end, int psize);
114647d99948SChristophe Leroy 
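/*
 * Flush callback for the generic mmu_gather machinery: flush whatever
 * the gather accumulated, either the whole mm or a range at a single
 * page size, escalating to a page walk cache flush when page tables
 * were freed.
 */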
114747d99948SChristophe Leroy void radix__tlb_flush(struct mmu_gather *tlb)
114847d99948SChristophe Leroy {
114947d99948SChristophe Leroy 	int psize = 0;
115047d99948SChristophe Leroy 	struct mm_struct *mm = tlb->mm;
115147d99948SChristophe Leroy 	int page_size = tlb->page_size;
115247d99948SChristophe Leroy 	unsigned long start = tlb->start;
115347d99948SChristophe Leroy 	unsigned long end = tlb->end;
115447d99948SChristophe Leroy 
115547d99948SChristophe Leroy 	/*
115647d99948SChristophe Leroy 	 * if page size is not something we understand, do a full mm flush
115747d99948SChristophe Leroy 	 *
115847d99948SChristophe Leroy 	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
115947d99948SChristophe Leroy 	 * that flushes the process table entry cache upon process teardown.
116047d99948SChristophe Leroy 	 * See the comment for radix in arch_exit_mmap().
116147d99948SChristophe Leroy 	 */
1162864edb75SAneesh Kumar K.V 	if (tlb->fullmm || tlb->need_flush_all) {
116347d99948SChristophe Leroy 		__flush_all_mm(mm, true);
116447d99948SChristophe Leroy 	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
116552162ec7SAneesh Kumar K.V 		if (!tlb->freed_tables)
116647d99948SChristophe Leroy 			radix__flush_tlb_mm(mm);
116747d99948SChristophe Leroy 		else
116847d99948SChristophe Leroy 			radix__flush_all_mm(mm);
116947d99948SChristophe Leroy 	} else {
117052162ec7SAneesh Kumar K.V 		if (!tlb->freed_tables)
117147d99948SChristophe Leroy 			radix__flush_tlb_range_psize(mm, start, end, psize);
117247d99948SChristophe Leroy 		else
117347d99948SChristophe Leroy 			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
117447d99948SChristophe Leroy 	}
117547d99948SChristophe Leroy }
117647d99948SChristophe Leroy 
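/*
 * Range flush for a single explicit page size; @also_pwc additionally
 * invalidates the page walk cache.  The strategy selection mirrors
 * __radix__flush_tlb_range() above.
 */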
1177e12d6d7dSMasahiro Yamada static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
117847d99948SChristophe Leroy 				unsigned long start, unsigned long end,
117947d99948SChristophe Leroy 				int psize, bool also_pwc)
118047d99948SChristophe Leroy {
118147d99948SChristophe Leroy 	unsigned long pid;
118247d99948SChristophe Leroy 	unsigned int page_shift = mmu_psize_defs[psize].shift;
118347d99948SChristophe Leroy 	unsigned long page_size = 1UL << page_shift;
118447d99948SChristophe Leroy 	unsigned long nr_pages = (end - start) >> page_shift;
118526418b36SNicholas Piggin 	bool fullmm = (end == TLB_FLUSH_ALL);
118626418b36SNicholas Piggin 	bool flush_pid;
118726418b36SNicholas Piggin 	enum tlb_flush_type type;
118847d99948SChristophe Leroy 
118947d99948SChristophe Leroy 	pid = mm->context.id;
119047d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
119147d99948SChristophe Leroy 		return;
119247d99948SChristophe Leroy 
119547d99948SChristophe Leroy 	preempt_disable();
119647d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
119726418b36SNicholas Piggin 	type = flush_type_needed(mm, fullmm);
119854bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_NONE)
119954bb5033SNicholas Piggin 		goto out;
120047d99948SChristophe Leroy 
120126418b36SNicholas Piggin 	if (fullmm)
120226418b36SNicholas Piggin 		flush_pid = true;
120326418b36SNicholas Piggin 	else if (type == FLUSH_TYPE_GLOBAL)
120426418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
120526418b36SNicholas Piggin 	else
120626418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
120726418b36SNicholas Piggin 
120826418b36SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
1209dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU;
1210dd3d9aa5SNicholas Piggin 		unsigned long type = H_RPTI_TYPE_TLB;
1211dd3d9aa5SNicholas Piggin 		unsigned long pg_sizes = psize_to_rpti_pgsize(psize);
1212dd3d9aa5SNicholas Piggin 
1213dd3d9aa5SNicholas Piggin 		if (also_pwc)
1214dd3d9aa5SNicholas Piggin 			type |= H_RPTI_TYPE_PWC;
1215dd3d9aa5SNicholas Piggin 		if (atomic_read(&mm->context.copros) > 0)
1216dd3d9aa5SNicholas Piggin 			tgt |= H_RPTI_TARGET_NMMU;
1217dd3d9aa5SNicholas Piggin 		pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
121826418b36SNicholas Piggin 	} else if (flush_pid) {
121926418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
122047d99948SChristophe Leroy 			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
122147d99948SChristophe Leroy 		} else {
12222275d7b5SNicholas Piggin 			if (cputlb_use_tlbie()) {
122347d99948SChristophe Leroy 				if (mm_needs_flush_escalation(mm))
122447d99948SChristophe Leroy 					also_pwc = true;
122547d99948SChristophe Leroy 
12262275d7b5SNicholas Piggin 				_tlbie_pid(pid,
12272275d7b5SNicholas Piggin 					also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
12282275d7b5SNicholas Piggin 			} else {
12292275d7b5SNicholas Piggin 				_tlbiel_pid_multicast(mm, pid,
12302275d7b5SNicholas Piggin 					also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
12312275d7b5SNicholas Piggin 			}
123347d99948SChristophe Leroy 		}
123447d99948SChristophe Leroy 	} else {
123526418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL)
123647d99948SChristophe Leroy 			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
12372275d7b5SNicholas Piggin 		else if (cputlb_use_tlbie())
123847d99948SChristophe Leroy 			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
12392275d7b5SNicholas Piggin 		else
12402275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
12412275d7b5SNicholas Piggin 					start, end, pid, page_size, psize, also_pwc);
124247d99948SChristophe Leroy 	}
124354bb5033SNicholas Piggin out:
124447d99948SChristophe Leroy 	preempt_enable();
124547d99948SChristophe Leroy }
124647d99948SChristophe Leroy 
124747d99948SChristophe Leroy void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
124847d99948SChristophe Leroy 				  unsigned long end, int psize)
124947d99948SChristophe Leroy {
125047d99948SChristophe Leroy 	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
125147d99948SChristophe Leroy }
125247d99948SChristophe Leroy 
125347d99948SChristophe Leroy static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
125447d99948SChristophe Leroy 				  unsigned long end, int psize)
125547d99948SChristophe Leroy {
125647d99948SChristophe Leroy 	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
125747d99948SChristophe Leroy }
125847d99948SChristophe Leroy 
125947d99948SChristophe Leroy #ifdef CONFIG_TRANSPARENT_HUGEPAGE
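/*
 * After a range of small pages is collapsed into a huge PMD (e.g. by
 * khugepaged), stale small-page translations and page walk cache
 * entries for the old page table may remain, so flush both across the
 * PMD-sized range.
 */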
126047d99948SChristophe Leroy void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
126147d99948SChristophe Leroy {
126247d99948SChristophe Leroy 	unsigned long pid, end;
126326418b36SNicholas Piggin 	enum tlb_flush_type type;
126447d99948SChristophe Leroy 
126547d99948SChristophe Leroy 	pid = mm->context.id;
126647d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
126747d99948SChristophe Leroy 		return;
126847d99948SChristophe Leroy 
126947d99948SChristophe Leroy 	/* 4k page size, just blow the world */
127047d99948SChristophe Leroy 	if (PAGE_SIZE == 0x1000) {
127147d99948SChristophe Leroy 		radix__flush_all_mm(mm);
127247d99948SChristophe Leroy 		return;
127347d99948SChristophe Leroy 	}
127447d99948SChristophe Leroy 
127547d99948SChristophe Leroy 	end = addr + HPAGE_PMD_SIZE;
127647d99948SChristophe Leroy 
127747d99948SChristophe Leroy 	/* Otherwise first do the PWC, then iterate the pages. */
127847d99948SChristophe Leroy 	preempt_disable();
127947d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
128026418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
128154bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
128254bb5033SNicholas Piggin 		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
128354bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
1284dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
1285dd3d9aa5SNicholas Piggin 			unsigned long tgt, type, pg_sizes;
1286dd3d9aa5SNicholas Piggin 
1287dd3d9aa5SNicholas Piggin 			tgt = H_RPTI_TARGET_CMMU;
1288dd3d9aa5SNicholas Piggin 			type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
1289dd3d9aa5SNicholas Piggin 			       H_RPTI_TYPE_PRT;
1290dd3d9aa5SNicholas Piggin 			pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
1291dd3d9aa5SNicholas Piggin 
1292dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
1293dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
1294dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, type, pg_sizes,
1295dd3d9aa5SNicholas Piggin 					       addr, end);
1296dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
129747d99948SChristophe Leroy 			_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
12982275d7b5SNicholas Piggin 		else
12992275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
13002275d7b5SNicholas Piggin 					addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
130147d99948SChristophe Leroy 	}
130247d99948SChristophe Leroy 
130347d99948SChristophe Leroy 	preempt_enable();
130447d99948SChristophe Leroy }
130547d99948SChristophe Leroy #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
130647d99948SChristophe Leroy 
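/* Flush 2M (PMD level) translations for [start, end) in the VMA's mm. */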
130747d99948SChristophe Leroy void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
130847d99948SChristophe Leroy 				unsigned long start, unsigned long end)
130947d99948SChristophe Leroy {
131047d99948SChristophe Leroy 	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
131147d99948SChristophe Leroy }
131247d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
131347d99948SChristophe Leroy 
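/*
 * Flush every radix translation on all CPUs with broadcast tlbie
 * (IS=3): first guest entries (PRS=1, LPID != 0), then host entries
 * (PRS=0, LPID=0), as described in the comments below.
 */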
131447d99948SChristophe Leroy void radix__flush_tlb_all(void)
131547d99948SChristophe Leroy {
131647d99948SChristophe Leroy 	unsigned long rb, prs, r, rs;
131747d99948SChristophe Leroy 	unsigned long ric = RIC_FLUSH_ALL;
131847d99948SChristophe Leroy 
131947d99948SChristophe Leroy 	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
132047d99948SChristophe Leroy 	prs = 0; /* partition scoped */
132147d99948SChristophe Leroy 	r = 1;   /* radix format */
132247d99948SChristophe Leroy 	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */
132347d99948SChristophe Leroy 
132447d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
132547d99948SChristophe Leroy 	/*
132647d99948SChristophe Leroy 	 * now flush guest entries by passing PRS = 1 and LPID != 0
132747d99948SChristophe Leroy 	 */
132847d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
132947d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
133047d99948SChristophe Leroy 	/*
133147d99948SChristophe Leroy 	 * now flush host entries by passing PRS = 0 and LPID == 0
133247d99948SChristophe Leroy 	 */
133347d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
133447d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
133547d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
133647d99948SChristophe Leroy }
133747d99948SChristophe Leroy 
133847d99948SChristophe Leroy #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
133947d99948SChristophe Leroy extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
134047d99948SChristophe Leroy {
134147d99948SChristophe Leroy 	unsigned long pid = mm->context.id;
134247d99948SChristophe Leroy 
134347d99948SChristophe Leroy 	if (unlikely(pid == MMU_NO_CONTEXT))
134447d99948SChristophe Leroy 		return;
134547d99948SChristophe Leroy 
1346736bcdd3SJordan Niethe 	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
1347736bcdd3SJordan Niethe 		return;
1348736bcdd3SJordan Niethe 
134947d99948SChristophe Leroy 	/*
135047d99948SChristophe Leroy 	 * If this context hasn't run on that CPU before and KVM is
135147d99948SChristophe Leroy 	 * around, there's a slim chance that the guest on another
135247d99948SChristophe Leroy 	 * CPU just brought in obsolete translation into the TLB of
135347d99948SChristophe Leroy 	 * this CPU due to a bad prefetch using the guest PID on
135447d99948SChristophe Leroy 	 * the way into the hypervisor.
135547d99948SChristophe Leroy 	 *
135647d99948SChristophe Leroy 	 * We work around this here. If KVM is possible, we check if
135747d99948SChristophe Leroy 	 * any sibling thread is in KVM. If it is, the window may exist
135847d99948SChristophe Leroy 	 * and thus we flush that PID from the core.
135947d99948SChristophe Leroy 	 *
136047d99948SChristophe Leroy 	 * A potential future improvement would be to mark which PIDs
136147d99948SChristophe Leroy 	 * have never been used on the system and avoid it if the PID
136247d99948SChristophe Leroy 	 * is new and the process has no other cpumask bit set.
136347d99948SChristophe Leroy 	 */
136447d99948SChristophe Leroy 	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
136547d99948SChristophe Leroy 		int cpu = smp_processor_id();
136647d99948SChristophe Leroy 		int sib = cpu_first_thread_sibling(cpu);
136747d99948SChristophe Leroy 		bool flush = false;
136847d99948SChristophe Leroy 
136947d99948SChristophe Leroy 		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
137047d99948SChristophe Leroy 			if (sib == cpu)
137147d99948SChristophe Leroy 				continue;
137247d99948SChristophe Leroy 			if (!cpu_possible(sib))
137347d99948SChristophe Leroy 				continue;
137447d99948SChristophe Leroy 			if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
137547d99948SChristophe Leroy 				flush = true;
137647d99948SChristophe Leroy 		}
137747d99948SChristophe Leroy 		if (flush)
137847d99948SChristophe Leroy 			_tlbiel_pid(pid, RIC_FLUSH_ALL);
137947d99948SChristophe Leroy 	}
138047d99948SChristophe Leroy }
138147d99948SChristophe Leroy EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
138247d99948SChristophe Leroy #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1383