// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/debugfs.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>
#include <asm/plpar_wrappers.h>

#include "internal.h"

/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
		     : "memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */

	if (early_cpu_has_feature(CPU_FTR_HVMODE)) {
		/* MSR[HV] should flush partition scope translations first. */
		tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);

		if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
			for (set = 1; set < num_sets; set++)
				tlbiel_radix_set_isa300(set, is, 0,
							RIC_FLUSH_TLB, 0);
		}
	}

	/* Flush process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);

	if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
		for (set = 1; set < num_sets; set++)
			tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
	}

	ppc_after_tlbiel_barrier();
}

void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}

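/*
 * Low-level invalidation primitives. The __tlbiel_* variants invalidate
 * translations on the local core only (tlbiel), while the __tlbie_*
 * variants broadcast the invalidation on the fabric (tlbie). "pid"
 * variants take a process ID, "lpid" variants a partition ID, and "va"
 * variants target a single effective address.
 */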
static __always_inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
					unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
				       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
					    unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

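/*
 * POWER9 tlbie erratum workarounds, issued after the primary tlbie(s) and
 * before the closing eieio;tlbsync;ptesync sequence: with
 * CPU_FTR_P9_TLBIE_ERAT_BUG an extra invalidation using PID/LPID 0 is
 * issued, and with CPU_FTR_P9_TLBIE_STQ_BUG the last tlbie is repeated.
 * Each extra operation is preceded by a ptesync.
 */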
static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
				  unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
					unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_pid(unsigned long pid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
				       unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid(unsigned long lpid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	switch (ric) {
	case RIC_FLUSH_PWC:

		/* For PWC, only one flush is needed */
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
		ppc_after_tlbiel_barrier();
		return;
	case RIC_FLUSH_TLB:
		__tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_ALL:
	default:
		/*
		 * Flush the first set of the TLB, and if
		 * we're doing a RIC_FLUSH_ALL, also flush
		 * the entire Page Walk Cache.
		 */
		__tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
	}

	if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
		/* For the remaining sets, just flush the TLB */
		for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
			__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
	}

	ppc_after_tlbiel_barrier();
	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		fixup_tlbie_pid(pid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
		fixup_tlbie_pid(pid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

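/*
 * tlbiel-via-IPI helpers: when broadcast tlbie is not being used, have
 * every CPU in the mm_cpumask run a local tlbiel flush instead, and still
 * issue a tlbie if coprocessors (nMMU) are attached to the mm.
 */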
struct tlbiel_pid {
	unsigned long pid;
	unsigned long ric;
};

static void do_tlbiel_pid(void *info)
{
	struct tlbiel_pid *t = info;

	if (t->ric == RIC_FLUSH_TLB)
		_tlbiel_pid(t->pid, RIC_FLUSH_TLB);
	else if (t->ric == RIC_FLUSH_PWC)
		_tlbiel_pid(t->pid, RIC_FLUSH_PWC);
	else
		_tlbiel_pid(t->pid, RIC_FLUSH_ALL);
}

static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
				unsigned long pid, unsigned long ric)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_pid t = { .pid = pid, .ric = ric };

	on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1);
	/*
	 * Always want the CPU translations to be invalidated with tlbiel in
	 * these paths, so while coprocessors must use tlbie, we cannot
	 * optimise away the tlbiel component.
	 */
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_pid(pid, RIC_FLUSH_ALL);
}

static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_lpid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid(lpid, RIC_FLUSH_TLB);
		fixup_tlbie_lpid(lpid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid(lpid, RIC_FLUSH_ALL);
		fixup_tlbie_lpid(lpid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
{
	/*
	 * Work around the fact that the "ric" argument to __tlbie_lpid_guest
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
	}
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

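/*
 * Range flushes invalidate one page at a time from start to end for the
 * given page size; the _va_range variants can also flush the Page Walk
 * Cache first when also_pwc is set.
 */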
static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
				       unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	ppc_after_tlbiel_barrier();
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	ppc_after_tlbiel_barrier();
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);

	fixup_tlbie_va_range(addr - page_size, pid, ap);
}

static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
				      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie_va(va, pid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

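/* Per-VA and VA-range argument blocks for the tlbiel-via-IPI paths. */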
struct tlbiel_va {
	unsigned long pid;
	unsigned long va;
	unsigned long psize;
	unsigned long ric;
};

static void do_tlbiel_va(void *info)
{
	struct tlbiel_va *t = info;

	if (t->ric == RIC_FLUSH_TLB)
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB);
	else if (t->ric == RIC_FLUSH_PWC)
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC);
	else
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL);
}

static inline void _tlbiel_va_multicast(struct mm_struct *mm,
				unsigned long va, unsigned long pid,
				unsigned long psize, unsigned long ric)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };
	on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_va(va, pid, psize, RIC_FLUSH_TLB);
}

struct tlbiel_va_range {
	unsigned long pid;
	unsigned long start;
	unsigned long end;
	unsigned long page_size;
	unsigned long psize;
	bool also_pwc;
};

static void do_tlbiel_va_range(void *info)
{
	struct tlbiel_va_range *t = info;

	_tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
				    t->psize, t->also_pwc);
}

static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_lpid_va(va, lpid, ap, ric);
	fixup_tlbie_lpid_va(va, lpid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				unsigned long pid, unsigned long page_size,
				unsigned long psize, bool also_pwc)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_va_range t = { .start = start, .end = end,
				.pid = pid, .page_size = page_size,
				.psize = psize, .also_pwc = also_pwc };

	on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);

static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	radix__local_flush_all_mm(mm);
}
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid = mm->context.id;

	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * The P9 nest MMU has issues with the page walk cache caching PTEs
	 * and not flushing them when RIC = 0 for a PID/LPID invalidate.
	 *
	 * This may have been fixed in shipping firmware (by disabling PWC
	 * or preventing it from caching PTEs), but until that is confirmed,
	 * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes
	 * to RIC=2.
	 *
	 * POWER10 (and P9P) does not have this problem.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return false;
	if (atomic_read(&mm->context.copros) > 0)
		return true;
	return false;
}

/*
 * If always_flush is true, then flush even if this CPU can't be removed
 * from mm_cpumask.
 */
void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
{
	unsigned long pid = mm->context.id;
	int cpu = smp_processor_id();

	/*
	 * A kthread could have done a mmget_not_zero() after the flushing CPU
	 * checked mm_cpumask, and be in the process of kthread_use_mm when
	 * interrupted here. In that case, current->mm will be set to mm,
	 * because kthread_use_mm() setting ->mm and switching to the mm is
	 * done with interrupts off.
	 */
	if (current->mm == mm)
		goto out;

	if (current->active_mm == mm) {
		unsigned long flags;

		WARN_ON_ONCE(current->mm != NULL);
		/*
		 * It is a kernel thread and is using mm as the lazy tlb, so
		 * switch it to init_mm. This is not always called from IPI
		 * (e.g., flush_type_needed), so must disable irqs.
		 */
		local_irq_save(flags);
		mmgrab_lazy_tlb(&init_mm);
		current->active_mm = &init_mm;
		switch_mm_irqs_off(mm, &init_mm, current);
		mmdrop_lazy_tlb(mm);
		local_irq_restore(flags);
	}

	/*
	 * This IPI may be initiated from any source including those not
	 * running the mm, so there may be a racing IPI that comes after
	 * this one which finds the cpumask already clear. Check and avoid
	 * underflowing the active_cpus count in that case. The race should
	 * not otherwise be a problem, but the TLB must be flushed because
	 * that's what the caller expects.
	 */
	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		dec_mm_active_cpus(mm);
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
		always_flush = true;
	}

out:
	if (always_flush)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
}

#ifdef CONFIG_SMP
static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	exit_lazy_flush_tlb(mm, true);
}

static void exit_flush_lazy_tlbs(struct mm_struct *mm)
{
	/*
	 * Would be nice if this was async so it could be run in
	 * parallel with our local flush, but generic code does not
	 * give a good API for it. Could extend the generic code or
	 * make a special powerpc IPI for flushing TLBs.
	 * For now it's not too performance critical.
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
				(void *)mm, 1);
}

#else /* CONFIG_SMP */
static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
#endif /* CONFIG_SMP */

static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock);

/*
 * Interval between flushes at which we send out IPIs to check whether the
 * mm_cpumask can be trimmed for the case where it's not a single-threaded
 * process flushing its own mm. The intent is to reduce the cost of later
 * flushes. Don't want this to be so low that it adds noticeable cost to TLB
 * flushing, or so high that it doesn't help reduce global TLBIEs.
 */
static unsigned long tlb_mm_cpumask_trim_timer = 1073;

static bool tick_and_test_trim_clock(void)
{
	if (__this_cpu_inc_return(mm_cpumask_trim_clock) ==
			tlb_mm_cpumask_trim_timer) {
		__this_cpu_write(mm_cpumask_trim_clock, 0);
		return true;
	}
	return false;
}

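/*
 * Decide how a flush for this mm must be performed: skipped entirely
 * (no active CPUs), done locally with tlbiel, or done globally (the
 * caller then picks tlbie, an IPI broadcast, or an RPT invalidate hcall).
 */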
75926418b36SNicholas Piggin enum tlb_flush_type {
76054bb5033SNicholas Piggin 	FLUSH_TYPE_NONE,
76126418b36SNicholas Piggin 	FLUSH_TYPE_LOCAL,
76226418b36SNicholas Piggin 	FLUSH_TYPE_GLOBAL,
76326418b36SNicholas Piggin };
76426418b36SNicholas Piggin 
flush_type_needed(struct mm_struct * mm,bool fullmm)76526418b36SNicholas Piggin static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
76626418b36SNicholas Piggin {
76754bb5033SNicholas Piggin 	int active_cpus = atomic_read(&mm->context.active_cpus);
76854bb5033SNicholas Piggin 	int cpu = smp_processor_id();
76954bb5033SNicholas Piggin 
77054bb5033SNicholas Piggin 	if (active_cpus == 0)
77154bb5033SNicholas Piggin 		return FLUSH_TYPE_NONE;
77293935448SNicholas Piggin 	if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
77393935448SNicholas Piggin 		if (current->mm != mm) {
77493935448SNicholas Piggin 			/*
77593935448SNicholas Piggin 			 * Asynchronous flush sources may trim down to nothing
77693935448SNicholas Piggin 			 * if the process is not running, so occasionally try
77793935448SNicholas Piggin 			 * to trim.
77893935448SNicholas Piggin 			 */
77993935448SNicholas Piggin 			if (tick_and_test_trim_clock()) {
780032b7f08SNicholas Piggin 				exit_lazy_flush_tlb(mm, true);
78193935448SNicholas Piggin 				return FLUSH_TYPE_NONE;
78293935448SNicholas Piggin 			}
78393935448SNicholas Piggin 		}
78426418b36SNicholas Piggin 		return FLUSH_TYPE_LOCAL;
78593935448SNicholas Piggin 	}
78626418b36SNicholas Piggin 
78726418b36SNicholas Piggin 	/* Coprocessors require TLBIE to invalidate nMMU. */
78826418b36SNicholas Piggin 	if (atomic_read(&mm->context.copros) > 0)
78926418b36SNicholas Piggin 		return FLUSH_TYPE_GLOBAL;
79026418b36SNicholas Piggin 
79126418b36SNicholas Piggin 	/*
79226418b36SNicholas Piggin 	 * In the fullmm case there's no point doing the exit_flush_lazy_tlbs
79326418b36SNicholas Piggin 	 * because the mm is being taken down anyway, and a TLBIE tends to
79426418b36SNicholas Piggin 	 * be faster than an IPI+TLBIEL.
79526418b36SNicholas Piggin 	 */
79626418b36SNicholas Piggin 	if (fullmm)
79726418b36SNicholas Piggin 		return FLUSH_TYPE_GLOBAL;
79826418b36SNicholas Piggin 
79926418b36SNicholas Piggin 	/*
80026418b36SNicholas Piggin 	 * If we are running the only thread of a single-threaded process,
80126418b36SNicholas Piggin 	 * then we should almost always be able to trim off the rest of the
80226418b36SNicholas Piggin 	 * CPU mask (except in the case of use_mm() races), so always try
80326418b36SNicholas Piggin 	 * trimming the mask.
80426418b36SNicholas Piggin 	 */
80526418b36SNicholas Piggin 	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) {
80626418b36SNicholas Piggin 		exit_flush_lazy_tlbs(mm);
80726418b36SNicholas Piggin 		/*
80826418b36SNicholas Piggin 		 * use_mm() race could prevent IPIs from being able to clear
80926418b36SNicholas Piggin 		 * the cpumask here, however those users are established
81026418b36SNicholas Piggin 		 * after our first check (and so after the PTEs are removed),
81126418b36SNicholas Piggin 		 * and the TLB still gets flushed by the IPI, so this CPU
81226418b36SNicholas Piggin 		 * will only require a local flush.
81326418b36SNicholas Piggin 		 */
81426418b36SNicholas Piggin 		return FLUSH_TYPE_LOCAL;
81526418b36SNicholas Piggin 	}
81626418b36SNicholas Piggin 
81793935448SNicholas Piggin 	/*
81893935448SNicholas Piggin 	 * Occasionally try to trim down the cpumask. It's possible this can
81993935448SNicholas Piggin 	 * bring the mask to zero, which results in no flush.
82093935448SNicholas Piggin 	 */
82193935448SNicholas Piggin 	if (tick_and_test_trim_clock()) {
82293935448SNicholas Piggin 		exit_flush_lazy_tlbs(mm);
82393935448SNicholas Piggin 		if (current->mm == mm)
82493935448SNicholas Piggin 			return FLUSH_TYPE_LOCAL;
82593935448SNicholas Piggin 		if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
826032b7f08SNicholas Piggin 			exit_lazy_flush_tlb(mm, true);
82793935448SNicholas Piggin 		return FLUSH_TYPE_NONE;
82893935448SNicholas Piggin 	}
82993935448SNicholas Piggin 
83026418b36SNicholas Piggin 	return FLUSH_TYPE_GLOBAL;
83126418b36SNicholas Piggin }
83226418b36SNicholas Piggin 
83326418b36SNicholas Piggin #ifdef CONFIG_SMP
radix__flush_tlb_mm(struct mm_struct * mm)83447d99948SChristophe Leroy void radix__flush_tlb_mm(struct mm_struct *mm)
83547d99948SChristophe Leroy {
83647d99948SChristophe Leroy 	unsigned long pid;
83726418b36SNicholas Piggin 	enum tlb_flush_type type;
83847d99948SChristophe Leroy 
83947d99948SChristophe Leroy 	pid = mm->context.id;
840d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
84147d99948SChristophe Leroy 		return;
84247d99948SChristophe Leroy 
84347d99948SChristophe Leroy 	preempt_disable();
84447d99948SChristophe Leroy 	/*
84526418b36SNicholas Piggin 	 * Order loads of mm_cpumask (in flush_type_needed) vs previous
84626418b36SNicholas Piggin 	 * stores to clear ptes before the invalidate. See barrier in
84726418b36SNicholas Piggin 	 * switch_mm_irqs_off
84847d99948SChristophe Leroy 	 */
84947d99948SChristophe Leroy 	smp_mb();
85026418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
85154bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
85254bb5033SNicholas Piggin 		_tlbiel_pid(pid, RIC_FLUSH_TLB);
85354bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
854dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
855dd3d9aa5SNicholas Piggin 			unsigned long tgt = H_RPTI_TARGET_CMMU;
856dd3d9aa5SNicholas Piggin 
857dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
858dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
859dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
860dd3d9aa5SNicholas Piggin 					       H_RPTI_PAGE_ALL, 0, -1UL);
861dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie()) {
86247d99948SChristophe Leroy 			if (mm_needs_flush_escalation(mm))
86347d99948SChristophe Leroy 				_tlbie_pid(pid, RIC_FLUSH_ALL);
86447d99948SChristophe Leroy 			else
86547d99948SChristophe Leroy 				_tlbie_pid(pid, RIC_FLUSH_TLB);
86647d99948SChristophe Leroy 		} else {
8672275d7b5SNicholas Piggin 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
8682275d7b5SNicholas Piggin 		}
86947d99948SChristophe Leroy 	}
87047d99948SChristophe Leroy 	preempt_enable();
8711af5a810SAlistair Popple 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
87247d99948SChristophe Leroy }
87347d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_mm);
87447d99948SChristophe Leroy 
__flush_all_mm(struct mm_struct * mm,bool fullmm)87547d99948SChristophe Leroy static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
87647d99948SChristophe Leroy {
87747d99948SChristophe Leroy 	unsigned long pid;
87826418b36SNicholas Piggin 	enum tlb_flush_type type;
87947d99948SChristophe Leroy 
88047d99948SChristophe Leroy 	pid = mm->context.id;
881d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
88247d99948SChristophe Leroy 		return;
88347d99948SChristophe Leroy 
88447d99948SChristophe Leroy 	preempt_disable();
88547d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
88626418b36SNicholas Piggin 	type = flush_type_needed(mm, fullmm);
88754bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
88854bb5033SNicholas Piggin 		_tlbiel_pid(pid, RIC_FLUSH_ALL);
88954bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
890dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
891dd3d9aa5SNicholas Piggin 			unsigned long tgt = H_RPTI_TARGET_CMMU;
892dd3d9aa5SNicholas Piggin 			unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
893dd3d9aa5SNicholas Piggin 					     H_RPTI_TYPE_PRT;
894dd3d9aa5SNicholas Piggin 
895dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
896dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
897dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, type,
898dd3d9aa5SNicholas Piggin 					       H_RPTI_PAGE_ALL, 0, -1UL);
899dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
90047d99948SChristophe Leroy 			_tlbie_pid(pid, RIC_FLUSH_ALL);
9012275d7b5SNicholas Piggin 		else
9022275d7b5SNicholas Piggin 			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
90347d99948SChristophe Leroy 	}
90447d99948SChristophe Leroy 	preempt_enable();
9051af5a810SAlistair Popple 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
90647d99948SChristophe Leroy }
90752162ec7SAneesh Kumar K.V 
radix__flush_all_mm(struct mm_struct * mm)90847d99948SChristophe Leroy void radix__flush_all_mm(struct mm_struct *mm)
90947d99948SChristophe Leroy {
91047d99948SChristophe Leroy 	__flush_all_mm(mm, false);
91147d99948SChristophe Leroy }
91247d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_all_mm);
91347d99948SChristophe Leroy 
radix__flush_tlb_page_psize(struct mm_struct * mm,unsigned long vmaddr,int psize)91447d99948SChristophe Leroy void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
91547d99948SChristophe Leroy 				 int psize)
91647d99948SChristophe Leroy {
91747d99948SChristophe Leroy 	unsigned long pid;
91826418b36SNicholas Piggin 	enum tlb_flush_type type;
91947d99948SChristophe Leroy 
92047d99948SChristophe Leroy 	pid = mm->context.id;
921d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
92247d99948SChristophe Leroy 		return;
92347d99948SChristophe Leroy 
92447d99948SChristophe Leroy 	preempt_disable();
92547d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
92626418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
92754bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
92854bb5033SNicholas Piggin 		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
92954bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
930dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
931dd3d9aa5SNicholas Piggin 			unsigned long tgt, pg_sizes, size;
932dd3d9aa5SNicholas Piggin 
933dd3d9aa5SNicholas Piggin 			tgt = H_RPTI_TARGET_CMMU;
934dd3d9aa5SNicholas Piggin 			pg_sizes = psize_to_rpti_pgsize(psize);
935dd3d9aa5SNicholas Piggin 			size = 1UL << mmu_psize_to_shift(psize);
936dd3d9aa5SNicholas Piggin 
937dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
938dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
939dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
940dd3d9aa5SNicholas Piggin 					       pg_sizes, vmaddr,
941dd3d9aa5SNicholas Piggin 					       vmaddr + size);
942dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
94347d99948SChristophe Leroy 			_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
9442275d7b5SNicholas Piggin 		else
9452275d7b5SNicholas Piggin 			_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
94647d99948SChristophe Leroy 	}
94747d99948SChristophe Leroy 	preempt_enable();
94847d99948SChristophe Leroy }
94947d99948SChristophe Leroy 
radix__flush_tlb_page(struct vm_area_struct * vma,unsigned long vmaddr)95047d99948SChristophe Leroy void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
95147d99948SChristophe Leroy {
95247d99948SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
95347d99948SChristophe Leroy 	if (is_vm_hugetlb_page(vma))
95447d99948SChristophe Leroy 		return radix__flush_hugetlb_page(vma, vmaddr);
95547d99948SChristophe Leroy #endif
95647d99948SChristophe Leroy 	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
95747d99948SChristophe Leroy }
95847d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_page);
95947d99948SChristophe Leroy 
96047d99948SChristophe Leroy #endif /* CONFIG_SMP */
96147d99948SChristophe Leroy 
do_tlbiel_kernel(void * info)9622275d7b5SNicholas Piggin static void do_tlbiel_kernel(void *info)
9632275d7b5SNicholas Piggin {
9642275d7b5SNicholas Piggin 	_tlbiel_pid(0, RIC_FLUSH_ALL);
9652275d7b5SNicholas Piggin }
9662275d7b5SNicholas Piggin 
_tlbiel_kernel_broadcast(void)9672275d7b5SNicholas Piggin static inline void _tlbiel_kernel_broadcast(void)
9682275d7b5SNicholas Piggin {
9692275d7b5SNicholas Piggin 	on_each_cpu(do_tlbiel_kernel, NULL, 1);
9702275d7b5SNicholas Piggin 	if (tlbie_capable) {
9712275d7b5SNicholas Piggin 		/*
9722275d7b5SNicholas Piggin 		 * Coherent accelerators don't refcount kernel memory mappings,
9732275d7b5SNicholas Piggin 		 * so we always have to issue a tlbie for them. This is quite a
9742275d7b5SNicholas Piggin 		 * slow path anyway.
9752275d7b5SNicholas Piggin 		 */
9762275d7b5SNicholas Piggin 		_tlbie_pid(0, RIC_FLUSH_ALL);
9772275d7b5SNicholas Piggin 	}
9782275d7b5SNicholas Piggin }
9792275d7b5SNicholas Piggin 
98060e8523eSAlastair D'Silva /*
98160e8523eSAlastair D'Silva  * If kernel TLBIs ever become local rather than global, then
98260e8523eSAlastair D'Silva  * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it
98360e8523eSAlastair D'Silva  * assumes kernel TLBIs are global.
98460e8523eSAlastair D'Silva  */
98547d99948SChristophe Leroy void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
98647d99948SChristophe Leroy {
987dd3d9aa5SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE)) {
988dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU;
989dd3d9aa5SNicholas Piggin 		unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
990dd3d9aa5SNicholas Piggin 				     H_RPTI_TYPE_PRT;
991dd3d9aa5SNicholas Piggin 
992dd3d9aa5SNicholas Piggin 		pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL,
993dd3d9aa5SNicholas Piggin 				       start, end);
994dd3d9aa5SNicholas Piggin 	} else if (cputlb_use_tlbie())
99547d99948SChristophe Leroy 		_tlbie_pid(0, RIC_FLUSH_ALL);
9962275d7b5SNicholas Piggin 	else
9972275d7b5SNicholas Piggin 		_tlbiel_kernel_broadcast();
99847d99948SChristophe Leroy }
99947d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
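/*
 * Illustrative sketch only: after modifying a kernel (e.g. vmalloc) mapping,
 * generic code is expected to land here via the flush_tlb_kernel_range()
 * wrapper; a direct call with a hypothetical address "kaddr" would be:
 *
 *	radix__flush_tlb_kernel_range(kaddr, kaddr + PAGE_SIZE);
 *
 * Note the implementation above does not walk the range: it flushes the
 * whole PID 0 context (or issues one H_RPT_INVALIDATE) regardless of size.
 */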
100047d99948SChristophe Leroy 
1001dcfecb98SNicholas Piggin /*
1002dcfecb98SNicholas Piggin  * Sentinel for a full address-space flush; the range flush paths below WARN_ON_ONCE() on it.
1003dcfecb98SNicholas Piggin  */
100447d99948SChristophe Leroy #define TLB_FLUSH_ALL -1UL
100547d99948SChristophe Leroy 
100647d99948SChristophe Leroy /*
100747d99948SChristophe Leroy  * Number of pages above which we invalidate the entire PID rather than
100847d99948SChristophe Leroy  * flush individual pages, for local and global flushes respectively.
100947d99948SChristophe Leroy  *
101047d99948SChristophe Leroy  * tlbie goes out to the interconnect and individual ops are more costly.
101147d99948SChristophe Leroy  * It also does not iterate over sets like the local tlbiel variant when
101247d99948SChristophe Leroy  * invalidating a full PID, so it has a far lower threshold to change from
101347d99948SChristophe Leroy  * individual page flushes to full-pid flushes.
101447d99948SChristophe Leroy  */
10153e188b1aSAneesh Kumar K.V static u32 tlb_single_page_flush_ceiling __read_mostly = 33;
10163e188b1aSAneesh Kumar K.V static u32 tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
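/*
 * Worked example (illustrative, assuming 64K base pages and
 * POWER9_TLB_SETS_RADIX == 128): an munmap() of 4MB covers 64 pages.
 * That is above tlb_single_page_flush_ceiling (33), so a global flush
 * upgrades to a full-PID tlbie, but it is below
 * tlb_local_single_page_flush_ceiling (256), so a purely local flush
 * still invalidates the range page by page with tlbiel.
 */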
101747d99948SChristophe Leroy 
101847d99948SChristophe Leroy static inline void __radix__flush_tlb_range(struct mm_struct *mm,
1019a42d6ba8SAneesh Kumar K.V 					    unsigned long start, unsigned long end)
102047d99948SChristophe Leroy {
102147d99948SChristophe Leroy 	unsigned long pid;
102247d99948SChristophe Leroy 	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
102347d99948SChristophe Leroy 	unsigned long page_size = 1UL << page_shift;
102447d99948SChristophe Leroy 	unsigned long nr_pages = (end - start) >> page_shift;
1025cec6515aSAneesh Kumar K.V 	bool flush_pid, flush_pwc = false;
102626418b36SNicholas Piggin 	enum tlb_flush_type type;
102747d99948SChristophe Leroy 
102847d99948SChristophe Leroy 	pid = mm->context.id;
1029d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
103047d99948SChristophe Leroy 		return;
103147d99948SChristophe Leroy 
1032dcfecb98SNicholas Piggin 	WARN_ON_ONCE(end == TLB_FLUSH_ALL);
1033dcfecb98SNicholas Piggin 
103447d99948SChristophe Leroy 	preempt_disable();
103547d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
1036dcfecb98SNicholas Piggin 	type = flush_type_needed(mm, false);
103754bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_NONE)
103854bb5033SNicholas Piggin 		goto out;
103947d99948SChristophe Leroy 
1040dcfecb98SNicholas Piggin 	if (type == FLUSH_TYPE_GLOBAL)
104126418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
104226418b36SNicholas Piggin 	else
104326418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
1044cec6515aSAneesh Kumar K.V 	/*
1045cec6515aSAneesh Kumar K.V 	 * A full PID flush already does the PWC flush. If this is not a full
1046cec6515aSAneesh Kumar K.V 	 * PID flush and the range covers at least a PMD, force a PWC flush;
1047cec6515aSAneesh Kumar K.V 	 * mremap() depends on this behaviour.
1048cec6515aSAneesh Kumar K.V 	 */
1049cec6515aSAneesh Kumar K.V 	if (!flush_pid && (end - start) >= PMD_SIZE)
1050cec6515aSAneesh Kumar K.V 		flush_pwc = true;
105126418b36SNicholas Piggin 
105226418b36SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
1053cec6515aSAneesh Kumar K.V 		unsigned long type = H_RPTI_TYPE_TLB;
1054dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU;
1055dd3d9aa5SNicholas Piggin 		unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
1056dd3d9aa5SNicholas Piggin 
1057dd3d9aa5SNicholas Piggin 		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1058dd3d9aa5SNicholas Piggin 			pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
1059dd3d9aa5SNicholas Piggin 		if (atomic_read(&mm->context.copros) > 0)
1060dd3d9aa5SNicholas Piggin 			tgt |= H_RPTI_TARGET_NMMU;
1061cec6515aSAneesh Kumar K.V 		if (flush_pwc)
1062cec6515aSAneesh Kumar K.V 			type |= H_RPTI_TYPE_PWC;
1063cec6515aSAneesh Kumar K.V 		pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
106426418b36SNicholas Piggin 	} else if (flush_pid) {
1065cec6515aSAneesh Kumar K.V 		/*
1066cec6515aSAneesh Kumar K.V 		 * We are now flushing a range larger than PMD size, so force a RIC_FLUSH_ALL (TLB + PWC).
1067cec6515aSAneesh Kumar K.V 		 */
106826418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
1069cec6515aSAneesh Kumar K.V 			_tlbiel_pid(pid, RIC_FLUSH_ALL);
107047d99948SChristophe Leroy 		} else {
10712275d7b5SNicholas Piggin 			if (cputlb_use_tlbie()) {
107247d99948SChristophe Leroy 				_tlbie_pid(pid, RIC_FLUSH_ALL);
10732275d7b5SNicholas Piggin 			} else {
1074cec6515aSAneesh Kumar K.V 				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
10752275d7b5SNicholas Piggin 			}
107647d99948SChristophe Leroy 		}
107747d99948SChristophe Leroy 	} else {
1078d78c8e32SAnders Roxell 		bool hflush;
107947d99948SChristophe Leroy 		unsigned long hstart, hend;
108047d99948SChristophe Leroy 
108147d99948SChristophe Leroy 		hstart = (start + PMD_SIZE - 1) & PMD_MASK;
108247d99948SChristophe Leroy 		hend = end & PMD_MASK;
1083d78c8e32SAnders Roxell 		hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend;
108447d99948SChristophe Leroy 
108526418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
10862275d7b5SNicholas Piggin 			asm volatile("ptesync": : :"memory");
1087cec6515aSAneesh Kumar K.V 			if (flush_pwc)
1088cec6515aSAneesh Kumar K.V 				/* For PWC, only one flush is needed */
1089cec6515aSAneesh Kumar K.V 				__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
109047d99948SChristophe Leroy 			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
109147d99948SChristophe Leroy 			if (hflush)
109247d99948SChristophe Leroy 				__tlbiel_va_range(hstart, hend, pid,
109347d99948SChristophe Leroy 						PMD_SIZE, MMU_PAGE_2M);
109405504b42SNicholas Piggin 			ppc_after_tlbiel_barrier();
10952275d7b5SNicholas Piggin 		} else if (cputlb_use_tlbie()) {
10962275d7b5SNicholas Piggin 			asm volatile("ptesync": : :"memory");
1097cec6515aSAneesh Kumar K.V 			if (flush_pwc)
1098cec6515aSAneesh Kumar K.V 				__tlbie_pid(pid, RIC_FLUSH_PWC);
109947d99948SChristophe Leroy 			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
110047d99948SChristophe Leroy 			if (hflush)
110147d99948SChristophe Leroy 				__tlbie_va_range(hstart, hend, pid,
110247d99948SChristophe Leroy 						PMD_SIZE, MMU_PAGE_2M);
110347d99948SChristophe Leroy 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
11042275d7b5SNicholas Piggin 		} else {
11052275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
1106cec6515aSAneesh Kumar K.V 					start, end, pid, page_size, mmu_virtual_psize, flush_pwc);
11072275d7b5SNicholas Piggin 			if (hflush)
11082275d7b5SNicholas Piggin 				_tlbiel_va_range_multicast(mm,
1109cec6515aSAneesh Kumar K.V 					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc);
111047d99948SChristophe Leroy 		}
111147d99948SChristophe Leroy 	}
111254bb5033SNicholas Piggin out:
111347d99948SChristophe Leroy 	preempt_enable();
11141af5a810SAlistair Popple 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
111547d99948SChristophe Leroy }
111647d99948SChristophe Leroy 
111747d99948SChristophe Leroy void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
111847d99948SChristophe Leroy 		     unsigned long end)
111947d99948SChristophe Leroy 
112047d99948SChristophe Leroy {
112147d99948SChristophe Leroy #ifdef CONFIG_HUGETLB_PAGE
112247d99948SChristophe Leroy 	if (is_vm_hugetlb_page(vma))
112347d99948SChristophe Leroy 		return radix__flush_hugetlb_tlb_range(vma, start, end);
112447d99948SChristophe Leroy #endif
112547d99948SChristophe Leroy 
1126a42d6ba8SAneesh Kumar K.V 	__radix__flush_tlb_range(vma->vm_mm, start, end);
112747d99948SChristophe Leroy }
112847d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_tlb_range);
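/*
 * Illustrative sketch only: generic mm code (munmap, mprotect, ...) normally
 * reaches this through the flush_tlb_range() wrapper. A direct call for a
 * hypothetical vma spanning [start, end) would look like:
 *
 *	radix__flush_tlb_range(vma, start & PAGE_MASK, PAGE_ALIGN(end));
 *
 * and __radix__flush_tlb_range() above then chooses between per-page tlbiel,
 * per-page tlbie, a full-PID flush or an RPT invalidate hcall.
 */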
112947d99948SChristophe Leroy 
113047d99948SChristophe Leroy static int radix_get_mmu_psize(int page_size)
113147d99948SChristophe Leroy {
113247d99948SChristophe Leroy 	int psize;
113347d99948SChristophe Leroy 
113447d99948SChristophe Leroy 	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
113547d99948SChristophe Leroy 		psize = mmu_virtual_psize;
113647d99948SChristophe Leroy 	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
113747d99948SChristophe Leroy 		psize = MMU_PAGE_2M;
113847d99948SChristophe Leroy 	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
113947d99948SChristophe Leroy 		psize = MMU_PAGE_1G;
114047d99948SChristophe Leroy 	else
114147d99948SChristophe Leroy 		return -1;
114247d99948SChristophe Leroy 	return psize;
114347d99948SChristophe Leroy }
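/*
 * Example: with the usual radix page-size geometry, a 2MB page_size maps to
 * MMU_PAGE_2M and a 1GB page_size maps to MMU_PAGE_1G; any size that is not
 * the base, 2M or 1G size returns -1 so callers (see radix__tlb_flush())
 * can fall back to a full mm flush.
 */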
114447d99948SChristophe Leroy 
114547d99948SChristophe Leroy /*
114647d99948SChristophe Leroy  * Flush partition scoped LPID address translation for all CPUs.
114747d99948SChristophe Leroy  */
114847d99948SChristophe Leroy void radix__flush_tlb_lpid_page(unsigned int lpid,
114947d99948SChristophe Leroy 					unsigned long addr,
115047d99948SChristophe Leroy 					unsigned long page_size)
115147d99948SChristophe Leroy {
115247d99948SChristophe Leroy 	int psize = radix_get_mmu_psize(page_size);
115347d99948SChristophe Leroy 
115447d99948SChristophe Leroy 	_tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
115547d99948SChristophe Leroy }
115647d99948SChristophe Leroy EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);
115747d99948SChristophe Leroy 
115847d99948SChristophe Leroy /*
115947d99948SChristophe Leroy  * Flush partition scoped PWC from LPID for all CPUs.
116047d99948SChristophe Leroy  */
116147d99948SChristophe Leroy void radix__flush_pwc_lpid(unsigned int lpid)
116247d99948SChristophe Leroy {
116347d99948SChristophe Leroy 	_tlbie_lpid(lpid, RIC_FLUSH_PWC);
116447d99948SChristophe Leroy }
116547d99948SChristophe Leroy EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
116647d99948SChristophe Leroy 
116747d99948SChristophe Leroy /*
116847d99948SChristophe Leroy  * Flush partition scoped translations from LPID (=LPIDR)
116947d99948SChristophe Leroy  */
117099161de3SNicholas Piggin void radix__flush_all_lpid(unsigned int lpid)
117147d99948SChristophe Leroy {
117247d99948SChristophe Leroy 	_tlbie_lpid(lpid, RIC_FLUSH_ALL);
117347d99948SChristophe Leroy }
117499161de3SNicholas Piggin EXPORT_SYMBOL_GPL(radix__flush_all_lpid);
117547d99948SChristophe Leroy 
117647d99948SChristophe Leroy /*
117799161de3SNicholas Piggin  * Flush process scoped translations from LPID (=LPIDR)
117847d99948SChristophe Leroy  */
117999161de3SNicholas Piggin void radix__flush_all_lpid_guest(unsigned int lpid)
118047d99948SChristophe Leroy {
118199161de3SNicholas Piggin 	_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
118247d99948SChristophe Leroy }
118347d99948SChristophe Leroy 
118447d99948SChristophe Leroy void radix__tlb_flush(struct mmu_gather *tlb)
118547d99948SChristophe Leroy {
118647d99948SChristophe Leroy 	int psize = 0;
118747d99948SChristophe Leroy 	struct mm_struct *mm = tlb->mm;
118847d99948SChristophe Leroy 	int page_size = tlb->page_size;
118947d99948SChristophe Leroy 	unsigned long start = tlb->start;
119047d99948SChristophe Leroy 	unsigned long end = tlb->end;
119147d99948SChristophe Leroy 
119247d99948SChristophe Leroy 	/*
119347d99948SChristophe Leroy 	 * if page size is not something we understand, do a full mm flush
119447d99948SChristophe Leroy 	 *
119547d99948SChristophe Leroy 	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
119647d99948SChristophe Leroy 	 * that flushes the process table entry cache upon process teardown.
119747d99948SChristophe Leroy 	 * See the comment for radix in arch_exit_mmap().
119847d99948SChristophe Leroy 	 */
119945abf5d9SNicholas Piggin 	if (tlb->fullmm) {
1200e43c0a0cSNicholas Piggin 		if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
1201e43c0a0cSNicholas Piggin 			/*
1202e43c0a0cSNicholas Piggin 			 * Shootdown based lazy tlb mm refcounting means we
1203e43c0a0cSNicholas Piggin 			 * have to IPI everyone in the mm_cpumask anyway soon
1204e43c0a0cSNicholas Piggin 			 * when the mm goes away, so might as well do it as
1205e43c0a0cSNicholas Piggin 			 * part of the final flush now.
1206e43c0a0cSNicholas Piggin 			 *
1207e43c0a0cSNicholas Piggin 			 * If lazy shootdown was improved to reduce IPIs (e.g.,
1208e43c0a0cSNicholas Piggin 			 * by batching), then it may end up being better to use
1209e43c0a0cSNicholas Piggin 			 * tlbies here instead.
1210e43c0a0cSNicholas Piggin 			 */
1211e43c0a0cSNicholas Piggin 			preempt_disable();
1212e43c0a0cSNicholas Piggin 
1213e43c0a0cSNicholas Piggin 			smp_mb(); /* see radix__flush_tlb_mm */
1214e43c0a0cSNicholas Piggin 			exit_flush_lazy_tlbs(mm);
121547d99948SChristophe Leroy 			__flush_all_mm(mm, true);
1216e43c0a0cSNicholas Piggin 
1217e43c0a0cSNicholas Piggin 			preempt_enable();
1218e43c0a0cSNicholas Piggin 		} else {
1219e43c0a0cSNicholas Piggin 			__flush_all_mm(mm, true);
1220e43c0a0cSNicholas Piggin 		}
1221e43c0a0cSNicholas Piggin 
122247d99948SChristophe Leroy 	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
122352162ec7SAneesh Kumar K.V 		if (!tlb->freed_tables)
122447d99948SChristophe Leroy 			radix__flush_tlb_mm(mm);
122547d99948SChristophe Leroy 		else
122647d99948SChristophe Leroy 			radix__flush_all_mm(mm);
122747d99948SChristophe Leroy 	} else {
122852162ec7SAneesh Kumar K.V 		if (!tlb->freed_tables)
122947d99948SChristophe Leroy 			radix__flush_tlb_range_psize(mm, start, end, psize);
123047d99948SChristophe Leroy 		else
123147d99948SChristophe Leroy 			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
123247d99948SChristophe Leroy 	}
123347d99948SChristophe Leroy }
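/*
 * Illustrative sketch only: radix__tlb_flush() is reached from the generic
 * mmu_gather teardown, roughly:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	... unmap/clear PTEs, accumulating pages and freed tables ...
 *	tlb_finish_mmu(&tlb);	// -> tlb_flush() -> radix__tlb_flush()
 *
 * The fullmm, freed_tables and page_size fields consulted above are filled
 * in by that generic code before the final flush.
 */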
123447d99948SChristophe Leroy 
123507d8ad6fSAneesh Kumar K.V static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
123647d99948SChristophe Leroy 				unsigned long start, unsigned long end,
123747d99948SChristophe Leroy 				int psize, bool also_pwc)
123847d99948SChristophe Leroy {
123947d99948SChristophe Leroy 	unsigned long pid;
124047d99948SChristophe Leroy 	unsigned int page_shift = mmu_psize_defs[psize].shift;
124147d99948SChristophe Leroy 	unsigned long page_size = 1UL << page_shift;
124247d99948SChristophe Leroy 	unsigned long nr_pages = (end - start) >> page_shift;
124326418b36SNicholas Piggin 	bool flush_pid;
124426418b36SNicholas Piggin 	enum tlb_flush_type type;
124547d99948SChristophe Leroy 
124647d99948SChristophe Leroy 	pid = mm->context.id;
1247d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
124847d99948SChristophe Leroy 		return;
124947d99948SChristophe Leroy 
1250dcfecb98SNicholas Piggin 	WARN_ON_ONCE(end == TLB_FLUSH_ALL);
125126418b36SNicholas Piggin 
125247d99948SChristophe Leroy 	preempt_disable();
125347d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
1254dcfecb98SNicholas Piggin 	type = flush_type_needed(mm, false);
125554bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_NONE)
125654bb5033SNicholas Piggin 		goto out;
125747d99948SChristophe Leroy 
1258dcfecb98SNicholas Piggin 	if (type == FLUSH_TYPE_GLOBAL)
125926418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
126026418b36SNicholas Piggin 	else
126126418b36SNicholas Piggin 		flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
126226418b36SNicholas Piggin 
126326418b36SNicholas Piggin 	if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
1264dd3d9aa5SNicholas Piggin 		unsigned long tgt = H_RPTI_TARGET_CMMU;
1265dd3d9aa5SNicholas Piggin 		unsigned long type = H_RPTI_TYPE_TLB;
1266dd3d9aa5SNicholas Piggin 		unsigned long pg_sizes = psize_to_rpti_pgsize(psize);
1267dd3d9aa5SNicholas Piggin 
1268dd3d9aa5SNicholas Piggin 		if (also_pwc)
1269dd3d9aa5SNicholas Piggin 			type |= H_RPTI_TYPE_PWC;
1270dd3d9aa5SNicholas Piggin 		if (atomic_read(&mm->context.copros) > 0)
1271dd3d9aa5SNicholas Piggin 			tgt |= H_RPTI_TARGET_NMMU;
1272dd3d9aa5SNicholas Piggin 		pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
127326418b36SNicholas Piggin 	} else if (flush_pid) {
127426418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL) {
127547d99948SChristophe Leroy 			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
127647d99948SChristophe Leroy 		} else {
12772275d7b5SNicholas Piggin 			if (cputlb_use_tlbie()) {
127847d99948SChristophe Leroy 				if (mm_needs_flush_escalation(mm))
127947d99948SChristophe Leroy 					also_pwc = true;
128047d99948SChristophe Leroy 
12812275d7b5SNicholas Piggin 				_tlbie_pid(pid,
12822275d7b5SNicholas Piggin 					also_pwc ?  RIC_FLUSH_ALL : RIC_FLUSH_TLB);
12832275d7b5SNicholas Piggin 			} else {
12842275d7b5SNicholas Piggin 				_tlbiel_pid_multicast(mm, pid,
12852275d7b5SNicholas Piggin 					also_pwc ?  RIC_FLUSH_ALL : RIC_FLUSH_TLB);
12862275d7b5SNicholas Piggin 			}
12872275d7b5SNicholas Piggin 
128847d99948SChristophe Leroy 		}
128947d99948SChristophe Leroy 	} else {
129026418b36SNicholas Piggin 		if (type == FLUSH_TYPE_LOCAL)
129147d99948SChristophe Leroy 			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
12922275d7b5SNicholas Piggin 		else if (cputlb_use_tlbie())
129347d99948SChristophe Leroy 			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
12942275d7b5SNicholas Piggin 		else
12952275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
12962275d7b5SNicholas Piggin 					start, end, pid, page_size, psize, also_pwc);
129747d99948SChristophe Leroy 	}
129854bb5033SNicholas Piggin out:
129947d99948SChristophe Leroy 	preempt_enable();
13001af5a810SAlistair Popple 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
130147d99948SChristophe Leroy }
130247d99948SChristophe Leroy 
130347d99948SChristophe Leroy void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
130447d99948SChristophe Leroy 				  unsigned long end, int psize)
130547d99948SChristophe Leroy {
130647d99948SChristophe Leroy 	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
130747d99948SChristophe Leroy }
130847d99948SChristophe Leroy 
1309cec6515aSAneesh Kumar K.V void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
131047d99948SChristophe Leroy 				      unsigned long end, int psize)
131147d99948SChristophe Leroy {
131247d99948SChristophe Leroy 	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
131347d99948SChristophe Leroy }
131447d99948SChristophe Leroy 
131547d99948SChristophe Leroy #ifdef CONFIG_TRANSPARENT_HUGEPAGE
131647d99948SChristophe Leroy void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
131747d99948SChristophe Leroy {
131847d99948SChristophe Leroy 	unsigned long pid, end;
131926418b36SNicholas Piggin 	enum tlb_flush_type type;
132047d99948SChristophe Leroy 
132147d99948SChristophe Leroy 	pid = mm->context.id;
1322d01dc25eSNicholas Piggin 	if (WARN_ON_ONCE(pid == MMU_NO_CONTEXT))
132347d99948SChristophe Leroy 		return;
132447d99948SChristophe Leroy 
132547d99948SChristophe Leroy 	/* 4k page size, just blow the world */
132647d99948SChristophe Leroy 	if (PAGE_SIZE == 0x1000) {
132747d99948SChristophe Leroy 		radix__flush_all_mm(mm);
132847d99948SChristophe Leroy 		return;
132947d99948SChristophe Leroy 	}
133047d99948SChristophe Leroy 
133147d99948SChristophe Leroy 	end = addr + HPAGE_PMD_SIZE;
133247d99948SChristophe Leroy 
133347d99948SChristophe Leroy 	/* Otherwise first do the PWC, then iterate the pages. */
133447d99948SChristophe Leroy 	preempt_disable();
133547d99948SChristophe Leroy 	smp_mb(); /* see radix__flush_tlb_mm */
133626418b36SNicholas Piggin 	type = flush_type_needed(mm, false);
133754bb5033SNicholas Piggin 	if (type == FLUSH_TYPE_LOCAL) {
133854bb5033SNicholas Piggin 		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
133954bb5033SNicholas Piggin 	} else if (type == FLUSH_TYPE_GLOBAL) {
1340dd3d9aa5SNicholas Piggin 		if (!mmu_has_feature(MMU_FTR_GTSE)) {
1341dd3d9aa5SNicholas Piggin 			unsigned long tgt, type, pg_sizes;
1342dd3d9aa5SNicholas Piggin 
1343dd3d9aa5SNicholas Piggin 			tgt = H_RPTI_TARGET_CMMU;
1344dd3d9aa5SNicholas Piggin 			type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
1345dd3d9aa5SNicholas Piggin 			       H_RPTI_TYPE_PRT;
1346dd3d9aa5SNicholas Piggin 			pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
1347dd3d9aa5SNicholas Piggin 
1348dd3d9aa5SNicholas Piggin 			if (atomic_read(&mm->context.copros) > 0)
1349dd3d9aa5SNicholas Piggin 				tgt |= H_RPTI_TARGET_NMMU;
1350dd3d9aa5SNicholas Piggin 			pseries_rpt_invalidate(pid, tgt, type, pg_sizes,
1351dd3d9aa5SNicholas Piggin 					       addr, end);
1352dd3d9aa5SNicholas Piggin 		} else if (cputlb_use_tlbie())
135347d99948SChristophe Leroy 			_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
13542275d7b5SNicholas Piggin 		else
13552275d7b5SNicholas Piggin 			_tlbiel_va_range_multicast(mm,
13562275d7b5SNicholas Piggin 					addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
135747d99948SChristophe Leroy 	}
135847d99948SChristophe Leroy 
135947d99948SChristophe Leroy 	preempt_enable();
136047d99948SChristophe Leroy }
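/*
 * Illustrative note: this is intended for the THP collapse path, where a
 * run of small-page PTEs has just been replaced by a single PMD entry. A
 * sketch of a caller, with a hypothetical PMD-aligned "haddr":
 *
 *	radix__flush_tlb_collapsed_pmd(vma->vm_mm, haddr);
 *
 * which flushes the PWC plus every base-page translation under the PMD so
 * no stale small-page entries coexist with the new huge mapping.
 */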
136147d99948SChristophe Leroy #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
136247d99948SChristophe Leroy 
136347d99948SChristophe Leroy void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
136447d99948SChristophe Leroy 				unsigned long start, unsigned long end)
136547d99948SChristophe Leroy {
136647d99948SChristophe Leroy 	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
136747d99948SChristophe Leroy }
136847d99948SChristophe Leroy EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
136947d99948SChristophe Leroy 
1370*27af67f3SAneesh Kumar K.V void radix__flush_pud_tlb_range(struct vm_area_struct *vma,
1371*27af67f3SAneesh Kumar K.V 				unsigned long start, unsigned long end)
1372*27af67f3SAneesh Kumar K.V {
1373*27af67f3SAneesh Kumar K.V 	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_1G);
1374*27af67f3SAneesh Kumar K.V }
1375*27af67f3SAneesh Kumar K.V EXPORT_SYMBOL(radix__flush_pud_tlb_range);
1376*27af67f3SAneesh Kumar K.V 
137747d99948SChristophe Leroy void radix__flush_tlb_all(void)
137847d99948SChristophe Leroy {
137947d99948SChristophe Leroy 	unsigned long rb, prs, r, rs;
138047d99948SChristophe Leroy 	unsigned long ric = RIC_FLUSH_ALL;
138147d99948SChristophe Leroy 
138247d99948SChristophe Leroy 	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
138347d99948SChristophe Leroy 	prs = 0; /* partition scoped */
138447d99948SChristophe Leroy 	r = 1;   /* radix format */
138547d99948SChristophe Leroy 	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */
138647d99948SChristophe Leroy 
138747d99948SChristophe Leroy 	asm volatile("ptesync": : :"memory");
138847d99948SChristophe Leroy 	/*
138947d99948SChristophe Leroy 	 * now flush guest entries by passing PRS = 1 and LPID != 0
139047d99948SChristophe Leroy 	 */
139147d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
139247d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
139347d99948SChristophe Leroy 	/*
139447d99948SChristophe Leroy 	 * now flush host entries by passing PRS = 0 and LPID == 0
139547d99948SChristophe Leroy 	 */
139647d99948SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
139747d99948SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
139847d99948SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
139947d99948SChristophe Leroy }
1400f0c6fbbbSBharata B Rao 
1401f0c6fbbbSBharata B Rao #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
14024a9dd8f2SChristophe Leroy static __always_inline void __tlbie_pid_lpid(unsigned long pid,
14034a9dd8f2SChristophe Leroy 					     unsigned long lpid,
14044a9dd8f2SChristophe Leroy 					     unsigned long ric)
14054a9dd8f2SChristophe Leroy {
14064a9dd8f2SChristophe Leroy 	unsigned long rb, rs, prs, r;
14074a9dd8f2SChristophe Leroy 
14084a9dd8f2SChristophe Leroy 	rb = PPC_BIT(53); /* IS = 1 */
14094a9dd8f2SChristophe Leroy 	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
14104a9dd8f2SChristophe Leroy 	prs = 1; /* process scoped */
14114a9dd8f2SChristophe Leroy 	r = 1;   /* radix format */
14124a9dd8f2SChristophe Leroy 
14134a9dd8f2SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
14144a9dd8f2SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
14154a9dd8f2SChristophe Leroy 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
14164a9dd8f2SChristophe Leroy }
14174a9dd8f2SChristophe Leroy 
14184a9dd8f2SChristophe Leroy static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
14194a9dd8f2SChristophe Leroy 					    unsigned long lpid,
14204a9dd8f2SChristophe Leroy 					    unsigned long ap, unsigned long ric)
14214a9dd8f2SChristophe Leroy {
14224a9dd8f2SChristophe Leroy 	unsigned long rb, rs, prs, r;
14234a9dd8f2SChristophe Leroy 
14244a9dd8f2SChristophe Leroy 	rb = va & ~(PPC_BITMASK(52, 63));
14254a9dd8f2SChristophe Leroy 	rb |= ap << PPC_BITLSHIFT(58);
14264a9dd8f2SChristophe Leroy 	rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
14274a9dd8f2SChristophe Leroy 	prs = 1; /* process scoped */
14284a9dd8f2SChristophe Leroy 	r = 1;   /* radix format */
14294a9dd8f2SChristophe Leroy 
14304a9dd8f2SChristophe Leroy 	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
14314a9dd8f2SChristophe Leroy 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
14324a9dd8f2SChristophe Leroy 	trace_tlbie(0, 0, rb, rs, ric, prs, r);
14334a9dd8f2SChristophe Leroy }
14344a9dd8f2SChristophe Leroy 
14354a9dd8f2SChristophe Leroy static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
14364a9dd8f2SChristophe Leroy {
14374a9dd8f2SChristophe Leroy 	/*
14384a9dd8f2SChristophe Leroy 	 * We can use any address for the invalidation, pick one which is
14394a9dd8f2SChristophe Leroy 	 * probably unused as an optimisation.
14404a9dd8f2SChristophe Leroy 	 */
14414a9dd8f2SChristophe Leroy 	unsigned long va = ((1UL << 52) - 1);
14424a9dd8f2SChristophe Leroy 
14434a9dd8f2SChristophe Leroy 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
14444a9dd8f2SChristophe Leroy 		asm volatile("ptesync" : : : "memory");
14454a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
14464a9dd8f2SChristophe Leroy 	}
14474a9dd8f2SChristophe Leroy 
14484a9dd8f2SChristophe Leroy 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
14494a9dd8f2SChristophe Leroy 		asm volatile("ptesync" : : : "memory");
14504a9dd8f2SChristophe Leroy 		__tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
14514a9dd8f2SChristophe Leroy 				RIC_FLUSH_TLB);
14524a9dd8f2SChristophe Leroy 	}
14534a9dd8f2SChristophe Leroy }
14544a9dd8f2SChristophe Leroy 
14554a9dd8f2SChristophe Leroy static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
14564a9dd8f2SChristophe Leroy 				   unsigned long ric)
14574a9dd8f2SChristophe Leroy {
14584a9dd8f2SChristophe Leroy 	asm volatile("ptesync" : : : "memory");
14594a9dd8f2SChristophe Leroy 
14604a9dd8f2SChristophe Leroy 	/*
14614a9dd8f2SChristophe Leroy 	 * Work around the fact that the "ric" argument to __tlbie_pid_lpid
14624a9dd8f2SChristophe Leroy 	 * must be a compile-time constant to match the "i" constraint
14634a9dd8f2SChristophe Leroy 	 * in the asm statement.
14644a9dd8f2SChristophe Leroy 	 */
14654a9dd8f2SChristophe Leroy 	switch (ric) {
14664a9dd8f2SChristophe Leroy 	case RIC_FLUSH_TLB:
14674a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
14684a9dd8f2SChristophe Leroy 		fixup_tlbie_pid_lpid(pid, lpid);
14694a9dd8f2SChristophe Leroy 		break;
14704a9dd8f2SChristophe Leroy 	case RIC_FLUSH_PWC:
14714a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
14724a9dd8f2SChristophe Leroy 		break;
14734a9dd8f2SChristophe Leroy 	case RIC_FLUSH_ALL:
14744a9dd8f2SChristophe Leroy 	default:
14754a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
14764a9dd8f2SChristophe Leroy 		fixup_tlbie_pid_lpid(pid, lpid);
14774a9dd8f2SChristophe Leroy 	}
14784a9dd8f2SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
14794a9dd8f2SChristophe Leroy }
14804a9dd8f2SChristophe Leroy 
14814a9dd8f2SChristophe Leroy static inline void fixup_tlbie_va_range_lpid(unsigned long va,
14824a9dd8f2SChristophe Leroy 					     unsigned long pid,
14834a9dd8f2SChristophe Leroy 					     unsigned long lpid,
14844a9dd8f2SChristophe Leroy 					     unsigned long ap)
14854a9dd8f2SChristophe Leroy {
14864a9dd8f2SChristophe Leroy 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
14874a9dd8f2SChristophe Leroy 		asm volatile("ptesync" : : : "memory");
14884a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
14894a9dd8f2SChristophe Leroy 	}
14904a9dd8f2SChristophe Leroy 
14914a9dd8f2SChristophe Leroy 	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
14924a9dd8f2SChristophe Leroy 		asm volatile("ptesync" : : : "memory");
14934a9dd8f2SChristophe Leroy 		__tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
14944a9dd8f2SChristophe Leroy 	}
14954a9dd8f2SChristophe Leroy }
14964a9dd8f2SChristophe Leroy 
14974a9dd8f2SChristophe Leroy static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
14984a9dd8f2SChristophe Leroy 					 unsigned long pid, unsigned long lpid,
14994a9dd8f2SChristophe Leroy 					 unsigned long page_size,
15004a9dd8f2SChristophe Leroy 					 unsigned long psize)
15014a9dd8f2SChristophe Leroy {
15024a9dd8f2SChristophe Leroy 	unsigned long addr;
15034a9dd8f2SChristophe Leroy 	unsigned long ap = mmu_get_ap(psize);
15044a9dd8f2SChristophe Leroy 
15054a9dd8f2SChristophe Leroy 	for (addr = start; addr < end; addr += page_size)
15064a9dd8f2SChristophe Leroy 		__tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
15074a9dd8f2SChristophe Leroy 
15084a9dd8f2SChristophe Leroy 	fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
15094a9dd8f2SChristophe Leroy }
15104a9dd8f2SChristophe Leroy 
15114a9dd8f2SChristophe Leroy static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
15124a9dd8f2SChristophe Leroy 					unsigned long pid, unsigned long lpid,
15134a9dd8f2SChristophe Leroy 					unsigned long page_size,
15144a9dd8f2SChristophe Leroy 					unsigned long psize, bool also_pwc)
15154a9dd8f2SChristophe Leroy {
15164a9dd8f2SChristophe Leroy 	asm volatile("ptesync" : : : "memory");
15174a9dd8f2SChristophe Leroy 	if (also_pwc)
15184a9dd8f2SChristophe Leroy 		__tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
15194a9dd8f2SChristophe Leroy 	__tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
15204a9dd8f2SChristophe Leroy 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
15214a9dd8f2SChristophe Leroy }
15224a9dd8f2SChristophe Leroy 
1523f0c6fbbbSBharata B Rao /*
1524f0c6fbbbSBharata B Rao  * Performs process-scoped invalidations for a given LPID
1525f0c6fbbbSBharata B Rao  * as part of H_RPT_INVALIDATE hcall.
1526f0c6fbbbSBharata B Rao  */
1527f0c6fbbbSBharata B Rao void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
1528f0c6fbbbSBharata B Rao 			     unsigned long type, unsigned long pg_sizes,
1529f0c6fbbbSBharata B Rao 			     unsigned long start, unsigned long end)
1530f0c6fbbbSBharata B Rao {
1531f0c6fbbbSBharata B Rao 	unsigned long psize, nr_pages;
1532f0c6fbbbSBharata B Rao 	struct mmu_psize_def *def;
1533f0c6fbbbSBharata B Rao 	bool flush_pid;
1534f0c6fbbbSBharata B Rao 
1535f0c6fbbbSBharata B Rao 	/*
1536f0c6fbbbSBharata B Rao 	 * A H_RPTI_TYPE_ALL request implies RIC=3, hence
1537f0c6fbbbSBharata B Rao 	 * do a single IS=1 based flush.
1538f0c6fbbbSBharata B Rao 	 */
1539f0c6fbbbSBharata B Rao 	if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) {
1540f0c6fbbbSBharata B Rao 		_tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
1541f0c6fbbbSBharata B Rao 		return;
1542f0c6fbbbSBharata B Rao 	}
1543f0c6fbbbSBharata B Rao 
1544f0c6fbbbSBharata B Rao 	if (type & H_RPTI_TYPE_PWC)
1545f0c6fbbbSBharata B Rao 		_tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
1546f0c6fbbbSBharata B Rao 
1547f0c6fbbbSBharata B Rao 	/* Full PID flush */
1548f0c6fbbbSBharata B Rao 	if (start == 0 && end == -1)
1549f0c6fbbbSBharata B Rao 		return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
1550f0c6fbbbSBharata B Rao 
1551f0c6fbbbSBharata B Rao 	/* Do range invalidation for all the valid page sizes */
1552f0c6fbbbSBharata B Rao 	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
1553f0c6fbbbSBharata B Rao 		def = &mmu_psize_defs[psize];
1554f0c6fbbbSBharata B Rao 		if (!(pg_sizes & def->h_rpt_pgsize))
1555f0c6fbbbSBharata B Rao 			continue;
1556f0c6fbbbSBharata B Rao 
1557f0c6fbbbSBharata B Rao 		nr_pages = (end - start) >> def->shift;
1558f0c6fbbbSBharata B Rao 		flush_pid = nr_pages > tlb_single_page_flush_ceiling;
1559f0c6fbbbSBharata B Rao 
1560f0c6fbbbSBharata B Rao 		/*
1561f0c6fbbbSBharata B Rao 		 * If the number of pages spanning the range is above
1562f0c6fbbbSBharata B Rao 		 * the ceiling, convert the request into a full PID flush.
1563f0c6fbbbSBharata B Rao 		 * And since PID flush takes out all the page sizes, there
1564f0c6fbbbSBharata B Rao 		 * is no need to consider remaining page sizes.
1565f0c6fbbbSBharata B Rao 		 */
1566f0c6fbbbSBharata B Rao 		if (flush_pid) {
1567f0c6fbbbSBharata B Rao 			_tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
1568f0c6fbbbSBharata B Rao 			return;
1569f0c6fbbbSBharata B Rao 		}
1570f0c6fbbbSBharata B Rao 		_tlbie_va_range_lpid(start, end, pid, lpid,
1571f0c6fbbbSBharata B Rao 				     (1UL << def->shift), psize, false);
1572f0c6fbbbSBharata B Rao 	}
1573f0c6fbbbSBharata B Rao }
1574f0c6fbbbSBharata B Rao EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt);
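/*
 * Illustrative sketch only: a hypervisor-side H_RPT_INVALIDATE handler is
 * expected to forward the guest's arguments here, e.g. with hypothetical
 * pid/lpid values taken from the hcall registers:
 *
 *	do_h_rpt_invalidate_prt(pid, lpid, H_RPTI_TYPE_TLB,
 *				H_RPTI_PAGE_ALL, 0, -1UL);
 *
 * which, because start == 0 and end == -1, collapses into a single
 * RIC_FLUSH_TLB flush of the whole (pid, lpid) context.
 */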
1575f0c6fbbbSBharata B Rao 
1576f0c6fbbbSBharata B Rao #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
15773e188b1aSAneesh Kumar K.V 
15783e188b1aSAneesh Kumar K.V static int __init create_tlb_single_page_flush_ceiling(void)
15793e188b1aSAneesh Kumar K.V {
15803e188b1aSAneesh Kumar K.V 	debugfs_create_u32("tlb_single_page_flush_ceiling", 0600,
1581dbf77fedSAneesh Kumar K.V 			   arch_debugfs_dir, &tlb_single_page_flush_ceiling);
15823e188b1aSAneesh Kumar K.V 	debugfs_create_u32("tlb_local_single_page_flush_ceiling", 0600,
1583dbf77fedSAneesh Kumar K.V 			   arch_debugfs_dir, &tlb_local_single_page_flush_ceiling);
15843e188b1aSAneesh Kumar K.V 	return 0;
15853e188b1aSAneesh Kumar K.V }
15863e188b1aSAneesh Kumar K.V late_initcall(create_tlb_single_page_flush_ceiling);
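/*
 * The two ceilings above are exposed as writable debugfs files under the
 * powerpc arch directory (typically /sys/kernel/debug/powerpc/), so the
 * page-by-page vs. full-PID trade-off can be tuned at run time without
 * rebuilding the kernel.
 */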
15873e188b1aSAneesh Kumar K.V 