// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

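/*
 * Host context saved across a guest TLB maintenance operation: only the
 * host's TCR_EL1 needs stashing, and only when the ARM erratum 1319367
 * (SPECULATIVE_AT) workaround is in effect.
 */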
struct tlb_inv_context {
	u64		tcr;
};

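/*
 * Point the hardware at the guest's stage-2 context: program VTTBR_EL2
 * with the guest's VMID and page-table base so that the TLBIs issued by
 * our callers operate on that guest's translations.
 */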
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

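/*
 * Restore the host context: clearing VTTBR_EL2 brings back VMID 0, and
 * the host's TCR_EL1 is restored if the EPD bits were set on the way in.
 */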
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

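	/*
	 * Make sure the caller's page-table updates are visible to the
	 * walkers of every CPU before invalidating.
	 */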
	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
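	/* The TLBI takes a page-aligned IPA, encoded with the low 12 bits dropped */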
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

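/*
 * Invalidate all Stage-1 and Stage-2 TLB entries for this VMID, across
 * the Inner Shareable domain.
 */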
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

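	/* Invalidate every combined S1+S2 entry tagged with this VMID */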
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

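/*
 * Invalidate this CPU's TLB entries for the given VMID only. Note that
 * the mmu pointer is translated to a hyp VA here; kern_hyp_va() is
 * idempotent, so this is safe even if the caller already did so.
 */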
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

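	/*
	 * Only this PE's entries are targeted, so a non-shareable barrier
	 * is enough to order the invalidation.
	 */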
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

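/*
 * Flush TLB entries for all VMIDs at once, typically on VMID generation
 * rollover when every previously allocated VMID becomes stale.
 */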
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
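	/* Invalidate all EL1&0 regime TLB entries, for all VMIDs, IS domain */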
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}