xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/tlb.c (revision b296a6d5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

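/*
 * Save slot for the host's TCR_EL1 while the speculative AT workaround
 * has the EPD bits set in __tlb_switch_to_guest().
 */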
struct tlb_inv_context {
	u64		tcr;
};

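/*
 * Install the guest's stage-2 context (VMID/VTTBR) so that the TLBI
 * instructions below operate on the guest's translations, taking care
 * of the speculative AT erratum on affected CPUs.
 */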
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs affected by ARM erratum 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

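/*
 * Switch back to the host: VMID 0 is reserved for the host, so writing
 * a zero VTTBR is enough; the host's TCR_EL1 is then restored if the
 * speculative AT workaround modified it.
 */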
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

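/*
 * Invalidate the stage-2 TLB entries for a single IPA in the VMID
 * described by @mmu, at the given level of lookup, on all CPUs in the
 * Inner Shareable domain.
 */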
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
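	/* TLBI by IPA expects a page number, hence the shift by 12 */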
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

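/*
 * Invalidate all stage-1 and stage-2 TLB entries for the VMID described
 * by @mmu, broadcast to the Inner Shareable domain.
 */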
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

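/*
 * Local, non-broadcast flush of the stage-1 TLB entries for the VMID
 * described by @mmu; only this CPU is affected, hence the
 * non-shareable barrier.
 */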
void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

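/*
 * Flush the TLB entries of all VMIDs on all CPUs, as used across a
 * VMID rollover.
 */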
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}