// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at that
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLBs. This is done by setting the EPD bits
		 * in the TCR_EL1 register. We also need to prevent it from
		 * allocating IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_stage2(mmu, mmu->arch);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

/*
 * Non-shareable variant of __kvm_tlb_flush_vmid_ipa(): uses non-broadcast
 * TLBIs and nsh barriers, so only this CPU's TLB entries are invalidated.
 */
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(nshst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst case as PAGE_SIZE
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}