// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at that
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLB entries. This is done by setting the
		 * EPD bits in the TCR_EL1 register. We also need to prevent
		 * it from allocating IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_stage2(mmu, mmu->arch);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(nshst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}