// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
		 * we cannot trust stage-1 to be in a correct state at that
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLB entries by setting the EPD bits in the
		 * TCR_EL1 register. We also need to prevent it from
		 * allocating IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_guest_stage2() already
	 * has an ISB in order to deal with this.
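	 *
	 * The ISB at the end of this function then ensures the TGE
	 * change is visible before any TLB invalidation that follows,
	 * so that the invalidation targets the guest's EL1&0 regime
	 * rather than the host's EL2&0 regime.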
	 */
	__load_guest_stage2(mmu);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
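	 * Hence the DSB(ish) below, which makes sure the IPAS2E1IS
	 * above has completed before the VMALLE1IS is issued.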
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}