// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

struct tlb_inv_context {
	u64	tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt,
				  bool nsh)
{
	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can mess with the MM
	 *   registers out of context, for which dsb(nsh) is enough
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	/* Same remark as in __tlb_switch_to_guest() */
	dsb(ish);
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}