/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};

static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	/* The EL1 field (bits [7:4]) reads 0b0010 when AArch32 is supported */
	return !!(pfr0 & 0x20);
}

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpu_has_32bit_el1();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = kvm_ipa_limit;
		break;
	default:
		r = 0;
	}

	return r;
}
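
/*
 * Illustrative userspace sketch (not part of this file): the
 * capabilities handled above are reported through the KVM_CHECK_EXTENSION
 * ioctl, so a VMM can probe them before configuring the guest, e.g.:
 *
 *	int nbps = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *			 KVM_CAP_GUEST_DEBUG_HW_BPS);
 *
 * where a positive return value is the number of hardware breakpoints
 * usable with KVM_SET_GUEST_DEBUG.
 */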

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpu_has_32bit_el1())
				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

void kvm_set_ipa_limit(void)
{
	unsigned int ipa_max, pa_max, va_max, parange;

	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
	pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

	/* Clamp the IPA limit to the PA size supported by the kernel */
	ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
	/*
	 * Since our stage2 table is dependent on the stage1 page table code,
	 * we must always honor the following condition:
	 *
	 *  Number of levels in Stage1 >= Number of levels in Stage2.
	 *
	 * So clamp the ipa limit further down to limit the number of levels.
	 * Since we can concatenate up to 16 tables at entry level, we could
	 * go up to 4 bits above the maximum VA addressable with the current
	 * number of levels.
	 */
	va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
	va_max += 4;

	if (va_max < ipa_max)
		ipa_max = va_max;

	/*
	 * If the final limit is lower than the real physical address
	 * limit of the CPUs, report the reason.
	 */
	if (ipa_max < pa_max)
		pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
			(va_max < pa_max) ? "Virtual" : "Physical");

	WARN(ipa_max < KVM_PHYS_SHIFT,
	     "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
	kvm_ipa_limit = ipa_max;
	kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
}
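
/*
 * Worked example (illustrative; the numbers assume a 4K-page kernel
 * with 4 levels of stage1 table): PGDIR_SHIFT = 39 and PAGE_SHIFT = 12,
 * so va_max = 39 + 12 - 3 = 48, plus 4 bits for the (up to) 16
 * concatenated entry-level tables gives 52.  A CPU reporting a 48-bit
 * PARange then keeps ipa_max = 48, while a 52-bit PARange on the same
 * kernel would already have been clamped to PHYS_MASK_SHIFT above.
 */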

/*
 * Configure the VTCR_EL2 for this VM.  The VTCR value is common
 * across all the physical CPUs on the system.  We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags.  The HA flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u32 parange, phys_shift;
	u8 lvls;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
	}

	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;
	vtcr |= parange << VTCR_EL2_PS_SHIFT;

	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs.  The feature is RES0 on CPUs without the support
	 * and must be ignored by the CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (kvm_get_vmid_bits() == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;
	kvm->arch.vtcr = vtcr;
	return 0;
}
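
/*
 * Illustrative userspace sketch (not part of this file): the IPA size
 * validated above is chosen at VM creation time by encoding it into the
 * machine type argument of KVM_CREATE_VM, e.g. for a 40-bit IPA space:
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM,
 *			  KVM_VM_TYPE_ARM_IPA_SIZE(40));
 *
 * Passing 0 keeps the default KVM_PHYS_SHIFT (40 bits); values outside
 * the range [32, limit reported by KVM_CAP_ARM_VM_IPA_SIZE] are
 * rejected with -EINVAL.
 */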