// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}
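
/*
 * Illustrative userspace ordering for the SVE handshake implemented
 * above. This is a hedged sketch, not part of this file: "vcpu_fd" is a
 * hypothetical vcpu file descriptor returned by KVM_CREATE_VCPU, and
 * target selection is omitted.
 *
 *	struct kvm_vcpu_init init = { 0 };
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	init.features[KVM_ARM_VCPU_SVE / 32] |= 1U << (KVM_ARM_VCPU_SVE % 32);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);	 -> kvm_vcpu_enable_sve()
 *
 *	(optionally write KVM_REG_ARM64_SVE_VLS with KVM_SET_ONE_REG here,
 *	 while the set of vector lengths is still mutable)
 *
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature); -> kvm_vcpu_finalize_sve()
 *
 * A second KVM_ARM_VCPU_FINALIZE call fails with -EPERM, per
 * kvm_arm_vcpu_finalize() above: the configuration is frozen once
 * sve_state has been allocated.
 */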

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now, make sure that both address/generic pointer authentication
	 * features are requested by userspace together and that the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}
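
/*
 * Note for VMM authors (illustrative, not part of this file): the check
 * above means both ptrauth features must be requested together in the
 * KVM_ARM_VCPU_INIT feature set, e.g.:
 *
 *	init.features[KVM_ARM_VCPU_PTRAUTH_ADDRESS / 32] |=
 *		1U << (KVM_ARM_VCPU_PTRAUTH_ADDRESS % 32);
 *	init.features[KVM_ARM_VCPU_PTRAUTH_GENERIC / 32] |=
 *		1U << (KVM_ARM_VCPU_PTRAUTH_GENERIC % 32);
 *
 * Requesting only one of the two (or either on a host without full
 * pointer authentication support) makes KVM_ARM_VCPU_INIT fail with
 * -EINVAL via kvm_reset_vcpu() -> kvm_vcpu_enable_ptrauth().
 */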

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	bool is32bit;
	unsigned long i;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return false;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm) && is32bit)
		return false;

	/* Check that the vcpus are either all 32bit or all 64bit */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
			return false;
	}

	return true;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to their reset values
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: the KVM_ARM_VCPU_INIT
 * ioctl, or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	mutex_lock(&vcpu->kvm->lock);
	reset_state = vcpu->arch.reset_state;
	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	mutex_unlock(&vcpu->kvm->lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (!vcpu_allowed_register_width(vcpu)) {
		ret = -EINVAL;
		goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}
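
/*
 * For reference (Arm ARM, ID_AA64MMFR0_EL1.PARange): the 4-bit field
 * extracted below encodes the supported physical address range as
 *
 *	0b0000 -> 32 bits	0b0011 -> 42 bits	0b0110 -> 52 bits
 *	0b0001 -> 36 bits	0b0100 -> 44 bits
 *	0b0010 -> 40 bits	0b0101 -> 48 bits
 *
 * id_aa64mmfr0_parange_to_phys_shift() performs this conversion.
 */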
int kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	/*
	 * IPA sizes beyond 48 bits are not supported with either 4K or
	 * 16K page sizes. Hence cap the limit to 48 bits, in case the
	 * system reports a larger range.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}
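
/*
 * Illustrative userspace counterpart to kvm_arm_setup_stage2() (a hedged
 * sketch, not part of this file; "kvm_fd" is a hypothetical descriptor
 * for /dev/kvm):
 *
 *	int ipa_bits = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *			     KVM_CAP_ARM_VM_IPA_SIZE);	(0 if unsupported)
 *	int vm_fd;
 *
 *	if (ipa_bits)
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM,
 *			      KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
 *	else
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 * A type of 0 selects the legacy KVM_PHYS_SHIFT (40 bit) default, which
 * the code above rejects when the host cannot provide 40 bits of IPA,
 * hence the "upgrade your VMM" warning.
 */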