// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1   (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
                                 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC   (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
                                 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_ARM_EL1_32BIT:
                r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
                break;
        case KVM_CAP_GUEST_DEBUG_HW_BPS:
                r = get_num_brps();
                break;
        case KVM_CAP_GUEST_DEBUG_HW_WPS:
                r = get_num_wrps();
                break;
        case KVM_CAP_ARM_PMU_V3:
                r = kvm_arm_support_pmu_v3();
                break;
        case KVM_CAP_ARM_INJECT_SERROR_ESR:
                r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
                break;
        case KVM_CAP_SET_GUEST_DEBUG:
        case KVM_CAP_VCPU_ATTRIBUTES:
                r = 1;
                break;
        case KVM_CAP_ARM_VM_IPA_SIZE:
                r = kvm_ipa_limit;
                break;
        case KVM_CAP_ARM_SVE:
                r = system_supports_sve();
                break;
        case KVM_CAP_ARM_PTRAUTH_ADDRESS:
        case KVM_CAP_ARM_PTRAUTH_GENERIC:
                r = has_vhe() && system_supports_address_auth() &&
                    system_supports_generic_auth();
                break;
        default:
                r = 0;
        }

        return r;
}

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
        if (system_supports_sve()) {
                kvm_sve_max_vl = sve_max_virtualisable_vl;

                /*
                 * The get_sve_reg()/set_sve_reg() ioctl interface will need
                 * to be extended with multiple register slice support in
                 * order to support vector lengths greater than
                 * SVE_VL_ARCH_MAX:
                 */
                if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
                        kvm_sve_max_vl = SVE_VL_ARCH_MAX;

                /*
                 * Don't even try to make use of vector lengths that
                 * aren't available on all CPUs, for now:
                 */
                if (kvm_sve_max_vl < sve_max_vl)
                        pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
                                kvm_sve_max_vl);
        }

        return 0;
}
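
/*
 * Mark the vcpu as an SVE guest and record the maximum vector length it
 * may use. The sve_state storage itself is not allocated here; that is
 * deferred to kvm_vcpu_finalize_sve() via kvm_arm_vcpu_finalize().
 */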
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
        if (!system_supports_sve())
                return -EINVAL;

        /* Verify that KVM startup enforced this when SVE was detected: */
        if (WARN_ON(!has_vhe()))
                return -EINVAL;

        vcpu->arch.sve_max_vl = kvm_sve_max_vl;

        /*
         * Userspace can still customize the vector lengths by writing
         * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
         * kvm_arm_vcpu_finalize(), which freezes the configuration.
         */
        vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

        return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
        void *buf;
        unsigned int vl;

        vl = vcpu->arch.sve_max_vl;

        /*
         * Responsibility for these properties is shared between
         * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
         * set_sve_vls(). Double-check here just to be sure:
         */
        if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
                    vl > SVE_VL_ARCH_MAX))
                return -EIO;

        buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        vcpu->arch.sve_state = buf;
        vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
        return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
        switch (feature) {
        case KVM_ARM_VCPU_SVE:
                if (!vcpu_has_sve(vcpu))
                        return -EINVAL;

                if (kvm_arm_vcpu_sve_finalized(vcpu))
                        return -EPERM;

                return kvm_vcpu_finalize_sve(vcpu);
        }

        return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
        if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
                return false;

        return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kfree(vcpu->arch.sve_state);
}
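
/*
 * Reset zeroes the SVE register state in place; the backing storage
 * allocated by kvm_vcpu_finalize_sve() is retained.
 */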
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
        if (vcpu_has_sve(vcpu))
                memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
        /* Support ptrauth only if the system supports these capabilities. */
        if (!has_vhe())
                return -EINVAL;

        if (!system_supports_address_auth() ||
            !system_supports_generic_auth())
                return -EINVAL;
        /*
         * For now make sure that both address/generic pointer authentication
         * features are requested by the userspace together.
         */
        if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
                return -EINVAL;

        vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
        return 0;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset values
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
        int ret;
        bool loaded;
        u32 pstate;

        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);

        preempt_disable();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
                if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
                        ret = kvm_vcpu_enable_sve(vcpu);
                        if (ret)
                                goto out;
                }
        } else {
                kvm_vcpu_reset_sve(vcpu);
        }

        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
                if (kvm_vcpu_enable_ptrauth(vcpu)) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
                        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
                }

                break;
        }

        /* Reset core registers */
        memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
        vcpu_gp_regs(vcpu)->regs.pstate = pstate;

        /* Reset system registers */
        kvm_reset_sys_regs(vcpu);

        /*
         * Additional reset state handling that PSCI may have imposed on us.
         * Must be done after all the sys_reg resets.
         */
        if (vcpu->arch.reset_state.reset) {
                unsigned long target_pc = vcpu->arch.reset_state.pc;

                /* Gracefully handle Thumb2 entry point */
                if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
                        target_pc &= ~1UL;
                        vcpu_set_thumb(vcpu);
                }

                /* Propagate caller endianness */
                if (vcpu->arch.reset_state.be)
                        kvm_vcpu_set_be(vcpu);

                *vcpu_pc(vcpu) = target_pc;
                vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

                vcpu->arch.reset_state.reset = false;
        }

        /* Default workaround setup is enabled (if supported) */
        if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

        /* Reset timer */
        ret = kvm_timer_vcpu_reset(vcpu);
out:
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        preempt_enable();
        return ret;
}

u32 get_kvm_ipa_limit(void)
{
        return kvm_ipa_limit;
}
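
/*
 * Derive the host-wide IPA size limit for guests: start from the PARange
 * field of the sanitised ID_AA64MMFR0_EL1 value and clamp it to what the
 * kernel's stage-1 configuration (PA and VA sizes) can support at stage-2.
 */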
int kvm_set_ipa_limit(void)
{
        unsigned int ipa_max, pa_max, va_max, parange, tgran_2;
        u64 mmfr0;

        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_PARANGE_SHIFT);

        /*
         * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
         * Stage-2. If not, things will stop very quickly.
         */
        switch (PAGE_SIZE) {
        default:
        case SZ_4K:
                tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
                break;
        case SZ_16K:
                tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
                break;
        case SZ_64K:
                tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
                break;
        }

        switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
        default:
        case 1:
                kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
                return -EINVAL;
        case 0:
                kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
                break;
        case 2:
                kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
                break;
        }

        pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

        /* Clamp the IPA limit to the PA size supported by the kernel */
        ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
        /*
         * Since our stage2 table is dependent on the stage1 page table code,
         * we must always honor the following condition:
         *
         *  Number of levels in Stage1 >= Number of levels in Stage2.
         *
         * So clamp the ipa limit further down to limit the number of levels.
         * Since we can concatenate up to 16 tables at entry level, we could
         * go up to 4 bits above the maximum VA addressable with the current
         * number of levels. For example, with 4K pages and 48-bit VAs
         * (PGDIR_SHIFT == 39), this gives 39 + 12 - 3 + 4 = 52.
         */
        va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
        va_max += 4;

        if (va_max < ipa_max)
                ipa_max = va_max;

        /*
         * If the final limit is lower than the real physical address
         * limit of the CPUs, report the reason.
         */
        if (ipa_max < pa_max)
                pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
                        (va_max < pa_max) ? "Virtual" : "Physical");

        WARN(ipa_max < KVM_PHYS_SHIFT,
             "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
        kvm_ipa_limit = ipa_max;
        kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);

        return 0;
}

/*
 * Configure the VTCR_EL2 for this VM. The VTCR value is common
 * across all the physical CPUs on the system. We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags. HA Flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
        u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
        u32 parange, phys_shift;
        u8 lvls;

        if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
                return -EINVAL;

        phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
        if (phys_shift) {
                if (phys_shift > kvm_ipa_limit ||
                    phys_shift < 32)
                        return -EINVAL;
        } else {
                phys_shift = KVM_PHYS_SHIFT;
        }

        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_PARANGE_SHIFT);
        if (parange > ID_AA64MMFR0_PARANGE_MAX)
                parange = ID_AA64MMFR0_PARANGE_MAX;
        vtcr |= parange << VTCR_EL2_PS_SHIFT;

        vtcr |= VTCR_EL2_T0SZ(phys_shift);
        /*
         * Use a minimum 2 level page table to prevent splitting
         * host PMD huge pages at stage2.
         */
        lvls = stage2_pgtable_levels(phys_shift);
        if (lvls < 2)
                lvls = 2;
        vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

        /*
         * Enable the Hardware Access Flag management, unconditionally
         * on all CPUs. The feature is RES0 on CPUs without the support
         * and must be ignored by the CPUs.
         */
        vtcr |= VTCR_EL2_HA;

        /* Set the vmid bits */
        vtcr |= (kvm_get_vmid_bits() == 16) ?
                VTCR_EL2_VS_16BIT :
                VTCR_EL2_VS_8BIT;
        kvm->arch.vtcr = vtcr;
        return 0;
}