// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 *
 * Default pstate for an AArch64 guest: EL1h with all of D/A/I/F masked,
 * i.e. debug and all interrupts disabled until the guest unmasks them.
 */
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

/* Default pstate for an AArch32 guest: SVC mode, A/I/F masked. */
static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};

static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	/*
	 * Test the system-wide sanitised ID_AA64PFR0_EL1 value.  0x20 is
	 * bit 1 of the EL1 field (bits [7:4]); when set, the field reports
	 * that EL1 can be entered in AArch32 state as well as AArch64.
	 */
	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	return !!(pfr0 & 0x20);
}

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 *
 * Returns the capability value for @ext: 0 when unsupported, 1 (or a
 * count/limit, e.g. number of breakpoints or the IPA limit) when supported.
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpu_has_32bit_el1();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = kvm_ipa_limit;
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		/* Both ptrauth flavours are only exposed together, VHE-only */
		r = has_vhe() && system_supports_address_auth() &&
			system_supports_generic_auth();
		break;
	default:
		r = 0;
	}

	return r;
}

/* Maximum SVE vector length (in bytes) that guests may use on this host */
unsigned int kvm_sve_max_vl;

/*
 * One-time init of the host-wide SVE limits for guests.  Always returns 0;
 * when SVE is absent this is a no-op.
 */
int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

/*
 * Opt this vcpu into SVE at KVM_ARM_VCPU_INIT time.  Only records the
 * limits and sets the feature flag; sve_state allocation is deferred to
 * kvm_vcpu_finalize_sve().  Returns 0 or -EINVAL.
 */
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	/* Verify that KVM startup enforced this when SVE was detected: */
	if (WARN_ON(!has_vhe()))
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

/*
 * KVM_ARM_VCPU_FINALIZE ioctl backend: freeze the configuration of
 * @feature.  Only KVM_ARM_VCPU_SVE is currently finalizable; -EPERM if
 * already finalized, -EINVAL for anything else.
 */
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

/* True once every feature that requires finalization has been finalized. */
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* kfree(NULL) is a no-op, so this is safe even pre-finalization */
	kfree(vcpu->arch.sve_state);
}

/* Zero the (already allocated) SVE register backing store on vcpu reset. */
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

/*
 * Opt this vcpu into pointer authentication.  Requires VHE plus both
 * address and generic auth in hardware, and both userspace feature bits
 * set together.  Returns 0 or -EINVAL.
 */
static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/* Support ptrauth only if the system supports these capabilities. */
	if (!has_vhe())
		return -EINVAL;

	if (!system_supports_address_auth() ||
	    !system_supports_generic_auth())
		return -EINVAL;
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values, except for registers whose reset is deferred until
 * kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	/* vcpu->cpu != -1 means this vcpu is currently loaded on a pCPU */
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	/*
	 * First reset (SVE not yet finalized): enable the feature if
	 * requested.  Subsequent resets: just wipe the SVE state.
	 */
	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		/* ret is still -EINVAL here, matching the failure cause */
		if (kvm_vcpu_enable_ptrauth(vcpu))
			goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpu_has_32bit_el1())
				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

/*
 * Compute kvm_ipa_limit, the maximum guest IPA size (in bits) this host
 * can support: CPU PARange, clamped by the kernel's PA mask and by the
 * stage1 VA reach (stage2 may not use more levels than stage1).
 */
void kvm_set_ipa_limit(void)
{
	unsigned int ipa_max, pa_max, va_max, parange;

	/* PARange is the bottom 4 bits of ID_AA64MMFR0_EL1; 0x7 masks
	 * to the defined encodings. */
	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
	pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

	/* Clamp the IPA limit to the PA size supported by the kernel */
	ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
	/*
	 * Since our stage2 table is dependent on the stage1 page table code,
	 * we must always honor the following condition:
	 *
	 *  Number of levels in Stage1 >= Number of levels in Stage2.
	 *
	 * So clamp the ipa limit further down to limit the number of levels.
	 * Since we can concatenate upto 16 tables at entry level, we could
	 * go upto 4bits above the maximum VA addressable with the current
	 * number of levels.
	 */
	va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
	va_max += 4;

	if (va_max < ipa_max)
		ipa_max = va_max;

	/*
	 * If the final limit is lower than the real physical address
	 * limit of the CPUs, report the reason.
	 */
	if (ipa_max < pa_max)
		pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
			(va_max < pa_max) ? "Virtual" : "Physical");

	WARN(ipa_max < KVM_PHYS_SHIFT,
	     "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
	kvm_ipa_limit = ipa_max;
	kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
}

/*
 * Configure the VTCR_EL2 for this VM. The VTCR value is common
 * across all the physical CPUs on the system. We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags. HA Flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 *
 * @type carries the requested IPA size (KVM_VM_TYPE_ARM_IPA_SIZE);
 * 0 selects the default KVM_PHYS_SHIFT.  Returns 0, or -EINVAL for an
 * out-of-range request (above kvm_ipa_limit or below 32 bits).
 */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u32 parange, phys_shift;
	u8 lvls;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
	}

	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;
	vtcr |= parange << VTCR_EL2_PS_SHIFT;

	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The features is RES0 on CPUs without the support
	 * and must be ignored by the CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (kvm_get_vmid_bits() == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;
	kvm->arch.vtcr = vtcr;
	return 0;
}