/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */

/* AArch64 EL1h, all of DAIF masked: the architectural warm-reset state. */
static const struct kvm_regs default_regs_reset = {
	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
			PSR_F_BIT | PSR_D_BIT),
};

/* AArch32 SVC mode with A/I/F masked, for 32-bit EL1 guests. */
static const struct kvm_regs default_regs_reset32 = {
	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
};

/*
 * Check whether EL1 supports AArch32, based on the system-wide
 * sanitised ID_AA64PFR0_EL1 value.
 *
 * NOTE(review): 0x20 is a magic mask into the EL1 field of the register
 * (presumably selecting the AArch32-capable encoding) — confirm against
 * the ID_AA64PFR0_EL1.EL1 field definition.
 */
static bool cpu_has_32bit_el1(void)
{
	u64 pfr0;

	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	return !!(pfr0 & 0x20);
}

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 *
 * Returns the value of the requested capability: a boolean for simple
 * feature queries, or a count/limit for KVM_CAP_GUEST_DEBUG_HW_BPS,
 * KVM_CAP_GUEST_DEBUG_HW_WPS and KVM_CAP_ARM_VM_IPA_SIZE.  Unknown
 * extensions report 0 (not supported).
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpu_has_32bit_el1();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = kvm_ipa_limit;
		break;
	default:
		r = 0;
	}

	return r;
}

/* Maximum SVE vector length usable by guests, in bytes. */
unsigned int kvm_sve_max_vl;

/*
 * One-time setup of host-wide KVM arch state.  Currently this only
 * derives the guest SVE vector length limit from what the host can
 * virtualise on every CPU.
 */
int kvm_arm_init_arch_resources(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 *
 * Returns 0 on success, -EIO if the previously negotiated vector length
 * is inconsistent, or -ENOMEM if the register backing store cannot be
 * allocated.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

	/* Zeroed so the guest's initial SVE register state is all-zero. */
	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

/*
 * Handle the KVM_ARM_VCPU_FINALIZE ioctl for @vcpu.  @what selects the
 * feature to finalize; only KVM_ARM_VCPU_SVE is currently recognised.
 * Finalizing a feature twice returns -EPERM; an unknown or disabled
 * feature returns -EINVAL.
 */
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int what)
{
	switch (what) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

/*
 * Report whether all required finalization has been performed for
 * @vcpu.  Only SVE currently requires explicit finalization.
 */
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

/* Release per-vcpu arch state (sve_state may be NULL; kfree handles that). */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on
 * the virtual CPU struct to their architecturally defined reset
 * values.
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	const struct kvm_regs *cpu_reset;
	int ret = -EINVAL;
	bool loaded;

	preempt_disable();
	/* vcpu->cpu != -1 means the vcpu is currently loaded on a CPU. */
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			/* Fail if userspace asked for 32-bit EL1 we can't run */
			if (!cpu_has_32bit_el1())
				goto out;
			cpu_reset = &default_regs_reset32;
		} else {
			cpu_reset = &default_regs_reset;
		}

		break;
	}

	/* Reset core registers */
	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		/* Consume the pending PSCI-imposed reset state. */
		vcpu->arch.reset_state.reset = false;
	}

	/* Reset PMU */
	kvm_pmu_vcpu_reset(vcpu);

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

/*
 * Compute the host-wide limit on guest IPA size (kvm_ipa_limit), clamped
 * by both the CPUs' physical address range and the kernel's stage1 page
 * table configuration.  Called once at init, before any VM is created.
 */
void kvm_set_ipa_limit(void)
{
	unsigned int ipa_max, pa_max, va_max, parange;

	/* PARange lives in the low bits of the sanitised ID_AA64MMFR0_EL1. */
	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
	pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

	/* Clamp the IPA limit to the PA size supported by the kernel */
	ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
	/*
	 * Since our stage2 table is dependent on the stage1 page table code,
	 * we must always honor the following condition:
	 *
	 *  Number of levels in Stage1 >= Number of levels in Stage2.
	 *
	 * So clamp the ipa limit further down to limit the number of levels.
	 * Since we can concatenate up to 16 tables at entry level, we could
	 * go up to 4 bits above the maximum VA addressable with the current
	 * number of levels.
	 */
	va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
	va_max += 4;

	if (va_max < ipa_max)
		ipa_max = va_max;

	/*
	 * If the final limit is lower than the real physical address
	 * limit of the CPUs, report the reason.
	 */
	if (ipa_max < pa_max)
		pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
			(va_max < pa_max) ? "Virtual" : "Physical");

	WARN(ipa_max < KVM_PHYS_SHIFT,
	     "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
	kvm_ipa_limit = ipa_max;
	kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
}

/*
 * Configure the VTCR_EL2 for this VM. The VTCR value is common
 * across all the physical CPUs on the system. We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags. HA Flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 *
 * @type carries the requested IPA size in its low bits
 * (KVM_VM_TYPE_ARM_IPA_SIZE); 0 selects the default KVM_PHYS_SHIFT.
 * Returns 0 on success or -EINVAL for an out-of-range/unknown type.
 */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u32 parange, phys_shift;
	u8 lvls;

	/* Reject any type bits beyond the IPA size field. */
	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		/* Explicit request must fit in [32, kvm_ipa_limit]. */
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
	}

	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;
	vtcr |= parange << VTCR_EL2_PS_SHIFT;

	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The feature is RES0 on CPUs without the support
	 * and must be ignored by the CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (kvm_get_vmid_bits() == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;
	kvm->arch.vtcr = vtcr;
	return 0;
}