xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/sys_regs.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
16c30bfb1SFuad Tabba // SPDX-License-Identifier: GPL-2.0-only
26c30bfb1SFuad Tabba /*
36c30bfb1SFuad Tabba  * Copyright (C) 2021 Google LLC
46c30bfb1SFuad Tabba  * Author: Fuad Tabba <tabba@google.com>
56c30bfb1SFuad Tabba  */
66c30bfb1SFuad Tabba 
7cbca1973SMarc Zyngier #include <linux/irqchip/arm-gic-v3.h>
8cbca1973SMarc Zyngier 
96c30bfb1SFuad Tabba #include <asm/kvm_asm.h>
106c30bfb1SFuad Tabba #include <asm/kvm_mmu.h>
116c30bfb1SFuad Tabba 
126c30bfb1SFuad Tabba #include <hyp/adjust_pc.h>
136c30bfb1SFuad Tabba 
143061725dSMarc Zyngier #include <nvhe/fixed_config.h>
156c30bfb1SFuad Tabba 
166c30bfb1SFuad Tabba #include "../../sys_regs.h"
176c30bfb1SFuad Tabba 
186c30bfb1SFuad Tabba /*
196c30bfb1SFuad Tabba  * Copies of the host's CPU features registers holding sanitized values at hyp.
206c30bfb1SFuad Tabba  */
216c30bfb1SFuad Tabba u64 id_aa64pfr0_el1_sys_val;
226c30bfb1SFuad Tabba u64 id_aa64pfr1_el1_sys_val;
236c30bfb1SFuad Tabba u64 id_aa64isar0_el1_sys_val;
246c30bfb1SFuad Tabba u64 id_aa64isar1_el1_sys_val;
25def8c222SVladimir Murzin u64 id_aa64isar2_el1_sys_val;
2672e1be12SFuad Tabba u64 id_aa64mmfr0_el1_sys_val;
2772e1be12SFuad Tabba u64 id_aa64mmfr1_el1_sys_val;
286c30bfb1SFuad Tabba u64 id_aa64mmfr2_el1_sys_val;
29*8669651cSQuentin Perret u64 id_aa64smfr0_el1_sys_val;
306c30bfb1SFuad Tabba 
/*
 * Inject an unknown/undefined exception to an AArch64 guest while most of its
 * sysregs are live.
 */
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	/* Snapshot the guest's PC and PSTATE from the live EL2 registers. */
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	/* Resolve the pended exception into the vcpu's PC/PSTATE copies. */
	__kvm_adjust_pc(vcpu);

	/* Write the resulting exception state back to the hardware. */
	write_sysreg_el1(esr, SYS_ESR);
	write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
	write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
}
516c30bfb1SFuad Tabba 
526c30bfb1SFuad Tabba /*
536c30bfb1SFuad Tabba  * Returns the restricted features values of the feature register based on the
546c30bfb1SFuad Tabba  * limitations in restrict_fields.
556c30bfb1SFuad Tabba  * A feature id field value of 0b0000 does not impose any restrictions.
566c30bfb1SFuad Tabba  * Note: Use only for unsigned feature field values.
576c30bfb1SFuad Tabba  */
get_restricted_features_unsigned(u64 sys_reg_val,u64 restrict_fields)586c30bfb1SFuad Tabba static u64 get_restricted_features_unsigned(u64 sys_reg_val,
596c30bfb1SFuad Tabba 					    u64 restrict_fields)
606c30bfb1SFuad Tabba {
616c30bfb1SFuad Tabba 	u64 value = 0UL;
626c30bfb1SFuad Tabba 	u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
636c30bfb1SFuad Tabba 
646c30bfb1SFuad Tabba 	/*
656c30bfb1SFuad Tabba 	 * According to the Arm Architecture Reference Manual, feature fields
666c30bfb1SFuad Tabba 	 * use increasing values to indicate increases in functionality.
676c30bfb1SFuad Tabba 	 * Iterate over the restricted feature fields and calculate the minimum
686c30bfb1SFuad Tabba 	 * unsigned value between the one supported by the system, and what the
696c30bfb1SFuad Tabba 	 * value is being restricted to.
706c30bfb1SFuad Tabba 	 */
716c30bfb1SFuad Tabba 	while (sys_reg_val && restrict_fields) {
726c30bfb1SFuad Tabba 		value |= min(sys_reg_val & mask, restrict_fields & mask);
736c30bfb1SFuad Tabba 		sys_reg_val &= ~mask;
746c30bfb1SFuad Tabba 		restrict_fields &= ~mask;
756c30bfb1SFuad Tabba 		mask <<= ARM64_FEATURE_FIELD_BITS;
766c30bfb1SFuad Tabba 	}
776c30bfb1SFuad Tabba 
786c30bfb1SFuad Tabba 	return value;
796c30bfb1SFuad Tabba }
806c30bfb1SFuad Tabba 
816c30bfb1SFuad Tabba /*
826c30bfb1SFuad Tabba  * Functions that return the value of feature id registers for protected VMs
836c30bfb1SFuad Tabba  * based on allowed features, system features, and KVM support.
846c30bfb1SFuad Tabba  */
856c30bfb1SFuad Tabba 
get_pvm_id_aa64pfr0(const struct kvm_vcpu * vcpu)86ce759167SMarc Zyngier static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
876c30bfb1SFuad Tabba {
886c30bfb1SFuad Tabba 	u64 set_mask = 0;
896c30bfb1SFuad Tabba 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
906c30bfb1SFuad Tabba 
916c30bfb1SFuad Tabba 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
926c30bfb1SFuad Tabba 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
936c30bfb1SFuad Tabba 
946c30bfb1SFuad Tabba 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
956c30bfb1SFuad Tabba }
966c30bfb1SFuad Tabba 
get_pvm_id_aa64pfr1(const struct kvm_vcpu * vcpu)97ce759167SMarc Zyngier static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
986c30bfb1SFuad Tabba {
996c30bfb1SFuad Tabba 	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
1006c30bfb1SFuad Tabba 	u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
1016c30bfb1SFuad Tabba 
1026c30bfb1SFuad Tabba 	if (!kvm_has_mte(kvm))
1036ca2b9caSMark Brown 		allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
1046c30bfb1SFuad Tabba 
1056c30bfb1SFuad Tabba 	return id_aa64pfr1_el1_sys_val & allow_mask;
1066c30bfb1SFuad Tabba }
1076c30bfb1SFuad Tabba 
/* pVM view of ID_AA64ZFR0_EL1: SVE is not exposed, so the register is RAZ. */
static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for Scalable Vectors, therefore, hyp has no sanitized
	 * copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
	return 0;
}
1176c30bfb1SFuad Tabba 
/* pVM view of ID_AA64DFR0_EL1: debug is not exposed, so the register is RAZ. */
static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, including breakpoints, and watchpoints,
	 * therefore, pKVM has no sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
	return 0;
}
1276c30bfb1SFuad Tabba 
/* pVM view of ID_AA64DFR1_EL1: debug is not exposed, so the register is RAZ. */
static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for debug, therefore, hyp has no sanitized copy of the
	 * feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
	return 0;
}
1376c30bfb1SFuad Tabba 
/* pVM view of ID_AA64AFR0_EL1: IMPDEF features are not exposed, so RAZ. */
static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features, therefore, hyp has no
	 * sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
	return 0;
}
1476c30bfb1SFuad Tabba 
/* pVM view of ID_AA64AFR1_EL1: IMPDEF features are not exposed, so RAZ. */
static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
{
	/*
	 * No support for implementation defined features, therefore, hyp has no
	 * sanitized copy of the feature id register.
	 */
	BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
	return 0;
}
1576c30bfb1SFuad Tabba 
/* pVM view of ID_AA64ISAR0_EL1: pass through only the allowed fields. */
static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
{
	return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
}
1626c30bfb1SFuad Tabba 
get_pvm_id_aa64isar1(const struct kvm_vcpu * vcpu)163ce759167SMarc Zyngier static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
1646c30bfb1SFuad Tabba {
1656c30bfb1SFuad Tabba 	u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
1666c30bfb1SFuad Tabba 
1676c30bfb1SFuad Tabba 	if (!vcpu_has_ptrauth(vcpu))
168aa50479bSMark Brown 		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
169aa50479bSMark Brown 				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
170aa50479bSMark Brown 				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
171aa50479bSMark Brown 				ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
1726c30bfb1SFuad Tabba 
1736c30bfb1SFuad Tabba 	return id_aa64isar1_el1_sys_val & allow_mask;
1746c30bfb1SFuad Tabba }
1756c30bfb1SFuad Tabba 
get_pvm_id_aa64isar2(const struct kvm_vcpu * vcpu)176def8c222SVladimir Murzin static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
177def8c222SVladimir Murzin {
178def8c222SVladimir Murzin 	u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
179def8c222SVladimir Murzin 
180def8c222SVladimir Murzin 	if (!vcpu_has_ptrauth(vcpu))
181b2d71f27SMark Brown 		allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
182b2d71f27SMark Brown 				ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
183def8c222SVladimir Murzin 
184def8c222SVladimir Murzin 	return id_aa64isar2_el1_sys_val & allow_mask;
185def8c222SVladimir Murzin }
186def8c222SVladimir Murzin 
get_pvm_id_aa64mmfr0(const struct kvm_vcpu * vcpu)187ce759167SMarc Zyngier static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
1886c30bfb1SFuad Tabba {
1896c30bfb1SFuad Tabba 	u64 set_mask;
1906c30bfb1SFuad Tabba 
1916c30bfb1SFuad Tabba 	set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
1926c30bfb1SFuad Tabba 		PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);
1936c30bfb1SFuad Tabba 
1946c30bfb1SFuad Tabba 	return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
1956c30bfb1SFuad Tabba }
1966c30bfb1SFuad Tabba 
/* pVM view of ID_AA64MMFR1_EL1: pass through only the allowed fields. */
static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
}
2016c30bfb1SFuad Tabba 
/* pVM view of ID_AA64MMFR2_EL1: pass through only the allowed fields. */
static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
{
	return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
}
2066c30bfb1SFuad Tabba 
/*
 * Read a sanitized cpufeature ID register by its encoding.
 *
 * Dispatches to the per-register accessor above; any ID register without a
 * dedicated handler reads as zero (RAZ).
 */
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_pvm_id_aa64pfr0(vcpu);
	case SYS_ID_AA64PFR1_EL1:
		return get_pvm_id_aa64pfr1(vcpu);
	case SYS_ID_AA64ZFR0_EL1:
		return get_pvm_id_aa64zfr0(vcpu);
	case SYS_ID_AA64DFR0_EL1:
		return get_pvm_id_aa64dfr0(vcpu);
	case SYS_ID_AA64DFR1_EL1:
		return get_pvm_id_aa64dfr1(vcpu);
	case SYS_ID_AA64AFR0_EL1:
		return get_pvm_id_aa64afr0(vcpu);
	case SYS_ID_AA64AFR1_EL1:
		return get_pvm_id_aa64afr1(vcpu);
	case SYS_ID_AA64ISAR0_EL1:
		return get_pvm_id_aa64isar0(vcpu);
	case SYS_ID_AA64ISAR1_EL1:
		return get_pvm_id_aa64isar1(vcpu);
	case SYS_ID_AA64ISAR2_EL1:
		return get_pvm_id_aa64isar2(vcpu);
	case SYS_ID_AA64MMFR0_EL1:
		return get_pvm_id_aa64mmfr0(vcpu);
	case SYS_ID_AA64MMFR1_EL1:
		return get_pvm_id_aa64mmfr1(vcpu);
	case SYS_ID_AA64MMFR2_EL1:
		return get_pvm_id_aa64mmfr2(vcpu);
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}
2426c30bfb1SFuad Tabba 
/* Resolve a sys_reg_desc to its encoding and return the pVM view of it. */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r)
{
	return pvm_read_id_reg(vcpu, reg_to_encoding(r));
}
248ce759167SMarc Zyngier 
/* Handler to RAZ/WI sysregs: reads return zero, writes are discarded. */
static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (!p->is_write)
		p->regval = 0;

	/* Always handled in hyp; the trapping instruction will be skipped. */
	return true;
}
2588ffb4188SMarc Zyngier 
/*
 * Accessor for AArch32 feature id registers.
 *
 * The value of these registers is "unknown" according to the spec if AArch32
 * isn't supported.
 */
static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	/* Writes to ID registers are never valid; inject UNDEF. */
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	/*
	 * No support for AArch32 guests, therefore, pKVM has no sanitized copy
	 * of AArch32 feature id registers.
	 */
	BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
		     PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	/* AArch32 is 64-bit only for pVMs, so these registers read as zero. */
	return pvm_access_raz_wi(vcpu, p, r);
}
2836c30bfb1SFuad Tabba 
/*
 * Accessor for AArch64 feature id registers.
 *
 * If access is allowed, set the regval to the protected VM's view of the
 * register and return true.
 * Otherwise, inject an undefined exception and return false.
 */
static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
				  struct sys_reg_params *p,
				  const struct sys_reg_desc *r)
{
	/* Writes to ID registers are never valid; inject UNDEF. */
	if (p->is_write) {
		inject_undef64(vcpu);
		return false;
	}

	p->regval = read_id_reg(vcpu, r);
	return true;
}
3036c30bfb1SFuad Tabba 
/*
 * ICC_SRE_EL1 accessor: reads report SRE/DFB/DIB as set (system register
 * interface only); writes are ignored.
 */
static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	/* pVMs only support GICv3. 'nuf said. */
	if (!p->is_write)
		p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;

	return true;
}
314cbca1973SMarc Zyngier 
/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 * Unallocated ID registers are handled like AArch64 ID registers (RAZ).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = pvm_access_id_aarch64,		\
}

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }
3366c30bfb1SFuad Tabba 
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 * (verified at init time by kvm_check_pvm_sysreg_table()).
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 */
static const struct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),

	/* CRm=2 */
	AARCH32(SYS_ID_ISAR0_EL1),
	AARCH32(SYS_ID_ISAR1_EL1),
	AARCH32(SYS_ID_ISAR2_EL1),
	AARCH32(SYS_ID_ISAR3_EL1),
	AARCH32(SYS_ID_ISAR4_EL1),
	AARCH32(SYS_ID_ISAR5_EL1),
	AARCH32(SYS_ID_MMFR4_EL1),
	AARCH32(SYS_ID_ISAR6_EL1),

	/* CRm=3 */
	AARCH32(SYS_MVFR0_EL1),
	AARCH32(SYS_MVFR1_EL1),
	AARCH32(SYS_MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	AARCH32(SYS_ID_PFR2_EL1),
	AARCH32(SYS_ID_DFR1_EL1),
	AARCH32(SYS_ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	AARCH64(SYS_ID_AA64PFR0_EL1),
	AARCH64(SYS_ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	AARCH64(SYS_ID_AA64ZFR0_EL1),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),
	/* CRm=5 */
	AARCH64(SYS_ID_AA64DFR0_EL1),
	AARCH64(SYS_ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	AARCH64(SYS_ID_AA64AFR0_EL1),
	AARCH64(SYS_ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),
	/* CRm=6 */
	AARCH64(SYS_ID_AA64ISAR0_EL1),
	AARCH64(SYS_ID_AA64ISAR1_EL1),
	AARCH64(SYS_ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),
	/* CRm=7 */
	AARCH64(SYS_ID_AA64MMFR0_EL1),
	AARCH64(SYS_ID_AA64MMFR1_EL1),
	AARCH64(SYS_ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	/* Scalable Vector Registers are restricted. */

	/* Error (RAS) registers read as zero, writes are ignored. */
	RAZ_WI(SYS_ERRIDR_EL1),
	RAZ_WI(SYS_ERRSELR_EL1),
	RAZ_WI(SYS_ERXFR_EL1),
	RAZ_WI(SYS_ERXCTLR_EL1),
	RAZ_WI(SYS_ERXSTATUS_EL1),
	RAZ_WI(SYS_ERXADDR_EL1),
	RAZ_WI(SYS_ERXMISC0_EL1),
	RAZ_WI(SYS_ERXMISC1_EL1),

	/* Performance Monitoring Registers are restricted. */

	/* Limited Ordering Regions Registers are restricted. */

	/* GICv3 SGI generation is forwarded to the host. */
	HOST_HANDLED(SYS_ICC_SGI1R_EL1),
	HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
	HOST_HANDLED(SYS_ICC_SGI0R_EL1),
	{ SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },

	HOST_HANDLED(SYS_CCSIDR_EL1),
	HOST_HANDLED(SYS_CLIDR_EL1),
	HOST_HANDLED(SYS_CSSELR_EL1),
	HOST_HANDLED(SYS_CTR_EL0),

	/* Performance Monitoring Registers are restricted. */

	/* Activity Monitoring Registers are restricted. */

	HOST_HANDLED(SYS_CNTP_TVAL_EL0),
	HOST_HANDLED(SYS_CNTP_CTL_EL0),
	HOST_HANDLED(SYS_CNTP_CVAL_EL0),

	/* Performance Monitoring Registers are restricted. */
};
4506c30bfb1SFuad Tabba 
4516c30bfb1SFuad Tabba /*
4526c30bfb1SFuad Tabba  * Checks that the sysreg table is unique and in-order.
4536c30bfb1SFuad Tabba  *
4546c30bfb1SFuad Tabba  * Returns 0 if the table is consistent, or 1 otherwise.
4556c30bfb1SFuad Tabba  */
kvm_check_pvm_sysreg_table(void)4566c30bfb1SFuad Tabba int kvm_check_pvm_sysreg_table(void)
4576c30bfb1SFuad Tabba {
4586c30bfb1SFuad Tabba 	unsigned int i;
4596c30bfb1SFuad Tabba 
4606c30bfb1SFuad Tabba 	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
4616c30bfb1SFuad Tabba 		if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
4626c30bfb1SFuad Tabba 			return 1;
4636c30bfb1SFuad Tabba 	}
4646c30bfb1SFuad Tabba 
4656c30bfb1SFuad Tabba 	return 0;
4666c30bfb1SFuad Tabba }
4676c30bfb1SFuad Tabba 
/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Decode the trapped access and fetch the source GPR value. */
	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	/* For reads, propagate the result back into the guest's GPR. */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}
505746bdeadSMarc Zyngier 
/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate that
 * the hypervisor has handled the exit, and control should go back to the guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}
517