xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision 16f6ccde74a6f8538c62f127f17207c75f4dba7a)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27c8c5e6aSMarc Zyngier /*
37c8c5e6aSMarc Zyngier  * Copyright (C) 2012,2013 - ARM Ltd
47c8c5e6aSMarc Zyngier  * Author: Marc Zyngier <marc.zyngier@arm.com>
57c8c5e6aSMarc Zyngier  *
67c8c5e6aSMarc Zyngier  * Derived from arch/arm/kvm/coproc.c:
77c8c5e6aSMarc Zyngier  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
87c8c5e6aSMarc Zyngier  * Authors: Rusty Russell <rusty@rustcorp.com.au>
97c8c5e6aSMarc Zyngier  *          Christoffer Dall <c.dall@virtualopensystems.com>
107c8c5e6aSMarc Zyngier  */
117c8c5e6aSMarc Zyngier 
12c8857935SMarc Zyngier #include <linux/bitfield.h>
13623eefa8SMarc Zyngier #include <linux/bsearch.h>
147af0c253SAkihiko Odaki #include <linux/cacheinfo.h>
157c8c5e6aSMarc Zyngier #include <linux/kvm_host.h>
16c6d01a94SMark Rutland #include <linux/mm.h>
1707d79fe7SDave Martin #include <linux/printk.h>
187c8c5e6aSMarc Zyngier #include <linux/uaccess.h>
19c6d01a94SMark Rutland 
207c8c5e6aSMarc Zyngier #include <asm/cacheflush.h>
217c8c5e6aSMarc Zyngier #include <asm/cputype.h>
220c557ed4SMarc Zyngier #include <asm/debug-monitors.h>
23c6d01a94SMark Rutland #include <asm/esr.h>
24c6d01a94SMark Rutland #include <asm/kvm_arm.h>
25c6d01a94SMark Rutland #include <asm/kvm_emulate.h>
26d47533daSChristoffer Dall #include <asm/kvm_hyp.h>
27c6d01a94SMark Rutland #include <asm/kvm_mmu.h>
286ff9dc23SJintack Lim #include <asm/kvm_nested.h>
29ab946834SShannon Zhao #include <asm/perf_event.h>
301f3d8699SMark Rutland #include <asm/sysreg.h>
31c6d01a94SMark Rutland 
327c8c5e6aSMarc Zyngier #include <trace/events/kvm.h>
337c8c5e6aSMarc Zyngier 
347c8c5e6aSMarc Zyngier #include "sys_regs.h"
359d7629beSMarc Zyngier #include "vgic/vgic.h"
367c8c5e6aSMarc Zyngier 
37eef8c85aSAlex Bennée #include "trace.h"
38eef8c85aSAlex Bennée 
397c8c5e6aSMarc Zyngier /*
4062a89c44SMarc Zyngier  * For AArch32, we only take care of what is being trapped. Anything
4162a89c44SMarc Zyngier  * that has to do with init and userspace access has to go via the
4262a89c44SMarc Zyngier  * 64bit interface.
437c8c5e6aSMarc Zyngier  */
447c8c5e6aSMarc Zyngier 
45f24adc65SOliver Upton static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
46c118ceadSJing Zhang static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
47c118ceadSJing Zhang 		      u64 val);
48f24adc65SOliver Upton 
/*
 * Common handler for a guest read of a write-only register: warn once,
 * dump the decoded instruction for diagnostics, and inject an UNDEF
 * into the guest.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;	/* access not emulated; UNDEF has been injected */
}
587b5b4df1SMarc Zyngier 
/*
 * Common handler for a guest write to a read-only register: warn once,
 * dump the decoded instruction for diagnostics, and inject an UNDEF
 * into the guest.
 */
static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;	/* access not emulated; UNDEF has been injected */
}
687b1dba1fSMarc Zyngier 
/*
 * Read a vcpu system register. If the sysregs are currently loaded on
 * the physical CPU (SYSREGS_ON_CPU flag set) and this register is one
 * of those resident in hardware, read the live value; otherwise fall
 * back to the in-memory shadow copy.
 */
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;	/* poison value, overwritten on any valid path */

	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}
797ea90bddSMarc Zyngier 
/*
 * Write a vcpu system register. If the sysregs are currently loaded on
 * the physical CPU (SYSREGS_ON_CPU flag set) and this register is one
 * of those resident in hardware, update the live copy; otherwise update
 * the in-memory shadow copy.
 */
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
	    __vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}
88d47533daSChristoffer Dall 
897c8c5e6aSMarc Zyngier /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
90c73a4416SAndrew Jones #define CSSELR_MAX 14
917c8c5e6aSMarc Zyngier 
927af0c253SAkihiko Odaki /*
937af0c253SAkihiko Odaki  * Returns the minimum line size for the selected cache, expressed as
947af0c253SAkihiko Odaki  * Log2(bytes).
957af0c253SAkihiko Odaki  */
get_min_cache_line_size(bool icache)967af0c253SAkihiko Odaki static u8 get_min_cache_line_size(bool icache)
977c8c5e6aSMarc Zyngier {
987af0c253SAkihiko Odaki 	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
997af0c253SAkihiko Odaki 	u8 field;
1007c8c5e6aSMarc Zyngier 
1017af0c253SAkihiko Odaki 	if (icache)
1027af0c253SAkihiko Odaki 		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
1037af0c253SAkihiko Odaki 	else
1047af0c253SAkihiko Odaki 		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
1057c8c5e6aSMarc Zyngier 
1067af0c253SAkihiko Odaki 	/*
1077af0c253SAkihiko Odaki 	 * Cache line size is represented as Log2(words) in CTR_EL0.
1087af0c253SAkihiko Odaki 	 * Log2(bytes) can be derived with the following:
1097af0c253SAkihiko Odaki 	 *
1107af0c253SAkihiko Odaki 	 * Log2(words) + 2 = Log2(bytes / 4) + 2
1117af0c253SAkihiko Odaki 	 * 		   = Log2(bytes) - 2 + 2
1127af0c253SAkihiko Odaki 	 * 		   = Log2(bytes)
1137af0c253SAkihiko Odaki 	 */
1147af0c253SAkihiko Odaki 	return field + 2;
1157af0c253SAkihiko Odaki }
1167af0c253SAkihiko Odaki 
1177af0c253SAkihiko Odaki /* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	/* A userspace-provided override array takes precedence */
	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	/* CSSELR_EL1_InD selects the instruction side of a split cache */
	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
1527af0c253SAkihiko Odaki 
/*
 * Userspace write of a CCSIDR value for the cache selected by @csselr.
 * Returns 0 on success, -EINVAL for an invalid value, -ENOMEM if the
 * override array could not be allocated.
 */
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	/* Reject RES0 bits and line sizes below the host minimum */
	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		/* Writing back the fabricated default needs no storage */
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		/* Lazily allocate the override array, seeded with defaults */
		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
1817c8c5e6aSMarc Zyngier 
access_rw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1826ff9dc23SJintack Lim static bool access_rw(struct kvm_vcpu *vcpu,
1836ff9dc23SJintack Lim 		      struct sys_reg_params *p,
1846ff9dc23SJintack Lim 		      const struct sys_reg_desc *r)
1856ff9dc23SJintack Lim {
1866ff9dc23SJintack Lim 	if (p->is_write)
1876ff9dc23SJintack Lim 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
1886ff9dc23SJintack Lim 	else
1896ff9dc23SJintack Lim 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
1906ff9dc23SJintack Lim 
1916ff9dc23SJintack Lim 	return true;
1926ff9dc23SJintack Lim }
1936ff9dc23SJintack Lim 
1943c1e7165SMarc Zyngier /*
1953c1e7165SMarc Zyngier  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1963c1e7165SMarc Zyngier  */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	/* DC set/way maintenance operations are write-only */
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
2167c8c5e6aSMarc Zyngier 
/* MTE tag variants of the DC set/way operations: UNDEF without MTE. */
static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}
229d282fa3cSMarc Zyngier 
/*
 * Compute which bits of the 64bit register an access may touch, based
 * on the descriptor's AArch32 mapping: the low half (AA32_LO), the
 * high half (AA32_HI), or the full 64 bits for anything else.
 */
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	if (r->aarch32_map == AA32_LO) {
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
	} else if (r->aarch32_map == AA32_HI) {
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
	} else {
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
	}
}
247b1ea1d76SMarc Zyngier 
2487c8c5e6aSMarc Zyngier /*
2494d44923bSMarc Zyngier  * Generic accessor for VM registers. Only called as long as HCR_TVM
2503c1e7165SMarc Zyngier  * is set. If the guest enables the MMU, we stop trapping the VM
2513c1e7165SMarc Zyngier  * sys_regs and leave it in complete control of the caches.
2524d44923bSMarc Zyngier  */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/* Sample cache state before the write so we can detect a toggle */
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		/* Partial (AArch32 half) access: preserve the other bits */
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		/* Full-width access: no need to read the old value */
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
2774d44923bSMarc Zyngier 
access_actlr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)278af473829SJames Morse static bool access_actlr(struct kvm_vcpu *vcpu,
279af473829SJames Morse 			 struct sys_reg_params *p,
280af473829SJames Morse 			 const struct sys_reg_desc *r)
281af473829SJames Morse {
282b1ea1d76SMarc Zyngier 	u64 mask, shift;
283b1ea1d76SMarc Zyngier 
284af473829SJames Morse 	if (p->is_write)
285af473829SJames Morse 		return ignore_write(vcpu, p);
286af473829SJames Morse 
287b1ea1d76SMarc Zyngier 	get_access_mask(r, &mask, &shift);
288b1ea1d76SMarc Zyngier 	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
289af473829SJames Morse 
290af473829SJames Morse 	return true;
291af473829SJames Morse }
292af473829SJames Morse 
2936d52f35aSAndre Przywara /*
2946d52f35aSAndre Przywara  * Trap handler for the GICv3 SGI generation system register.
2956d52f35aSAndre Przywara  * Forward the request to the VGIC emulation.
2966d52f35aSAndre Przywara  * The cp15_64 code makes sure this automatically works
2976d52f35aSAndre Przywara  * for both AArch64 and AArch32 accesses.
2986d52f35aSAndre Przywara  */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	/* SGI registers only exist with an emulated GICv3 */
	if (!kvm_has_gicv3(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* The SGI generation registers are write-only */
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	/* Hand the SGI off to the VGIC emulation with the chosen group */
	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
3486d52f35aSAndre Przywara 
access_gic_sre(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)349b34f2bcbSMarc Zyngier static bool access_gic_sre(struct kvm_vcpu *vcpu,
350b34f2bcbSMarc Zyngier 			   struct sys_reg_params *p,
351b34f2bcbSMarc Zyngier 			   const struct sys_reg_desc *r)
352b34f2bcbSMarc Zyngier {
353b34f2bcbSMarc Zyngier 	if (p->is_write)
354b34f2bcbSMarc Zyngier 		return ignore_write(vcpu, p);
355b34f2bcbSMarc Zyngier 
356b34f2bcbSMarc Zyngier 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
357b34f2bcbSMarc Zyngier 	return true;
358b34f2bcbSMarc Zyngier }
359b34f2bcbSMarc Zyngier 
trap_raz_wi(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3607609c125SMarc Zyngier static bool trap_raz_wi(struct kvm_vcpu *vcpu,
3613fec037dSPavel Fedin 			struct sys_reg_params *p,
3627c8c5e6aSMarc Zyngier 			const struct sys_reg_desc *r)
3637c8c5e6aSMarc Zyngier {
3647c8c5e6aSMarc Zyngier 	if (p->is_write)
3657c8c5e6aSMarc Zyngier 		return ignore_write(vcpu, p);
3667c8c5e6aSMarc Zyngier 	else
3677c8c5e6aSMarc Zyngier 		return read_zero(vcpu, p);
3687c8c5e6aSMarc Zyngier }
3697c8c5e6aSMarc Zyngier 
/* Inject an UNDEF for any access to a register that must not exist. */
static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;	/* access not emulated; UNDEF has been injected */
}
3776ff9dc23SJintack Lim 
37822925521SMarc Zyngier /*
37922925521SMarc Zyngier  * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
38022925521SMarc Zyngier  * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
38122925521SMarc Zyngier  * system, these registers should UNDEF. LORID_EL1 being a RO register, we
38222925521SMarc Zyngier  * treat it separately.
38322925521SMarc Zyngier  */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	/* UNDEF if the host advertises no LORegion support at all */
	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* LORID_EL1 is the one read-only register of the group */
	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	/* Everything else is the trivial RES0 implementation: RAZ/WI */
	return trap_raz_wi(vcpu, p, r);
}
40122925521SMarc Zyngier 
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	/* OSLAR_EL1 is write-only */
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}
419f24adc65SOliver Upton 
trap_oslsr_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)4200c557ed4SMarc Zyngier static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
4213fec037dSPavel Fedin 			   struct sys_reg_params *p,
4220c557ed4SMarc Zyngier 			   const struct sys_reg_desc *r)
4230c557ed4SMarc Zyngier {
424d42e2671SOliver Upton 	if (p->is_write)
425e2ffceaaSOliver Upton 		return write_to_read_only(vcpu, p, r);
426d42e2671SOliver Upton 
427d42e2671SOliver Upton 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
4280c557ed4SMarc Zyngier 	return true;
4290c557ed4SMarc Zyngier }
430d42e2671SOliver Upton 
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is OSLK. Reject the write if userspace
	 * attempts to change any other bit relative to the default.
	 */
	u64 changed = val ^ rd->val;

	if (changed & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}
4440c557ed4SMarc Zyngier 
trap_dbgauthstatus_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)4450c557ed4SMarc Zyngier static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
4463fec037dSPavel Fedin 				   struct sys_reg_params *p,
4470c557ed4SMarc Zyngier 				   const struct sys_reg_desc *r)
4480c557ed4SMarc Zyngier {
4490c557ed4SMarc Zyngier 	if (p->is_write) {
4500c557ed4SMarc Zyngier 		return ignore_write(vcpu, p);
4510c557ed4SMarc Zyngier 	} else {
4521f3d8699SMark Rutland 		p->regval = read_sysreg(dbgauthstatus_el1);
4530c557ed4SMarc Zyngier 		return true;
4540c557ed4SMarc Zyngier 	}
4550c557ed4SMarc Zyngier }
4560c557ed4SMarc Zyngier 
4570c557ed4SMarc Zyngier /*
4580c557ed4SMarc Zyngier  * We want to avoid world-switching all the DBG registers all the
4590c557ed4SMarc Zyngier  * time:
4600c557ed4SMarc Zyngier  *
4610c557ed4SMarc Zyngier  * - If we've touched any debug register, it is likely that we're
4620c557ed4SMarc Zyngier  *   going to touch more of them. It then makes sense to disable the
4630c557ed4SMarc Zyngier  *   traps and start doing the save/restore dance
4640c557ed4SMarc Zyngier  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
4650c557ed4SMarc Zyngier  *   then mandatory to save/restore the registers, as the guest
4660c557ed4SMarc Zyngier  *   depends on them.
4670c557ed4SMarc Zyngier  *
4680c557ed4SMarc Zyngier  * For this, we use a DIRTY bit, indicating the guest has modified the
4690c557ed4SMarc Zyngier  * debug registers, used as follow:
4700c557ed4SMarc Zyngier  *
4710c557ed4SMarc Zyngier  * On guest entry:
4720c557ed4SMarc Zyngier  * - If the dirty bit is set (because we're coming back from trapping),
4730c557ed4SMarc Zyngier  *   disable the traps, save host registers, restore guest registers.
4740c557ed4SMarc Zyngier  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
4750c557ed4SMarc Zyngier  *   set the dirty bit, disable the traps, save host registers,
4760c557ed4SMarc Zyngier  *   restore guest registers.
4770c557ed4SMarc Zyngier  * - Otherwise, enable the traps
4780c557ed4SMarc Zyngier  *
4790c557ed4SMarc Zyngier  * On guest exit:
4800c557ed4SMarc Zyngier  * - If the dirty bit is set, save guest registers, restore host
4810c557ed4SMarc Zyngier  *   registers and clear the dirty bit. This ensure that the host can
4820c557ed4SMarc Zyngier  *   now use the debug registers.
4830c557ed4SMarc Zyngier  */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	/* Writes mark the debug state dirty so hyp switches the registers */
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
4960c557ed4SMarc Zyngier 
49784e690bfSAlex Bennée /*
49884e690bfSAlex Bennée  * reg_to_dbg/dbg_to_reg
49984e690bfSAlex Bennée  *
50084e690bfSAlex Bennée  * A 32 bit write to a debug register leave top bits alone
50184e690bfSAlex Bennée  * A 32 bit read from a debug register only returns the bottom bits
50284e690bfSAlex Bennée  *
503b1da4908SMarc Zyngier  * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
504b1da4908SMarc Zyngier  * switches between host and guest values in future.
50584e690bfSAlex Bennée  */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	/* Merge the written bits into the shadow, preserving the rest */
	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	/* Any write means hyp must start switching the debug registers */
	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}
52284e690bfSAlex Bennée 
/* Extract the bits of the shadow debug register visible to this access. */
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 access_mask, access_shift;

	get_access_mask(rd, &access_mask, &access_shift);
	p->regval = (*dbg_reg & access_mask) >> access_shift;
}
53384e690bfSAlex Bennée 
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	/* rd->CRm indexes the breakpoint value register being accessed */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}
54984e690bfSAlex Bennée 
/* Userspace write of a breakpoint value register. */
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	u64 *bvr = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	*bvr = val;
	return 0;
}
55684e690bfSAlex Bennée 
/* Userspace read of a breakpoint value register. */
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	u64 *bvr = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	*val = *bvr;
	return 0;
}
56384e690bfSAlex Bennée 
reset_bvr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)564d86cde6eSJing Zhang static u64 reset_bvr(struct kvm_vcpu *vcpu,
56584e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
56684e690bfSAlex Bennée {
567cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
568d86cde6eSJing Zhang 	return rd->val;
56984e690bfSAlex Bennée }
57084e690bfSAlex Bennée 
static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	/* rd->CRm indexes the breakpoint control register being accessed */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}
58684e690bfSAlex Bennée 
/* Userspace write of a breakpoint control register. */
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	u64 *bcr = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	*bcr = val;
	return 0;
}
59384e690bfSAlex Bennée 
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	/* Userspace read of the shadow DBGBCR<n>_EL1 (n == rd->CRm). */
	u64 *bcr = vcpu->arch.vcpu_debug_state.dbg_bcr;

	*val = bcr[rd->CRm];
	return 0;
}
60084e690bfSAlex Bennée 
reset_bcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)601d86cde6eSJing Zhang static u64 reset_bcr(struct kvm_vcpu *vcpu,
60284e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
60384e690bfSAlex Bennée {
604cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
605d86cde6eSJing Zhang 	return rd->val;
60684e690bfSAlex Bennée }
60784e690bfSAlex Bennée 
trap_wvr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd)608281243cbSMarc Zyngier static bool trap_wvr(struct kvm_vcpu *vcpu,
6093fec037dSPavel Fedin 		     struct sys_reg_params *p,
61084e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
61184e690bfSAlex Bennée {
612cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
61384e690bfSAlex Bennée 
61484e690bfSAlex Bennée 	if (p->is_write)
6151da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
61684e690bfSAlex Bennée 	else
6171da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
61884e690bfSAlex Bennée 
619cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write,
620cb853dedSMarc Zyngier 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
621eef8c85aSAlex Bennée 
62284e690bfSAlex Bennée 	return true;
62384e690bfSAlex Bennée }
62484e690bfSAlex Bennée 
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	/* Userspace write: stash the value in the shadow DBGWVR<n>_EL1. */
	u64 *wvr = vcpu->arch.vcpu_debug_state.dbg_wvr;

	wvr[rd->CRm] = val;
	return 0;
}
63184e690bfSAlex Bennée 
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	/* Userspace read of the shadow DBGWVR<n>_EL1 (n == rd->CRm). */
	u64 *wvr = vcpu->arch.vcpu_debug_state.dbg_wvr;

	*val = wvr[rd->CRm];
	return 0;
}
63884e690bfSAlex Bennée 
reset_wvr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)639d86cde6eSJing Zhang static u64 reset_wvr(struct kvm_vcpu *vcpu,
64084e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
64184e690bfSAlex Bennée {
642cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
643d86cde6eSJing Zhang 	return rd->val;
64484e690bfSAlex Bennée }
64584e690bfSAlex Bennée 
trap_wcr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd)646281243cbSMarc Zyngier static bool trap_wcr(struct kvm_vcpu *vcpu,
6473fec037dSPavel Fedin 		     struct sys_reg_params *p,
64884e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
64984e690bfSAlex Bennée {
650cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
65184e690bfSAlex Bennée 
65284e690bfSAlex Bennée 	if (p->is_write)
6531da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
65484e690bfSAlex Bennée 	else
6551da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
65684e690bfSAlex Bennée 
657cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
658eef8c85aSAlex Bennée 
65984e690bfSAlex Bennée 	return true;
66084e690bfSAlex Bennée }
66184e690bfSAlex Bennée 
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	/* Userspace write: stash the value in the shadow DBGWCR<n>_EL1. */
	u64 *wcr = vcpu->arch.vcpu_debug_state.dbg_wcr;

	wcr[rd->CRm] = val;
	return 0;
}
66884e690bfSAlex Bennée 
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	/* Userspace read of the shadow DBGWCR<n>_EL1 (n == rd->CRm). */
	u64 *wcr = vcpu->arch.vcpu_debug_state.dbg_wcr;

	*val = wcr[rd->CRm];
	return 0;
}
67584e690bfSAlex Bennée 
reset_wcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)676d86cde6eSJing Zhang static u64 reset_wcr(struct kvm_vcpu *vcpu,
67784e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
67884e690bfSAlex Bennée {
679cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
680d86cde6eSJing Zhang 	return rd->val;
68184e690bfSAlex Bennée }
68284e690bfSAlex Bennée 
/* Reset the guest's AMAIR_EL1 to the value the host currently runs with. */
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}
6897c8c5e6aSMarc Zyngier 
/* Reset the guest's ACTLR_EL1 to the value the host currently runs with. */
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}
696af473829SJames Morse 
/* Synthesize a per-vcpu MPIDR_EL1 from the vcpu_id and store it. */
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	/* Bit 31 of MPIDR_EL1 is RES1 */
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}
7167c8c5e6aSMarc Zyngier 
pmu_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)71711663111SMarc Zyngier static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
71811663111SMarc Zyngier 				   const struct sys_reg_desc *r)
71911663111SMarc Zyngier {
72011663111SMarc Zyngier 	if (kvm_vcpu_has_pmu(vcpu))
72111663111SMarc Zyngier 		return 0;
72211663111SMarc Zyngier 
72311663111SMarc Zyngier 	return REG_HIDDEN;
72411663111SMarc Zyngier }
72511663111SMarc Zyngier 
/*
 * Generic PMU register reset: pick an UNKNOWN value, then clamp it to
 * one bit per implemented counter (event counters 0..N-1 plus the
 * cycle counter).
 */
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

	/* No PMU available, any PMU reg may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	/* Number of event counters implemented by the host PMU (PMCR_EL0.N) */
	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
	n &= ARMV8_PMU_PMCR_N_MASK;
	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}
7440ab410a9SMarc Zyngier 
reset_pmevcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)745d86cde6eSJing Zhang static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7460ab410a9SMarc Zyngier {
7470ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7480ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
749d86cde6eSJing Zhang 
750d86cde6eSJing Zhang 	return __vcpu_sys_reg(vcpu, r->reg);
7510ab410a9SMarc Zyngier }
7520ab410a9SMarc Zyngier 
reset_pmevtyper(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)753d86cde6eSJing Zhang static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7540ab410a9SMarc Zyngier {
7550ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7560ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
757d86cde6eSJing Zhang 
758d86cde6eSJing Zhang 	return __vcpu_sys_reg(vcpu, r->reg);
7590ab410a9SMarc Zyngier }
7600ab410a9SMarc Zyngier 
reset_pmselr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)761d86cde6eSJing Zhang static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7620ab410a9SMarc Zyngier {
7630ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7640ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
765d86cde6eSJing Zhang 
766d86cde6eSJing Zhang 	return __vcpu_sys_reg(vcpu, r->reg);
7670ab410a9SMarc Zyngier }
7680ab410a9SMarc Zyngier 
/*
 * Reset PMCR_EL0: preserve the host's PMCR.N (counter count), zero the
 * rest, and force PMCR.LC when the guest cannot run 32bit EL0.
 */
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return 0;

	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
786ab946834SShannon Zhao 
/*
 * Check whether a PMU access must be rejected: allowed when the vcpu
 * is in a privileged mode, or when any of @flags is set in the guest's
 * PMUSERENR_EL0. Injects an UNDEF into the guest as a side effect when
 * the access is denied. Returns true when the access is *disabled*.
 */
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}
7976c007036SMarc Zyngier 
/* EL0 PMU access requires PMUSERENR_EL0.EN (or a privileged vcpu mode). */
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}
802d692b8adSShannon Zhao 
/* EL0 software-increment (PMSWINC) writes need PMUSERENR.SW or .EN. */
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}
807d692b8adSShannon Zhao 
/* EL0 cycle counter access needs PMUSERENR.CR or .EN. */
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}
812d692b8adSShannon Zhao 
/* EL0 event counter access needs PMUSERENR.ER or .EN. */
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
817d692b8adSShannon Zhao 
/*
 * Trap handler for PMCR_EL0. Writes update only the writable bits of
 * the shadow register and are forwarded to the PMU emulation; reads
 * return the shadow value with P and C reading as zero.
 */
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		/* Force the long cycle counter when 32bit EL0 isn't supported */
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
846ab946834SShannon Zhao 
/*
 * Trap handler for PMSELR_EL0: writes are stored verbatim in the
 * shadow register, reads return only the SEL field.
 */
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
8623965c3ceSShannon Zhao 
/*
 * Trap handler for the read-only PMCEID{0,1} registers. Op2 bit 0
 * selects which of the two event-ID words is read; a per-descriptor
 * mask/shift from get_access_mask() is then applied on top.
 */
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	/* These registers are read-only; a write trap is a KVM bug. */
	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}
883a86b5505SShannon Zhao 
/*
 * Check that @idx names an implemented counter: either the cycle
 * counter, or an event counter below PMCR_EL0.N. Injects an UNDEF
 * into the guest and returns false otherwise.
 */
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
897051ff581SShannon Zhao 
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	/*
	 * Userspace read of a PMU counter. The encoding Op0=3 CRn=9
	 * CRm=13 Op2=0 is PMCCNTR_EL0 (the cycle counter); anything else
	 * reaching here is a PMEVCNTRn_EL0, with n packed into CRm/Op2.
	 */
	bool cycle_cnt = (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0);
	u64 idx = cycle_cnt ? ARMV8_PMU_CYCLE_IDX
			    : (((r->CRm & 3) << 3) | (r->Op2 & 7));

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}
9139228b261SReiji Watanabe 
/*
 * Trap handler for the PMU counter value registers: PMXEVCNTR_EL0
 * (indirect via PMSELR), PMCCNTR (EL0/AArch32 encodings) and
 * PMEVCNTRn_EL0. The relevant EL0-access check is performed before the
 * counter index is decoded, then the value is read from or written to
 * the PMU emulation.
 */
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
966051ff581SShannon Zhao 
/*
 * Trap handler for the event-type registers: PMXEVTYPER_EL0 (counter
 * selected via PMSELR), PMCCFILTR_EL0 and PMEVTYPERn_EL0. Writes are
 * forwarded to the PMU emulation followed by
 * kvm_vcpu_pmu_restore_guest(); reads come from the shadow register
 * masked to the valid event-type bits.
 */
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		/* No other encoding is routed here */
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
10029feb21acSShannon Zhao 
/*
 * Trap handler for PMCNTENSET_EL0/PMCNTENCLR_EL0 (Op2 bit 0 set means
 * the SET register). Both registers read back the currently enabled
 * counters; writes set or clear enable bits, limited to the counters
 * actually implemented.
 */
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}
103096b0eebcSShannon Zhao 
/*
 * Trap handler for PMINTENSET_EL1/PMINTENCLR_EL1 (Op2 bit 0 set means
 * the SET register). The flags==0 access check means only privileged
 * vcpu modes ever pass - these are EL1 registers.
 */
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}
10549db52c78SShannon Zhao 
/*
 * Trap handler for PMOVSSET_EL0/PMOVSCLR_EL0 (CRm bit 1 set means the
 * SET register). Both read back the overflow status; writes set or
 * clear overflow bits, limited to the implemented counters.
 */
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}
107676d883c4SShannon Zhao 
/*
 * Trap handler for the write-only PMSWINC_EL0: software-increment the
 * selected (implemented) counters. Reads are rejected via
 * read_from_write_only().
 */
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
10927a0adc70SShannon Zhao 
/*
 * Trap handler for PMUSERENR_EL0. Writes are only allowed from a
 * privileged vcpu mode (EL0 writes UNDEF); both directions are masked
 * to the architected PMUSERENR bits.
 */
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
1111d692b8adSShannon Zhao 
/*
 * Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go,
 * wiring each to its trap/reset/get/set handlers defined above.
 */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
11220c557ed4SMarc Zyngier 
/* Shared descriptor boilerplate for PMU system registers */
#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
11389feb21acSShannon Zhao 
/* Catch-all handler: inject an UNDEF into the guest for this access. */
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}
11464fcdf106SIonela Voinescu 
/* Macro to expand the AMU counter and type registers (all UNDEF here) */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1152384b40caSMark Rutland 
ptrauth_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1153384b40caSMark Rutland static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1154384b40caSMark Rutland 			const struct sys_reg_desc *rd)
1155384b40caSMark Rutland {
115601fe5aceSAndrew Jones 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1157384b40caSMark Rutland }
1158384b40caSMark Rutland 
/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

/* Expand to the LO/HI halves of one pointer authentication key */
#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
1172384b40caSMark Rutland 
/*
 * Emulate trapped accesses to the EL0 physical timer registers (and
 * their AArch32 counterparts) by forwarding them to the arch timer
 * code. An unexpected encoding results in an UNDEF being injected.
 */
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	/* Map the trapped encoding onto a (timer, timer register) pair */
	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
1216c9a3c58fSJintack Lim 
/*
 * Compute the "safe" value for a feature field using KVM's policy:
 * some fields (the PMU version ones) use a different safe-value type
 * in KVM than in the host cpufeature code, so override the type on a
 * local copy before delegating to arm64_ftr_safe_value().
 */
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
12362e8bf0cbSJing Zhang 
/**
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in the limit, it is always
 * considered the safe value regardless of its type. For register fields that
 * are not writable, only the value in the limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		/* Only writable fields are validated against the limit */
		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}
1301d82e0dfdSMarc Zyngier 
/*
 * Translate an ID_AA64DFR0_EL1.PMUVer value into the corresponding
 * AArch32 ID_DFR0_EL1.PerfMon encoding.
 */
static u8 pmuver_to_perfmon(u8 pmuver)
{
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP)
		return ID_DFR0_EL1_PerfMon_PMUv3;

	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return ID_DFR0_EL1_PerfMon_IMPDEF;

	/* Anything ARMv8.1+ and NI have the same value. For now. */
	return pmuver;
}
13143d0dba57SMarc Zyngier 
/*
 * Read a sanitised cpufeature ID register by sys_reg_desc, applying
 * KVM-specific overrides on top of the host-sanitised value: features
 * that KVM does not (or cannot) expose to this guest are masked out.
 */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	/* RAZ registers trivially read as zero */
	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		/* MTE is only advertised if the VM has it enabled */
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		/* SME and MPAM_frac are never exposed to guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		/* Hide all PtrAuth algorithm fields without PtrAuth */
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		/* WFxT is only advertised when the host has the capability */
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		/* CCIDX is not exposed (CCSIDR is emulated in 32bit layout) */
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}
136093390c0aSDave Martin 
/* Non-const wrapper around __kvm_read_sanitised_id_reg(), used as a reset() callback */
static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}
1366d86cde6eSJing Zhang 
/* Return the VM-wide stored value of an ID register */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}
1371d86cde6eSJing Zhang 
137247334146SJing Zhang /*
137347334146SJing Zhang  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
137447334146SJing Zhang  * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
137547334146SJing Zhang  */
is_id_reg(u32 id)137647334146SJing Zhang static inline bool is_id_reg(u32 id)
137747334146SJing Zhang {
137847334146SJing Zhang 	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
137947334146SJing Zhang 		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
138047334146SJing Zhang 		sys_reg_CRm(id) < 8);
138147334146SJing Zhang }
138247334146SJing Zhang 
id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1383912dee57SAndrew Jones static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1384912dee57SAndrew Jones 				  const struct sys_reg_desc *r)
1385912dee57SAndrew Jones {
13867ba8b438SAlexandru Elisei 	u32 id = reg_to_encoding(r);
1387c512298eSAndrew Jones 
1388c512298eSAndrew Jones 	switch (id) {
1389c512298eSAndrew Jones 	case SYS_ID_AA64ZFR0_EL1:
1390c512298eSAndrew Jones 		if (!vcpu_has_sve(vcpu))
1391c512298eSAndrew Jones 			return REG_RAZ;
1392c512298eSAndrew Jones 		break;
1393c512298eSAndrew Jones 	}
1394c512298eSAndrew Jones 
1395912dee57SAndrew Jones 	return 0;
1396912dee57SAndrew Jones }
1397912dee57SAndrew Jones 
static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	/* Otherwise fall back to the generic ID register visibility rules */
	return id_visibility(vcpu, r);
}
1411d5efec7eSOliver Upton 
/* Visibility callback for registers that always read as zero */
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}
141734b4d203SOliver Upton 
/* cpufeature ID register access trap handlers */

/*
 * Guest trap handler for ID registers: reads return the stored value,
 * writes UNDEF (ID registers are read-only from the guest's viewpoint).
 */
static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	/* Nested virt may need to adjust what the L1 guest sees */
	if (vcpu_has_nv(vcpu))
		access_nested_id_reg(vcpu, p, r);

	return true;
}
143393390c0aSDave Martin 
143473433762SDave Martin /* Visibility overrides for SVE-specific control registers */
sve_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)143573433762SDave Martin static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
143673433762SDave Martin 				   const struct sys_reg_desc *rd)
143773433762SDave Martin {
143873433762SDave Martin 	if (vcpu_has_sve(vcpu))
143973433762SDave Martin 		return 0;
144073433762SDave Martin 
144101fe5aceSAndrew Jones 	return REG_HIDDEN;
144273433762SDave Martin }
144373433762SDave Martin 
/* Compute the KVM-sanitised limit (reset value) for ID_AA64PFR0_EL1 */
static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* Hide SVE unless the vcpu has it configured */
	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	/* Advertise a GICv3 sysreg interface when the VM uses one */
	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	/* AMU is never exposed to guests */
	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	/*
	 * MPAM is disabled by default as KVM also needs a set of PARTID to
	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
	 * older kernels let the guest see the ID bit.
	 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	return val;
}
148523711a5eSMarc Zyngier 
/* Compute the KVM-sanitised limit (reset value) for ID_AA64DFR0_EL1 */
static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	/* Limit debug to ARMv8.0 */
	val &= ~ID_AA64DFR0_EL1_DebugVer_MASK;
	val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}
150823711a5eSMarc Zyngier 
/*
 * Userspace write handler for ID_AA64DFR0_EL1: sanitise the legacy
 * IMP_DEF PMUVer value before applying the generic ID register checks.
 */
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	return set_id_reg(vcpu, rd, val);
}
153460e651ffSMarc Zyngier 
/* Compute the KVM-sanitised limit (reset value) for the AArch32 ID_DFR0_EL1 */
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	/* Only advertise PerfMon when the vCPU was configured with a PMU */
	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	return val;
}
154760e651ffSMarc Zyngier 
/*
 * Userspace write handler for ID_DFR0_EL1: sanitise the legacy IMPDEF
 * PerfMon value and reject non-PMUv3 PerfMon implementations before
 * applying the generic ID register checks.
 */
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);

	/* Accept the legacy IMPDEF value, but treat it as "no PMU" */
	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
1570d82e0dfdSMarc Zyngier 
/*
 * Userspace write handler for ID_AA64PFR0_EL1: tolerate legacy MPAM
 * values before applying the generic ID register checks.
 */
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;

	/*
	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
	 * always returns an UNDEF for these registers. The guest must see 0
	 * for this field.
	 *
	 * But KVM must also accept values from user-space that were provided
	 * by KVM. On CPUs that support MPAM, permit user-space to write
	 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
	 */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	return set_id_reg(vcpu, rd, user_val);
}
1593*4e76efdaSJames Morse 
/*
 * Userspace write handler for ID_AA64PFR1_EL1: tolerate legacy
 * MPAM_frac values before applying the generic ID register checks.
 */
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;

	/* See set_id_aa64pfr0_el1 for comment about MPAM */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;

	return set_id_reg(vcpu, rd, user_val);
}
1606*4e76efdaSJames Morse 
/*
 * cpufeature ID register user accessors
 *
 * ID registers are writable from userspace until the VM has started,
 * after which they become invariant: set_id_reg() validates any write
 * against the register's KVM-sanitised limit before storing it.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}
16326db7af0dSOliver Upton 
/*
 * Generic userspace write handler for ID registers: validate the value
 * against the register's KVM-sanitised limit and store it per-VM.
 * Writes are rejected with -EBUSY once the VM has started.
 */
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}
167293390c0aSDave Martin 
/* Userspace read handler for RAZ registers: always return zero */
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}
16795a430976SAlexandru Elisei 
/* Userspace write handler for WI (write-ignore) registers */
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}
16857a3ba309SMarc Zyngier 
/* Guest trap handler for CTR_EL0: read-only, returns the host-sanitised value */
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
	return true;
}
1695f7f2b15cSArd Biesheuvel 
/* Guest trap handler for CLIDR_EL1: read-only, returns the vcpu's stored value */
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
1705f7f2b15cSArd Biesheuvel 
/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 *
 * The fabricated topology is derived from the host's CTR_EL0 IDC/DIC
 * bits so that the guest's cache maintenance requirements match what
 * the host actually needs.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, let the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
}
17617af0c253SAkihiko Odaki 
/*
 * Userspace write handler for CLIDR_EL1: reject RES0 bits, and reject an
 * "IDC-shaped" value (no PoU cleaning required) when the host's CTR_EL0
 * does not advertise IDC.
 */
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	bool idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if (val & CLIDR_EL1_RES0)
		return -EINVAL;

	if (idc && !(ctr & CTR_EL0_IDC))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

	return 0;
}
17757af0c253SAkihiko Odaki 
access_csselr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1776f7f2b15cSArd Biesheuvel static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1777f7f2b15cSArd Biesheuvel 			  const struct sys_reg_desc *r)
1778f7f2b15cSArd Biesheuvel {
17797c582bf4SJames Morse 	int reg = r->reg;
17807c582bf4SJames Morse 
1781f7f2b15cSArd Biesheuvel 	if (p->is_write)
17827c582bf4SJames Morse 		vcpu_write_sys_reg(vcpu, p->regval, reg);
1783f7f2b15cSArd Biesheuvel 	else
17847c582bf4SJames Morse 		p->regval = vcpu_read_sys_reg(vcpu, reg);
1785f7f2b15cSArd Biesheuvel 	return true;
1786f7f2b15cSArd Biesheuvel }
1787f7f2b15cSArd Biesheuvel 
access_ccsidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1788f7f2b15cSArd Biesheuvel static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1789f7f2b15cSArd Biesheuvel 			  const struct sys_reg_desc *r)
1790f7f2b15cSArd Biesheuvel {
1791f7f2b15cSArd Biesheuvel 	u32 csselr;
1792f7f2b15cSArd Biesheuvel 
1793f7f2b15cSArd Biesheuvel 	if (p->is_write)
1794f7f2b15cSArd Biesheuvel 		return write_to_read_only(vcpu, p, r);
1795f7f2b15cSArd Biesheuvel 
1796f7f2b15cSArd Biesheuvel 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
17977af0c253SAkihiko Odaki 	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
17987af0c253SAkihiko Odaki 	if (csselr < CSSELR_MAX)
17997af0c253SAkihiko Odaki 		p->regval = get_ccsidr(vcpu, csselr);
1800793acf87SArd Biesheuvel 
1801f7f2b15cSArd Biesheuvel 	return true;
1802f7f2b15cSArd Biesheuvel }
1803f7f2b15cSArd Biesheuvel 
mte_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1804e1f358b5SSteven Price static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
1805e1f358b5SSteven Price 				   const struct sys_reg_desc *rd)
1806e1f358b5SSteven Price {
1807673638f4SSteven Price 	if (kvm_has_mte(vcpu->kvm))
1808673638f4SSteven Price 		return 0;
1809673638f4SSteven Price 
1810e1f358b5SSteven Price 	return REG_HIDDEN;
1811e1f358b5SSteven Price }
1812e1f358b5SSteven Price 
/*
 * sys_reg_desc initialiser for an MTE register: UNDEFs on guest access,
 * resets to an UNKNOWN value, and is hidden unless the VM has MTE.
 */
#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}
1820e1f358b5SSteven Price 
el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)18216ff9dc23SJintack Lim static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
18226ff9dc23SJintack Lim 				   const struct sys_reg_desc *rd)
18236ff9dc23SJintack Lim {
18246ff9dc23SJintack Lim 	if (vcpu_has_nv(vcpu))
18256ff9dc23SJintack Lim 		return 0;
18266ff9dc23SJintack Lim 
18276ff9dc23SJintack Lim 	return REG_HIDDEN;
18286ff9dc23SJintack Lim }
18296ff9dc23SJintack Lim 
/*
 * sys_reg_desc initialiser for an EL2 register: custom accessor and reset,
 * hidden unless the vcpu has nested virtualisation enabled.
 */
#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}
18386ff9dc23SJintack Lim 
1839280b748eSJintack Lim /*
1840280b748eSJintack Lim  * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
1841280b748eSJintack Lim  * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
1842280b748eSJintack Lim  * handling traps. Given that, they are always hidden from userspace.
1843280b748eSJintack Lim  */
elx2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1844280b748eSJintack Lim static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
1845280b748eSJintack Lim 				    const struct sys_reg_desc *rd)
1846280b748eSJintack Lim {
1847280b748eSJintack Lim 	return REG_HIDDEN_USER;
1848280b748eSJintack Lim }
1849280b748eSJintack Lim 
/*
 * sys_reg_desc initialiser for an _EL12 alias: traps on the _EL12 encoding
 * are backed by the corresponding _EL1 register, and the entry is hidden
 * from userspace (see elx2_visibility()).
 */
#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = elx2_visibility,		\
}
1858280b748eSJintack Lim 
/*
 * Since the reset() callback and the val field are not otherwise used for
 * idregs, they are repurposed:
 *  - reset() returns the KVM-sanitised register value (identical to the
 *    host kernel's sanitised value when KVM applies no extra sanitisation);
 *  - val is a mask of the fields userspace may write (1 = writable). This
 *    mask may become unnecessary once every ID register is fully writable
 *    from userspace.
 */

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
188093390c0aSDave Martin 
/*
 * sys_reg_desc initialiser for known AArch32 cpufeature ID registers;
 * identical to ID_SANITISED() except for the AArch32-specific visibility
 * callback.
 */
#define AA32_ID_SANITISED(name) {		\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
1891d5efec7eSOliver Upton 
/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8). Presented RAZ to the guest and userspace.
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}
190693390c0aSDave Martin 
/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
192193390c0aSDave Martin 
access_sp_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)19226ff9dc23SJintack Lim static bool access_sp_el1(struct kvm_vcpu *vcpu,
19236ff9dc23SJintack Lim 			  struct sys_reg_params *p,
19246ff9dc23SJintack Lim 			  const struct sys_reg_desc *r)
19256ff9dc23SJintack Lim {
19266ff9dc23SJintack Lim 	if (p->is_write)
19276ff9dc23SJintack Lim 		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
19286ff9dc23SJintack Lim 	else
19296ff9dc23SJintack Lim 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
19306ff9dc23SJintack Lim 
19316ff9dc23SJintack Lim 	return true;
19326ff9dc23SJintack Lim }
19336ff9dc23SJintack Lim 
access_elr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)19349da117eeSJintack Lim static bool access_elr(struct kvm_vcpu *vcpu,
19359da117eeSJintack Lim 		       struct sys_reg_params *p,
19369da117eeSJintack Lim 		       const struct sys_reg_desc *r)
19379da117eeSJintack Lim {
19389da117eeSJintack Lim 	if (p->is_write)
19399da117eeSJintack Lim 		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
19409da117eeSJintack Lim 	else
19419da117eeSJintack Lim 		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
19429da117eeSJintack Lim 
19439da117eeSJintack Lim 	return true;
19449da117eeSJintack Lim }
19459da117eeSJintack Lim 
access_spsr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)19469da117eeSJintack Lim static bool access_spsr(struct kvm_vcpu *vcpu,
19479da117eeSJintack Lim 			struct sys_reg_params *p,
19489da117eeSJintack Lim 			const struct sys_reg_desc *r)
19499da117eeSJintack Lim {
19509da117eeSJintack Lim 	if (p->is_write)
19519da117eeSJintack Lim 		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
19529da117eeSJintack Lim 	else
19539da117eeSJintack Lim 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
19549da117eeSJintack Lim 
19559da117eeSJintack Lim 	return true;
19569da117eeSJintack Lim }
19579da117eeSJintack Lim 
19587c8c5e6aSMarc Zyngier /*
19597c8c5e6aSMarc Zyngier  * Architected system registers.
19607c8c5e6aSMarc Zyngier  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
19617609c125SMarc Zyngier  *
19620c557ed4SMarc Zyngier  * Debug handling: We do trap most, if not all debug related system
19630c557ed4SMarc Zyngier  * registers. The implementation is good enough to ensure that a guest
19640c557ed4SMarc Zyngier  * can use these with minimal performance degradation. The drawback is
19657dabf02fSOliver Upton  * that we don't implement any of the external debug architecture.
19667dabf02fSOliver Upton  * This should be revisited if we ever encounter a more demanding
19677dabf02fSOliver Upton  * guest...
19687c8c5e6aSMarc Zyngier  */
19697c8c5e6aSMarc Zyngier static const struct sys_reg_desc sys_reg_descs[] = {
19707606e078SMark Rutland 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
1971d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
1972d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
19737606e078SMark Rutland 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
1974d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
1975d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
19767606e078SMark Rutland 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
1977d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
1978d282fa3cSMarc Zyngier 	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
19797c8c5e6aSMarc Zyngier 
19800c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(0),
19810c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(1),
1982ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1983ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
19840c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(2),
19850c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(3),
19860c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(4),
19870c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(5),
19880c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(6),
19890c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(7),
19900c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(8),
19910c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(9),
19920c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(10),
19930c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(11),
19940c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(12),
19950c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(13),
19960c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(14),
19970c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(15),
19980c557ed4SMarc Zyngier 
1999ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
2000f24adc65SOliver Upton 	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
2001d42e2671SOliver Upton 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
2002187de7c2SMark Brown 		OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
2003ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
2004ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
2005ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
2006ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
2007ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
20080c557ed4SMarc Zyngier 
2009ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
2010ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
2011ee1b64e6SMark Rutland 	// DBGDTR[TR]X_EL0 share the same encoding
2012ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
20130c557ed4SMarc Zyngier 
2014ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
201562a89c44SMarc Zyngier 
2016851050a5SMark Rutland 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
201793390c0aSDave Martin 
201893390c0aSDave Martin 	/*
201993390c0aSDave Martin 	 * ID regs: all ID_SANITISED() entries here must have corresponding
202093390c0aSDave Martin 	 * entries in arm64_ftr_regs[].
202193390c0aSDave Martin 	 */
202293390c0aSDave Martin 
202393390c0aSDave Martin 	/* AArch64 mappings of the AArch32 ID registers */
202493390c0aSDave Martin 	/* CRm=1 */
2025d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR0_EL1),
2026d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR1_EL1),
2027c118ceadSJing Zhang 	{ SYS_DESC(SYS_ID_DFR0_EL1),
2028c118ceadSJing Zhang 	  .access = access_id_reg,
2029c118ceadSJing Zhang 	  .get_user = get_id_reg,
2030c118ceadSJing Zhang 	  .set_user = set_id_dfr0_el1,
2031c118ceadSJing Zhang 	  .visibility = aa32_id_visibility,
2032c118ceadSJing Zhang 	  .reset = read_sanitised_id_dfr0_el1,
2033c118ceadSJing Zhang 	  .val = ID_DFR0_EL1_PerfMon_MASK, },
203493390c0aSDave Martin 	ID_HIDDEN(ID_AFR0_EL1),
2035d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR0_EL1),
2036d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR1_EL1),
2037d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR2_EL1),
2038d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR3_EL1),
203993390c0aSDave Martin 
204093390c0aSDave Martin 	/* CRm=2 */
2041d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR0_EL1),
2042d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR1_EL1),
2043d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR2_EL1),
2044d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR3_EL1),
2045d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR4_EL1),
2046d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR5_EL1),
2047d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR4_EL1),
2048d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR6_EL1),
204993390c0aSDave Martin 
205093390c0aSDave Martin 	/* CRm=3 */
2051d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR0_EL1),
2052d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR1_EL1),
2053d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR2_EL1),
205493390c0aSDave Martin 	ID_UNALLOCATED(3,3),
2055d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR2_EL1),
2056dd35ec07SAnshuman Khandual 	ID_HIDDEN(ID_DFR1_EL1),
2057d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR5_EL1),
205893390c0aSDave Martin 	ID_UNALLOCATED(3,7),
205993390c0aSDave Martin 
206093390c0aSDave Martin 	/* AArch64 ID registers */
206193390c0aSDave Martin 	/* CRm=4 */
2062c39f5974SJing Zhang 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
2063c39f5974SJing Zhang 	  .access = access_id_reg,
2064c39f5974SJing Zhang 	  .get_user = get_id_reg,
2065*4e76efdaSJames Morse 	  .set_user = set_id_aa64pfr0_el1,
2066c39f5974SJing Zhang 	  .reset = read_sanitised_id_aa64pfr0_el1,
2067c39f5974SJing Zhang 	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
2068*4e76efdaSJames Morse 	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
2069*4e76efdaSJames Morse 	  .access = access_id_reg,
2070*4e76efdaSJames Morse 	  .get_user = get_id_reg,
2071*4e76efdaSJames Morse 	  .set_user = set_id_aa64pfr1_el1,
2072*4e76efdaSJames Morse 	  .reset = kvm_read_sanitised_id_reg, },
207393390c0aSDave Martin 	ID_UNALLOCATED(4,2),
207493390c0aSDave Martin 	ID_UNALLOCATED(4,3),
2075c512298eSAndrew Jones 	ID_SANITISED(ID_AA64ZFR0_EL1),
207690807748SMark Brown 	ID_HIDDEN(ID_AA64SMFR0_EL1),
207793390c0aSDave Martin 	ID_UNALLOCATED(4,6),
207893390c0aSDave Martin 	ID_UNALLOCATED(4,7),
207993390c0aSDave Martin 
208093390c0aSDave Martin 	/* CRm=5 */
2081c118ceadSJing Zhang 	{ SYS_DESC(SYS_ID_AA64DFR0_EL1),
2082c118ceadSJing Zhang 	  .access = access_id_reg,
2083c118ceadSJing Zhang 	  .get_user = get_id_reg,
2084c118ceadSJing Zhang 	  .set_user = set_id_aa64dfr0_el1,
2085c118ceadSJing Zhang 	  .reset = read_sanitised_id_aa64dfr0_el1,
2086c118ceadSJing Zhang 	  .val = ID_AA64DFR0_EL1_PMUVer_MASK, },
208793390c0aSDave Martin 	ID_SANITISED(ID_AA64DFR1_EL1),
208893390c0aSDave Martin 	ID_UNALLOCATED(5,2),
208993390c0aSDave Martin 	ID_UNALLOCATED(5,3),
209093390c0aSDave Martin 	ID_HIDDEN(ID_AA64AFR0_EL1),
209193390c0aSDave Martin 	ID_HIDDEN(ID_AA64AFR1_EL1),
209293390c0aSDave Martin 	ID_UNALLOCATED(5,6),
209393390c0aSDave Martin 	ID_UNALLOCATED(5,7),
209493390c0aSDave Martin 
209593390c0aSDave Martin 	/* CRm=6 */
209693390c0aSDave Martin 	ID_SANITISED(ID_AA64ISAR0_EL1),
209793390c0aSDave Martin 	ID_SANITISED(ID_AA64ISAR1_EL1),
20989e45365fSJoey Gouly 	ID_SANITISED(ID_AA64ISAR2_EL1),
209993390c0aSDave Martin 	ID_UNALLOCATED(6,3),
210093390c0aSDave Martin 	ID_UNALLOCATED(6,4),
210193390c0aSDave Martin 	ID_UNALLOCATED(6,5),
210293390c0aSDave Martin 	ID_UNALLOCATED(6,6),
210393390c0aSDave Martin 	ID_UNALLOCATED(6,7),
210493390c0aSDave Martin 
210593390c0aSDave Martin 	/* CRm=7 */
210693390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR0_EL1),
210793390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR1_EL1),
210893390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR2_EL1),
21098ef67c67SJoey Gouly 	ID_SANITISED(ID_AA64MMFR3_EL1),
211093390c0aSDave Martin 	ID_UNALLOCATED(7,4),
211193390c0aSDave Martin 	ID_UNALLOCATED(7,5),
211293390c0aSDave Martin 	ID_UNALLOCATED(7,6),
211393390c0aSDave Martin 	ID_UNALLOCATED(7,7),
211493390c0aSDave Martin 
2115851050a5SMark Rutland 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
2116af473829SJames Morse 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
2117851050a5SMark Rutland 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
21182ac638fcSCatalin Marinas 
2119e1f358b5SSteven Price 	MTE_REG(RGSR_EL1),
2120e1f358b5SSteven Price 	MTE_REG(GCR_EL1),
21212ac638fcSCatalin Marinas 
212273433762SDave Martin 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
2123cc427cbbSSuzuki K Poulose 	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
212490807748SMark Brown 	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
212590807748SMark Brown 	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
2126851050a5SMark Rutland 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
2127851050a5SMark Rutland 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
2128851050a5SMark Rutland 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
2129fbff5606SJoey Gouly 	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
21307c8c5e6aSMarc Zyngier 
2131384b40caSMark Rutland 	PTRAUTH_KEY(APIA),
2132384b40caSMark Rutland 	PTRAUTH_KEY(APIB),
2133384b40caSMark Rutland 	PTRAUTH_KEY(APDA),
2134384b40caSMark Rutland 	PTRAUTH_KEY(APDB),
2135384b40caSMark Rutland 	PTRAUTH_KEY(APGA),
2136384b40caSMark Rutland 
21379da117eeSJintack Lim 	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
21389da117eeSJintack Lim 	{ SYS_DESC(SYS_ELR_EL1), access_elr},
21399da117eeSJintack Lim 
2140851050a5SMark Rutland 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
2141851050a5SMark Rutland 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
2142851050a5SMark Rutland 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
2143558daf69SDongjiu Geng 
2144558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
2145558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
2146558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
2147558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
2148558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
2149558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
2150558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
2151558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
2152558daf69SDongjiu Geng 
2153e1f358b5SSteven Price 	MTE_REG(TFSR_EL1),
2154e1f358b5SSteven Price 	MTE_REG(TFSRE0_EL1),
21552ac638fcSCatalin Marinas 
2156851050a5SMark Rutland 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
2157851050a5SMark Rutland 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
21587c8c5e6aSMarc Zyngier 
215913611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
216013611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
216113611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
216213611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
216313611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
216413611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
216513611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
216613611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
216713611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
216813611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
216913611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
217013611bc8SAlexandru Elisei 	/* PMBIDR_EL1 is not trapped */
217113611bc8SAlexandru Elisei 
21729d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMINTENSET_EL1),
217311663111SMarc Zyngier 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
21749d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMINTENCLR_EL1),
217511663111SMarc Zyngier 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
217646081078SMarc Zyngier 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
21777c8c5e6aSMarc Zyngier 
2178851050a5SMark Rutland 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
2179839d9035SJoey Gouly 	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
2180839d9035SJoey Gouly 	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
2181851050a5SMark Rutland 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
21827c8c5e6aSMarc Zyngier 
218322925521SMarc Zyngier 	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
218422925521SMarc Zyngier 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
218522925521SMarc Zyngier 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
218622925521SMarc Zyngier 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
218722925521SMarc Zyngier 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
2188cc33c4e2SMark Rutland 
21899da117eeSJintack Lim 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
2190c773ae2bSJames Morse 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
2191db7dedd0SChristoffer Dall 
21927b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
2193e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
21947b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
2195e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
21967b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
2197e804d208SMark Rutland 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
219803bd646dSMarc Zyngier 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
219903bd646dSMarc Zyngier 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
22007b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
2201e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
22027b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
2203e804d208SMark Rutland 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
2204db7dedd0SChristoffer Dall 
2205851050a5SMark Rutland 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
2206851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
22077c8c5e6aSMarc Zyngier 
2208484f8682SMarc Zyngier 	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
2209484f8682SMarc Zyngier 
2210ed4ffaf4SMarc Zyngier 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
2211ed4ffaf4SMarc Zyngier 
2212851050a5SMark Rutland 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
22137c8c5e6aSMarc Zyngier 
2214f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
22157af0c253SAkihiko Odaki 	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
22167af0c253SAkihiko Odaki 	  .set_user = set_clidr },
2217bf48040cSAkihiko Odaki 	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
221890807748SMark Brown 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
2219f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
2220f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
2221ec0067a6SMark Brown 	{ SYS_DESC(SYS_SVCR), undef_access },
22227c8c5e6aSMarc Zyngier 
22239d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
222411663111SMarc Zyngier 	  .reset = reset_pmcr, .reg = PMCR_EL0 },
22259d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCNTENSET_EL0),
222611663111SMarc Zyngier 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
22279d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCNTENCLR_EL0),
222811663111SMarc Zyngier 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
22299d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMOVSCLR_EL0),
223011663111SMarc Zyngier 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
22317a3ba309SMarc Zyngier 	/*
22327a3ba309SMarc Zyngier 	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
22337a3ba309SMarc Zyngier 	 * previously (and pointlessly) advertised in the past...
22347a3ba309SMarc Zyngier 	 */
22359d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMSWINC_EL0),
22365a430976SAlexandru Elisei 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
22377a3ba309SMarc Zyngier 	  .access = access_pmswinc, .reset = NULL },
22389d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMSELR_EL0),
22390ab410a9SMarc Zyngier 	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
22409d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCEID0_EL0),
224111663111SMarc Zyngier 	  .access = access_pmceid, .reset = NULL },
22429d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCEID1_EL0),
224311663111SMarc Zyngier 	  .access = access_pmceid, .reset = NULL },
22449d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCCNTR_EL0),
22459228b261SReiji Watanabe 	  .access = access_pmu_evcntr, .reset = reset_unknown,
22469228b261SReiji Watanabe 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
22479d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMXEVTYPER_EL0),
224811663111SMarc Zyngier 	  .access = access_pmu_evtyper, .reset = NULL },
22499d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMXEVCNTR_EL0),
225011663111SMarc Zyngier 	  .access = access_pmu_evcntr, .reset = NULL },
2251174ed3e4SMark Rutland 	/*
2252174ed3e4SMark Rutland 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2253d692b8adSShannon Zhao 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
2254d692b8adSShannon Zhao 	 */
22559d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
225611663111SMarc Zyngier 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
22579d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMOVSSET_EL0),
225811663111SMarc Zyngier 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
22597c8c5e6aSMarc Zyngier 
2260851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2261851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
226290807748SMark Brown 	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
226362a89c44SMarc Zyngier 
2264ed4ffaf4SMarc Zyngier 	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2265ed4ffaf4SMarc Zyngier 
2266338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
2267338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2268338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2269338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2270338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2271338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2272338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2273338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
22744fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(0),
22754fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(1),
22764fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(2),
22774fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(3),
22784fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(4),
22794fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(5),
22804fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(6),
22814fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(7),
22824fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(8),
22834fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(9),
22844fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(10),
22854fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(11),
22864fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(12),
22874fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(13),
22884fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(14),
22894fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(15),
2290493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(0),
2291493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(1),
2292493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(2),
2293493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(3),
2294493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(4),
2295493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(5),
2296493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(6),
2297493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(7),
2298493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(8),
2299493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(9),
2300493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(10),
2301493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(11),
2302493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(12),
2303493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(13),
2304493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(14),
2305493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(15),
23064fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(0),
23074fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(1),
23084fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(2),
23094fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(3),
23104fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(4),
23114fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(5),
23124fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(6),
23134fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(7),
23144fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(8),
23154fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(9),
23164fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(10),
23174fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(11),
23184fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(12),
23194fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(13),
23204fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(14),
23214fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(15),
2322493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(0),
2323493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(1),
2324493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(2),
2325493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(3),
2326493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(4),
2327493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(5),
2328493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(6),
2329493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(7),
2330493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(8),
2331493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(9),
2332493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(10),
2333493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(11),
2334493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(12),
2335493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(13),
2336493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(14),
2337493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(15),
23384fcdf106SIonela Voinescu 
2339c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2340c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
234184135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
234284135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
234384135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
2344c9a3c58fSJintack Lim 
2345051ff581SShannon Zhao 	/* PMEVCNTRn_EL0 */
2346051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(0),
2347051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(1),
2348051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(2),
2349051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(3),
2350051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(4),
2351051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(5),
2352051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(6),
2353051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(7),
2354051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(8),
2355051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(9),
2356051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(10),
2357051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(11),
2358051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(12),
2359051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(13),
2360051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(14),
2361051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(15),
2362051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(16),
2363051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(17),
2364051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(18),
2365051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(19),
2366051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(20),
2367051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(21),
2368051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(22),
2369051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(23),
2370051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(24),
2371051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(25),
2372051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(26),
2373051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(27),
2374051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(28),
2375051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(29),
2376051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(30),
23779feb21acSShannon Zhao 	/* PMEVTYPERn_EL0 */
23789feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(0),
23799feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(1),
23809feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(2),
23819feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(3),
23829feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(4),
23839feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(5),
23849feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(6),
23859feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(7),
23869feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(8),
23879feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(9),
23889feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(10),
23899feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(11),
23909feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(12),
23919feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(13),
23929feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(14),
23939feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(15),
23949feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(16),
23959feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(17),
23969feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(18),
23979feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(19),
23989feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(20),
23999feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(21),
24009feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(22),
24019feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(23),
24029feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(24),
24039feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(25),
24049feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(26),
24059feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(27),
24069feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(28),
24079feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(29),
24089feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(30),
2409174ed3e4SMark Rutland 	/*
2410174ed3e4SMark Rutland 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
24119feb21acSShannon Zhao 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
24129feb21acSShannon Zhao 	 */
24139d2a55b4SXiang Chen 	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
241411663111SMarc Zyngier 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2415051ff581SShannon Zhao 
24166ff9dc23SJintack Lim 	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
24176ff9dc23SJintack Lim 	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
24186ff9dc23SJintack Lim 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
24196ff9dc23SJintack Lim 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
24206ff9dc23SJintack Lim 	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
24216ff9dc23SJintack Lim 	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
242275c76ab5SMarc Zyngier 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
24236ff9dc23SJintack Lim 	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
242450d2fe46SMarc Zyngier 	EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
242550d2fe46SMarc Zyngier 	EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
242650d2fe46SMarc Zyngier 	EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
24276ff9dc23SJintack Lim 	EL2_REG(HACR_EL2, access_rw, reset_val, 0),
24286ff9dc23SJintack Lim 
242903fb54d0SMarc Zyngier 	EL2_REG(HCRX_EL2, access_rw, reset_val, 0),
243003fb54d0SMarc Zyngier 
24316ff9dc23SJintack Lim 	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
24326ff9dc23SJintack Lim 	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
24336ff9dc23SJintack Lim 	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
24346ff9dc23SJintack Lim 	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
24356ff9dc23SJintack Lim 	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
24366ff9dc23SJintack Lim 
2437851050a5SMark Rutland 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
243850d2fe46SMarc Zyngier 	EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
243950d2fe46SMarc Zyngier 	EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
24406ff9dc23SJintack Lim 	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
24416ff9dc23SJintack Lim 	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
24426ff9dc23SJintack Lim 	{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
24436ff9dc23SJintack Lim 
2444851050a5SMark Rutland 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
24456ff9dc23SJintack Lim 	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
24466ff9dc23SJintack Lim 	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
24476ff9dc23SJintack Lim 	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
2448c88b0936SDave Martin 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
24496ff9dc23SJintack Lim 
24506ff9dc23SJintack Lim 	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
24516ff9dc23SJintack Lim 	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
24526ff9dc23SJintack Lim 
24536ff9dc23SJintack Lim 	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
24546ff9dc23SJintack Lim 	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
24556ff9dc23SJintack Lim 
24566ff9dc23SJintack Lim 	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
24576ff9dc23SJintack Lim 	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
24586ff9dc23SJintack Lim 	{ SYS_DESC(SYS_RMR_EL2), trap_undef },
24596ff9dc23SJintack Lim 
24606ff9dc23SJintack Lim 	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
24616ff9dc23SJintack Lim 	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
24626ff9dc23SJintack Lim 
24636ff9dc23SJintack Lim 	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
24646ff9dc23SJintack Lim 	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
24656ff9dc23SJintack Lim 
2466280b748eSJintack Lim 	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
2467280b748eSJintack Lim 	EL12_REG(CPACR, access_rw, reset_val, 0),
2468280b748eSJintack Lim 	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
2469280b748eSJintack Lim 	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
2470280b748eSJintack Lim 	EL12_REG(TCR, access_vm_reg, reset_val, 0),
2471280b748eSJintack Lim 	{ SYS_DESC(SYS_SPSR_EL12), access_spsr},
2472280b748eSJintack Lim 	{ SYS_DESC(SYS_ELR_EL12), access_elr},
2473280b748eSJintack Lim 	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
2474280b748eSJintack Lim 	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
2475280b748eSJintack Lim 	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
2476280b748eSJintack Lim 	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
2477280b748eSJintack Lim 	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
2478280b748eSJintack Lim 	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
2479280b748eSJintack Lim 	EL12_REG(VBAR, access_rw, reset_val, 0),
2480280b748eSJintack Lim 	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
2481280b748eSJintack Lim 	EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2482280b748eSJintack Lim 
24836ff9dc23SJintack Lim 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
248462a89c44SMarc Zyngier };
248562a89c44SMarc Zyngier 
248647334146SJing Zhang static const struct sys_reg_desc *first_idreg;
248747334146SJing Zhang 
/*
 * Emulate the AArch32 DBGDIDR register for the guest.
 *
 * Writes are silently ignored (the register is read-only from the
 * guest's point of view). Reads synthesize a value from the sanitised
 * 64-bit ID registers: the WRPs/BRPs/CTX_CMPs fields of ID_AA64DFR0_EL1
 * are packed into bits [31:20], and the SE_imp/PCSR bits (14/12) track
 * whether EL3 is implemented (taken from ID_AA64PFR0_EL1.EL3).
 * NOTE(review): the literal 6 in bits [19:16] looks like the debug
 * architecture Version field — confirm against the Arm ARM DBGDIDR
 * description.
 */
trap_dbgdidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)24888c358b29SAlexandru Elisei static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
24893fec037dSPavel Fedin 			struct sys_reg_params *p,
2490bdfb4b38SMarc Zyngier 			const struct sys_reg_desc *r)
2491bdfb4b38SMarc Zyngier {
2492bdfb4b38SMarc Zyngier 	if (p->is_write) {
2493bdfb4b38SMarc Zyngier 		return ignore_write(vcpu, p);
2494bdfb4b38SMarc Zyngier 	} else {
249546823dd1SDave Martin 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
249646823dd1SDave Martin 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
249755adc08dSMark Brown 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
2498bdfb4b38SMarc Zyngier 
2499fcf37b38SMark Brown 		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
2500fcf37b38SMark Brown 			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
2501fcf37b38SMark Brown 			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
2502bea7e97fSMarc Zyngier 			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
2503bdfb4b38SMarc Zyngier 		return true;
2504bdfb4b38SMarc Zyngier 	}
2505bdfb4b38SMarc Zyngier }
2506bdfb4b38SMarc Zyngier 
25071da42c34SMarc Zyngier /*
25081da42c34SMarc Zyngier  * AArch32 debug register mappings
250984e690bfSAlex Bennée  *
251084e690bfSAlex Bennée  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
251184e690bfSAlex Bennée  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
251284e690bfSAlex Bennée  *
25131da42c34SMarc Zyngier  * None of the other registers share their location, so treat them as
25141da42c34SMarc Zyngier  * if they were 64bit.
251584e690bfSAlex Bennée  */
/*
 * Expand one breakpoint/watchpoint register quartet (DBGBVRn/DBGBCRn/
 * DBGWVRn/DBGWCRn). The initializers are positional after the encoding:
 * .access (trap_*), .reset (NULL), then the trailing "n", which selects
 * the debug register index the trap handler operates on.
 */
2516bdfb4b38SMarc Zyngier #define DBG_BCR_BVR_WCR_WVR(n)						      \
2517bdfb4b38SMarc Zyngier 	/* DBGBVRn */							      \
25181da42c34SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
2519bdfb4b38SMarc Zyngier 	/* DBGBCRn */							      \
252084e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
2521bdfb4b38SMarc Zyngier 	/* DBGWVRn */							      \
252284e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
2523bdfb4b38SMarc Zyngier 	/* DBGWCRn */							      \
252484e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
2525bdfb4b38SMarc Zyngier 
/*
 * DBGBXVRn maps to the high half of DBGBVRn_EL1 (AA32(HI)), reusing the
 * same trap_bvr accessor with the same register index.
 */
2526bdfb4b38SMarc Zyngier #define DBGBXVR(n)							      \
25271da42c34SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
2528bdfb4b38SMarc Zyngier 
2529bdfb4b38SMarc Zyngier /*
2530bdfb4b38SMarc Zyngier  * Trapped cp14 registers. We generally ignore most of the external
2531bdfb4b38SMarc Zyngier  * debug, on the principle that they don't really make sense to a
253284e690bfSAlex Bennée  * guest. Revisit this one day, should this principle change.
2533bdfb4b38SMarc Zyngier  */
/*
 * Entries are kept sorted by encoding; the ordering is verified at init
 * time by check_sysreg_table().
 */
253472564016SMarc Zyngier static const struct sys_reg_desc cp14_regs[] = {
25358c358b29SAlexandru Elisei 	/* DBGDIDR */
25368c358b29SAlexandru Elisei 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
2537bdfb4b38SMarc Zyngier 	/* DBGDTRRXext */
2538bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
2539bdfb4b38SMarc Zyngier 
2540bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(0),
2541bdfb4b38SMarc Zyngier 	/* DBGDSCRint */
2542bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
2543bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(1),
2544bdfb4b38SMarc Zyngier 	/* DBGDCCINT */
25451da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
2546bdfb4b38SMarc Zyngier 	/* DBGDSCRext */
25471da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
2548bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(2),
2549bdfb4b38SMarc Zyngier 	/* DBGDTR[RT]Xint */
2550bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
2551bdfb4b38SMarc Zyngier 	/* DBGDTR[RT]Xext */
2552bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
2553bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(3),
2554bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(4),
2555bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(5),
2556bdfb4b38SMarc Zyngier 	/* DBGWFAR */
2557bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
2558bdfb4b38SMarc Zyngier 	/* DBGOSECCR */
2559bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
2560bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(6),
2561bdfb4b38SMarc Zyngier 	/* DBGVCR */
25621da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
2563bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(7),
2564bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(8),
2565bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(9),
2566bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(10),
2567bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(11),
2568bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(12),
2569bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(13),
2570bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(14),
2571bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(15),
2572bdfb4b38SMarc Zyngier 
2573bdfb4b38SMarc Zyngier 	/* DBGDRAR (32bit) */
2574bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
2575bdfb4b38SMarc Zyngier 
2576bdfb4b38SMarc Zyngier 	DBGBXVR(0),
2577bdfb4b38SMarc Zyngier 	/* DBGOSLAR */
2578f24adc65SOliver Upton 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
2579bdfb4b38SMarc Zyngier 	DBGBXVR(1),
2580bdfb4b38SMarc Zyngier 	/* DBGOSLSR */
2581d42e2671SOliver Upton 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
2582bdfb4b38SMarc Zyngier 	DBGBXVR(2),
2583bdfb4b38SMarc Zyngier 	DBGBXVR(3),
2584bdfb4b38SMarc Zyngier 	/* DBGOSDLR */
2585bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
2586bdfb4b38SMarc Zyngier 	DBGBXVR(4),
2587bdfb4b38SMarc Zyngier 	/* DBGPRCR */
2588bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
2589bdfb4b38SMarc Zyngier 	DBGBXVR(5),
2590bdfb4b38SMarc Zyngier 	DBGBXVR(6),
2591bdfb4b38SMarc Zyngier 	DBGBXVR(7),
2592bdfb4b38SMarc Zyngier 	DBGBXVR(8),
2593bdfb4b38SMarc Zyngier 	DBGBXVR(9),
2594bdfb4b38SMarc Zyngier 	DBGBXVR(10),
2595bdfb4b38SMarc Zyngier 	DBGBXVR(11),
2596bdfb4b38SMarc Zyngier 	DBGBXVR(12),
2597bdfb4b38SMarc Zyngier 	DBGBXVR(13),
2598bdfb4b38SMarc Zyngier 	DBGBXVR(14),
2599bdfb4b38SMarc Zyngier 	DBGBXVR(15),
2600bdfb4b38SMarc Zyngier 
2601bdfb4b38SMarc Zyngier 	/* DBGDSAR (32bit) */
2602bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
2603bdfb4b38SMarc Zyngier 
2604bdfb4b38SMarc Zyngier 	/* DBGDEVID2 */
2605bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
2606bdfb4b38SMarc Zyngier 	/* DBGDEVID1 */
2607bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
2608bdfb4b38SMarc Zyngier 	/* DBGDEVID */
2609bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
2610bdfb4b38SMarc Zyngier 	/* DBGCLAIMSET */
2611bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
2612bdfb4b38SMarc Zyngier 	/* DBGCLAIMCLR */
2613bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
2614bdfb4b38SMarc Zyngier 	/* DBGAUTHSTATUS */
2615bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
261672564016SMarc Zyngier };
261772564016SMarc Zyngier 
2618a9866ba0SMarc Zyngier /* Trapped cp14 64bit registers */
/*
 * These entries intentionally have no CRn: 64-bit (MCRR/MRRC-style)
 * accesses are identified by Op1/CRm only. NOTE(review): presumably
 * this matches the AArch32 64-bit access encoding — confirm against
 * the Arm ARM.
 */
2619a9866ba0SMarc Zyngier static const struct sys_reg_desc cp14_64_regs[] = {
2620bdfb4b38SMarc Zyngier 	/* DBGDRAR (64bit) */
2621bdfb4b38SMarc Zyngier 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
2622bdfb4b38SMarc Zyngier 
2623bdfb4b38SMarc Zyngier 	/* DBGDSAR (64bit) */
2624bdfb4b38SMarc Zyngier 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
2625a9866ba0SMarc Zyngier };
2626a9866ba0SMarc Zyngier 
/*
 * Common initializer for CP15 PMU register descriptors: sets the
 * AArch32 mapping (_map is DIRECT/LO/HI), the encoding, and gates the
 * whole entry behind pmu_visibility.
 */
2627a9e192cdSAlexandru Elisei #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
2628a9e192cdSAlexandru Elisei 	AA32(_map),							\
2629a9e192cdSAlexandru Elisei 	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
2630a9e192cdSAlexandru Elisei 	.visibility = pmu_visibility
2631a9e192cdSAlexandru Elisei 
2632051ff581SShannon Zhao /* Macro to expand the PMEVCNTRn register */
/*
 * Event counter n lives at CRn=14, CRm=0b10xx, Op2=n[2:0]: the counter
 * index is split across CRm[1:0] (n >> 3) and Op2 (n & 7).
 */
2633051ff581SShannon Zhao #define PMU_PMEVCNTR(n)							\
2634a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2635a9e192cdSAlexandru Elisei 	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2636a9e192cdSAlexandru Elisei 	  .access = access_pmu_evcntr }
2637051ff581SShannon Zhao 
26389feb21acSShannon Zhao /* Macro to expand the PMEVTYPERn register */
/* Same index split as PMU_PMEVCNTR, but in the CRm=0b11xx range. */
26399feb21acSShannon Zhao #define PMU_PMEVTYPER(n)						\
2640a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2641a9e192cdSAlexandru Elisei 	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2642a9e192cdSAlexandru Elisei 	  .access = access_pmu_evtyper }
26434d44923bSMarc Zyngier /*
26444d44923bSMarc Zyngier  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
26454d44923bSMarc Zyngier  * depending on the way they are accessed (as a 32bit or a 64bit
26464d44923bSMarc Zyngier  * register).
26474d44923bSMarc Zyngier  */
/*
 * Entries are kept sorted by encoding; the ordering is verified at init
 * time by check_sysreg_table().
 */
264862a89c44SMarc Zyngier static const struct sys_reg_desc cp15_regs[] = {
2649f7f2b15cSArd Biesheuvel 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
2650b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
2651b1ea1d76SMarc Zyngier 	/* ACTLR */
2652b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
2653b1ea1d76SMarc Zyngier 	/* ACTLR2 */
2654b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
2655b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2656b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
2657b1ea1d76SMarc Zyngier 	/* TTBCR */
2658b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
2659b1ea1d76SMarc Zyngier 	/* TTBCR2 */
2660b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
2661b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
2662b1ea1d76SMarc Zyngier 	/* DFSR */
2663b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
2664b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
2665b1ea1d76SMarc Zyngier 	/* ADFSR */
2666b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
2667b1ea1d76SMarc Zyngier 	/* AIFSR */
2668b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
2669b1ea1d76SMarc Zyngier 	/* DFAR */
2670b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
2671b1ea1d76SMarc Zyngier 	/* IFAR */
2672b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
26734d44923bSMarc Zyngier 
267462a89c44SMarc Zyngier 	/*
267562a89c44SMarc Zyngier 	 * DC{C,I,CI}SW operations:
267662a89c44SMarc Zyngier 	 */
267762a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
267862a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
267962a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
26804d44923bSMarc Zyngier 
26817609c125SMarc Zyngier 	/* PMU */
2682a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2683a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2684a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2685a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2686a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2687a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2688a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
2689a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
2690a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2691a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2692a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2693a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2694a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2695a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2696a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2697a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
2698a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
269946081078SMarc Zyngier 	/* PMMIR */
2700a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
27014d44923bSMarc Zyngier 
2702b1ea1d76SMarc Zyngier 	/* PRRR/MAIR0 */
2703b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2704b1ea1d76SMarc Zyngier 	/* NMRR/MAIR1 */
2705b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2706b1ea1d76SMarc Zyngier 	/* AMAIR0 */
2707b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2708b1ea1d76SMarc Zyngier 	/* AMAIR1 */
2709b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
2710db7dedd0SChristoffer Dall 
2711db7dedd0SChristoffer Dall 	/* ICC_SRE */
2712f7f6f2d9SVladimir Murzin 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2713db7dedd0SChristoffer Dall 
2714b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
2715051ff581SShannon Zhao 
271684135d3dSAndre Przywara 	/* Arch Tmers */
271784135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
271884135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2719eac137b4SJérémy Fanguède 
2720051ff581SShannon Zhao 	/* PMEVCNTRn */
2721051ff581SShannon Zhao 	PMU_PMEVCNTR(0),
2722051ff581SShannon Zhao 	PMU_PMEVCNTR(1),
2723051ff581SShannon Zhao 	PMU_PMEVCNTR(2),
2724051ff581SShannon Zhao 	PMU_PMEVCNTR(3),
2725051ff581SShannon Zhao 	PMU_PMEVCNTR(4),
2726051ff581SShannon Zhao 	PMU_PMEVCNTR(5),
2727051ff581SShannon Zhao 	PMU_PMEVCNTR(6),
2728051ff581SShannon Zhao 	PMU_PMEVCNTR(7),
2729051ff581SShannon Zhao 	PMU_PMEVCNTR(8),
2730051ff581SShannon Zhao 	PMU_PMEVCNTR(9),
2731051ff581SShannon Zhao 	PMU_PMEVCNTR(10),
2732051ff581SShannon Zhao 	PMU_PMEVCNTR(11),
2733051ff581SShannon Zhao 	PMU_PMEVCNTR(12),
2734051ff581SShannon Zhao 	PMU_PMEVCNTR(13),
2735051ff581SShannon Zhao 	PMU_PMEVCNTR(14),
2736051ff581SShannon Zhao 	PMU_PMEVCNTR(15),
2737051ff581SShannon Zhao 	PMU_PMEVCNTR(16),
2738051ff581SShannon Zhao 	PMU_PMEVCNTR(17),
2739051ff581SShannon Zhao 	PMU_PMEVCNTR(18),
2740051ff581SShannon Zhao 	PMU_PMEVCNTR(19),
2741051ff581SShannon Zhao 	PMU_PMEVCNTR(20),
2742051ff581SShannon Zhao 	PMU_PMEVCNTR(21),
2743051ff581SShannon Zhao 	PMU_PMEVCNTR(22),
2744051ff581SShannon Zhao 	PMU_PMEVCNTR(23),
2745051ff581SShannon Zhao 	PMU_PMEVCNTR(24),
2746051ff581SShannon Zhao 	PMU_PMEVCNTR(25),
2747051ff581SShannon Zhao 	PMU_PMEVCNTR(26),
2748051ff581SShannon Zhao 	PMU_PMEVCNTR(27),
2749051ff581SShannon Zhao 	PMU_PMEVCNTR(28),
2750051ff581SShannon Zhao 	PMU_PMEVCNTR(29),
2751051ff581SShannon Zhao 	PMU_PMEVCNTR(30),
27529feb21acSShannon Zhao 	/* PMEVTYPERn */
27539feb21acSShannon Zhao 	PMU_PMEVTYPER(0),
27549feb21acSShannon Zhao 	PMU_PMEVTYPER(1),
27559feb21acSShannon Zhao 	PMU_PMEVTYPER(2),
27569feb21acSShannon Zhao 	PMU_PMEVTYPER(3),
27579feb21acSShannon Zhao 	PMU_PMEVTYPER(4),
27589feb21acSShannon Zhao 	PMU_PMEVTYPER(5),
27599feb21acSShannon Zhao 	PMU_PMEVTYPER(6),
27609feb21acSShannon Zhao 	PMU_PMEVTYPER(7),
27619feb21acSShannon Zhao 	PMU_PMEVTYPER(8),
27629feb21acSShannon Zhao 	PMU_PMEVTYPER(9),
27639feb21acSShannon Zhao 	PMU_PMEVTYPER(10),
27649feb21acSShannon Zhao 	PMU_PMEVTYPER(11),
27659feb21acSShannon Zhao 	PMU_PMEVTYPER(12),
27669feb21acSShannon Zhao 	PMU_PMEVTYPER(13),
27679feb21acSShannon Zhao 	PMU_PMEVTYPER(14),
27689feb21acSShannon Zhao 	PMU_PMEVTYPER(15),
27699feb21acSShannon Zhao 	PMU_PMEVTYPER(16),
27709feb21acSShannon Zhao 	PMU_PMEVTYPER(17),
27719feb21acSShannon Zhao 	PMU_PMEVTYPER(18),
27729feb21acSShannon Zhao 	PMU_PMEVTYPER(19),
27739feb21acSShannon Zhao 	PMU_PMEVTYPER(20),
27749feb21acSShannon Zhao 	PMU_PMEVTYPER(21),
27759feb21acSShannon Zhao 	PMU_PMEVTYPER(22),
27769feb21acSShannon Zhao 	PMU_PMEVTYPER(23),
27779feb21acSShannon Zhao 	PMU_PMEVTYPER(24),
27789feb21acSShannon Zhao 	PMU_PMEVTYPER(25),
27799feb21acSShannon Zhao 	PMU_PMEVTYPER(26),
27809feb21acSShannon Zhao 	PMU_PMEVTYPER(27),
27819feb21acSShannon Zhao 	PMU_PMEVTYPER(28),
27829feb21acSShannon Zhao 	PMU_PMEVTYPER(29),
27839feb21acSShannon Zhao 	PMU_PMEVTYPER(30),
27849feb21acSShannon Zhao 	/* PMCCFILTR */
2785a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
2786f7f2b15cSArd Biesheuvel 
2787f7f2b15cSArd Biesheuvel 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2788f7f2b15cSArd Biesheuvel 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2789bf48040cSAkihiko Odaki 
2790bf48040cSAkihiko Odaki 	/* CCSIDR2 */
2791bf48040cSAkihiko Odaki 	{ Op1(1), CRn( 0), CRm( 0),  Op2(2), undef_access },
2792bf48040cSAkihiko Odaki 
2793b1ea1d76SMarc Zyngier 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
2794a9866ba0SMarc Zyngier };
2795a9866ba0SMarc Zyngier 
/*
 * 64-bit CP15 accesses. TTBR0/TTBR1 appear here again (in addition to
 * their 32-bit cp15_regs entries) because they can be read/written as
 * 64-bit registers; the GIC SGI and timer registers are 64-bit only.
 */
2796a9866ba0SMarc Zyngier static const struct sys_reg_desc cp15_64_regs[] = {
2797b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2798a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
279903bd646dSMarc Zyngier 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2800c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_AARCH32_CNTPCT),	      access_arch_timer },
2801b1ea1d76SMarc Zyngier 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
280203bd646dSMarc Zyngier 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
280303bd646dSMarc Zyngier 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
280484135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
2805a6610435SMarc Zyngier 	{ SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
28067c8c5e6aSMarc Zyngier };
28077c8c5e6aSMarc Zyngier 
/*
 * Sanity-check a trap table at init time.
 *
 * Two invariants are enforced:
 *  - for 64-bit tables (!is_32), any entry backed by a shadow register
 *    (non-zero .reg) must also provide a .reset method;
 *  - entries must be strictly sorted by encoding (cmp_sys_reg), which
 *    the lookup code relies on.
 *
 * Returns true if the table is well-formed, false (after logging the
 * offending entry) otherwise.
 */
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	for (unsigned int i = 0; i < n; i++) {
		const struct sys_reg_desc *d = &table[i];

		if (!is_32 && d->reg && !d->reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", d, i);
			return false;
		}

		if (i > 0 && cmp_sys_reg(d - 1, d) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", d - 1, i - 1);
			return false;
		}
	}

	return true;
}
2827bb44a8dbSMarc Zyngier 
/*
 * Guest LDC/STC accesses to CP14 are not supported: inject an UNDEF
 * exception into the guest and report the trap as handled (return 1
 * resumes the guest).
 */
kvm_handle_cp14_load_store(struct kvm_vcpu * vcpu)282874cc7e0cSTianjia Zhang int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
282962a89c44SMarc Zyngier {
283062a89c44SMarc Zyngier 	kvm_inject_undefined(vcpu);
283162a89c44SMarc Zyngier 	return 1;
283262a89c44SMarc Zyngier }
283362a89c44SMarc Zyngier 
/*
 * perform_access - run the accessor of a matched trap descriptor.
 *
 * Traces the access, injects #UNDEF if the register is hidden by the
 * runtime configuration, then invokes r->access(); a 'true' return from
 * the accessor means the instruction was emulated and the PC advances.
 */
perform_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)2834e70b9522SMarc Zyngier static void perform_access(struct kvm_vcpu *vcpu,
2835e70b9522SMarc Zyngier 			   struct sys_reg_params *params,
2836e70b9522SMarc Zyngier 			   const struct sys_reg_desc *r)
2837e70b9522SMarc Zyngier {
2838599d79dcSMarc Zyngier 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2839599d79dcSMarc Zyngier 
28407f34e409SDave Martin 	/* Check for regs disabled by runtime config */
284101fe5aceSAndrew Jones 	if (sysreg_hidden(vcpu, r)) {
28427f34e409SDave Martin 		kvm_inject_undefined(vcpu);
28437f34e409SDave Martin 		return;
28447f34e409SDave Martin 	}
28457f34e409SDave Martin 
2846e70b9522SMarc Zyngier 	/*
2847e70b9522SMarc Zyngier 	 * Not having an accessor means that we have configured a trap
2848e70b9522SMarc Zyngier 	 * that we don't know how to handle. This certainly qualifies
2849e70b9522SMarc Zyngier 	 * as a gross bug that should be fixed right away.
2850e70b9522SMarc Zyngier 	 */
2851e70b9522SMarc Zyngier 	BUG_ON(!r->access);
2852e70b9522SMarc Zyngier 
2853e70b9522SMarc Zyngier 	/* Skip instruction if instructed so */
2854e70b9522SMarc Zyngier 	if (likely(r->access(vcpu, params, r)))
2855cdb5e02eSMarc Zyngier 		kvm_incr_pc(vcpu);
2856e70b9522SMarc Zyngier }
2857e70b9522SMarc Zyngier 
285872564016SMarc Zyngier /*
285972564016SMarc Zyngier  * emulate_cp --  tries to match a sys_reg access in a handling table, and
286072564016SMarc Zyngier  *                call the corresponding trap handler.
286172564016SMarc Zyngier  *
286272564016SMarc Zyngier  * @params: pointer to the descriptor of the access
286372564016SMarc Zyngier  * @table: array of trap descriptors
286472564016SMarc Zyngier  * @num: size of the trap descriptor array
286572564016SMarc Zyngier  *
2866001bb819SOliver Upton  * Return true if the access has been handled, false if not.
286772564016SMarc Zyngier  */
emulate_cp(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * table,size_t num)2868001bb819SOliver Upton static bool emulate_cp(struct kvm_vcpu *vcpu,
28693fec037dSPavel Fedin 		       struct sys_reg_params *params,
287072564016SMarc Zyngier 		       const struct sys_reg_desc *table,
287172564016SMarc Zyngier 		       size_t num)
287262a89c44SMarc Zyngier {
287372564016SMarc Zyngier 	const struct sys_reg_desc *r;
287462a89c44SMarc Zyngier 
287572564016SMarc Zyngier 	if (!table)
2876001bb819SOliver Upton 		return false;	/* Not handled */
287762a89c44SMarc Zyngier 
287862a89c44SMarc Zyngier 	r = find_reg(params, table, num);
287962a89c44SMarc Zyngier 
288072564016SMarc Zyngier 	if (r) {
2881e70b9522SMarc Zyngier 		perform_access(vcpu, params, r);
2882001bb819SOliver Upton 		return true;
288372564016SMarc Zyngier 	}
288472564016SMarc Zyngier 
288572564016SMarc Zyngier 	/* Not handled */
2886001bb819SOliver Upton 	return false;
288772564016SMarc Zyngier }
288872564016SMarc Zyngier 
/*
 * unhandled_cp_access - log an unemulated CP14/CP15 access and UNDEF it.
 *
 * The coproc number is derived from the trapped exception class; an
 * unexpected class warns and deliberately leaves cp == -1 in the message.
 */
unhandled_cp_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params)288972564016SMarc Zyngier static void unhandled_cp_access(struct kvm_vcpu *vcpu,
289072564016SMarc Zyngier 				struct sys_reg_params *params)
289172564016SMarc Zyngier {
28923a949f4cSGavin Shan 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
289340c4f8d2SDan Carpenter 	int cp = -1;
289472564016SMarc Zyngier 
28953a949f4cSGavin Shan 	switch (esr_ec) {
2896c6d01a94SMark Rutland 	case ESR_ELx_EC_CP15_32:
2897c6d01a94SMark Rutland 	case ESR_ELx_EC_CP15_64:
289872564016SMarc Zyngier 		cp = 15;
289972564016SMarc Zyngier 		break;
2900c6d01a94SMark Rutland 	case ESR_ELx_EC_CP14_MR:
2901c6d01a94SMark Rutland 	case ESR_ELx_EC_CP14_64:
290272564016SMarc Zyngier 		cp = 14;
290372564016SMarc Zyngier 		break;
290472564016SMarc Zyngier 	default:
290540c4f8d2SDan Carpenter 		WARN_ON(1);
290672564016SMarc Zyngier 	}
290772564016SMarc Zyngier 
2908bf4b96bbSMark Rutland 	print_sys_reg_msg(params,
2909bf4b96bbSMark Rutland 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2910d1878af3SMark Rutland 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
291162a89c44SMarc Zyngier 	kvm_inject_undefined(vcpu);
291262a89c44SMarc Zyngier }
291362a89c44SMarc Zyngier 
291462a89c44SMarc Zyngier /**
29157769db90SShannon Zhao  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
291662a89c44SMarc Zyngier  * @vcpu: The VCPU pointer
291762a89c44SMarc Zyngier  * @run:  The kvm_run struct
291862a89c44SMarc Zyngier  */
kvm_handle_cp_64(struct kvm_vcpu * vcpu,const struct sys_reg_desc * global,size_t nr_global)291972564016SMarc Zyngier static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
292072564016SMarc Zyngier 			    const struct sys_reg_desc *global,
2921dcaffa7bSJames Morse 			    size_t nr_global)
292262a89c44SMarc Zyngier {
292362a89c44SMarc Zyngier 	struct sys_reg_params params;
29240b12620fSAlexandru Elisei 	u64 esr = kvm_vcpu_get_esr(vcpu);
2925c667186fSMarc Zyngier 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
29263a949f4cSGavin Shan 	int Rt2 = (esr >> 10) & 0x1f;
292762a89c44SMarc Zyngier 
	/* ISS decode of the MCRR/MRRC trap (CRm, direction, Opc1 fields) */
29283a949f4cSGavin Shan 	params.CRm = (esr >> 1) & 0xf;
29293a949f4cSGavin Shan 	params.is_write = ((esr & 1) == 0);
293062a89c44SMarc Zyngier 
293162a89c44SMarc Zyngier 	params.Op0 = 0;
29323a949f4cSGavin Shan 	params.Op1 = (esr >> 16) & 0xf;
293362a89c44SMarc Zyngier 	params.Op2 = 0;
293462a89c44SMarc Zyngier 	params.CRn = 0;
293562a89c44SMarc Zyngier 
293662a89c44SMarc Zyngier 	/*
29372ec5be3dSPavel Fedin 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
293862a89c44SMarc Zyngier 	 * backends between AArch32 and AArch64, we get away with it.
293962a89c44SMarc Zyngier 	 */
294062a89c44SMarc Zyngier 	if (params.is_write) {
29412ec5be3dSPavel Fedin 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
29422ec5be3dSPavel Fedin 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
294362a89c44SMarc Zyngier 	}
294462a89c44SMarc Zyngier 
2945b6b7a806SMarc Zyngier 	/*
2946dcaffa7bSJames Morse 	 * If the table contains a handler, handle the
2947b6b7a806SMarc Zyngier 	 * potential register operation in the case of a read and return
2948b6b7a806SMarc Zyngier 	 * with success.
2949b6b7a806SMarc Zyngier 	 */
2950001bb819SOliver Upton 	if (emulate_cp(vcpu, &params, global, nr_global)) {
29512ec5be3dSPavel Fedin 		/* Split up the value between registers for the read side */
295262a89c44SMarc Zyngier 		if (!params.is_write) {
29532ec5be3dSPavel Fedin 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
29542ec5be3dSPavel Fedin 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
295562a89c44SMarc Zyngier 		}
295662a89c44SMarc Zyngier 
295762a89c44SMarc Zyngier 		return 1;
295862a89c44SMarc Zyngier 	}
295962a89c44SMarc Zyngier 
2960b6b7a806SMarc Zyngier 	unhandled_cp_access(vcpu, &params);
2961b6b7a806SMarc Zyngier 	return 1;
2962b6b7a806SMarc Zyngier }
2963b6b7a806SMarc Zyngier 
2964e6519766SOliver Upton static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2965e6519766SOliver Upton 
29669369bc5cSOliver Upton /*
29679369bc5cSOliver Upton  * The CP10 ID registers are architecturally mapped to AArch64 feature
29689369bc5cSOliver Upton  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
29699369bc5cSOliver Upton  * from AArch32.
29709369bc5cSOliver Upton  */
kvm_esr_cp10_id_to_sys64(u64 esr,struct sys_reg_params * params)2971ee87a9bdSMarc Zyngier static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
29729369bc5cSOliver Upton {
29739369bc5cSOliver Upton 	u8 reg_id = (esr >> 10) & 0xf;
29749369bc5cSOliver Upton 	bool valid;
29759369bc5cSOliver Upton 
29769369bc5cSOliver Upton 	params->is_write = ((esr & 1) == 0);
29779369bc5cSOliver Upton 	params->Op0 = 3;
29789369bc5cSOliver Upton 	params->Op1 = 0;
29799369bc5cSOliver Upton 	params->CRn = 0;
29809369bc5cSOliver Upton 	params->CRm = 3;
29819369bc5cSOliver Upton 
29829369bc5cSOliver Upton 	/* CP10 ID registers are read-only */
29839369bc5cSOliver Upton 	valid = !params->is_write;
29849369bc5cSOliver Upton 
29859369bc5cSOliver Upton 	switch (reg_id) {
29869369bc5cSOliver Upton 	/* MVFR0 */
29879369bc5cSOliver Upton 	case 0b0111:
29889369bc5cSOliver Upton 		params->Op2 = 0;
29899369bc5cSOliver Upton 		break;
29909369bc5cSOliver Upton 	/* MVFR1 */
29919369bc5cSOliver Upton 	case 0b0110:
29929369bc5cSOliver Upton 		params->Op2 = 1;
29939369bc5cSOliver Upton 		break;
29949369bc5cSOliver Upton 	/* MVFR2 */
29959369bc5cSOliver Upton 	case 0b0101:
29969369bc5cSOliver Upton 		params->Op2 = 2;
29979369bc5cSOliver Upton 		break;
29989369bc5cSOliver Upton 	default:
29999369bc5cSOliver Upton 		valid = false;
30009369bc5cSOliver Upton 	}
30019369bc5cSOliver Upton 
30029369bc5cSOliver Upton 	if (valid)
30039369bc5cSOliver Upton 		return true;
30049369bc5cSOliver Upton 
30059369bc5cSOliver Upton 	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
30069369bc5cSOliver Upton 		      params->is_write ? "write" : "read", reg_id);
30079369bc5cSOliver Upton 	return false;
30089369bc5cSOliver Upton }
30099369bc5cSOliver Upton 
30109369bc5cSOliver Upton /**
30119369bc5cSOliver Upton  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
30129369bc5cSOliver Upton  *			  VFP Register' from AArch32.
30139369bc5cSOliver Upton  * @vcpu: The vCPU pointer
30149369bc5cSOliver Upton  *
30159369bc5cSOliver Upton  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
30169369bc5cSOliver Upton  * Work out the correct AArch64 system register encoding and reroute to the
30179369bc5cSOliver Upton  * AArch64 system register emulation.
30189369bc5cSOliver Upton  */
kvm_handle_cp10_id(struct kvm_vcpu * vcpu)30199369bc5cSOliver Upton int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
30209369bc5cSOliver Upton {
30219369bc5cSOliver Upton 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
3022ee87a9bdSMarc Zyngier 	u64 esr = kvm_vcpu_get_esr(vcpu);
30239369bc5cSOliver Upton 	struct sys_reg_params params;
30249369bc5cSOliver Upton 
30259369bc5cSOliver Upton 	/* UNDEF on any unhandled register access */
30269369bc5cSOliver Upton 	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
30279369bc5cSOliver Upton 		kvm_inject_undefined(vcpu);
30289369bc5cSOliver Upton 		return 1;
30299369bc5cSOliver Upton 	}
30309369bc5cSOliver Upton 
	/* Writes were rejected above, so a handled access is always a read */
30319369bc5cSOliver Upton 	if (emulate_sys_reg(vcpu, &params))
30329369bc5cSOliver Upton 		vcpu_set_reg(vcpu, Rt, params.regval);
30339369bc5cSOliver Upton 
30349369bc5cSOliver Upton 	return 1;
30359369bc5cSOliver Upton }
30369369bc5cSOliver Upton 
3037e6519766SOliver Upton /**
3038e6519766SOliver Upton  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
3039e6519766SOliver Upton  *			       CRn=0, which corresponds to the AArch32 feature
3040e6519766SOliver Upton  *			       registers.
3041e6519766SOliver Upton  * @vcpu: the vCPU pointer
3042e6519766SOliver Upton  * @params: the system register access parameters.
3043e6519766SOliver Upton  *
3044e6519766SOliver Upton  * Our cp15 system register tables do not enumerate the AArch32 feature
3045e6519766SOliver Upton  * registers. Conveniently, our AArch64 table does, and the AArch32 system
3046e6519766SOliver Upton  * register encoding can be trivially remapped into the AArch64 for the feature
3047e6519766SOliver Upton  * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
3048e6519766SOliver Upton  *
3049e6519766SOliver Upton  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
3050e6519766SOliver Upton  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
3051e6519766SOliver Upton  * range are either UNKNOWN or RES0. Rerouting remains architectural as we
3052e6519766SOliver Upton  * treat undefined registers in this range as RAZ.
3053e6519766SOliver Upton  */
kvm_emulate_cp15_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)3054e6519766SOliver Upton static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
3055e6519766SOliver Upton 				   struct sys_reg_params *params)
3056e6519766SOliver Upton {
3057e6519766SOliver Upton 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
3058e6519766SOliver Upton 
3059e6519766SOliver Upton 	/* Treat impossible writes to RO registers as UNDEFINED */
3060e6519766SOliver Upton 	if (params->is_write) {
3061e6519766SOliver Upton 		unhandled_cp_access(vcpu, params);
3062e6519766SOliver Upton 		return 1;
3063e6519766SOliver Upton 	}
3064e6519766SOliver Upton 
	/* Reroute into the AArch64 encoding space: op0=3, rest unchanged */
3065e6519766SOliver Upton 	params->Op0 = 3;
3066e6519766SOliver Upton 
3067e6519766SOliver Upton 	/*
3068e6519766SOliver Upton 	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
3069e6519766SOliver Upton 	 * Avoid conflicting with future expansion of AArch64 feature registers
3070e6519766SOliver Upton 	 * and simply treat them as RAZ here.
3071e6519766SOliver Upton 	 */
3072e6519766SOliver Upton 	if (params->CRm > 3)
3073e6519766SOliver Upton 		params->regval = 0;
3074e6519766SOliver Upton 	else if (!emulate_sys_reg(vcpu, params))
3075e6519766SOliver Upton 		return 1;
3076e6519766SOliver Upton 
3077e6519766SOliver Upton 	vcpu_set_reg(vcpu, Rt, params->regval);
3078e6519766SOliver Upton 	return 1;
3079e6519766SOliver Upton }
3080e6519766SOliver Upton 
308162a89c44SMarc Zyngier /**
30827769db90SShannon Zhao  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
308362a89c44SMarc Zyngier  * @vcpu: The VCPU pointer
308462a89c44SMarc Zyngier  * @run:  The kvm_run struct
308562a89c44SMarc Zyngier  */
kvm_handle_cp_32(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * global,size_t nr_global)308672564016SMarc Zyngier static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
3087e6519766SOliver Upton 			    struct sys_reg_params *params,
308872564016SMarc Zyngier 			    const struct sys_reg_desc *global,
3089dcaffa7bSJames Morse 			    size_t nr_global)
309062a89c44SMarc Zyngier {
3091c667186fSMarc Zyngier 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
309262a89c44SMarc Zyngier 
3093e6519766SOliver Upton 	params->regval = vcpu_get_reg(vcpu, Rt);
309462a89c44SMarc Zyngier 
3095e6519766SOliver Upton 	if (emulate_cp(vcpu, params, global, nr_global)) {
3096e6519766SOliver Upton 		if (!params->is_write)
3097e6519766SOliver Upton 			vcpu_set_reg(vcpu, Rt, params->regval);
309862a89c44SMarc Zyngier 		return 1;
30992ec5be3dSPavel Fedin 	}
310072564016SMarc Zyngier 
3101e6519766SOliver Upton 	unhandled_cp_access(vcpu, params);
310272564016SMarc Zyngier 	return 1;
310372564016SMarc Zyngier }
310472564016SMarc Zyngier 
/* MCRR/MRRC trap to CP15: dispatch via the 64-bit CP15 trap table. */
kvm_handle_cp15_64(struct kvm_vcpu * vcpu)310574cc7e0cSTianjia Zhang int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
310672564016SMarc Zyngier {
3107dcaffa7bSJames Morse 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
310872564016SMarc Zyngier }
310972564016SMarc Zyngier 
kvm_handle_cp15_32(struct kvm_vcpu * vcpu)311074cc7e0cSTianjia Zhang int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
311172564016SMarc Zyngier {
3112e6519766SOliver Upton 	struct sys_reg_params params;
3113e6519766SOliver Upton 
3114e6519766SOliver Upton 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3115e6519766SOliver Upton 
3116e6519766SOliver Upton 	/*
3117e6519766SOliver Upton 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
3118e6519766SOliver Upton 	 * system register table. Registers in the ID range where CRm=0 are
3119e6519766SOliver Upton 	 * excluded from this scheme as they do not trivially map into AArch64
3120e6519766SOliver Upton 	 * system register encodings.
3121e6519766SOliver Upton 	 */
3122e6519766SOliver Upton 	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
3123e6519766SOliver Upton 		return kvm_emulate_cp15_id_reg(vcpu, &params);
3124e6519766SOliver Upton 
3125e6519766SOliver Upton 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
312672564016SMarc Zyngier }
312772564016SMarc Zyngier 
/* MCRR/MRRC trap to CP14: dispatch via the 64-bit CP14 trap table. */
kvm_handle_cp14_64(struct kvm_vcpu * vcpu)312874cc7e0cSTianjia Zhang int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
312972564016SMarc Zyngier {
3130dcaffa7bSJames Morse 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
313172564016SMarc Zyngier }
313272564016SMarc Zyngier 
kvm_handle_cp14_32(struct kvm_vcpu * vcpu)313374cc7e0cSTianjia Zhang int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
313472564016SMarc Zyngier {
3135e6519766SOliver Upton 	struct sys_reg_params params;
3136e6519766SOliver Upton 
3137e6519766SOliver Upton 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3138e6519766SOliver Upton 
3139e6519766SOliver Upton 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
314062a89c44SMarc Zyngier }
314162a89c44SMarc Zyngier 
is_imp_def_sys_reg(struct sys_reg_params * params)314254ad68b7SMark Rutland static bool is_imp_def_sys_reg(struct sys_reg_params *params)
314354ad68b7SMark Rutland {
314454ad68b7SMark Rutland 	// See ARM DDI 0487E.a, section D12.3.2
314554ad68b7SMark Rutland 	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
314654ad68b7SMark Rutland }
314754ad68b7SMark Rutland 
314828eda7b5SOliver Upton /**
314928eda7b5SOliver Upton  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
315028eda7b5SOliver Upton  * @vcpu: The VCPU pointer
315128eda7b5SOliver Upton  * @params: Decoded system register parameters
315228eda7b5SOliver Upton  *
315328eda7b5SOliver Upton  * Return: true if the system register access was successful, false otherwise.
315428eda7b5SOliver Upton  */
emulate_sys_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)315528eda7b5SOliver Upton static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
31563fec037dSPavel Fedin 			   struct sys_reg_params *params)
31577c8c5e6aSMarc Zyngier {
3158dcaffa7bSJames Morse 	const struct sys_reg_desc *r;
31597c8c5e6aSMarc Zyngier 
31607c8c5e6aSMarc Zyngier 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
31617c8c5e6aSMarc Zyngier 
31627c8c5e6aSMarc Zyngier 	if (likely(r)) {
3163e70b9522SMarc Zyngier 		perform_access(vcpu, params, r);
316428eda7b5SOliver Upton 		return true;
316528eda7b5SOliver Upton 	}
316628eda7b5SOliver Upton 
316728eda7b5SOliver Upton 	if (is_imp_def_sys_reg(params)) {
316854ad68b7SMark Rutland 		kvm_inject_undefined(vcpu);
31697c8c5e6aSMarc Zyngier 	} else {
3170bf4b96bbSMark Rutland 		print_sys_reg_msg(params,
3171bf4b96bbSMark Rutland 				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
3172d1878af3SMark Rutland 				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
31737c8c5e6aSMarc Zyngier 		kvm_inject_undefined(vcpu);
3174e70b9522SMarc Zyngier 	}
317528eda7b5SOliver Upton 	return false;
31767c8c5e6aSMarc Zyngier }
31777c8c5e6aSMarc Zyngier 
/*
 * kvm_reset_id_regs - compute the VM-wide ID register values, once.
 *
 * Walks the contiguous run of ID descriptors starting at first_idreg,
 * storing each reset value in the per-VM IDREG() array under the config
 * lock, then sets KVM_ARCH_FLAG_ID_REGS_INITIALIZED so subsequent calls
 * are no-ops.
 */
kvm_reset_id_regs(struct kvm_vcpu * vcpu)317847334146SJing Zhang static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
317947334146SJing Zhang {
318047334146SJing Zhang 	const struct sys_reg_desc *idreg = first_idreg;
318147334146SJing Zhang 	u32 id = reg_to_encoding(idreg);
318247334146SJing Zhang 	struct kvm *kvm = vcpu->kvm;
318347334146SJing Zhang 
318447334146SJing Zhang 	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
318547334146SJing Zhang 		return;
318647334146SJing Zhang 
318747334146SJing Zhang 	lockdep_assert_held(&kvm->arch.config_lock);
318847334146SJing Zhang 
318947334146SJing Zhang 	/* Initialize all idregs */
319047334146SJing Zhang 	while (is_id_reg(id)) {
319147334146SJing Zhang 		IDREG(kvm, id) = idreg->reset(vcpu, idreg);
319247334146SJing Zhang 
319347334146SJing Zhang 		idreg++;
319447334146SJing Zhang 		id = reg_to_encoding(idreg);
319547334146SJing Zhang 	}
319647334146SJing Zhang 
319747334146SJing Zhang 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
319847334146SJing Zhang }
319947334146SJing Zhang 
3200750ed566SJames Morse /**
3201750ed566SJames Morse  * kvm_reset_sys_regs - sets system registers to reset value
3202750ed566SJames Morse  * @vcpu: The VCPU pointer
3203750ed566SJames Morse  *
3204750ed566SJames Morse  * This function finds the right table above and sets the registers on the
3205750ed566SJames Morse  * virtual CPU struct to their architecturally defined reset values.
3206750ed566SJames Morse  */
kvm_reset_sys_regs(struct kvm_vcpu * vcpu)3207750ed566SJames Morse void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
32087c8c5e6aSMarc Zyngier {
32097c8c5e6aSMarc Zyngier 	unsigned long i;
32107c8c5e6aSMarc Zyngier 
321147334146SJing Zhang 	kvm_reset_id_regs(vcpu);
321247334146SJing Zhang 
321347334146SJing Zhang 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
321447334146SJing Zhang 		const struct sys_reg_desc *r = &sys_reg_descs[i];
321547334146SJing Zhang 
321647334146SJing Zhang 		if (is_id_reg(reg_to_encoding(r)))
321747334146SJing Zhang 			continue;
321847334146SJing Zhang 
321947334146SJing Zhang 		if (r->reset)
322047334146SJing Zhang 			r->reset(vcpu, r);
322147334146SJing Zhang 	}
32227c8c5e6aSMarc Zyngier }
32237c8c5e6aSMarc Zyngier 
32247c8c5e6aSMarc Zyngier /**
32257c8c5e6aSMarc Zyngier  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
32267c8c5e6aSMarc Zyngier  * @vcpu: The VCPU pointer
32277c8c5e6aSMarc Zyngier  */
kvm_handle_sys_reg(struct kvm_vcpu * vcpu)322874cc7e0cSTianjia Zhang int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
32297c8c5e6aSMarc Zyngier {
32307c8c5e6aSMarc Zyngier 	struct sys_reg_params params;
32313a949f4cSGavin Shan 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
3232c667186fSMarc Zyngier 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
32337c8c5e6aSMarc Zyngier 
3234eef8c85aSAlex Bennée 	trace_kvm_handle_sys_reg(esr);
3235eef8c85aSAlex Bennée 
	/*
	 * NOTE(review): nested-virt check runs before local emulation; a
	 * non-zero return retires the trap here — confirm semantics against
	 * __check_nv_sr_forward().
	 */
3236e58ec47bSMarc Zyngier 	if (__check_nv_sr_forward(vcpu))
3237e58ec47bSMarc Zyngier 		return 1;
3238e58ec47bSMarc Zyngier 
3239f76f89e2SFuad Tabba 	params = esr_sys64_to_params(esr);
32402ec5be3dSPavel Fedin 	params.regval = vcpu_get_reg(vcpu, Rt);
32417c8c5e6aSMarc Zyngier 
324228eda7b5SOliver Upton 	if (!emulate_sys_reg(vcpu, &params))
324328eda7b5SOliver Upton 		return 1;
32442ec5be3dSPavel Fedin 
32452ec5be3dSPavel Fedin 	if (!params.is_write)
32462ec5be3dSPavel Fedin 		vcpu_set_reg(vcpu, Rt, params.regval);
324728eda7b5SOliver Upton 	return 1;
32487c8c5e6aSMarc Zyngier }
32497c8c5e6aSMarc Zyngier 
32507c8c5e6aSMarc Zyngier /******************************************************************************
32517c8c5e6aSMarc Zyngier  * Userspace API
32527c8c5e6aSMarc Zyngier  *****************************************************************************/
32537c8c5e6aSMarc Zyngier 
/*
 * index_to_params - decode a KVM_REG_ARM64_SYSREG userspace register ID
 * into Op0/Op1/CRn/CRm/Op2. Only 64-bit sized IDs are accepted; any set
 * bit outside the defined fields makes the ID invalid.
 */
index_to_params(u64 id,struct sys_reg_params * params)32547c8c5e6aSMarc Zyngier static bool index_to_params(u64 id, struct sys_reg_params *params)
32557c8c5e6aSMarc Zyngier {
32567c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_SIZE_MASK) {
32577c8c5e6aSMarc Zyngier 	case KVM_REG_SIZE_U64:
32587c8c5e6aSMarc Zyngier 		/* Any unused index bits means it's not valid. */
32597c8c5e6aSMarc Zyngier 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
32607c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM_COPROC_MASK
32617c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
32627c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
32637c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
32647c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
32657c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
32667c8c5e6aSMarc Zyngier 			return false;
32677c8c5e6aSMarc Zyngier 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
32687c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
32697c8c5e6aSMarc Zyngier 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
32707c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
32717c8c5e6aSMarc Zyngier 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
32727c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
32737c8c5e6aSMarc Zyngier 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
32747c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
32757c8c5e6aSMarc Zyngier 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
32767c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
32777c8c5e6aSMarc Zyngier 		return true;
32787c8c5e6aSMarc Zyngier 	default:
32797c8c5e6aSMarc Zyngier 		return false;
32807c8c5e6aSMarc Zyngier 	}
32817c8c5e6aSMarc Zyngier }
32827c8c5e6aSMarc Zyngier 
/* Translate a userspace register ID into its table descriptor, if any. */
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	return index_to_params(id, &params) ? find_reg(&params, table, num) : NULL;
}
32944b927b94SVijaya Kumar K 
32957c8c5e6aSMarc Zyngier /* Decode an index value, and find the sys_reg_desc entry. */
/* Returns NULL for registers userspace cannot touch: non-SYSREG IDs, and
 * entries without storage/accessor or hidden by runtime configuration. */
3296ba23aec9SMarc Zyngier static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu * vcpu,u64 id,const struct sys_reg_desc table[],unsigned int num)3297ba23aec9SMarc Zyngier id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
3298ba23aec9SMarc Zyngier 		   const struct sys_reg_desc table[], unsigned int num)
3299ba23aec9SMarc Zyngier 
33007c8c5e6aSMarc Zyngier {
3301dcaffa7bSJames Morse 	const struct sys_reg_desc *r;
33027c8c5e6aSMarc Zyngier 
33037c8c5e6aSMarc Zyngier 	/* We only do sys_reg for now. */
33047c8c5e6aSMarc Zyngier 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
33057c8c5e6aSMarc Zyngier 		return NULL;
33067c8c5e6aSMarc Zyngier 
3307ba23aec9SMarc Zyngier 	r = get_reg_by_id(id, table, num);
33087c8c5e6aSMarc Zyngier 
330993390c0aSDave Martin 	/* Not saved in the sys_reg array and not otherwise accessible? */
3310ba23aec9SMarc Zyngier 	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
33117c8c5e6aSMarc Zyngier 		r = NULL;
33127c8c5e6aSMarc Zyngier 
33137c8c5e6aSMarc Zyngier 	return r;
33147c8c5e6aSMarc Zyngier }
33157c8c5e6aSMarc Zyngier 
33167c8c5e6aSMarc Zyngier /*
33177c8c5e6aSMarc Zyngier  * These are the invariant sys_reg registers: we let the guest see the
33187c8c5e6aSMarc Zyngier  * host versions of these, so they're part of the guest state.
33197c8c5e6aSMarc Zyngier  *
33207c8c5e6aSMarc Zyngier  * A future CPU may provide a mechanism to present different values to
33217c8c5e6aSMarc Zyngier  * the guest, or a future kvm may trap them.
33227c8c5e6aSMarc Zyngier  */
33237c8c5e6aSMarc Zyngier 
/*
 * Generate a get_<reg> helper that reads the host register and caches
 * the value in the (const-cast) descriptor's ->val before returning it.
 */
33247c8c5e6aSMarc Zyngier #define FUNCTION_INVARIANT(reg)						\
3325d86cde6eSJing Zhang 	static u64 get_##reg(struct kvm_vcpu *v,			\
33267c8c5e6aSMarc Zyngier 			      const struct sys_reg_desc *r)		\
33277c8c5e6aSMarc Zyngier 	{								\
33281f3d8699SMark Rutland 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
3329d86cde6eSJing Zhang 		return ((struct sys_reg_desc *)r)->val;			\
33307c8c5e6aSMarc Zyngier 	}
33317c8c5e6aSMarc Zyngier 
33327c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)33337c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(revidr_el1)
33347c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(aidr_el1)
33357c8c5e6aSMarc Zyngier 
3336d86cde6eSJing Zhang static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
3337f7f2b15cSArd Biesheuvel {
3338f7f2b15cSArd Biesheuvel 	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
3339d86cde6eSJing Zhang 	return ((struct sys_reg_desc *)r)->val;
3340f7f2b15cSArd Biesheuvel }
3341f7f2b15cSArd Biesheuvel 
33427c8c5e6aSMarc Zyngier /* ->val is filled in by kvm_sys_reg_table_init() */
/* Exposed to userspace via {get,set}_invariant_sys_reg(); a set only
 * succeeds when the written value matches the cached host value. */
33438d20bd63SSean Christopherson static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
33440d449541SMark Rutland 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
33450d449541SMark Rutland 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
33460d449541SMark Rutland 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
33470d449541SMark Rutland 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
33487c8c5e6aSMarc Zyngier };
33497c8c5e6aSMarc Zyngier 
/* Copy an invariant register's cached value out to userspace. */
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r = get_reg_by_id(id, invariant_sys_regs,
						     ARRAY_SIZE(invariant_sys_regs));

	return r ? put_user(r->val, uaddr) : -ENOENT;
}
33617c8c5e6aSMarc Zyngier 
/*
 * "Setting" an invariant register only succeeds when the value written
 * by userspace matches the host's; anything else is -EINVAL.
 */
static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	return (r->val == val) ? 0 : -EINVAL;
}
33817c8c5e6aSMarc Zyngier 
/*
 * demux_c15_get - KVM_GET_ONE_REG handler for demuxed (value-indexed)
 * registers; only CSSELR-indexed CCSIDR values are currently supported.
 */
demux_c15_get(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)33827af0c253SAkihiko Odaki static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
33837c8c5e6aSMarc Zyngier {
33847c8c5e6aSMarc Zyngier 	u32 val;
33857c8c5e6aSMarc Zyngier 	u32 __user *uval = uaddr;
33867c8c5e6aSMarc Zyngier 
33877c8c5e6aSMarc Zyngier 	/* Fail if we have unknown bits set. */
33887c8c5e6aSMarc Zyngier 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
33897c8c5e6aSMarc Zyngier 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
33907c8c5e6aSMarc Zyngier 		return -ENOENT;
33917c8c5e6aSMarc Zyngier 
33927c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
33937c8c5e6aSMarc Zyngier 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
33947c8c5e6aSMarc Zyngier 		if (KVM_REG_SIZE(id) != 4)
33957c8c5e6aSMarc Zyngier 			return -ENOENT;
33967c8c5e6aSMarc Zyngier 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
33977c8c5e6aSMarc Zyngier 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
33987af0c253SAkihiko Odaki 		if (val >= CSSELR_MAX)
33997c8c5e6aSMarc Zyngier 			return -ENOENT;
34007c8c5e6aSMarc Zyngier 
34017af0c253SAkihiko Odaki 		return put_user(get_ccsidr(vcpu, val), uval);
34027c8c5e6aSMarc Zyngier 	default:
34037c8c5e6aSMarc Zyngier 		return -ENOENT;
34047c8c5e6aSMarc Zyngier 	}
34057c8c5e6aSMarc Zyngier }
34067c8c5e6aSMarc Zyngier 
/*
 * demux_c15_set - KVM_SET_ONE_REG counterpart of demux_c15_get(); the
 * validation mirrors the get path, then set_ccsidr() stores the value.
 */
demux_c15_set(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)34077af0c253SAkihiko Odaki static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
34087c8c5e6aSMarc Zyngier {
34097c8c5e6aSMarc Zyngier 	u32 val, newval;
34107c8c5e6aSMarc Zyngier 	u32 __user *uval = uaddr;
34117c8c5e6aSMarc Zyngier 
34127c8c5e6aSMarc Zyngier 	/* Fail if we have unknown bits set. */
34137c8c5e6aSMarc Zyngier 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
34147c8c5e6aSMarc Zyngier 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
34157c8c5e6aSMarc Zyngier 		return -ENOENT;
34167c8c5e6aSMarc Zyngier 
34177c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
34187c8c5e6aSMarc Zyngier 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
34197c8c5e6aSMarc Zyngier 		if (KVM_REG_SIZE(id) != 4)
34207c8c5e6aSMarc Zyngier 			return -ENOENT;
34217c8c5e6aSMarc Zyngier 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
34227c8c5e6aSMarc Zyngier 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
34237af0c253SAkihiko Odaki 		if (val >= CSSELR_MAX)
34247c8c5e6aSMarc Zyngier 			return -ENOENT;
34257c8c5e6aSMarc Zyngier 
34267c8c5e6aSMarc Zyngier 		if (get_user(newval, uval))
34277c8c5e6aSMarc Zyngier 			return -EFAULT;
34287c8c5e6aSMarc Zyngier 
34297af0c253SAkihiko Odaki 		return set_ccsidr(vcpu, val, newval);
34307c8c5e6aSMarc Zyngier 	default:
34317c8c5e6aSMarc Zyngier 		return -ENOENT;
34327c8c5e6aSMarc Zyngier 	}
34337c8c5e6aSMarc Zyngier }
34347c8c5e6aSMarc Zyngier 
kvm_sys_reg_get_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)3435ba23aec9SMarc Zyngier int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3436ba23aec9SMarc Zyngier 			 const struct sys_reg_desc table[], unsigned int num)
3437ba23aec9SMarc Zyngier {
3438978ceeb3SMarc Zyngier 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3439ba23aec9SMarc Zyngier 	const struct sys_reg_desc *r;
3440978ceeb3SMarc Zyngier 	u64 val;
3441978ceeb3SMarc Zyngier 	int ret;
3442ba23aec9SMarc Zyngier 
3443ba23aec9SMarc Zyngier 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3444e6b367dbSMarc Zyngier 	if (!r || sysreg_hidden_user(vcpu, r))
3445ba23aec9SMarc Zyngier 		return -ENOENT;
3446ba23aec9SMarc Zyngier 
3447978ceeb3SMarc Zyngier 	if (r->get_user) {
3448978ceeb3SMarc Zyngier 		ret = (r->get_user)(vcpu, r, &val);
3449978ceeb3SMarc Zyngier 	} else {
3450978ceeb3SMarc Zyngier 		val = __vcpu_sys_reg(vcpu, r->reg);
3451978ceeb3SMarc Zyngier 		ret = 0;
3452978ceeb3SMarc Zyngier 	}
3453ba23aec9SMarc Zyngier 
3454978ceeb3SMarc Zyngier 	if (!ret)
3455978ceeb3SMarc Zyngier 		ret = put_user(val, uaddr);
3456978ceeb3SMarc Zyngier 
3457978ceeb3SMarc Zyngier 	return ret;
3458ba23aec9SMarc Zyngier }
3459ba23aec9SMarc Zyngier 
kvm_arm_sys_reg_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)34607c8c5e6aSMarc Zyngier int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
34617c8c5e6aSMarc Zyngier {
34627c8c5e6aSMarc Zyngier 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
34631deeffb5SMarc Zyngier 	int err;
34647c8c5e6aSMarc Zyngier 
34657c8c5e6aSMarc Zyngier 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
34667af0c253SAkihiko Odaki 		return demux_c15_get(vcpu, reg->id, uaddr);
34677c8c5e6aSMarc Zyngier 
34681deeffb5SMarc Zyngier 	err = get_invariant_sys_reg(reg->id, uaddr);
34691deeffb5SMarc Zyngier 	if (err != -ENOENT)
34701deeffb5SMarc Zyngier 		return err;
34717c8c5e6aSMarc Zyngier 
3472ba23aec9SMarc Zyngier 	return kvm_sys_reg_get_user(vcpu, reg,
3473ba23aec9SMarc Zyngier 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3474ba23aec9SMarc Zyngier }
34757c8c5e6aSMarc Zyngier 
kvm_sys_reg_set_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)3476ba23aec9SMarc Zyngier int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3477ba23aec9SMarc Zyngier 			 const struct sys_reg_desc table[], unsigned int num)
3478ba23aec9SMarc Zyngier {
3479978ceeb3SMarc Zyngier 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3480ba23aec9SMarc Zyngier 	const struct sys_reg_desc *r;
3481978ceeb3SMarc Zyngier 	u64 val;
3482978ceeb3SMarc Zyngier 	int ret;
3483978ceeb3SMarc Zyngier 
3484978ceeb3SMarc Zyngier 	if (get_user(val, uaddr))
3485978ceeb3SMarc Zyngier 		return -EFAULT;
3486ba23aec9SMarc Zyngier 
3487ba23aec9SMarc Zyngier 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3488e6b367dbSMarc Zyngier 	if (!r || sysreg_hidden_user(vcpu, r))
34897f34e409SDave Martin 		return -ENOENT;
34907f34e409SDave Martin 
34914de06e4cSOliver Upton 	if (sysreg_user_write_ignore(vcpu, r))
34924de06e4cSOliver Upton 		return 0;
34934de06e4cSOliver Upton 
3494978ceeb3SMarc Zyngier 	if (r->set_user) {
3495978ceeb3SMarc Zyngier 		ret = (r->set_user)(vcpu, r, val);
3496978ceeb3SMarc Zyngier 	} else {
3497978ceeb3SMarc Zyngier 		__vcpu_sys_reg(vcpu, r->reg) = val;
3498978ceeb3SMarc Zyngier 		ret = 0;
3499978ceeb3SMarc Zyngier 	}
350084e690bfSAlex Bennée 
3501978ceeb3SMarc Zyngier 	return ret;
35027c8c5e6aSMarc Zyngier }
35037c8c5e6aSMarc Zyngier 
kvm_arm_sys_reg_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)35047c8c5e6aSMarc Zyngier int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
35057c8c5e6aSMarc Zyngier {
35067c8c5e6aSMarc Zyngier 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
35071deeffb5SMarc Zyngier 	int err;
35087c8c5e6aSMarc Zyngier 
35097c8c5e6aSMarc Zyngier 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
35107af0c253SAkihiko Odaki 		return demux_c15_set(vcpu, reg->id, uaddr);
35117c8c5e6aSMarc Zyngier 
35121deeffb5SMarc Zyngier 	err = set_invariant_sys_reg(reg->id, uaddr);
35131deeffb5SMarc Zyngier 	if (err != -ENOENT)
35141deeffb5SMarc Zyngier 		return err;
35157c8c5e6aSMarc Zyngier 
3516ba23aec9SMarc Zyngier 	return kvm_sys_reg_set_user(vcpu, reg,
3517ba23aec9SMarc Zyngier 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
35187c8c5e6aSMarc Zyngier }
35197c8c5e6aSMarc Zyngier 
num_demux_regs(void)35207c8c5e6aSMarc Zyngier static unsigned int num_demux_regs(void)
35217c8c5e6aSMarc Zyngier {
35227af0c253SAkihiko Odaki 	return CSSELR_MAX;
35237c8c5e6aSMarc Zyngier }
35247c8c5e6aSMarc Zyngier 
/*
 * Copy the ids of all demuxed (CCSIDR) registers to the userspace array
 * at @uindices. Returns 0 on success or -EFAULT on a failed copy-out.
 */
static int write_demux_regids(u64 __user *uindices)
{
	u64 base = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
		   KVM_REG_ARM_DEMUX_ID_CCSIDR;
	unsigned int i;

	for (i = 0; i < CSSELR_MAX; i++, uindices++) {
		if (put_user(base | i, uindices))
			return -EFAULT;
	}

	return 0;
}
35387c8c5e6aSMarc Zyngier 
sys_reg_to_index(const struct sys_reg_desc * reg)35397c8c5e6aSMarc Zyngier static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
35407c8c5e6aSMarc Zyngier {
35417c8c5e6aSMarc Zyngier 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
35427c8c5e6aSMarc Zyngier 		KVM_REG_ARM64_SYSREG |
35437c8c5e6aSMarc Zyngier 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
35447c8c5e6aSMarc Zyngier 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
35457c8c5e6aSMarc Zyngier 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
35467c8c5e6aSMarc Zyngier 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
35477c8c5e6aSMarc Zyngier 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
35487c8c5e6aSMarc Zyngier }
35497c8c5e6aSMarc Zyngier 
copy_reg_to_user(const struct sys_reg_desc * reg,u64 __user ** uind)35507c8c5e6aSMarc Zyngier static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
35517c8c5e6aSMarc Zyngier {
35527c8c5e6aSMarc Zyngier 	if (!*uind)
35537c8c5e6aSMarc Zyngier 		return true;
35547c8c5e6aSMarc Zyngier 
35557c8c5e6aSMarc Zyngier 	if (put_user(sys_reg_to_index(reg), *uind))
35567c8c5e6aSMarc Zyngier 		return false;
35577c8c5e6aSMarc Zyngier 
35587c8c5e6aSMarc Zyngier 	(*uind)++;
35597c8c5e6aSMarc Zyngier 	return true;
35607c8c5e6aSMarc Zyngier }
35617c8c5e6aSMarc Zyngier 
/*
 * Consider a single descriptor during a register walk: skip registers
 * userspace cannot access, otherwise emit the id (when *uind is non-NULL)
 * and bump the running count. Returns 0 or -EFAULT.
 */
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!rd->reg && !rd->get_user)
		return 0;

	if (sysreg_hidden_user(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
358393390c0aSDave Martin 
35847c8c5e6aSMarc Zyngier /* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	unsigned int total = 0;
	unsigned int i;
	int err;

	/*
	 * Walk every descriptor; with a NULL cursor this only counts the
	 * user-visible registers, otherwise it also emits their ids.
	 */
	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		err = walk_one_sys_reg(vcpu, &sys_reg_descs[i], &uind, &total);
		if (err)
			return err;
	}

	return total;
}
36017c8c5e6aSMarc Zyngier 
kvm_arm_num_sys_reg_descs(struct kvm_vcpu * vcpu)36027c8c5e6aSMarc Zyngier unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
36037c8c5e6aSMarc Zyngier {
36047c8c5e6aSMarc Zyngier 	return ARRAY_SIZE(invariant_sys_regs)
36057c8c5e6aSMarc Zyngier 		+ num_demux_regs()
36067c8c5e6aSMarc Zyngier 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
36077c8c5e6aSMarc Zyngier }
36087c8c5e6aSMarc Zyngier 
/*
 * Fill the userspace array for KVM_GET_REG_LIST: invariant registers
 * first, then the walkable sys_reg_descs entries, then the demuxed
 * CCSIDR ids. Returns 0 or a negative error.
 */
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	/* walk_sys_regs() returns the number of entries it emitted. */
	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
36287c8c5e6aSMarc Zyngier 
kvm_sys_reg_table_init(void)36298d20bd63SSean Christopherson int __init kvm_sys_reg_table_init(void)
36307c8c5e6aSMarc Zyngier {
363147334146SJing Zhang 	struct sys_reg_params params;
3632f1f0c0cfSAlexandru Elisei 	bool valid = true;
36337c8c5e6aSMarc Zyngier 	unsigned int i;
36347c8c5e6aSMarc Zyngier 
36357c8c5e6aSMarc Zyngier 	/* Make sure tables are unique and in order. */
3636f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
3637f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
3638f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
3639f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
3640f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
3641f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
3642f1f0c0cfSAlexandru Elisei 
3643f1f0c0cfSAlexandru Elisei 	if (!valid)
3644f1f0c0cfSAlexandru Elisei 		return -EINVAL;
36457c8c5e6aSMarc Zyngier 
36467c8c5e6aSMarc Zyngier 	/* We abuse the reset function to overwrite the table itself. */
36477c8c5e6aSMarc Zyngier 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
36487c8c5e6aSMarc Zyngier 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
36497c8c5e6aSMarc Zyngier 
365047334146SJing Zhang 	/* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
365147334146SJing Zhang 	params = encoding_to_params(SYS_ID_PFR0_EL1);
365247334146SJing Zhang 	first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
365347334146SJing Zhang 	if (!first_idreg)
365447334146SJing Zhang 		return -EINVAL;
365547334146SJing Zhang 
3656e58ec47bSMarc Zyngier 	if (kvm_get_mode() == KVM_MODE_NV)
3657e58ec47bSMarc Zyngier 		return populate_nv_trap_config();
3658e58ec47bSMarc Zyngier 
3659f1f0c0cfSAlexandru Elisei 	return 0;
36607c8c5e6aSMarc Zyngier }
3661