xref: /openbmc/linux/arch/arm64/kvm/sys_regs.c (revision fbff560682323dc171c89b4821308af47f166a8f)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
27c8c5e6aSMarc Zyngier /*
37c8c5e6aSMarc Zyngier  * Copyright (C) 2012,2013 - ARM Ltd
47c8c5e6aSMarc Zyngier  * Author: Marc Zyngier <marc.zyngier@arm.com>
57c8c5e6aSMarc Zyngier  *
67c8c5e6aSMarc Zyngier  * Derived from arch/arm/kvm/coproc.c:
77c8c5e6aSMarc Zyngier  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
87c8c5e6aSMarc Zyngier  * Authors: Rusty Russell <rusty@rustcorp.com.au>
97c8c5e6aSMarc Zyngier  *          Christoffer Dall <c.dall@virtualopensystems.com>
107c8c5e6aSMarc Zyngier  */
117c8c5e6aSMarc Zyngier 
12c8857935SMarc Zyngier #include <linux/bitfield.h>
13623eefa8SMarc Zyngier #include <linux/bsearch.h>
147af0c253SAkihiko Odaki #include <linux/cacheinfo.h>
157c8c5e6aSMarc Zyngier #include <linux/kvm_host.h>
16c6d01a94SMark Rutland #include <linux/mm.h>
1707d79fe7SDave Martin #include <linux/printk.h>
187c8c5e6aSMarc Zyngier #include <linux/uaccess.h>
19c6d01a94SMark Rutland 
207c8c5e6aSMarc Zyngier #include <asm/cacheflush.h>
217c8c5e6aSMarc Zyngier #include <asm/cputype.h>
220c557ed4SMarc Zyngier #include <asm/debug-monitors.h>
23c6d01a94SMark Rutland #include <asm/esr.h>
24c6d01a94SMark Rutland #include <asm/kvm_arm.h>
25c6d01a94SMark Rutland #include <asm/kvm_emulate.h>
26d47533daSChristoffer Dall #include <asm/kvm_hyp.h>
27c6d01a94SMark Rutland #include <asm/kvm_mmu.h>
286ff9dc23SJintack Lim #include <asm/kvm_nested.h>
29ab946834SShannon Zhao #include <asm/perf_event.h>
301f3d8699SMark Rutland #include <asm/sysreg.h>
31c6d01a94SMark Rutland 
327c8c5e6aSMarc Zyngier #include <trace/events/kvm.h>
337c8c5e6aSMarc Zyngier 
347c8c5e6aSMarc Zyngier #include "sys_regs.h"
357c8c5e6aSMarc Zyngier 
36eef8c85aSAlex Bennée #include "trace.h"
37eef8c85aSAlex Bennée 
387c8c5e6aSMarc Zyngier /*
3962a89c44SMarc Zyngier  * For AArch32, we only take care of what is being trapped. Anything
4062a89c44SMarc Zyngier  * that has to do with init and userspace access has to go via the
4162a89c44SMarc Zyngier  * 64-bit interface.
427c8c5e6aSMarc Zyngier  */
437c8c5e6aSMarc Zyngier 
44f24adc65SOliver Upton static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
45f24adc65SOliver Upton 
467b5b4df1SMarc Zyngier static bool read_from_write_only(struct kvm_vcpu *vcpu,
47e7f1d1eeSMarc Zyngier 				 struct sys_reg_params *params,
48e7f1d1eeSMarc Zyngier 				 const struct sys_reg_desc *r)
497b5b4df1SMarc Zyngier {
507b5b4df1SMarc Zyngier 	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
517b5b4df1SMarc Zyngier 	print_sys_reg_instr(params);
527b5b4df1SMarc Zyngier 	kvm_inject_undefined(vcpu);
537b5b4df1SMarc Zyngier 	return false;
547b5b4df1SMarc Zyngier }
557b5b4df1SMarc Zyngier 
567b1dba1fSMarc Zyngier static bool write_to_read_only(struct kvm_vcpu *vcpu,
577b1dba1fSMarc Zyngier 			       struct sys_reg_params *params,
587b1dba1fSMarc Zyngier 			       const struct sys_reg_desc *r)
597b1dba1fSMarc Zyngier {
607b1dba1fSMarc Zyngier 	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
617b1dba1fSMarc Zyngier 	print_sys_reg_instr(params);
627b1dba1fSMarc Zyngier 	kvm_inject_undefined(vcpu);
637b1dba1fSMarc Zyngier 	return false;
647b1dba1fSMarc Zyngier }
657b1dba1fSMarc Zyngier 
667ea90bddSMarc Zyngier u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
677ea90bddSMarc Zyngier {
687ea90bddSMarc Zyngier 	u64 val = 0x8badf00d8badf00d;
697ea90bddSMarc Zyngier 
7030b6ab45SMarc Zyngier 	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
717ea90bddSMarc Zyngier 	    __vcpu_read_sys_reg_from_cpu(reg, &val))
727ea90bddSMarc Zyngier 		return val;
737ea90bddSMarc Zyngier 
747ea90bddSMarc Zyngier 	return __vcpu_sys_reg(vcpu, reg);
757ea90bddSMarc Zyngier }
767ea90bddSMarc Zyngier 
777ea90bddSMarc Zyngier void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
787ea90bddSMarc Zyngier {
7930b6ab45SMarc Zyngier 	if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
807ea90bddSMarc Zyngier 	    __vcpu_write_sys_reg_to_cpu(val, reg))
817ea90bddSMarc Zyngier 		return;
827ea90bddSMarc Zyngier 
83d47533daSChristoffer Dall 	__vcpu_sys_reg(vcpu, reg) = val;
84d47533daSChristoffer Dall }
85d47533daSChristoffer Dall 
867c8c5e6aSMarc Zyngier /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
87c73a4416SAndrew Jones #define CSSELR_MAX 14
887c8c5e6aSMarc Zyngier 
897af0c253SAkihiko Odaki /*
907af0c253SAkihiko Odaki  * Returns the minimum line size for the selected cache, expressed as
917af0c253SAkihiko Odaki  * Log2(bytes).
927af0c253SAkihiko Odaki  */
937af0c253SAkihiko Odaki static u8 get_min_cache_line_size(bool icache)
947c8c5e6aSMarc Zyngier {
957af0c253SAkihiko Odaki 	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
967af0c253SAkihiko Odaki 	u8 field;
977c8c5e6aSMarc Zyngier 
987af0c253SAkihiko Odaki 	if (icache)
997af0c253SAkihiko Odaki 		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
1007af0c253SAkihiko Odaki 	else
1017af0c253SAkihiko Odaki 		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
1027c8c5e6aSMarc Zyngier 
1037af0c253SAkihiko Odaki 	/*
1047af0c253SAkihiko Odaki 	 * Cache line size is represented as Log2(words) in CTR_EL0.
1057af0c253SAkihiko Odaki 	 * Log2(bytes) can be derived with the following:
1067af0c253SAkihiko Odaki 	 *
1077af0c253SAkihiko Odaki 	 * Log2(words) + 2 = Log2(bytes / 4) + 2
1087af0c253SAkihiko Odaki 	 * 		   = Log2(bytes) - 2 + 2
1097af0c253SAkihiko Odaki 	 * 		   = Log2(bytes)
1107af0c253SAkihiko Odaki 	 */
1117af0c253SAkihiko Odaki 	return field + 2;
1127af0c253SAkihiko Odaki }
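
/*
 * Illustrative example (values assumed, not read from any particular CPU):
 * with CTR_EL0.DminLine == 4, the smallest D-cache line is 2^4 = 16 words
 * = 64 bytes, so get_min_cache_line_size(false) returns 4 + 2 = 6, i.e.
 * Log2(64).
 */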
1137af0c253SAkihiko Odaki 
1147af0c253SAkihiko Odaki /* Which cache CCSIDR represents depends on CSSELR value. */
1157af0c253SAkihiko Odaki static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
1167af0c253SAkihiko Odaki {
1177af0c253SAkihiko Odaki 	u8 line_size;
1187af0c253SAkihiko Odaki 
1197af0c253SAkihiko Odaki 	if (vcpu->arch.ccsidr)
1207af0c253SAkihiko Odaki 		return vcpu->arch.ccsidr[csselr];
1217af0c253SAkihiko Odaki 
1227af0c253SAkihiko Odaki 	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
1237af0c253SAkihiko Odaki 
1247af0c253SAkihiko Odaki 	/*
1257af0c253SAkihiko Odaki 	 * Fabricate a CCSIDR value as the overriding value does not exist.
1267af0c253SAkihiko Odaki 	 * The real CCSIDR value will not be used as it can vary by the
1277af0c253SAkihiko Odaki 	 * The real CCSIDR value will not be used as it can vary with the
1287af0c253SAkihiko Odaki 	 * physical CPU on which the vcpu currently resides.
1297af0c253SAkihiko Odaki 	 * The line size is determined with get_min_cache_line_size(), which
1307af0c253SAkihiko Odaki 	 * should be valid for all CPUs even if they have different cache
1317af0c253SAkihiko Odaki 	 * configurations.
1327af0c253SAkihiko Odaki 	 *
1337af0c253SAkihiko Odaki 	 * The associativity bits are cleared, meaning the geometry of all data
1347af0c253SAkihiko Odaki 	 * and unified caches (which are guaranteed to be PIPT and thus
1357af0c253SAkihiko Odaki 	 * non-aliasing) is 1 set and 1 way.
1367af0c253SAkihiko Odaki 	 * Guests should not be doing cache operations by set/way at all, and
1377af0c253SAkihiko Odaki 	 * for this reason, we trap them and attempt to infer the intent, so
1387af0c253SAkihiko Odaki 	 * that we can flush the entire guest's address space at the appropriate
1397af0c253SAkihiko Odaki 	 * time. The exposed geometry minimizes the number of traps.
1407af0c253SAkihiko Odaki 	 * [If guests should attempt to infer aliasing properties from the
1417af0c253SAkihiko Odaki 	 * geometry (which is not permitted by the architecture), they would
1427af0c253SAkihiko Odaki 	 * only do so for virtually indexed caches.]
1437af0c253SAkihiko Odaki 	 *
1447af0c253SAkihiko Odaki 	 * We don't check if the cache level exists as it is allowed to return
1457af0c253SAkihiko Odaki 	 * an UNKNOWN value if not.
1467af0c253SAkihiko Odaki 	 */
1477af0c253SAkihiko Odaki 	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
1487af0c253SAkihiko Odaki }
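
/*
 * Illustrative example of the fabricated value (assuming a 64-byte minimum
 * line size, i.e. get_min_cache_line_size() == 6): the returned CCSIDR has
 * LineSize == 6 - 4 == 2 (CCSIDR_EL1.LineSize encodes Log2(bytes) - 4) and
 * every other field zero, describing a 1-set, 1-way cache.
 */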
1497af0c253SAkihiko Odaki 
1507af0c253SAkihiko Odaki static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
1517af0c253SAkihiko Odaki {
1527af0c253SAkihiko Odaki 	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
1537af0c253SAkihiko Odaki 	u32 *ccsidr = vcpu->arch.ccsidr;
1547af0c253SAkihiko Odaki 	u32 i;
1557af0c253SAkihiko Odaki 
1567af0c253SAkihiko Odaki 	if ((val & CCSIDR_EL1_RES0) ||
1577af0c253SAkihiko Odaki 	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
1587af0c253SAkihiko Odaki 		return -EINVAL;
1597af0c253SAkihiko Odaki 
1607af0c253SAkihiko Odaki 	if (!ccsidr) {
1617af0c253SAkihiko Odaki 		if (val == get_ccsidr(vcpu, csselr))
1627af0c253SAkihiko Odaki 			return 0;
1637af0c253SAkihiko Odaki 
1645f623a59SOliver Upton 		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
1657af0c253SAkihiko Odaki 		if (!ccsidr)
1667af0c253SAkihiko Odaki 			return -ENOMEM;
1677af0c253SAkihiko Odaki 
1687af0c253SAkihiko Odaki 		for (i = 0; i < CSSELR_MAX; i++)
1697af0c253SAkihiko Odaki 			ccsidr[i] = get_ccsidr(vcpu, i);
1707af0c253SAkihiko Odaki 
1717af0c253SAkihiko Odaki 		vcpu->arch.ccsidr = ccsidr;
1727af0c253SAkihiko Odaki 	}
1737af0c253SAkihiko Odaki 
1747af0c253SAkihiko Odaki 	ccsidr[csselr] = val;
1757af0c253SAkihiko Odaki 
1767af0c253SAkihiko Odaki 	return 0;
1777c8c5e6aSMarc Zyngier }
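
/*
 * Sketch of the resulting behaviour (hypothetical values): the first
 * userspace write that differs from the fabricated value, e.g.
 * set_ccsidr(vcpu, 0, val) with a larger-than-minimum LineSize, allocates
 * the override array, seeds it via get_ccsidr() for every CSSELR value and
 * only then stores the new value; writes equal to the current value stay
 * allocation-free.
 */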
1787c8c5e6aSMarc Zyngier 
1796ff9dc23SJintack Lim static bool access_rw(struct kvm_vcpu *vcpu,
1806ff9dc23SJintack Lim 		      struct sys_reg_params *p,
1816ff9dc23SJintack Lim 		      const struct sys_reg_desc *r)
1826ff9dc23SJintack Lim {
1836ff9dc23SJintack Lim 	if (p->is_write)
1846ff9dc23SJintack Lim 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
1856ff9dc23SJintack Lim 	else
1866ff9dc23SJintack Lim 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
1876ff9dc23SJintack Lim 
1886ff9dc23SJintack Lim 	return true;
1896ff9dc23SJintack Lim }
1906ff9dc23SJintack Lim 
1913c1e7165SMarc Zyngier /*
1923c1e7165SMarc Zyngier  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1933c1e7165SMarc Zyngier  */
1947c8c5e6aSMarc Zyngier static bool access_dcsw(struct kvm_vcpu *vcpu,
1953fec037dSPavel Fedin 			struct sys_reg_params *p,
1967c8c5e6aSMarc Zyngier 			const struct sys_reg_desc *r)
1977c8c5e6aSMarc Zyngier {
1987c8c5e6aSMarc Zyngier 	if (!p->is_write)
199e7f1d1eeSMarc Zyngier 		return read_from_write_only(vcpu, p, r);
2007c8c5e6aSMarc Zyngier 
20109605e94SMarc Zyngier 	/*
20209605e94SMarc Zyngier 	 * Only track S/W ops if we don't have FWB. It still indicates
20309605e94SMarc Zyngier 	 * that the guest is a bit broken (S/W operations should only
20409605e94SMarc Zyngier 	 * be done by firmware, knowing that there is only a single
20509605e94SMarc Zyngier 	 * CPU left in the system, and certainly not from non-secure
20609605e94SMarc Zyngier 	 * software).
20709605e94SMarc Zyngier 	 */
20809605e94SMarc Zyngier 	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
2093c1e7165SMarc Zyngier 		kvm_set_way_flush(vcpu);
21009605e94SMarc Zyngier 
2117c8c5e6aSMarc Zyngier 	return true;
2127c8c5e6aSMarc Zyngier }
2137c8c5e6aSMarc Zyngier 
214b1ea1d76SMarc Zyngier static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
215b1ea1d76SMarc Zyngier {
216b1ea1d76SMarc Zyngier 	switch (r->aarch32_map) {
217b1ea1d76SMarc Zyngier 	case AA32_LO:
218b1ea1d76SMarc Zyngier 		*mask = GENMASK_ULL(31, 0);
219b1ea1d76SMarc Zyngier 		*shift = 0;
220b1ea1d76SMarc Zyngier 		break;
221b1ea1d76SMarc Zyngier 	case AA32_HI:
222b1ea1d76SMarc Zyngier 		*mask = GENMASK_ULL(63, 32);
223b1ea1d76SMarc Zyngier 		*shift = 32;
224b1ea1d76SMarc Zyngier 		break;
225b1ea1d76SMarc Zyngier 	default:
226b1ea1d76SMarc Zyngier 		*mask = GENMASK_ULL(63, 0);
227b1ea1d76SMarc Zyngier 		*shift = 0;
228b1ea1d76SMarc Zyngier 		break;
229b1ea1d76SMarc Zyngier 	}
230b1ea1d76SMarc Zyngier }
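
/*
 * For example (illustrative only): a descriptor marked AA32_HI yields
 * mask == GENMASK_ULL(63, 32) and shift == 32, so an AArch32 access only
 * touches the top half of the backing 64-bit sysreg; the default case
 * covers the whole register.
 */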
231b1ea1d76SMarc Zyngier 
2327c8c5e6aSMarc Zyngier /*
2334d44923bSMarc Zyngier  * Generic accessor for VM registers. Only called as long as HCR_TVM
2343c1e7165SMarc Zyngier  * is set. If the guest enables the MMU, we stop trapping the VM
2353c1e7165SMarc Zyngier  * sys_regs and leave it in complete control of the caches.
2364d44923bSMarc Zyngier  */
2374d44923bSMarc Zyngier static bool access_vm_reg(struct kvm_vcpu *vcpu,
2383fec037dSPavel Fedin 			  struct sys_reg_params *p,
2394d44923bSMarc Zyngier 			  const struct sys_reg_desc *r)
2404d44923bSMarc Zyngier {
2413c1e7165SMarc Zyngier 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
242b1ea1d76SMarc Zyngier 	u64 val, mask, shift;
2434d44923bSMarc Zyngier 
2444d44923bSMarc Zyngier 	BUG_ON(!p->is_write);
2454d44923bSMarc Zyngier 
246b1ea1d76SMarc Zyngier 	get_access_mask(r, &mask, &shift);
24752f6c4f0SChristoffer Dall 
248b1ea1d76SMarc Zyngier 	if (~mask) {
249b1ea1d76SMarc Zyngier 		val = vcpu_read_sys_reg(vcpu, r->reg);
250b1ea1d76SMarc Zyngier 		val &= ~mask;
251dedf97e8SMarc Zyngier 	} else {
252b1ea1d76SMarc Zyngier 		val = 0;
253dedf97e8SMarc Zyngier 	}
254b1ea1d76SMarc Zyngier 
255b1ea1d76SMarc Zyngier 	val |= (p->regval & (mask >> shift)) << shift;
256b1ea1d76SMarc Zyngier 	vcpu_write_sys_reg(vcpu, val, r->reg);
257f0a3eaffSVictor Kamensky 
2583c1e7165SMarc Zyngier 	kvm_toggle_cache(vcpu, was_enabled);
2594d44923bSMarc Zyngier 	return true;
2604d44923bSMarc Zyngier }
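
/*
 * Worked example of the masked update above (assumed AA32_HI mapping,
 * i.e. mask == GENMASK_ULL(63, 32), shift == 32):
 *
 *	val = (old & ~mask) | ((p->regval & 0xffffffffUL) << 32);
 *
 * The guest's 32-bit write lands in bits [63:32] while bits [31:0] of the
 * 64-bit register are preserved.
 */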
2614d44923bSMarc Zyngier 
262af473829SJames Morse static bool access_actlr(struct kvm_vcpu *vcpu,
263af473829SJames Morse 			 struct sys_reg_params *p,
264af473829SJames Morse 			 const struct sys_reg_desc *r)
265af473829SJames Morse {
266b1ea1d76SMarc Zyngier 	u64 mask, shift;
267b1ea1d76SMarc Zyngier 
268af473829SJames Morse 	if (p->is_write)
269af473829SJames Morse 		return ignore_write(vcpu, p);
270af473829SJames Morse 
271b1ea1d76SMarc Zyngier 	get_access_mask(r, &mask, &shift);
272b1ea1d76SMarc Zyngier 	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
273af473829SJames Morse 
274af473829SJames Morse 	return true;
275af473829SJames Morse }
276af473829SJames Morse 
2776d52f35aSAndre Przywara /*
2786d52f35aSAndre Przywara  * Trap handler for the GICv3 SGI generation system register.
2796d52f35aSAndre Przywara  * Forward the request to the VGIC emulation.
2806d52f35aSAndre Przywara  * The cp15_64 code makes sure this automatically works
2816d52f35aSAndre Przywara  * for both AArch64 and AArch32 accesses.
2826d52f35aSAndre Przywara  */
2836d52f35aSAndre Przywara static bool access_gic_sgi(struct kvm_vcpu *vcpu,
2843fec037dSPavel Fedin 			   struct sys_reg_params *p,
2856d52f35aSAndre Przywara 			   const struct sys_reg_desc *r)
2866d52f35aSAndre Przywara {
28703bd646dSMarc Zyngier 	bool g1;
28803bd646dSMarc Zyngier 
2896d52f35aSAndre Przywara 	if (!p->is_write)
290e7f1d1eeSMarc Zyngier 		return read_from_write_only(vcpu, p, r);
2916d52f35aSAndre Przywara 
29203bd646dSMarc Zyngier 	/*
29303bd646dSMarc Zyngier 	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
29403bd646dSMarc Zyngier 	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
29503bd646dSMarc Zyngier 	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
29603bd646dSMarc Zyngier 	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
29703bd646dSMarc Zyngier 	 * group.
29803bd646dSMarc Zyngier 	 */
29950f30453SMarc Zyngier 	if (p->Op0 == 0) {		/* AArch32 */
30003bd646dSMarc Zyngier 		switch (p->Op1) {
30103bd646dSMarc Zyngier 		default:		/* Keep GCC quiet */
30203bd646dSMarc Zyngier 		case 0:			/* ICC_SGI1R */
30303bd646dSMarc Zyngier 			g1 = true;
30403bd646dSMarc Zyngier 			break;
30503bd646dSMarc Zyngier 		case 1:			/* ICC_ASGI1R */
30603bd646dSMarc Zyngier 		case 2:			/* ICC_SGI0R */
30703bd646dSMarc Zyngier 			g1 = false;
30803bd646dSMarc Zyngier 			break;
30903bd646dSMarc Zyngier 		}
31050f30453SMarc Zyngier 	} else {			/* AArch64 */
31103bd646dSMarc Zyngier 		switch (p->Op2) {
31203bd646dSMarc Zyngier 		default:		/* Keep GCC quiet */
31303bd646dSMarc Zyngier 		case 5:			/* ICC_SGI1R_EL1 */
31403bd646dSMarc Zyngier 			g1 = true;
31503bd646dSMarc Zyngier 			break;
31603bd646dSMarc Zyngier 		case 6:			/* ICC_ASGI1R_EL1 */
31703bd646dSMarc Zyngier 		case 7:			/* ICC_SGI0R_EL1 */
31803bd646dSMarc Zyngier 			g1 = false;
31903bd646dSMarc Zyngier 			break;
32003bd646dSMarc Zyngier 		}
32103bd646dSMarc Zyngier 	}
32203bd646dSMarc Zyngier 
32303bd646dSMarc Zyngier 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
3246d52f35aSAndre Przywara 
3256d52f35aSAndre Przywara 	return true;
3266d52f35aSAndre Przywara }
3276d52f35aSAndre Przywara 
328b34f2bcbSMarc Zyngier static bool access_gic_sre(struct kvm_vcpu *vcpu,
329b34f2bcbSMarc Zyngier 			   struct sys_reg_params *p,
330b34f2bcbSMarc Zyngier 			   const struct sys_reg_desc *r)
331b34f2bcbSMarc Zyngier {
332b34f2bcbSMarc Zyngier 	if (p->is_write)
333b34f2bcbSMarc Zyngier 		return ignore_write(vcpu, p);
334b34f2bcbSMarc Zyngier 
335b34f2bcbSMarc Zyngier 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
336b34f2bcbSMarc Zyngier 	return true;
337b34f2bcbSMarc Zyngier }
338b34f2bcbSMarc Zyngier 
3397609c125SMarc Zyngier static bool trap_raz_wi(struct kvm_vcpu *vcpu,
3403fec037dSPavel Fedin 			struct sys_reg_params *p,
3417c8c5e6aSMarc Zyngier 			const struct sys_reg_desc *r)
3427c8c5e6aSMarc Zyngier {
3437c8c5e6aSMarc Zyngier 	if (p->is_write)
3447c8c5e6aSMarc Zyngier 		return ignore_write(vcpu, p);
3457c8c5e6aSMarc Zyngier 	else
3467c8c5e6aSMarc Zyngier 		return read_zero(vcpu, p);
3477c8c5e6aSMarc Zyngier }
3487c8c5e6aSMarc Zyngier 
3496ff9dc23SJintack Lim static bool trap_undef(struct kvm_vcpu *vcpu,
3506ff9dc23SJintack Lim 		       struct sys_reg_params *p,
3516ff9dc23SJintack Lim 		       const struct sys_reg_desc *r)
3526ff9dc23SJintack Lim {
3536ff9dc23SJintack Lim 	kvm_inject_undefined(vcpu);
3546ff9dc23SJintack Lim 	return false;
3556ff9dc23SJintack Lim }
3566ff9dc23SJintack Lim 
35722925521SMarc Zyngier /*
35822925521SMarc Zyngier  * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
35922925521SMarc Zyngier  * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
36022925521SMarc Zyngier  * system, these registers should UNDEF. LORID_EL1 being a RO register, we
36122925521SMarc Zyngier  * treat it separately.
36222925521SMarc Zyngier  */
36322925521SMarc Zyngier static bool trap_loregion(struct kvm_vcpu *vcpu,
364cc33c4e2SMark Rutland 			  struct sys_reg_params *p,
365cc33c4e2SMark Rutland 			  const struct sys_reg_desc *r)
366cc33c4e2SMark Rutland {
36722925521SMarc Zyngier 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
3687ba8b438SAlexandru Elisei 	u32 sr = reg_to_encoding(r);
36922925521SMarc Zyngier 
3706fcd0193SKristina Martsenko 	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
371cc33c4e2SMark Rutland 		kvm_inject_undefined(vcpu);
372cc33c4e2SMark Rutland 		return false;
373cc33c4e2SMark Rutland 	}
374cc33c4e2SMark Rutland 
37522925521SMarc Zyngier 	if (p->is_write && sr == SYS_LORID_EL1)
37622925521SMarc Zyngier 		return write_to_read_only(vcpu, p, r);
37722925521SMarc Zyngier 
37822925521SMarc Zyngier 	return trap_raz_wi(vcpu, p, r);
37922925521SMarc Zyngier }
38022925521SMarc Zyngier 
381f24adc65SOliver Upton static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
382f24adc65SOliver Upton 			   struct sys_reg_params *p,
383f24adc65SOliver Upton 			   const struct sys_reg_desc *r)
384f24adc65SOliver Upton {
385f24adc65SOliver Upton 	u64 oslsr;
386f24adc65SOliver Upton 
387f24adc65SOliver Upton 	if (!p->is_write)
388f24adc65SOliver Upton 		return read_from_write_only(vcpu, p, r);
389f24adc65SOliver Upton 
390f24adc65SOliver Upton 	/* Forward the OSLK bit to OSLSR */
391f24adc65SOliver Upton 	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
392f24adc65SOliver Upton 	if (p->regval & SYS_OSLAR_OSLK)
393f24adc65SOliver Upton 		oslsr |= SYS_OSLSR_OSLK;
394f24adc65SOliver Upton 
395f24adc65SOliver Upton 	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
396f24adc65SOliver Upton 	return true;
397f24adc65SOliver Upton }
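
/*
 * Example of the forwarding above (illustrative): a guest "msr oslar_el1,
 * x0" with bit 0 (OSLK) set in x0 leaves the shadow OSLSR_EL1 with
 * OSLK == 1, so a later read of OSLSR_EL1 reports the OS lock as taken;
 * writing OSLK == 0 clears it again.
 */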
398f24adc65SOliver Upton 
3990c557ed4SMarc Zyngier static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
4003fec037dSPavel Fedin 			   struct sys_reg_params *p,
4010c557ed4SMarc Zyngier 			   const struct sys_reg_desc *r)
4020c557ed4SMarc Zyngier {
403d42e2671SOliver Upton 	if (p->is_write)
404e2ffceaaSOliver Upton 		return write_to_read_only(vcpu, p, r);
405d42e2671SOliver Upton 
406d42e2671SOliver Upton 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
4070c557ed4SMarc Zyngier 	return true;
4080c557ed4SMarc Zyngier }
409d42e2671SOliver Upton 
410d42e2671SOliver Upton static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
411978ceeb3SMarc Zyngier 			 u64 val)
412d42e2671SOliver Upton {
413f24adc65SOliver Upton 	/*
414f24adc65SOliver Upton 	 * The only modifiable bit is the OSLK bit. Refuse the write if
415f24adc65SOliver Upton 	 * userspace attempts to change any other bit in the register.
416f24adc65SOliver Upton 	 */
417f24adc65SOliver Upton 	if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
418d42e2671SOliver Upton 		return -EINVAL;
419d42e2671SOliver Upton 
420f24adc65SOliver Upton 	__vcpu_sys_reg(vcpu, rd->reg) = val;
421d42e2671SOliver Upton 	return 0;
4220c557ed4SMarc Zyngier }
4230c557ed4SMarc Zyngier 
4240c557ed4SMarc Zyngier static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
4253fec037dSPavel Fedin 				   struct sys_reg_params *p,
4260c557ed4SMarc Zyngier 				   const struct sys_reg_desc *r)
4270c557ed4SMarc Zyngier {
4280c557ed4SMarc Zyngier 	if (p->is_write) {
4290c557ed4SMarc Zyngier 		return ignore_write(vcpu, p);
4300c557ed4SMarc Zyngier 	} else {
4311f3d8699SMark Rutland 		p->regval = read_sysreg(dbgauthstatus_el1);
4320c557ed4SMarc Zyngier 		return true;
4330c557ed4SMarc Zyngier 	}
4340c557ed4SMarc Zyngier }
4350c557ed4SMarc Zyngier 
4360c557ed4SMarc Zyngier /*
4370c557ed4SMarc Zyngier  * We want to avoid world-switching all the DBG registers all the
4380c557ed4SMarc Zyngier  * time:
4390c557ed4SMarc Zyngier  *
4400c557ed4SMarc Zyngier  * - If we've touched any debug register, it is likely that we're
4410c557ed4SMarc Zyngier  *   going to touch more of them. It then makes sense to disable the
4420c557ed4SMarc Zyngier  *   traps and start doing the save/restore dance
4430c557ed4SMarc Zyngier  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
4440c557ed4SMarc Zyngier  *   then mandatory to save/restore the registers, as the guest
4450c557ed4SMarc Zyngier  *   depends on them.
4460c557ed4SMarc Zyngier  *
4470c557ed4SMarc Zyngier  * For this, we use a DIRTY bit, indicating the guest has modified the
4480c557ed4SMarc Zyngier  * debug registers, used as follows:
4490c557ed4SMarc Zyngier  *
4500c557ed4SMarc Zyngier  * On guest entry:
4510c557ed4SMarc Zyngier  * - If the dirty bit is set (because we're coming back from trapping),
4520c557ed4SMarc Zyngier  *   disable the traps, save host registers, restore guest registers.
4530c557ed4SMarc Zyngier  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
4540c557ed4SMarc Zyngier  *   set the dirty bit, disable the traps, save host registers,
4550c557ed4SMarc Zyngier  *   restore guest registers.
4560c557ed4SMarc Zyngier  * - Otherwise, enable the traps
4570c557ed4SMarc Zyngier  *
4580c557ed4SMarc Zyngier  * On guest exit:
4590c557ed4SMarc Zyngier  * - If the dirty bit is set, save guest registers, restore host
4600c557ed4SMarc Zyngier  *   registers and clear the dirty bit. This ensures that the host can
4610c557ed4SMarc Zyngier  *   now use the debug registers.
4620c557ed4SMarc Zyngier  */
4630c557ed4SMarc Zyngier static bool trap_debug_regs(struct kvm_vcpu *vcpu,
4643fec037dSPavel Fedin 			    struct sys_reg_params *p,
4650c557ed4SMarc Zyngier 			    const struct sys_reg_desc *r)
4660c557ed4SMarc Zyngier {
4676ff9dc23SJintack Lim 	access_rw(vcpu, p, r);
4686ff9dc23SJintack Lim 	if (p->is_write)
469b1da4908SMarc Zyngier 		vcpu_set_flag(vcpu, DEBUG_DIRTY);
4700c557ed4SMarc Zyngier 
4712ec5be3dSPavel Fedin 	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
472eef8c85aSAlex Bennée 
4730c557ed4SMarc Zyngier 	return true;
4740c557ed4SMarc Zyngier }
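
/*
 * Rough sequence under the scheme described above (illustrative): a guest
 * write to MDSCR_EL1 traps here, is stored via access_rw() and sets
 * DEBUG_DIRTY; on subsequent entries the hyp code sees the flag, disables
 * the debug traps and save/restores the full debug state until the flag is
 * cleared again.
 */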
4750c557ed4SMarc Zyngier 
47684e690bfSAlex Bennée /*
47784e690bfSAlex Bennée  * reg_to_dbg/dbg_to_reg
47884e690bfSAlex Bennée  *
47984e690bfSAlex Bennée  * A 32 bit write to a debug register leaves the top bits alone.
48084e690bfSAlex Bennée  * A 32 bit read from a debug register returns only the bottom bits.
48184e690bfSAlex Bennée  *
482b1da4908SMarc Zyngier  * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
483b1da4908SMarc Zyngier  * switches between host and guest values in the future.
48484e690bfSAlex Bennée  */
485281243cbSMarc Zyngier static void reg_to_dbg(struct kvm_vcpu *vcpu,
4863fec037dSPavel Fedin 		       struct sys_reg_params *p,
4871da42c34SMarc Zyngier 		       const struct sys_reg_desc *rd,
48884e690bfSAlex Bennée 		       u64 *dbg_reg)
48984e690bfSAlex Bennée {
4901da42c34SMarc Zyngier 	u64 mask, shift, val;
49184e690bfSAlex Bennée 
4921da42c34SMarc Zyngier 	get_access_mask(rd, &mask, &shift);
49384e690bfSAlex Bennée 
4941da42c34SMarc Zyngier 	val = *dbg_reg;
4951da42c34SMarc Zyngier 	val &= ~mask;
4961da42c34SMarc Zyngier 	val |= (p->regval & (mask >> shift)) << shift;
49784e690bfSAlex Bennée 	*dbg_reg = val;
4981da42c34SMarc Zyngier 
499b1da4908SMarc Zyngier 	vcpu_set_flag(vcpu, DEBUG_DIRTY);
50084e690bfSAlex Bennée }
50184e690bfSAlex Bennée 
502281243cbSMarc Zyngier static void dbg_to_reg(struct kvm_vcpu *vcpu,
5033fec037dSPavel Fedin 		       struct sys_reg_params *p,
5041da42c34SMarc Zyngier 		       const struct sys_reg_desc *rd,
50584e690bfSAlex Bennée 		       u64 *dbg_reg)
50684e690bfSAlex Bennée {
5071da42c34SMarc Zyngier 	u64 mask, shift;
5081da42c34SMarc Zyngier 
5091da42c34SMarc Zyngier 	get_access_mask(rd, &mask, &shift);
5101da42c34SMarc Zyngier 	p->regval = (*dbg_reg & mask) >> shift;
51184e690bfSAlex Bennée }
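
/*
 * Illustrative example (assumed AA32_LO mapping): get_access_mask() gives
 * mask == GENMASK_ULL(31, 0) and shift == 0, so reg_to_dbg() merges the
 * 32-bit guest value into the low half of the 64-bit shadow register and
 * leaves bits [63:32] untouched, while dbg_to_reg() hands back only
 * bits [31:0].
 */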
51284e690bfSAlex Bennée 
513281243cbSMarc Zyngier static bool trap_bvr(struct kvm_vcpu *vcpu,
5143fec037dSPavel Fedin 		     struct sys_reg_params *p,
51584e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
51684e690bfSAlex Bennée {
517cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
51884e690bfSAlex Bennée 
51984e690bfSAlex Bennée 	if (p->is_write)
5201da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
52184e690bfSAlex Bennée 	else
5221da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
52384e690bfSAlex Bennée 
524cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
525eef8c85aSAlex Bennée 
52684e690bfSAlex Bennée 	return true;
52784e690bfSAlex Bennée }
52884e690bfSAlex Bennée 
52984e690bfSAlex Bennée static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
530978ceeb3SMarc Zyngier 		   u64 val)
53184e690bfSAlex Bennée {
532978ceeb3SMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
53384e690bfSAlex Bennée 	return 0;
53484e690bfSAlex Bennée }
53584e690bfSAlex Bennée 
53684e690bfSAlex Bennée static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
537978ceeb3SMarc Zyngier 		   u64 *val)
53884e690bfSAlex Bennée {
539978ceeb3SMarc Zyngier 	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
54084e690bfSAlex Bennée 	return 0;
54184e690bfSAlex Bennée }
54284e690bfSAlex Bennée 
543281243cbSMarc Zyngier static void reset_bvr(struct kvm_vcpu *vcpu,
54484e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
54584e690bfSAlex Bennée {
546cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
54784e690bfSAlex Bennée }
54884e690bfSAlex Bennée 
549281243cbSMarc Zyngier static bool trap_bcr(struct kvm_vcpu *vcpu,
5503fec037dSPavel Fedin 		     struct sys_reg_params *p,
55184e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
55284e690bfSAlex Bennée {
553cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
55484e690bfSAlex Bennée 
55584e690bfSAlex Bennée 	if (p->is_write)
5561da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
55784e690bfSAlex Bennée 	else
5581da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
55984e690bfSAlex Bennée 
560cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
561eef8c85aSAlex Bennée 
56284e690bfSAlex Bennée 	return true;
56384e690bfSAlex Bennée }
56484e690bfSAlex Bennée 
56584e690bfSAlex Bennée static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
566978ceeb3SMarc Zyngier 		   u64 val)
56784e690bfSAlex Bennée {
568978ceeb3SMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
56984e690bfSAlex Bennée 	return 0;
57084e690bfSAlex Bennée }
57184e690bfSAlex Bennée 
57284e690bfSAlex Bennée static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
573978ceeb3SMarc Zyngier 		   u64 *val)
57484e690bfSAlex Bennée {
575978ceeb3SMarc Zyngier 	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
57684e690bfSAlex Bennée 	return 0;
57784e690bfSAlex Bennée }
57884e690bfSAlex Bennée 
579281243cbSMarc Zyngier static void reset_bcr(struct kvm_vcpu *vcpu,
58084e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
58184e690bfSAlex Bennée {
582cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
58384e690bfSAlex Bennée }
58484e690bfSAlex Bennée 
585281243cbSMarc Zyngier static bool trap_wvr(struct kvm_vcpu *vcpu,
5863fec037dSPavel Fedin 		     struct sys_reg_params *p,
58784e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
58884e690bfSAlex Bennée {
589cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
59084e690bfSAlex Bennée 
59184e690bfSAlex Bennée 	if (p->is_write)
5921da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
59384e690bfSAlex Bennée 	else
5941da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
59584e690bfSAlex Bennée 
596cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write,
597cb853dedSMarc Zyngier 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
598eef8c85aSAlex Bennée 
59984e690bfSAlex Bennée 	return true;
60084e690bfSAlex Bennée }
60184e690bfSAlex Bennée 
60284e690bfSAlex Bennée static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
603978ceeb3SMarc Zyngier 		   u64 val)
60484e690bfSAlex Bennée {
605978ceeb3SMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
60684e690bfSAlex Bennée 	return 0;
60784e690bfSAlex Bennée }
60884e690bfSAlex Bennée 
60984e690bfSAlex Bennée static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
610978ceeb3SMarc Zyngier 		   u64 *val)
61184e690bfSAlex Bennée {
612978ceeb3SMarc Zyngier 	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
61384e690bfSAlex Bennée 	return 0;
61484e690bfSAlex Bennée }
61584e690bfSAlex Bennée 
616281243cbSMarc Zyngier static void reset_wvr(struct kvm_vcpu *vcpu,
61784e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
61884e690bfSAlex Bennée {
619cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
62084e690bfSAlex Bennée }
62184e690bfSAlex Bennée 
622281243cbSMarc Zyngier static bool trap_wcr(struct kvm_vcpu *vcpu,
6233fec037dSPavel Fedin 		     struct sys_reg_params *p,
62484e690bfSAlex Bennée 		     const struct sys_reg_desc *rd)
62584e690bfSAlex Bennée {
626cb853dedSMarc Zyngier 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
62784e690bfSAlex Bennée 
62884e690bfSAlex Bennée 	if (p->is_write)
6291da42c34SMarc Zyngier 		reg_to_dbg(vcpu, p, rd, dbg_reg);
63084e690bfSAlex Bennée 	else
6311da42c34SMarc Zyngier 		dbg_to_reg(vcpu, p, rd, dbg_reg);
63284e690bfSAlex Bennée 
633cb853dedSMarc Zyngier 	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
634eef8c85aSAlex Bennée 
63584e690bfSAlex Bennée 	return true;
63684e690bfSAlex Bennée }
63784e690bfSAlex Bennée 
63884e690bfSAlex Bennée static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
639978ceeb3SMarc Zyngier 		   u64 val)
64084e690bfSAlex Bennée {
641978ceeb3SMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
64284e690bfSAlex Bennée 	return 0;
64384e690bfSAlex Bennée }
64484e690bfSAlex Bennée 
64584e690bfSAlex Bennée static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
646978ceeb3SMarc Zyngier 		   u64 *val)
64784e690bfSAlex Bennée {
648978ceeb3SMarc Zyngier 	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
64984e690bfSAlex Bennée 	return 0;
65084e690bfSAlex Bennée }
65184e690bfSAlex Bennée 
652281243cbSMarc Zyngier static void reset_wcr(struct kvm_vcpu *vcpu,
65384e690bfSAlex Bennée 		      const struct sys_reg_desc *rd)
65484e690bfSAlex Bennée {
655cb853dedSMarc Zyngier 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
65684e690bfSAlex Bennée }
65784e690bfSAlex Bennée 
6587c8c5e6aSMarc Zyngier static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
6597c8c5e6aSMarc Zyngier {
6608d404c4cSChristoffer Dall 	u64 amair = read_sysreg(amair_el1);
6618d404c4cSChristoffer Dall 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
6627c8c5e6aSMarc Zyngier }
6637c8c5e6aSMarc Zyngier 
664af473829SJames Morse static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
665af473829SJames Morse {
666af473829SJames Morse 	u64 actlr = read_sysreg(actlr_el1);
667af473829SJames Morse 	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
668af473829SJames Morse }
669af473829SJames Morse 
6707c8c5e6aSMarc Zyngier static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
6717c8c5e6aSMarc Zyngier {
6724429fc64SAndre Przywara 	u64 mpidr;
6734429fc64SAndre Przywara 
6747c8c5e6aSMarc Zyngier 	/*
6754429fc64SAndre Przywara 	 * Map the vcpu_id into the first three affinity level fields of
6764429fc64SAndre Przywara 	 * the MPIDR. We limit the number of VCPUs in level 0 to 16,
6774429fc64SAndre Przywara 	 * since the ICC_SGIxR registers of the GICv3 can only address
6784429fc64SAndre Przywara 	 * 16 CPUs at that level, and we want to be able to address each
6794429fc64SAndre Przywara 	 * CPU directly when sending IPIs.
6807c8c5e6aSMarc Zyngier 	 */
6814429fc64SAndre Przywara 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
6824429fc64SAndre Przywara 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
6834429fc64SAndre Przywara 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
6848d404c4cSChristoffer Dall 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
6857c8c5e6aSMarc Zyngier }
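
/*
 * Worked example (hypothetical vcpu_id): for vcpu_id == 300, Aff0 ==
 * 300 & 0xf == 12, Aff1 == (300 >> 4) & 0xff == 18 and Aff2 ==
 * (300 >> 12) & 0xff == 0, with bit 31 (RES1) set on top.
 */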
6867c8c5e6aSMarc Zyngier 
68711663111SMarc Zyngier static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
68811663111SMarc Zyngier 				   const struct sys_reg_desc *r)
68911663111SMarc Zyngier {
69011663111SMarc Zyngier 	if (kvm_vcpu_has_pmu(vcpu))
69111663111SMarc Zyngier 		return 0;
69211663111SMarc Zyngier 
69311663111SMarc Zyngier 	return REG_HIDDEN;
69411663111SMarc Zyngier }
69511663111SMarc Zyngier 
6960ab410a9SMarc Zyngier static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
6970ab410a9SMarc Zyngier {
6980ab410a9SMarc Zyngier 	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
6990ab410a9SMarc Zyngier 
7000ab410a9SMarc Zyngier 	/* No PMU available, any PMU reg may UNDEF... */
7010ab410a9SMarc Zyngier 	if (!kvm_arm_support_pmu_v3())
7020ab410a9SMarc Zyngier 		return;
7030ab410a9SMarc Zyngier 
7040ab410a9SMarc Zyngier 	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
7050ab410a9SMarc Zyngier 	n &= ARMV8_PMU_PMCR_N_MASK;
7060ab410a9SMarc Zyngier 	if (n)
7070ab410a9SMarc Zyngier 		mask |= GENMASK(n - 1, 0);
7080ab410a9SMarc Zyngier 
7090ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7100ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= mask;
7110ab410a9SMarc Zyngier }
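
/*
 * Illustrative example (assumed host PMU): if the host PMCR_EL0.N is 6,
 * the mask above becomes BIT(31) | GENMASK(5, 0), i.e. only the cycle
 * counter bit and the six event counter bits survive the UNKNOWN reset
 * value.
 */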
7120ab410a9SMarc Zyngier 
7130ab410a9SMarc Zyngier static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7140ab410a9SMarc Zyngier {
7150ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7160ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
7170ab410a9SMarc Zyngier }
7180ab410a9SMarc Zyngier 
7190ab410a9SMarc Zyngier static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7200ab410a9SMarc Zyngier {
7210ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7220ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
7230ab410a9SMarc Zyngier }
7240ab410a9SMarc Zyngier 
7250ab410a9SMarc Zyngier static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7260ab410a9SMarc Zyngier {
7270ab410a9SMarc Zyngier 	reset_unknown(vcpu, r);
7280ab410a9SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
7290ab410a9SMarc Zyngier }
7300ab410a9SMarc Zyngier 
731ab946834SShannon Zhao static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
732ab946834SShannon Zhao {
733292e8f14SMarc Zyngier 	u64 pmcr;
734ab946834SShannon Zhao 
7352a5f1b67SMarc Zyngier 	/* No PMU available, PMCR_EL0 may UNDEF... */
7362a5f1b67SMarc Zyngier 	if (!kvm_arm_support_pmu_v3())
7372a5f1b67SMarc Zyngier 		return;
7382a5f1b67SMarc Zyngier 
739292e8f14SMarc Zyngier 	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
740aff23483SJames Clark 	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
741f3c6efc7SOliver Upton 	if (!kvm_supports_32bit_el0())
742292e8f14SMarc Zyngier 		pmcr |= ARMV8_PMU_PMCR_LC;
743292e8f14SMarc Zyngier 
744292e8f14SMarc Zyngier 	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
745ab946834SShannon Zhao }
746ab946834SShannon Zhao 
7476c007036SMarc Zyngier static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
748d692b8adSShannon Zhao {
7498d404c4cSChristoffer Dall 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
7507ded92e2SMarc Zyngier 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
751d692b8adSShannon Zhao 
75224d5950fSMarc Zyngier 	if (!enabled)
75324d5950fSMarc Zyngier 		kvm_inject_undefined(vcpu);
75424d5950fSMarc Zyngier 
7556c007036SMarc Zyngier 	return !enabled;
7566c007036SMarc Zyngier }
7576c007036SMarc Zyngier 
7586c007036SMarc Zyngier static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
7596c007036SMarc Zyngier {
7606c007036SMarc Zyngier 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
761d692b8adSShannon Zhao }
762d692b8adSShannon Zhao 
763d692b8adSShannon Zhao static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
764d692b8adSShannon Zhao {
7656c007036SMarc Zyngier 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
766d692b8adSShannon Zhao }
767d692b8adSShannon Zhao 
768d692b8adSShannon Zhao static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
769d692b8adSShannon Zhao {
7706c007036SMarc Zyngier 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
771d692b8adSShannon Zhao }
772d692b8adSShannon Zhao 
773d692b8adSShannon Zhao static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
774d692b8adSShannon Zhao {
7756c007036SMarc Zyngier 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
776d692b8adSShannon Zhao }
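
/*
 * For example (illustrative): an EL0 guest read of PMCCNTR_EL0 is allowed
 * through if PMUSERENR_EL0.EN or PMUSERENR_EL0.CR is set (or the vcpu is
 * in a privileged mode); otherwise the helpers above inject an UNDEF and
 * the access handler bails out.
 */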
777d692b8adSShannon Zhao 
778ab946834SShannon Zhao static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
779ab946834SShannon Zhao 			const struct sys_reg_desc *r)
780ab946834SShannon Zhao {
781ab946834SShannon Zhao 	u64 val;
782ab946834SShannon Zhao 
783d692b8adSShannon Zhao 	if (pmu_access_el0_disabled(vcpu))
784d692b8adSShannon Zhao 		return false;
785d692b8adSShannon Zhao 
786ab946834SShannon Zhao 	if (p->is_write) {
78764d6820dSMarc Zyngier 		/*
78864d6820dSMarc Zyngier 		 * Only update writeable bits of PMCR (continuing into
78964d6820dSMarc Zyngier 		 * kvm_pmu_handle_pmcr() as well)
79064d6820dSMarc Zyngier 		 */
7918d404c4cSChristoffer Dall 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
792ab946834SShannon Zhao 		val &= ~ARMV8_PMU_PMCR_MASK;
793ab946834SShannon Zhao 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
794f3c6efc7SOliver Upton 		if (!kvm_supports_32bit_el0())
7956f163714SMarc Zyngier 			val |= ARMV8_PMU_PMCR_LC;
79676993739SShannon Zhao 		kvm_pmu_handle_pmcr(vcpu, val);
797ab946834SShannon Zhao 	} else {
798ab946834SShannon Zhao 		/* PMCR.P & PMCR.C are RAZ */
7998d404c4cSChristoffer Dall 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
800ab946834SShannon Zhao 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
801ab946834SShannon Zhao 		p->regval = val;
802ab946834SShannon Zhao 	}
803ab946834SShannon Zhao 
804ab946834SShannon Zhao 	return true;
805ab946834SShannon Zhao }
806ab946834SShannon Zhao 
8073965c3ceSShannon Zhao static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
8083965c3ceSShannon Zhao 			  const struct sys_reg_desc *r)
8093965c3ceSShannon Zhao {
810d692b8adSShannon Zhao 	if (pmu_access_event_counter_el0_disabled(vcpu))
811d692b8adSShannon Zhao 		return false;
812d692b8adSShannon Zhao 
8133965c3ceSShannon Zhao 	if (p->is_write)
8148d404c4cSChristoffer Dall 		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
8153965c3ceSShannon Zhao 	else
8163965c3ceSShannon Zhao 		/* return PMSELR.SEL field */
8178d404c4cSChristoffer Dall 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
8183965c3ceSShannon Zhao 			    & ARMV8_PMU_COUNTER_MASK;
8193965c3ceSShannon Zhao 
8203965c3ceSShannon Zhao 	return true;
8213965c3ceSShannon Zhao }
8223965c3ceSShannon Zhao 
823a86b5505SShannon Zhao static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
824a86b5505SShannon Zhao 			  const struct sys_reg_desc *r)
825a86b5505SShannon Zhao {
82699b6a401SMarc Zyngier 	u64 pmceid, mask, shift;
827a86b5505SShannon Zhao 
828a86b5505SShannon Zhao 	BUG_ON(p->is_write);
829a86b5505SShannon Zhao 
830d692b8adSShannon Zhao 	if (pmu_access_el0_disabled(vcpu))
831d692b8adSShannon Zhao 		return false;
832d692b8adSShannon Zhao 
83399b6a401SMarc Zyngier 	get_access_mask(r, &mask, &shift);
83499b6a401SMarc Zyngier 
83588865becSMarc Zyngier 	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
83699b6a401SMarc Zyngier 	pmceid &= mask;
83799b6a401SMarc Zyngier 	pmceid >>= shift;
838a86b5505SShannon Zhao 
839a86b5505SShannon Zhao 	p->regval = pmceid;
840a86b5505SShannon Zhao 
841a86b5505SShannon Zhao 	return true;
842a86b5505SShannon Zhao }
843a86b5505SShannon Zhao 
844051ff581SShannon Zhao static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
845051ff581SShannon Zhao {
846051ff581SShannon Zhao 	u64 pmcr, val;
847051ff581SShannon Zhao 
8488d404c4cSChristoffer Dall 	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
849051ff581SShannon Zhao 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
85024d5950fSMarc Zyngier 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
85124d5950fSMarc Zyngier 		kvm_inject_undefined(vcpu);
852051ff581SShannon Zhao 		return false;
85324d5950fSMarc Zyngier 	}
854051ff581SShannon Zhao 
855051ff581SShannon Zhao 	return true;
856051ff581SShannon Zhao }
857051ff581SShannon Zhao 
8589228b261SReiji Watanabe static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
8599228b261SReiji Watanabe 			  u64 *val)
8609228b261SReiji Watanabe {
8619228b261SReiji Watanabe 	u64 idx;
8629228b261SReiji Watanabe 
8639228b261SReiji Watanabe 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
8649228b261SReiji Watanabe 		/* PMCCNTR_EL0 */
8659228b261SReiji Watanabe 		idx = ARMV8_PMU_CYCLE_IDX;
8669228b261SReiji Watanabe 	else
8679228b261SReiji Watanabe 		/* PMEVCNTRn_EL0 */
8689228b261SReiji Watanabe 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
8699228b261SReiji Watanabe 
8709228b261SReiji Watanabe 	*val = kvm_pmu_get_counter_value(vcpu, idx);
8719228b261SReiji Watanabe 	return 0;
8729228b261SReiji Watanabe }
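
/*
 * Example of the index decoding above (illustrative): PMEVCNTR10_EL0 is
 * encoded with CRm == 0b1001 and Op2 == 0b010, so
 * idx == ((CRm & 3) << 3) | (Op2 & 7) == (1 << 3) | 2 == 10.
 */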
8739228b261SReiji Watanabe 
874051ff581SShannon Zhao static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
875051ff581SShannon Zhao 			      struct sys_reg_params *p,
876051ff581SShannon Zhao 			      const struct sys_reg_desc *r)
877051ff581SShannon Zhao {
878a3da9358SMarc Zyngier 	u64 idx = ~0UL;
879051ff581SShannon Zhao 
880051ff581SShannon Zhao 	if (r->CRn == 9 && r->CRm == 13) {
881051ff581SShannon Zhao 		if (r->Op2 == 2) {
882051ff581SShannon Zhao 			/* PMXEVCNTR_EL0 */
883d692b8adSShannon Zhao 			if (pmu_access_event_counter_el0_disabled(vcpu))
884d692b8adSShannon Zhao 				return false;
885d692b8adSShannon Zhao 
8868d404c4cSChristoffer Dall 			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
887051ff581SShannon Zhao 			      & ARMV8_PMU_COUNTER_MASK;
888051ff581SShannon Zhao 		} else if (r->Op2 == 0) {
889051ff581SShannon Zhao 			/* PMCCNTR_EL0 */
890d692b8adSShannon Zhao 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
891d692b8adSShannon Zhao 				return false;
892d692b8adSShannon Zhao 
893051ff581SShannon Zhao 			idx = ARMV8_PMU_CYCLE_IDX;
894051ff581SShannon Zhao 		}
8959e3f7a29SWei Huang 	} else if (r->CRn == 0 && r->CRm == 9) {
8969e3f7a29SWei Huang 		/* PMCCNTR */
8979e3f7a29SWei Huang 		if (pmu_access_event_counter_el0_disabled(vcpu))
8989e3f7a29SWei Huang 			return false;
8999e3f7a29SWei Huang 
9009e3f7a29SWei Huang 		idx = ARMV8_PMU_CYCLE_IDX;
901051ff581SShannon Zhao 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
902051ff581SShannon Zhao 		/* PMEVCNTRn_EL0 */
903d692b8adSShannon Zhao 		if (pmu_access_event_counter_el0_disabled(vcpu))
904d692b8adSShannon Zhao 			return false;
905d692b8adSShannon Zhao 
906051ff581SShannon Zhao 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
907051ff581SShannon Zhao 	}
908051ff581SShannon Zhao 
909a3da9358SMarc Zyngier 	/* Catch any decoding mistake */
910a3da9358SMarc Zyngier 	WARN_ON(idx == ~0UL);
911a3da9358SMarc Zyngier 
912051ff581SShannon Zhao 	if (!pmu_counter_idx_valid(vcpu, idx))
913051ff581SShannon Zhao 		return false;
914051ff581SShannon Zhao 
915d692b8adSShannon Zhao 	if (p->is_write) {
916d692b8adSShannon Zhao 		if (pmu_access_el0_disabled(vcpu))
917d692b8adSShannon Zhao 			return false;
918d692b8adSShannon Zhao 
919051ff581SShannon Zhao 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
920d692b8adSShannon Zhao 	} else {
921051ff581SShannon Zhao 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
922d692b8adSShannon Zhao 	}
923051ff581SShannon Zhao 
924051ff581SShannon Zhao 	return true;
925051ff581SShannon Zhao }
926051ff581SShannon Zhao 
9279feb21acSShannon Zhao static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
9289feb21acSShannon Zhao 			       const struct sys_reg_desc *r)
9299feb21acSShannon Zhao {
9309feb21acSShannon Zhao 	u64 idx, reg;
9319feb21acSShannon Zhao 
932d692b8adSShannon Zhao 	if (pmu_access_el0_disabled(vcpu))
933d692b8adSShannon Zhao 		return false;
934d692b8adSShannon Zhao 
9359feb21acSShannon Zhao 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
9369feb21acSShannon Zhao 		/* PMXEVTYPER_EL0 */
9378d404c4cSChristoffer Dall 		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
9389feb21acSShannon Zhao 		reg = PMEVTYPER0_EL0 + idx;
9399feb21acSShannon Zhao 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
9409feb21acSShannon Zhao 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
9419feb21acSShannon Zhao 		if (idx == ARMV8_PMU_CYCLE_IDX)
9429feb21acSShannon Zhao 			reg = PMCCFILTR_EL0;
9439feb21acSShannon Zhao 		else
9449feb21acSShannon Zhao 			/* PMEVTYPERn_EL0 */
9459feb21acSShannon Zhao 			reg = PMEVTYPER0_EL0 + idx;
9469feb21acSShannon Zhao 	} else {
9479feb21acSShannon Zhao 		BUG();
9489feb21acSShannon Zhao 	}
9499feb21acSShannon Zhao 
9509feb21acSShannon Zhao 	if (!pmu_counter_idx_valid(vcpu, idx))
9519feb21acSShannon Zhao 		return false;
9529feb21acSShannon Zhao 
9539feb21acSShannon Zhao 	if (p->is_write) {
9549feb21acSShannon Zhao 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
9558d404c4cSChristoffer Dall 		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
956435e53fbSAndrew Murray 		kvm_vcpu_pmu_restore_guest(vcpu);
9579feb21acSShannon Zhao 	} else {
9588d404c4cSChristoffer Dall 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
9599feb21acSShannon Zhao 	}
9609feb21acSShannon Zhao 
9619feb21acSShannon Zhao 	return true;
9629feb21acSShannon Zhao }
9639feb21acSShannon Zhao 
96496b0eebcSShannon Zhao static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
96596b0eebcSShannon Zhao 			   const struct sys_reg_desc *r)
96696b0eebcSShannon Zhao {
96796b0eebcSShannon Zhao 	u64 val, mask;
96896b0eebcSShannon Zhao 
969d692b8adSShannon Zhao 	if (pmu_access_el0_disabled(vcpu))
970d692b8adSShannon Zhao 		return false;
971d692b8adSShannon Zhao 
97296b0eebcSShannon Zhao 	mask = kvm_pmu_valid_counter_mask(vcpu);
97396b0eebcSShannon Zhao 	if (p->is_write) {
97496b0eebcSShannon Zhao 		val = p->regval & mask;
97596b0eebcSShannon Zhao 		if (r->Op2 & 0x1) {
97696b0eebcSShannon Zhao 			/* accessing PMCNTENSET_EL0 */
9778d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
978418e5ca8SAndrew Murray 			kvm_pmu_enable_counter_mask(vcpu, val);
979435e53fbSAndrew Murray 			kvm_vcpu_pmu_restore_guest(vcpu);
98096b0eebcSShannon Zhao 		} else {
98196b0eebcSShannon Zhao 			/* accessing PMCNTENCLR_EL0 */
9828d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
983418e5ca8SAndrew Murray 			kvm_pmu_disable_counter_mask(vcpu, val);
98496b0eebcSShannon Zhao 		}
98596b0eebcSShannon Zhao 	} else {
986f5eff400SMarc Zyngier 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
98796b0eebcSShannon Zhao 	}
98896b0eebcSShannon Zhao 
98996b0eebcSShannon Zhao 	return true;
99096b0eebcSShannon Zhao }
99196b0eebcSShannon Zhao 
9929db52c78SShannon Zhao static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
9939db52c78SShannon Zhao 			   const struct sys_reg_desc *r)
9949db52c78SShannon Zhao {
9959db52c78SShannon Zhao 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
9969db52c78SShannon Zhao 
997b0737e99SMarc Zyngier 	if (check_pmu_access_disabled(vcpu, 0))
998d692b8adSShannon Zhao 		return false;
999d692b8adSShannon Zhao 
10009db52c78SShannon Zhao 	if (p->is_write) {
10019db52c78SShannon Zhao 		u64 val = p->regval & mask;
10029db52c78SShannon Zhao 
10039db52c78SShannon Zhao 		if (r->Op2 & 0x1)
10049db52c78SShannon Zhao 			/* accessing PMINTENSET_EL1 */
10058d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
10069db52c78SShannon Zhao 		else
10079db52c78SShannon Zhao 			/* accessing PMINTENCLR_EL1 */
10088d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
10099db52c78SShannon Zhao 	} else {
1010f5eff400SMarc Zyngier 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
10119db52c78SShannon Zhao 	}
10129db52c78SShannon Zhao 
10139db52c78SShannon Zhao 	return true;
10149db52c78SShannon Zhao }
10159db52c78SShannon Zhao 
101676d883c4SShannon Zhao static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
101776d883c4SShannon Zhao 			 const struct sys_reg_desc *r)
101876d883c4SShannon Zhao {
101976d883c4SShannon Zhao 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
102076d883c4SShannon Zhao 
1021d692b8adSShannon Zhao 	if (pmu_access_el0_disabled(vcpu))
1022d692b8adSShannon Zhao 		return false;
1023d692b8adSShannon Zhao 
102476d883c4SShannon Zhao 	if (p->is_write) {
102576d883c4SShannon Zhao 		if (r->CRm & 0x2)
102676d883c4SShannon Zhao 			/* accessing PMOVSSET_EL0 */
10278d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
102876d883c4SShannon Zhao 		else
102976d883c4SShannon Zhao 			/* accessing PMOVSCLR_EL0 */
10308d404c4cSChristoffer Dall 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
103176d883c4SShannon Zhao 	} else {
1032f5eff400SMarc Zyngier 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
103376d883c4SShannon Zhao 	}
103476d883c4SShannon Zhao 
103576d883c4SShannon Zhao 	return true;
103676d883c4SShannon Zhao }
103776d883c4SShannon Zhao 
10387a0adc70SShannon Zhao static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
10397a0adc70SShannon Zhao 			   const struct sys_reg_desc *r)
10407a0adc70SShannon Zhao {
10417a0adc70SShannon Zhao 	u64 mask;
10427a0adc70SShannon Zhao 
1043e0443230SMarc Zyngier 	if (!p->is_write)
1044e7f1d1eeSMarc Zyngier 		return read_from_write_only(vcpu, p, r);
1045e0443230SMarc Zyngier 
1046d692b8adSShannon Zhao 	if (pmu_write_swinc_el0_disabled(vcpu))
1047d692b8adSShannon Zhao 		return false;
1048d692b8adSShannon Zhao 
10497a0adc70SShannon Zhao 	mask = kvm_pmu_valid_counter_mask(vcpu);
10507a0adc70SShannon Zhao 	kvm_pmu_software_increment(vcpu, p->regval & mask);
10517a0adc70SShannon Zhao 	return true;
10527a0adc70SShannon Zhao }
10537a0adc70SShannon Zhao 
1054d692b8adSShannon Zhao static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1055d692b8adSShannon Zhao 			     const struct sys_reg_desc *r)
1056d692b8adSShannon Zhao {
1057d692b8adSShannon Zhao 	if (p->is_write) {
10589008c235SMarc Zyngier 		if (!vcpu_mode_priv(vcpu)) {
10599008c235SMarc Zyngier 			kvm_inject_undefined(vcpu);
1060d692b8adSShannon Zhao 			return false;
10619008c235SMarc Zyngier 		}
1062d692b8adSShannon Zhao 
10638d404c4cSChristoffer Dall 		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
10648d404c4cSChristoffer Dall 			       p->regval & ARMV8_PMU_USERENR_MASK;
1065d692b8adSShannon Zhao 	} else {
10668d404c4cSChristoffer Dall 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1067d692b8adSShannon Zhao 			    & ARMV8_PMU_USERENR_MASK;
1068d692b8adSShannon Zhao 	}
1069d692b8adSShannon Zhao 
1070d692b8adSShannon Zhao 	return true;
1071d692b8adSShannon Zhao }
1072d692b8adSShannon Zhao 
10730c557ed4SMarc Zyngier /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
10740c557ed4SMarc Zyngier #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
1075ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
107603fdfb26SMarc Zyngier 	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
1077ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
107803fdfb26SMarc Zyngier 	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
1079ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
108003fdfb26SMarc Zyngier 	  trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },		\
1081ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
108203fdfb26SMarc Zyngier 	  trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
10830c557ed4SMarc Zyngier 
108411663111SMarc Zyngier #define PMU_SYS_REG(r)						\
10850ab410a9SMarc Zyngier 	SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility
108611663111SMarc Zyngier 
1087051ff581SShannon Zhao /* Macro to expand the PMEVCNTRn_EL0 register */
1088051ff581SShannon Zhao #define PMU_PMEVCNTR_EL0(n)						\
108911663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
10909228b261SReiji Watanabe 	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
109111663111SMarc Zyngier 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1092051ff581SShannon Zhao 
10939feb21acSShannon Zhao /* Macro to expand the PMEVTYPERn_EL0 register */
10949feb21acSShannon Zhao #define PMU_PMEVTYPER_EL0(n)						\
109511663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
10960ab410a9SMarc Zyngier 	  .reset = reset_pmevtyper,					\
109711663111SMarc Zyngier 	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
10989feb21acSShannon Zhao 
1099338b1793SMarc Zyngier static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
11004fcdf106SIonela Voinescu 			 const struct sys_reg_desc *r)
11014fcdf106SIonela Voinescu {
11024fcdf106SIonela Voinescu 	kvm_inject_undefined(vcpu);
11034fcdf106SIonela Voinescu 
11044fcdf106SIonela Voinescu 	return false;
11054fcdf106SIonela Voinescu }
11064fcdf106SIonela Voinescu 
11074fcdf106SIonela Voinescu /* Macro to expand the AMU counter and type registers */
1108338b1793SMarc Zyngier #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1109338b1793SMarc Zyngier #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1110338b1793SMarc Zyngier #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1111338b1793SMarc Zyngier #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1112384b40caSMark Rutland 
1113384b40caSMark Rutland static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1114384b40caSMark Rutland 			const struct sys_reg_desc *rd)
1115384b40caSMark Rutland {
111601fe5aceSAndrew Jones 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1117384b40caSMark Rutland }
1118384b40caSMark Rutland 
1119338b1793SMarc Zyngier /*
1120338b1793SMarc Zyngier  * If we land here on a PtrAuth access, that is because we didn't
1121338b1793SMarc Zyngier  * fix up the access on exit by allowing the PtrAuth sysregs. The only
1122338b1793SMarc Zyngier  * way this happens is when the guest does not have PtrAuth support
1123338b1793SMarc Zyngier  * enabled.
1124338b1793SMarc Zyngier  */
1125384b40caSMark Rutland #define __PTRAUTH_KEY(k)						\
1126338b1793SMarc Zyngier 	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
1127384b40caSMark Rutland 	.visibility = ptrauth_visibility}
1128384b40caSMark Rutland 
1129384b40caSMark Rutland #define PTRAUTH_KEY(k)							\
1130384b40caSMark Rutland 	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
1131384b40caSMark Rutland 	__PTRAUTH_KEY(k ## KEYHI_EL1)
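
/*
 * As an example, PTRAUTH_KEY(APIA) expands into the two descriptors below,
 * one for each half of the key:
 *
 *	{ SYS_DESC(SYS_APIAKEYLO_EL1), undef_access, reset_unknown, APIAKEYLO_EL1,
 *	  .visibility = ptrauth_visibility },
 *	{ SYS_DESC(SYS_APIAKEYHI_EL1), undef_access, reset_unknown, APIAKEYHI_EL1,
 *	  .visibility = ptrauth_visibility },
 */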
1132384b40caSMark Rutland 
113384135d3dSAndre Przywara static bool access_arch_timer(struct kvm_vcpu *vcpu,
1134c9a3c58fSJintack Lim 			      struct sys_reg_params *p,
1135c9a3c58fSJintack Lim 			      const struct sys_reg_desc *r)
1136c9a3c58fSJintack Lim {
113784135d3dSAndre Przywara 	enum kvm_arch_timers tmr;
113884135d3dSAndre Przywara 	enum kvm_arch_timer_regs treg;
113984135d3dSAndre Przywara 	u64 reg = reg_to_encoding(r);
11407b6b4631SJintack Lim 
114184135d3dSAndre Przywara 	switch (reg) {
114284135d3dSAndre Przywara 	case SYS_CNTP_TVAL_EL0:
114384135d3dSAndre Przywara 	case SYS_AARCH32_CNTP_TVAL:
114484135d3dSAndre Przywara 		tmr = TIMER_PTIMER;
114584135d3dSAndre Przywara 		treg = TIMER_REG_TVAL;
114684135d3dSAndre Przywara 		break;
114784135d3dSAndre Przywara 	case SYS_CNTP_CTL_EL0:
114884135d3dSAndre Przywara 	case SYS_AARCH32_CNTP_CTL:
114984135d3dSAndre Przywara 		tmr = TIMER_PTIMER;
115084135d3dSAndre Przywara 		treg = TIMER_REG_CTL;
115184135d3dSAndre Przywara 		break;
115284135d3dSAndre Przywara 	case SYS_CNTP_CVAL_EL0:
115384135d3dSAndre Przywara 	case SYS_AARCH32_CNTP_CVAL:
115484135d3dSAndre Przywara 		tmr = TIMER_PTIMER;
115584135d3dSAndre Przywara 		treg = TIMER_REG_CVAL;
115684135d3dSAndre Przywara 		break;
1157c605ee24SMarc Zyngier 	case SYS_CNTPCT_EL0:
1158c605ee24SMarc Zyngier 	case SYS_CNTPCTSS_EL0:
1159c605ee24SMarc Zyngier 	case SYS_AARCH32_CNTPCT:
1160c605ee24SMarc Zyngier 		tmr = TIMER_PTIMER;
1161c605ee24SMarc Zyngier 		treg = TIMER_REG_CNT;
1162c605ee24SMarc Zyngier 		break;
116384135d3dSAndre Przywara 	default:
1164ba82e06cSMarc Zyngier 		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1165ba82e06cSMarc Zyngier 		kvm_inject_undefined(vcpu);
1166ba82e06cSMarc Zyngier 		return false;
1167c1b135afSChristoffer Dall 	}
11687b6b4631SJintack Lim 
1169c1b135afSChristoffer Dall 	if (p->is_write)
117084135d3dSAndre Przywara 		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1171c1b135afSChristoffer Dall 	else
117284135d3dSAndre Przywara 		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
11737b6b4631SJintack Lim 
1174c9a3c58fSJintack Lim 	return true;
1175c9a3c58fSJintack Lim }
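
/*
 * Trap flow sketch, assuming the physical timer registers are trapped to
 * EL2 in the current configuration: a guest "mrs x0, cntp_ctl_el0" arrives
 * here with p->is_write == false, the switch above picks tmr = TIMER_PTIMER
 * and treg = TIMER_REG_CTL, and the value handed back in p->regval comes
 * from kvm_arm_timer_read_sysreg(vcpu, TIMER_PTIMER, TIMER_REG_CTL).
 */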
1176c9a3c58fSJintack Lim 
11773d0dba57SMarc Zyngier static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
11783d0dba57SMarc Zyngier {
11793d0dba57SMarc Zyngier 	if (kvm_vcpu_has_pmu(vcpu))
11803d0dba57SMarc Zyngier 		return vcpu->kvm->arch.dfr0_pmuver.imp;
11813d0dba57SMarc Zyngier 
11823d0dba57SMarc Zyngier 	return vcpu->kvm->arch.dfr0_pmuver.unimp;
11833d0dba57SMarc Zyngier }
11843d0dba57SMarc Zyngier 
1185d82e0dfdSMarc Zyngier static u8 perfmon_to_pmuver(u8 perfmon)
1186d82e0dfdSMarc Zyngier {
1187d82e0dfdSMarc Zyngier 	switch (perfmon) {
1188753d734fSMarc Zyngier 	case ID_DFR0_EL1_PerfMon_PMUv3:
1189d82e0dfdSMarc Zyngier 		return ID_AA64DFR0_EL1_PMUVer_IMP;
1190753d734fSMarc Zyngier 	case ID_DFR0_EL1_PerfMon_IMPDEF:
1191d82e0dfdSMarc Zyngier 		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
1192d82e0dfdSMarc Zyngier 	default:
1193d82e0dfdSMarc Zyngier 		/* Anything ARMv8.1+ and NI have the same value. For now. */
1194d82e0dfdSMarc Zyngier 		return perfmon;
1195d82e0dfdSMarc Zyngier 	}
1196d82e0dfdSMarc Zyngier }
1197d82e0dfdSMarc Zyngier 
11983d0dba57SMarc Zyngier static u8 pmuver_to_perfmon(u8 pmuver)
11993d0dba57SMarc Zyngier {
12003d0dba57SMarc Zyngier 	switch (pmuver) {
12013d0dba57SMarc Zyngier 	case ID_AA64DFR0_EL1_PMUVer_IMP:
1202753d734fSMarc Zyngier 		return ID_DFR0_EL1_PerfMon_PMUv3;
12033d0dba57SMarc Zyngier 	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1204753d734fSMarc Zyngier 		return ID_DFR0_EL1_PerfMon_IMPDEF;
12053d0dba57SMarc Zyngier 	default:
12063d0dba57SMarc Zyngier 		/* Anything ARMv8.1+ and NI have the same value. For now. */
12073d0dba57SMarc Zyngier 		return pmuver;
12083d0dba57SMarc Zyngier 	}
12093d0dba57SMarc Zyngier }
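
/*
 * A minimal usage sketch ('val' below is a hypothetical ID_DFR0_EL1 value):
 * converting the AArch64 PMUVer view into the AArch32 PerfMon field, which
 * is what read_id_reg() does further down:
 *
 *	u8 perfmon = pmuver_to_perfmon(vcpu_pmuver(vcpu));
 *
 *	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
 *	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), perfmon);
 */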
12103d0dba57SMarc Zyngier 
121193390c0aSDave Martin /* Read a sanitised cpufeature ID register by sys_reg_desc */
1212cdd5036dSOliver Upton static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
121393390c0aSDave Martin {
12147ba8b438SAlexandru Elisei 	u32 id = reg_to_encoding(r);
121500d5101bSAlexandru Elisei 	u64 val;
121600d5101bSAlexandru Elisei 
1217cdd5036dSOliver Upton 	if (sysreg_visible_as_raz(vcpu, r))
121800d5101bSAlexandru Elisei 		return 0;
121900d5101bSAlexandru Elisei 
122000d5101bSAlexandru Elisei 	val = read_sanitised_ftr_reg(id);
122193390c0aSDave Martin 
1222c8857935SMarc Zyngier 	switch (id) {
1223c8857935SMarc Zyngier 	case SYS_ID_AA64PFR0_EL1:
12244fcdf106SIonela Voinescu 		if (!vcpu_has_sve(vcpu))
122555adc08dSMark Brown 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
122655adc08dSMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
122755adc08dSMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
122855adc08dSMark Brown 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
122955adc08dSMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
123055adc08dSMark Brown 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
12315163373aSMarc Zyngier 		if (kvm_vgic_global_state.type == VGIC_V3) {
123255adc08dSMark Brown 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
123355adc08dSMark Brown 			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
1234562e530fSMarc Zyngier 		}
1235c8857935SMarc Zyngier 		break;
1236c8857935SMarc Zyngier 	case SYS_ID_AA64PFR1_EL1:
123716dd1fbbSFuad Tabba 		if (!kvm_has_mte(vcpu->kvm))
12386ca2b9caSMark Brown 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
123990807748SMark Brown 
12406ca2b9caSMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
1241c8857935SMarc Zyngier 		break;
1242c8857935SMarc Zyngier 	case SYS_ID_AA64ISAR1_EL1:
1243c8857935SMarc Zyngier 		if (!vcpu_has_ptrauth(vcpu))
1244aa50479bSMark Brown 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
1245aa50479bSMark Brown 				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
1246aa50479bSMark Brown 				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
1247aa50479bSMark Brown 				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
1248c8857935SMarc Zyngier 		break;
1249def8c222SVladimir Murzin 	case SYS_ID_AA64ISAR2_EL1:
1250def8c222SVladimir Murzin 		if (!vcpu_has_ptrauth(vcpu))
1251b2d71f27SMark Brown 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
1252b2d71f27SMark Brown 				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
125306e0b802SMarc Zyngier 		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
1254b2d71f27SMark Brown 			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
12553172613fSKristina Martsenko 		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
1256def8c222SVladimir Murzin 		break;
1257c8857935SMarc Zyngier 	case SYS_ID_AA64DFR0_EL1:
125894893fc9SMarc Zyngier 		/* Limit debug to ARMv8.0 */
1259fcf37b38SMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
1260fcf37b38SMark Brown 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
12613d0dba57SMarc Zyngier 		/* Set PMUver to the required version */
12623d0dba57SMarc Zyngier 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
12633d0dba57SMarc Zyngier 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
12643d0dba57SMarc Zyngier 				  vcpu_pmuver(vcpu));
126596f4f680SAlexandru Elisei 		/* Hide SPE from guests */
1266fcf37b38SMark Brown 		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
1267c8857935SMarc Zyngier 		break;
1268c8857935SMarc Zyngier 	case SYS_ID_DFR0_EL1:
1269753d734fSMarc Zyngier 		val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
1270753d734fSMarc Zyngier 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
12713d0dba57SMarc Zyngier 				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
1272c8857935SMarc Zyngier 		break;
1273bf48040cSAkihiko Odaki 	case SYS_ID_AA64MMFR2_EL1:
1274bf48040cSAkihiko Odaki 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1275bf48040cSAkihiko Odaki 		break;
1276bf48040cSAkihiko Odaki 	case SYS_ID_MMFR4_EL1:
1277bf48040cSAkihiko Odaki 		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
1278bf48040cSAkihiko Odaki 		break;
127907d79fe7SDave Martin 	}
128007d79fe7SDave Martin 
128107d79fe7SDave Martin 	return val;
128293390c0aSDave Martin }
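
/*
 * The per-register adjustments above all follow the same pattern: clear
 * the field with ARM64_FEATURE_MASK() and, if a non-zero value should be
 * exposed, re-insert it with FIELD_PREP(). A sketch for a hypothetical
 * field FOO of a hypothetical register BAR_EL1 (neither exists, they only
 * illustrate the idiom):
 *
 *	val &= ~ARM64_FEATURE_MASK(BAR_EL1_FOO);
 *	val |= FIELD_PREP(ARM64_FEATURE_MASK(BAR_EL1_FOO), new_foo);
 */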
128393390c0aSDave Martin 
1284912dee57SAndrew Jones static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1285912dee57SAndrew Jones 				  const struct sys_reg_desc *r)
1286912dee57SAndrew Jones {
12877ba8b438SAlexandru Elisei 	u32 id = reg_to_encoding(r);
1288c512298eSAndrew Jones 
1289c512298eSAndrew Jones 	switch (id) {
1290c512298eSAndrew Jones 	case SYS_ID_AA64ZFR0_EL1:
1291c512298eSAndrew Jones 		if (!vcpu_has_sve(vcpu))
1292c512298eSAndrew Jones 			return REG_RAZ;
1293c512298eSAndrew Jones 		break;
1294c512298eSAndrew Jones 	}
1295c512298eSAndrew Jones 
1296912dee57SAndrew Jones 	return 0;
1297912dee57SAndrew Jones }
1298912dee57SAndrew Jones 
1299d5efec7eSOliver Upton static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1300d5efec7eSOliver Upton 				       const struct sys_reg_desc *r)
1301d5efec7eSOliver Upton {
1302d5efec7eSOliver Upton 	/*
1303d5efec7eSOliver Upton 	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1304d5efec7eSOliver Upton 	 * EL. Promote to RAZ/WI in order to guarantee consistency between
1305d5efec7eSOliver Upton 	 * systems.
1306d5efec7eSOliver Upton 	 */
1307d5efec7eSOliver Upton 	if (!kvm_supports_32bit_el0())
1308d5efec7eSOliver Upton 		return REG_RAZ | REG_USER_WI;
1309d5efec7eSOliver Upton 
1310d5efec7eSOliver Upton 	return id_visibility(vcpu, r);
1311d5efec7eSOliver Upton }
1312d5efec7eSOliver Upton 
131334b4d203SOliver Upton static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
131434b4d203SOliver Upton 				   const struct sys_reg_desc *r)
131534b4d203SOliver Upton {
131634b4d203SOliver Upton 	return REG_RAZ;
131734b4d203SOliver Upton }
131834b4d203SOliver Upton 
131993390c0aSDave Martin /* cpufeature ID register access trap handlers */
132093390c0aSDave Martin 
132193390c0aSDave Martin static bool access_id_reg(struct kvm_vcpu *vcpu,
132293390c0aSDave Martin 			  struct sys_reg_params *p,
132393390c0aSDave Martin 			  const struct sys_reg_desc *r)
132493390c0aSDave Martin {
13254782ccc8SOliver Upton 	if (p->is_write)
13264782ccc8SOliver Upton 		return write_to_read_only(vcpu, p, r);
13274782ccc8SOliver Upton 
1328cdd5036dSOliver Upton 	p->regval = read_id_reg(vcpu, r);
13299f75b6d4SMarc Zyngier 	if (vcpu_has_nv(vcpu))
13309f75b6d4SMarc Zyngier 		access_nested_id_reg(vcpu, p, r);
13319f75b6d4SMarc Zyngier 
13324782ccc8SOliver Upton 	return true;
133393390c0aSDave Martin }
133493390c0aSDave Martin 
133573433762SDave Martin /* Visibility overrides for SVE-specific control registers */
133673433762SDave Martin static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
133773433762SDave Martin 				   const struct sys_reg_desc *rd)
133873433762SDave Martin {
133973433762SDave Martin 	if (vcpu_has_sve(vcpu))
134073433762SDave Martin 		return 0;
134173433762SDave Martin 
134201fe5aceSAndrew Jones 	return REG_HIDDEN;
134373433762SDave Martin }
134473433762SDave Martin 
134523711a5eSMarc Zyngier static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
134623711a5eSMarc Zyngier 			       const struct sys_reg_desc *rd,
1347978ceeb3SMarc Zyngier 			       u64 val)
134823711a5eSMarc Zyngier {
13494f1df628SMarc Zyngier 	u8 csv2, csv3;
135023711a5eSMarc Zyngier 
135123711a5eSMarc Zyngier 	/*
135223711a5eSMarc Zyngier 	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
135323711a5eSMarc Zyngier 	 * it doesn't promise more than what is actually provided (the
135423711a5eSMarc Zyngier 	 * guest could otherwise be covered in ectoplasmic residue).
135523711a5eSMarc Zyngier 	 */
135655adc08dSMark Brown 	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
135723711a5eSMarc Zyngier 	if (csv2 > 1 ||
135823711a5eSMarc Zyngier 	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
135923711a5eSMarc Zyngier 		return -EINVAL;
136023711a5eSMarc Zyngier 
13614f1df628SMarc Zyngier 	/* Same thing for CSV3 */
136255adc08dSMark Brown 	csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
13634f1df628SMarc Zyngier 	if (csv3 > 1 ||
13644f1df628SMarc Zyngier 	    (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
13654f1df628SMarc Zyngier 		return -EINVAL;
13664f1df628SMarc Zyngier 
13674f1df628SMarc Zyngier 	/* We can only differ with CSV[23], and anything else is an error */
1368cdd5036dSOliver Upton 	val ^= read_id_reg(vcpu, rd);
1369b04b3315SMarc Zyngier 	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
1370b04b3315SMarc Zyngier 		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
137123711a5eSMarc Zyngier 	if (val)
137223711a5eSMarc Zyngier 		return -EINVAL;
137323711a5eSMarc Zyngier 
137423711a5eSMarc Zyngier 	vcpu->kvm->arch.pfr0_csv2 = csv2;
13754f1df628SMarc Zyngier 	vcpu->kvm->arch.pfr0_csv3 = csv3;
137623711a5eSMarc Zyngier 
137723711a5eSMarc Zyngier 	return 0;
137823711a5eSMarc Zyngier }
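
/*
 * The tail of the function above is the idiom shared with
 * set_id_aa64dfr0_el1() and set_id_dfr0_el1() below: XOR against the
 * sanitised view to get the bits that differ, mask out the fields
 * userspace may legitimately change (CSV2/CSV3 here), and reject the
 * write if anything else moved. As a sketch, with 'writable_mask'
 * standing in for those fields:
 *
 *	u64 diff = val ^ read_id_reg(vcpu, rd);	// bits that differ
 *	diff &= ~writable_mask;			// ignore writable fields
 *	if (diff)
 *		return -EINVAL;			// immutable bits changed
 */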
137923711a5eSMarc Zyngier 
138060e651ffSMarc Zyngier static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
138160e651ffSMarc Zyngier 			       const struct sys_reg_desc *rd,
138260e651ffSMarc Zyngier 			       u64 val)
138360e651ffSMarc Zyngier {
138460e651ffSMarc Zyngier 	u8 pmuver, host_pmuver;
138560e651ffSMarc Zyngier 	bool valid_pmu;
138660e651ffSMarc Zyngier 
138760e651ffSMarc Zyngier 	host_pmuver = kvm_arm_pmu_get_pmuver_limit();
138860e651ffSMarc Zyngier 
138960e651ffSMarc Zyngier 	/*
139060e651ffSMarc Zyngier 	 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
139160e651ffSMarc Zyngier 	 * as it doesn't promise more than what the HW gives us. We
139260e651ffSMarc Zyngier 	 * allow an IMPDEF PMU though, only if no PMU is supported
139360e651ffSMarc Zyngier 	 * (KVM backward compatibility handling).
139460e651ffSMarc Zyngier 	 */
139560e651ffSMarc Zyngier 	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
139660e651ffSMarc Zyngier 	if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
139760e651ffSMarc Zyngier 		return -EINVAL;
139860e651ffSMarc Zyngier 
139960e651ffSMarc Zyngier 	valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
140060e651ffSMarc Zyngier 
140160e651ffSMarc Zyngier 	/* Make sure the register view and PMU support match */
140260e651ffSMarc Zyngier 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
140360e651ffSMarc Zyngier 		return -EINVAL;
140460e651ffSMarc Zyngier 
140560e651ffSMarc Zyngier 	/* We can only differ with PMUver, and anything else is an error */
140660e651ffSMarc Zyngier 	val ^= read_id_reg(vcpu, rd);
140760e651ffSMarc Zyngier 	val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
140860e651ffSMarc Zyngier 	if (val)
140960e651ffSMarc Zyngier 		return -EINVAL;
141060e651ffSMarc Zyngier 
141160e651ffSMarc Zyngier 	if (valid_pmu)
141260e651ffSMarc Zyngier 		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
141360e651ffSMarc Zyngier 	else
141460e651ffSMarc Zyngier 		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
141560e651ffSMarc Zyngier 
141660e651ffSMarc Zyngier 	return 0;
141760e651ffSMarc Zyngier }
141860e651ffSMarc Zyngier 
1419d82e0dfdSMarc Zyngier static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
1420d82e0dfdSMarc Zyngier 			   const struct sys_reg_desc *rd,
1421d82e0dfdSMarc Zyngier 			   u64 val)
1422d82e0dfdSMarc Zyngier {
1423d82e0dfdSMarc Zyngier 	u8 perfmon, host_perfmon;
1424d82e0dfdSMarc Zyngier 	bool valid_pmu;
1425d82e0dfdSMarc Zyngier 
1426d82e0dfdSMarc Zyngier 	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
1427d82e0dfdSMarc Zyngier 
1428d82e0dfdSMarc Zyngier 	/*
1429d82e0dfdSMarc Zyngier 	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
1430d82e0dfdSMarc Zyngier 	 * it doesn't promise more than what the HW gives us on the
1431d82e0dfdSMarc Zyngier 	 * AArch64 side (as everything is emulated with that), and
1432d82e0dfdSMarc Zyngier 	 * that this is a PMUv3.
1433d82e0dfdSMarc Zyngier 	 */
1434753d734fSMarc Zyngier 	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
1435753d734fSMarc Zyngier 	if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
1436753d734fSMarc Zyngier 	    (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
1437d82e0dfdSMarc Zyngier 		return -EINVAL;
1438d82e0dfdSMarc Zyngier 
1439753d734fSMarc Zyngier 	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
1440d82e0dfdSMarc Zyngier 
1441d82e0dfdSMarc Zyngier 	/* Make sure the register view and PMU support match */
1442d82e0dfdSMarc Zyngier 	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
1443d82e0dfdSMarc Zyngier 		return -EINVAL;
1444d82e0dfdSMarc Zyngier 
1445d82e0dfdSMarc Zyngier 	/* We can only differ with PerfMon, and anything else is an error */
1446d82e0dfdSMarc Zyngier 	val ^= read_id_reg(vcpu, rd);
1447753d734fSMarc Zyngier 	val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
1448d82e0dfdSMarc Zyngier 	if (val)
1449d82e0dfdSMarc Zyngier 		return -EINVAL;
1450d82e0dfdSMarc Zyngier 
1451d82e0dfdSMarc Zyngier 	if (valid_pmu)
1452d82e0dfdSMarc Zyngier 		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
1453d82e0dfdSMarc Zyngier 	else
1454d82e0dfdSMarc Zyngier 		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
1455d82e0dfdSMarc Zyngier 
1456d82e0dfdSMarc Zyngier 	return 0;
1457d82e0dfdSMarc Zyngier }
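
/*
 * Sketch of the resulting userspace behaviour, assuming every other field
 * of the written value matches the sanitised view:
 *
 *	no vcpu PMU, PerfMon = IMPDEF		-> accepted, recorded in
 *						   dfr0_pmuver.unimp
 *	no vcpu PMU, PerfMon = PMUv3		-> -EINVAL (valid_pmu mismatch)
 *	vcpu PMU, PerfMon above the host limit
 *	(and not IMPDEF)			-> -EINVAL
 */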
1458d82e0dfdSMarc Zyngier 
145993390c0aSDave Martin /*
146093390c0aSDave Martin  * cpufeature ID register user accessors
146193390c0aSDave Martin  *
146293390c0aSDave Martin  * For now, these registers are immutable for userspace, so no values
146393390c0aSDave Martin  * are stored, and for set_id_reg() we don't allow the effective value
146493390c0aSDave Martin  * to be changed.
146593390c0aSDave Martin  */
146693390c0aSDave Martin static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1467978ceeb3SMarc Zyngier 		      u64 *val)
146893390c0aSDave Martin {
1469cdd5036dSOliver Upton 	*val = read_id_reg(vcpu, rd);
14704782ccc8SOliver Upton 	return 0;
147193390c0aSDave Martin }
147293390c0aSDave Martin 
147393390c0aSDave Martin static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1474978ceeb3SMarc Zyngier 		      u64 val)
147593390c0aSDave Martin {
14764782ccc8SOliver Upton 	/* This is what we mean by invariant: you can't change it. */
1477cdd5036dSOliver Upton 	if (val != read_id_reg(vcpu, rd))
14784782ccc8SOliver Upton 		return -EINVAL;
14794782ccc8SOliver Upton 
14804782ccc8SOliver Upton 	return 0;
148193390c0aSDave Martin }
148293390c0aSDave Martin 
14835a430976SAlexandru Elisei static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1484978ceeb3SMarc Zyngier 		       u64 *val)
14855a430976SAlexandru Elisei {
1486978ceeb3SMarc Zyngier 	*val = 0;
1487978ceeb3SMarc Zyngier 	return 0;
14885a430976SAlexandru Elisei }
14895a430976SAlexandru Elisei 
14907a3ba309SMarc Zyngier static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1491978ceeb3SMarc Zyngier 		      u64 val)
14927a3ba309SMarc Zyngier {
14937a3ba309SMarc Zyngier 	return 0;
14947a3ba309SMarc Zyngier }
14957a3ba309SMarc Zyngier 
1496f7f2b15cSArd Biesheuvel static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1497f7f2b15cSArd Biesheuvel 		       const struct sys_reg_desc *r)
1498f7f2b15cSArd Biesheuvel {
1499f7f2b15cSArd Biesheuvel 	if (p->is_write)
1500f7f2b15cSArd Biesheuvel 		return write_to_read_only(vcpu, p, r);
1501f7f2b15cSArd Biesheuvel 
1502f7f2b15cSArd Biesheuvel 	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1503f7f2b15cSArd Biesheuvel 	return true;
1504f7f2b15cSArd Biesheuvel }
1505f7f2b15cSArd Biesheuvel 
1506f7f2b15cSArd Biesheuvel static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1507f7f2b15cSArd Biesheuvel 			 const struct sys_reg_desc *r)
1508f7f2b15cSArd Biesheuvel {
1509f7f2b15cSArd Biesheuvel 	if (p->is_write)
1510f7f2b15cSArd Biesheuvel 		return write_to_read_only(vcpu, p, r);
1511f7f2b15cSArd Biesheuvel 
15127af0c253SAkihiko Odaki 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
1513f7f2b15cSArd Biesheuvel 	return true;
1514f7f2b15cSArd Biesheuvel }
1515f7f2b15cSArd Biesheuvel 
15167af0c253SAkihiko Odaki /*
15177af0c253SAkihiko Odaki  * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
15187af0c253SAkihiko Odaki  * depending on which physical CPU the vcpu currently resides on.
15197af0c253SAkihiko Odaki  */
15207af0c253SAkihiko Odaki static void reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
15217af0c253SAkihiko Odaki {
15227af0c253SAkihiko Odaki 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
15237af0c253SAkihiko Odaki 	u64 clidr;
15247af0c253SAkihiko Odaki 	u8 loc;
15257af0c253SAkihiko Odaki 
15267af0c253SAkihiko Odaki 	if ((ctr_el0 & CTR_EL0_IDC)) {
15277af0c253SAkihiko Odaki 		/*
15287af0c253SAkihiko Odaki 		 * Data cache clean to the PoU is not required so LoUU and LoUIS
15297af0c253SAkihiko Odaki 		 * will not be set and a unified cache, which will be marked as
15307af0c253SAkihiko Odaki 		 * LoC, will be added.
15317af0c253SAkihiko Odaki 		 *
15327af0c253SAkihiko Odaki 		 * If not DIC, make the unified cache L2 so that an instruction
15337af0c253SAkihiko Odaki 		 * cache can be added as L1 later.
15347af0c253SAkihiko Odaki 		 */
15357af0c253SAkihiko Odaki 		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
15367af0c253SAkihiko Odaki 		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
15377af0c253SAkihiko Odaki 	} else {
15387af0c253SAkihiko Odaki 		/*
15397af0c253SAkihiko Odaki 		 * Data cache clean to the PoU is required so let L1 have a data
15407af0c253SAkihiko Odaki 		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
15417af0c253SAkihiko Odaki 		 * it can be marked as LoC too.
15427af0c253SAkihiko Odaki 		 */
15437af0c253SAkihiko Odaki 		loc = 1;
15447af0c253SAkihiko Odaki 		clidr = 1 << CLIDR_LOUU_SHIFT;
15457af0c253SAkihiko Odaki 		clidr |= 1 << CLIDR_LOUIS_SHIFT;
15467af0c253SAkihiko Odaki 		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
15477af0c253SAkihiko Odaki 	}
15487af0c253SAkihiko Odaki 
15497af0c253SAkihiko Odaki 	/*
15507af0c253SAkihiko Odaki 	 * Instruction cache invalidation to the PoU is required so let L1 have
15517af0c253SAkihiko Odaki 	 * an instruction cache. If L1 already has a data cache, it will be
15527af0c253SAkihiko Odaki 	 * CACHE_TYPE_SEPARATE.
15537af0c253SAkihiko Odaki 	 */
15547af0c253SAkihiko Odaki 	if (!(ctr_el0 & CTR_EL0_DIC))
15557af0c253SAkihiko Odaki 		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
15567af0c253SAkihiko Odaki 
15577af0c253SAkihiko Odaki 	clidr |= loc << CLIDR_LOC_SHIFT;
15587af0c253SAkihiko Odaki 
15597af0c253SAkihiko Odaki 	/*
15607af0c253SAkihiko Odaki 	 * Add tag cache unified to data cache. Allocation tags and data are
15617af0c253SAkihiko Odaki 	 * unified in a cache line so that it looks valid even if there is only
15627af0c253SAkihiko Odaki 	 * one cache line.
15637af0c253SAkihiko Odaki 	 */
15647af0c253SAkihiko Odaki 	if (kvm_has_mte(vcpu->kvm))
15657af0c253SAkihiko Odaki 		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
15667af0c253SAkihiko Odaki 
15677af0c253SAkihiko Odaki 	__vcpu_sys_reg(vcpu, r->reg) = clidr;
15687af0c253SAkihiko Odaki }
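
/*
 * For illustration, the fabricated topologies are:
 *
 *	CTR_EL0.{IDC,DIC} = {1,1}: a single unified L1 cache, LoC = 1,
 *				   LoUU = LoUIS = 0
 *	CTR_EL0.{IDC,DIC} = {1,0}: an L1 instruction cache plus a unified
 *				   L2, LoC = 2, LoUU = LoUIS = 0
 *	CTR_EL0.{IDC,DIC} = {0,0}: separate L1 I/D caches,
 *				   LoUU = LoUIS = LoC = 1
 *
 * and, when MTE is enabled, allocation tags are advertised as unified
 * with data at the LoC level.
 */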
15697af0c253SAkihiko Odaki 
15707af0c253SAkihiko Odaki static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
15717af0c253SAkihiko Odaki 		      u64 val)
15727af0c253SAkihiko Odaki {
15737af0c253SAkihiko Odaki 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
15747af0c253SAkihiko Odaki 	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
15757af0c253SAkihiko Odaki 
15767af0c253SAkihiko Odaki 	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
15777af0c253SAkihiko Odaki 		return -EINVAL;
15787af0c253SAkihiko Odaki 
15797af0c253SAkihiko Odaki 	__vcpu_sys_reg(vcpu, rd->reg) = val;
15807af0c253SAkihiko Odaki 
15817af0c253SAkihiko Odaki 	return 0;
15827af0c253SAkihiko Odaki }
15837af0c253SAkihiko Odaki 
1584f7f2b15cSArd Biesheuvel static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1585f7f2b15cSArd Biesheuvel 			  const struct sys_reg_desc *r)
1586f7f2b15cSArd Biesheuvel {
15877c582bf4SJames Morse 	int reg = r->reg;
15887c582bf4SJames Morse 
1589f7f2b15cSArd Biesheuvel 	if (p->is_write)
15907c582bf4SJames Morse 		vcpu_write_sys_reg(vcpu, p->regval, reg);
1591f7f2b15cSArd Biesheuvel 	else
15927c582bf4SJames Morse 		p->regval = vcpu_read_sys_reg(vcpu, reg);
1593f7f2b15cSArd Biesheuvel 	return true;
1594f7f2b15cSArd Biesheuvel }
1595f7f2b15cSArd Biesheuvel 
1596f7f2b15cSArd Biesheuvel static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1597f7f2b15cSArd Biesheuvel 			  const struct sys_reg_desc *r)
1598f7f2b15cSArd Biesheuvel {
1599f7f2b15cSArd Biesheuvel 	u32 csselr;
1600f7f2b15cSArd Biesheuvel 
1601f7f2b15cSArd Biesheuvel 	if (p->is_write)
1602f7f2b15cSArd Biesheuvel 		return write_to_read_only(vcpu, p, r);
1603f7f2b15cSArd Biesheuvel 
1604f7f2b15cSArd Biesheuvel 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
16057af0c253SAkihiko Odaki 	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
16067af0c253SAkihiko Odaki 	if (csselr < CSSELR_MAX)
16077af0c253SAkihiko Odaki 		p->regval = get_ccsidr(vcpu, csselr);
1608793acf87SArd Biesheuvel 
1609f7f2b15cSArd Biesheuvel 	return true;
1610f7f2b15cSArd Biesheuvel }
1611f7f2b15cSArd Biesheuvel 
1612e1f358b5SSteven Price static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
1613e1f358b5SSteven Price 				   const struct sys_reg_desc *rd)
1614e1f358b5SSteven Price {
1615673638f4SSteven Price 	if (kvm_has_mte(vcpu->kvm))
1616673638f4SSteven Price 		return 0;
1617673638f4SSteven Price 
1618e1f358b5SSteven Price 	return REG_HIDDEN;
1619e1f358b5SSteven Price }
1620e1f358b5SSteven Price 
1621e1f358b5SSteven Price #define MTE_REG(name) {				\
1622e1f358b5SSteven Price 	SYS_DESC(SYS_##name),			\
1623e1f358b5SSteven Price 	.access = undef_access,			\
1624e1f358b5SSteven Price 	.reset = reset_unknown,			\
1625e1f358b5SSteven Price 	.reg = name,				\
1626e1f358b5SSteven Price 	.visibility = mte_visibility,		\
1627e1f358b5SSteven Price }
1628e1f358b5SSteven Price 
16296ff9dc23SJintack Lim static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
16306ff9dc23SJintack Lim 				   const struct sys_reg_desc *rd)
16316ff9dc23SJintack Lim {
16326ff9dc23SJintack Lim 	if (vcpu_has_nv(vcpu))
16336ff9dc23SJintack Lim 		return 0;
16346ff9dc23SJintack Lim 
16356ff9dc23SJintack Lim 	return REG_HIDDEN;
16366ff9dc23SJintack Lim }
16376ff9dc23SJintack Lim 
16386ff9dc23SJintack Lim #define EL2_REG(name, acc, rst, v) {		\
16396ff9dc23SJintack Lim 	SYS_DESC(SYS_##name),			\
16406ff9dc23SJintack Lim 	.access = acc,				\
16416ff9dc23SJintack Lim 	.reset = rst,				\
16426ff9dc23SJintack Lim 	.reg = name,				\
16436ff9dc23SJintack Lim 	.visibility = el2_visibility,		\
16446ff9dc23SJintack Lim 	.val = v,				\
16456ff9dc23SJintack Lim }
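
/*
 * For example, EL2_REG(HCR_EL2, access_rw, reset_val, 0) used in the
 * table below expands to:
 *
 *	{ SYS_DESC(SYS_HCR_EL2),
 *	  .access = access_rw,
 *	  .reset = reset_val,
 *	  .reg = HCR_EL2,
 *	  .visibility = el2_visibility,
 *	  .val = 0,
 *	}
 */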
16466ff9dc23SJintack Lim 
1647280b748eSJintack Lim /*
1648280b748eSJintack Lim  * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
1649280b748eSJintack Lim  * HCR_EL2.E2H==1, and only in the sysreg table for convenience of
1650280b748eSJintack Lim  * HCR_EL2.E2H==1, and are only in the sysreg table for the convenience of
1651280b748eSJintack Lim  */
1652280b748eSJintack Lim static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu,
1653280b748eSJintack Lim 				    const struct sys_reg_desc *rd)
1654280b748eSJintack Lim {
1655280b748eSJintack Lim 	return REG_HIDDEN_USER;
1656280b748eSJintack Lim }
1657280b748eSJintack Lim 
1658280b748eSJintack Lim #define EL12_REG(name, acc, rst, v) {		\
1659280b748eSJintack Lim 	SYS_DESC(SYS_##name##_EL12),		\
1660280b748eSJintack Lim 	.access = acc,				\
1661280b748eSJintack Lim 	.reset = rst,				\
1662280b748eSJintack Lim 	.reg = name##_EL1,			\
1663280b748eSJintack Lim 	.val = v,				\
1664280b748eSJintack Lim 	.visibility = elx2_visibility,		\
1665280b748eSJintack Lim }
1666280b748eSJintack Lim 
166793390c0aSDave Martin /* sys_reg_desc initialiser for known cpufeature ID registers */
166893390c0aSDave Martin #define ID_SANITISED(name) {			\
166993390c0aSDave Martin 	SYS_DESC(SYS_##name),			\
167093390c0aSDave Martin 	.access	= access_id_reg,		\
167193390c0aSDave Martin 	.get_user = get_id_reg,			\
167293390c0aSDave Martin 	.set_user = set_id_reg,			\
1673912dee57SAndrew Jones 	.visibility = id_visibility,		\
167493390c0aSDave Martin }
167593390c0aSDave Martin 
1676d5efec7eSOliver Upton /* sys_reg_desc initialiser for known cpufeature ID registers */
1677d5efec7eSOliver Upton #define AA32_ID_SANITISED(name) {		\
1678d5efec7eSOliver Upton 	SYS_DESC(SYS_##name),			\
1679d5efec7eSOliver Upton 	.access	= access_id_reg,		\
1680d5efec7eSOliver Upton 	.get_user = get_id_reg,			\
1681d5efec7eSOliver Upton 	.set_user = set_id_reg,			\
1682d5efec7eSOliver Upton 	.visibility = aa32_id_visibility,	\
1683d5efec7eSOliver Upton }
1684d5efec7eSOliver Upton 
168593390c0aSDave Martin /*
168693390c0aSDave Martin  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
168793390c0aSDave Martin  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
168893390c0aSDave Martin  * (1 <= crm < 8, 0 <= Op2 < 8).
168993390c0aSDave Martin  */
169093390c0aSDave Martin #define ID_UNALLOCATED(crm, op2) {			\
169193390c0aSDave Martin 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
169234b4d203SOliver Upton 	.access = access_id_reg,			\
169334b4d203SOliver Upton 	.get_user = get_id_reg,				\
169434b4d203SOliver Upton 	.set_user = set_id_reg,				\
169534b4d203SOliver Upton 	.visibility = raz_visibility			\
169693390c0aSDave Martin }
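
/*
 * As an illustration, ID_UNALLOCATED(4,2) describes the unallocated
 * encoding Op0=3, Op1=0, CRn=0, CRm=4, Op2=2 and expands to roughly:
 *
 *	{ Op0(3), Op1(0), CRn(0), CRm(4), Op2(2),
 *	  .access = access_id_reg,
 *	  .get_user = get_id_reg,
 *	  .set_user = set_id_reg,
 *	  .visibility = raz_visibility }
 *
 * i.e. the encoding is still trapped and handled, but reads as zero for
 * both the guest and userspace.
 */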
169793390c0aSDave Martin 
169893390c0aSDave Martin /*
169993390c0aSDave Martin  * sys_reg_desc initialiser for known ID registers that we hide from guests.
170093390c0aSDave Martin  * For now, these are exposed just like unallocated ID regs: they appear
170193390c0aSDave Martin  * RAZ for the guest.
170293390c0aSDave Martin  */
170393390c0aSDave Martin #define ID_HIDDEN(name) {			\
170493390c0aSDave Martin 	SYS_DESC(SYS_##name),			\
170534b4d203SOliver Upton 	.access = access_id_reg,		\
170634b4d203SOliver Upton 	.get_user = get_id_reg,			\
170734b4d203SOliver Upton 	.set_user = set_id_reg,			\
170834b4d203SOliver Upton 	.visibility = raz_visibility,		\
170993390c0aSDave Martin }
171093390c0aSDave Martin 
17116ff9dc23SJintack Lim static bool access_sp_el1(struct kvm_vcpu *vcpu,
17126ff9dc23SJintack Lim 			  struct sys_reg_params *p,
17136ff9dc23SJintack Lim 			  const struct sys_reg_desc *r)
17146ff9dc23SJintack Lim {
17156ff9dc23SJintack Lim 	if (p->is_write)
17166ff9dc23SJintack Lim 		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
17176ff9dc23SJintack Lim 	else
17186ff9dc23SJintack Lim 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
17196ff9dc23SJintack Lim 
17206ff9dc23SJintack Lim 	return true;
17216ff9dc23SJintack Lim }
17226ff9dc23SJintack Lim 
17239da117eeSJintack Lim static bool access_elr(struct kvm_vcpu *vcpu,
17249da117eeSJintack Lim 		       struct sys_reg_params *p,
17259da117eeSJintack Lim 		       const struct sys_reg_desc *r)
17269da117eeSJintack Lim {
17279da117eeSJintack Lim 	if (p->is_write)
17289da117eeSJintack Lim 		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
17299da117eeSJintack Lim 	else
17309da117eeSJintack Lim 		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
17319da117eeSJintack Lim 
17329da117eeSJintack Lim 	return true;
17339da117eeSJintack Lim }
17349da117eeSJintack Lim 
17359da117eeSJintack Lim static bool access_spsr(struct kvm_vcpu *vcpu,
17369da117eeSJintack Lim 			struct sys_reg_params *p,
17379da117eeSJintack Lim 			const struct sys_reg_desc *r)
17389da117eeSJintack Lim {
17399da117eeSJintack Lim 	if (p->is_write)
17409da117eeSJintack Lim 		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
17419da117eeSJintack Lim 	else
17429da117eeSJintack Lim 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
17439da117eeSJintack Lim 
17449da117eeSJintack Lim 	return true;
17459da117eeSJintack Lim }
17469da117eeSJintack Lim 
17477c8c5e6aSMarc Zyngier /*
17487c8c5e6aSMarc Zyngier  * Architected system registers.
17497c8c5e6aSMarc Zyngier  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
17507609c125SMarc Zyngier  *
17510c557ed4SMarc Zyngier  * Debug handling: We do trap most, if not all, debug-related system
17520c557ed4SMarc Zyngier  * registers. The implementation is good enough to ensure that a guest
17530c557ed4SMarc Zyngier  * can use these with minimal performance degradation. The drawback is
17547dabf02fSOliver Upton  * that we don't implement any of the external debug architecture.
17557dabf02fSOliver Upton  * This should be revisited if we ever encounter a more demanding
17567dabf02fSOliver Upton  * guest...
17577c8c5e6aSMarc Zyngier  */
17587c8c5e6aSMarc Zyngier static const struct sys_reg_desc sys_reg_descs[] = {
17597606e078SMark Rutland 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
17607606e078SMark Rutland 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
17617606e078SMark Rutland 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
17627c8c5e6aSMarc Zyngier 
17630c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(0),
17640c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(1),
1765ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
1766ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
17670c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(2),
17680c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(3),
17690c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(4),
17700c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(5),
17710c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(6),
17720c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(7),
17730c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(8),
17740c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(9),
17750c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(10),
17760c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(11),
17770c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(12),
17780c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(13),
17790c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(14),
17800c557ed4SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR_EL1(15),
17810c557ed4SMarc Zyngier 
1782ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
1783f24adc65SOliver Upton 	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
1784d42e2671SOliver Upton 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
1785d42e2671SOliver Upton 		SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
1786ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
1787ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
1788ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
1789ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
1790ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
17910c557ed4SMarc Zyngier 
1792ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
1793ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
1794ee1b64e6SMark Rutland 	// DBGDTR[TR]X_EL0 share the same encoding
1795ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
17960c557ed4SMarc Zyngier 
1797ee1b64e6SMark Rutland 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
179862a89c44SMarc Zyngier 
1799851050a5SMark Rutland 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
180093390c0aSDave Martin 
180193390c0aSDave Martin 	/*
180293390c0aSDave Martin 	 * ID regs: all ID_SANITISED() entries here must have corresponding
180393390c0aSDave Martin 	 * entries in arm64_ftr_regs[].
180493390c0aSDave Martin 	 */
180593390c0aSDave Martin 
180693390c0aSDave Martin 	/* AArch64 mappings of the AArch32 ID registers */
180793390c0aSDave Martin 	/* CRm=1 */
1808d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR0_EL1),
1809d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR1_EL1),
1810d82e0dfdSMarc Zyngier 	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
1811d82e0dfdSMarc Zyngier 	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
1812d82e0dfdSMarc Zyngier 	  .visibility = aa32_id_visibility, },
181393390c0aSDave Martin 	ID_HIDDEN(ID_AFR0_EL1),
1814d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR0_EL1),
1815d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR1_EL1),
1816d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR2_EL1),
1817d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR3_EL1),
181893390c0aSDave Martin 
181993390c0aSDave Martin 	/* CRm=2 */
1820d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR0_EL1),
1821d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR1_EL1),
1822d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR2_EL1),
1823d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR3_EL1),
1824d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR4_EL1),
1825d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR5_EL1),
1826d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR4_EL1),
1827d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_ISAR6_EL1),
182893390c0aSDave Martin 
182993390c0aSDave Martin 	/* CRm=3 */
1830d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR0_EL1),
1831d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR1_EL1),
1832d5efec7eSOliver Upton 	AA32_ID_SANITISED(MVFR2_EL1),
183393390c0aSDave Martin 	ID_UNALLOCATED(3,3),
1834d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_PFR2_EL1),
1835dd35ec07SAnshuman Khandual 	ID_HIDDEN(ID_DFR1_EL1),
1836d5efec7eSOliver Upton 	AA32_ID_SANITISED(ID_MMFR5_EL1),
183793390c0aSDave Martin 	ID_UNALLOCATED(3,7),
183893390c0aSDave Martin 
183993390c0aSDave Martin 	/* AArch64 ID registers */
184093390c0aSDave Martin 	/* CRm=4 */
184123711a5eSMarc Zyngier 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
184223711a5eSMarc Zyngier 	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
184393390c0aSDave Martin 	ID_SANITISED(ID_AA64PFR1_EL1),
184493390c0aSDave Martin 	ID_UNALLOCATED(4,2),
184593390c0aSDave Martin 	ID_UNALLOCATED(4,3),
1846c512298eSAndrew Jones 	ID_SANITISED(ID_AA64ZFR0_EL1),
184790807748SMark Brown 	ID_HIDDEN(ID_AA64SMFR0_EL1),
184893390c0aSDave Martin 	ID_UNALLOCATED(4,6),
184993390c0aSDave Martin 	ID_UNALLOCATED(4,7),
185093390c0aSDave Martin 
185193390c0aSDave Martin 	/* CRm=5 */
185260e651ffSMarc Zyngier 	{ SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
185360e651ffSMarc Zyngier 	  .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
185493390c0aSDave Martin 	ID_SANITISED(ID_AA64DFR1_EL1),
185593390c0aSDave Martin 	ID_UNALLOCATED(5,2),
185693390c0aSDave Martin 	ID_UNALLOCATED(5,3),
185793390c0aSDave Martin 	ID_HIDDEN(ID_AA64AFR0_EL1),
185893390c0aSDave Martin 	ID_HIDDEN(ID_AA64AFR1_EL1),
185993390c0aSDave Martin 	ID_UNALLOCATED(5,6),
186093390c0aSDave Martin 	ID_UNALLOCATED(5,7),
186193390c0aSDave Martin 
186293390c0aSDave Martin 	/* CRm=6 */
186393390c0aSDave Martin 	ID_SANITISED(ID_AA64ISAR0_EL1),
186493390c0aSDave Martin 	ID_SANITISED(ID_AA64ISAR1_EL1),
18659e45365fSJoey Gouly 	ID_SANITISED(ID_AA64ISAR2_EL1),
186693390c0aSDave Martin 	ID_UNALLOCATED(6,3),
186793390c0aSDave Martin 	ID_UNALLOCATED(6,4),
186893390c0aSDave Martin 	ID_UNALLOCATED(6,5),
186993390c0aSDave Martin 	ID_UNALLOCATED(6,6),
187093390c0aSDave Martin 	ID_UNALLOCATED(6,7),
187193390c0aSDave Martin 
187293390c0aSDave Martin 	/* CRm=7 */
187393390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR0_EL1),
187493390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR1_EL1),
187593390c0aSDave Martin 	ID_SANITISED(ID_AA64MMFR2_EL1),
187693390c0aSDave Martin 	ID_UNALLOCATED(7,3),
187793390c0aSDave Martin 	ID_UNALLOCATED(7,4),
187893390c0aSDave Martin 	ID_UNALLOCATED(7,5),
187993390c0aSDave Martin 	ID_UNALLOCATED(7,6),
188093390c0aSDave Martin 	ID_UNALLOCATED(7,7),
188193390c0aSDave Martin 
1882851050a5SMark Rutland 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
1883af473829SJames Morse 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
1884851050a5SMark Rutland 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
18852ac638fcSCatalin Marinas 
1886e1f358b5SSteven Price 	MTE_REG(RGSR_EL1),
1887e1f358b5SSteven Price 	MTE_REG(GCR_EL1),
18882ac638fcSCatalin Marinas 
188973433762SDave Martin 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
1890cc427cbbSSuzuki K Poulose 	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
189190807748SMark Brown 	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
189290807748SMark Brown 	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
1893851050a5SMark Rutland 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
1894851050a5SMark Rutland 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
1895851050a5SMark Rutland 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
1896*fbff5606SJoey Gouly 	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
18977c8c5e6aSMarc Zyngier 
1898384b40caSMark Rutland 	PTRAUTH_KEY(APIA),
1899384b40caSMark Rutland 	PTRAUTH_KEY(APIB),
1900384b40caSMark Rutland 	PTRAUTH_KEY(APDA),
1901384b40caSMark Rutland 	PTRAUTH_KEY(APDB),
1902384b40caSMark Rutland 	PTRAUTH_KEY(APGA),
1903384b40caSMark Rutland 
19049da117eeSJintack Lim 	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
19059da117eeSJintack Lim 	{ SYS_DESC(SYS_ELR_EL1), access_elr },
19069da117eeSJintack Lim 
1907851050a5SMark Rutland 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
1908851050a5SMark Rutland 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
1909851050a5SMark Rutland 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
1910558daf69SDongjiu Geng 
1911558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
1912558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
1913558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
1914558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
1915558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
1916558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
1917558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
1918558daf69SDongjiu Geng 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
1919558daf69SDongjiu Geng 
1920e1f358b5SSteven Price 	MTE_REG(TFSR_EL1),
1921e1f358b5SSteven Price 	MTE_REG(TFSRE0_EL1),
19222ac638fcSCatalin Marinas 
1923851050a5SMark Rutland 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
1924851050a5SMark Rutland 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
19257c8c5e6aSMarc Zyngier 
192613611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
192713611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
192813611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
192913611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
193013611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
193113611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
193213611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
193313611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
193413611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
193513611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
193613611bc8SAlexandru Elisei 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
193713611bc8SAlexandru Elisei 	/* PMBIDR_EL1 is not trapped */
193813611bc8SAlexandru Elisei 
193911663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
194011663111SMarc Zyngier 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
194111663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
194211663111SMarc Zyngier 	  .access = access_pminten, .reg = PMINTENSET_EL1 },
194346081078SMarc Zyngier 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
19447c8c5e6aSMarc Zyngier 
1945851050a5SMark Rutland 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
1946851050a5SMark Rutland 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
19477c8c5e6aSMarc Zyngier 
194822925521SMarc Zyngier 	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
194922925521SMarc Zyngier 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
195022925521SMarc Zyngier 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
195122925521SMarc Zyngier 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
195222925521SMarc Zyngier 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
1953cc33c4e2SMark Rutland 
19549da117eeSJintack Lim 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
1955c773ae2bSJames Morse 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
1956db7dedd0SChristoffer Dall 
19577b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
1958e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
19597b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
1960e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
19617b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
1962e804d208SMark Rutland 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
196303bd646dSMarc Zyngier 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
196403bd646dSMarc Zyngier 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
19657b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
1966e7f1d1eeSMarc Zyngier 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
19677b1dba1fSMarc Zyngier 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
1968e804d208SMark Rutland 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
1969db7dedd0SChristoffer Dall 
1970851050a5SMark Rutland 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
1971851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
19727c8c5e6aSMarc Zyngier 
1973ed4ffaf4SMarc Zyngier 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
1974ed4ffaf4SMarc Zyngier 
1975851050a5SMark Rutland 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
19767c8c5e6aSMarc Zyngier 
1977f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
19787af0c253SAkihiko Odaki 	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
19797af0c253SAkihiko Odaki 	  .set_user = set_clidr },
1980bf48040cSAkihiko Odaki 	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
198190807748SMark Brown 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
1982f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
1983f7f2b15cSArd Biesheuvel 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
1984ec0067a6SMark Brown 	{ SYS_DESC(SYS_SVCR), undef_access },
19857c8c5e6aSMarc Zyngier 
198611663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
198711663111SMarc Zyngier 	  .reset = reset_pmcr, .reg = PMCR_EL0 },
198811663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
198911663111SMarc Zyngier 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
199011663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
199111663111SMarc Zyngier 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
199211663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
199311663111SMarc Zyngier 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
19947a3ba309SMarc Zyngier 	/*
19957a3ba309SMarc Zyngier 	 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
19967a3ba309SMarc Zyngier 	 * (pointlessly) advertised in the past...
19977a3ba309SMarc Zyngier 	 */
199811663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
19995a430976SAlexandru Elisei 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
20007a3ba309SMarc Zyngier 	  .access = access_pmswinc, .reset = NULL },
200111663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMSELR_EL0),
20020ab410a9SMarc Zyngier 	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
200311663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
200411663111SMarc Zyngier 	  .access = access_pmceid, .reset = NULL },
200511663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
200611663111SMarc Zyngier 	  .access = access_pmceid, .reset = NULL },
200711663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
20089228b261SReiji Watanabe 	  .access = access_pmu_evcntr, .reset = reset_unknown,
20099228b261SReiji Watanabe 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
201011663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
201111663111SMarc Zyngier 	  .access = access_pmu_evtyper, .reset = NULL },
201211663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
201311663111SMarc Zyngier 	  .access = access_pmu_evcntr, .reset = NULL },
2014174ed3e4SMark Rutland 	/*
2015174ed3e4SMark Rutland 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2016d692b8adSShannon Zhao 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
2017d692b8adSShannon Zhao 	 */
201811663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
201911663111SMarc Zyngier 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
202011663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
202111663111SMarc Zyngier 	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
20227c8c5e6aSMarc Zyngier 
2023851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2024851050a5SMark Rutland 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
202590807748SMark Brown 	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
202662a89c44SMarc Zyngier 
2027ed4ffaf4SMarc Zyngier 	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2028ed4ffaf4SMarc Zyngier 
2029338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
2030338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2031338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2032338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2033338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2034338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2035338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2036338b1793SMarc Zyngier 	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
20374fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(0),
20384fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(1),
20394fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(2),
20404fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(3),
20414fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(4),
20424fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(5),
20434fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(6),
20444fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(7),
20454fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(8),
20464fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(9),
20474fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(10),
20484fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(11),
20494fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(12),
20504fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(13),
20514fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(14),
20524fcdf106SIonela Voinescu 	AMU_AMEVCNTR0_EL0(15),
2053493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(0),
2054493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(1),
2055493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(2),
2056493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(3),
2057493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(4),
2058493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(5),
2059493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(6),
2060493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(7),
2061493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(8),
2062493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(9),
2063493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(10),
2064493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(11),
2065493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(12),
2066493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(13),
2067493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(14),
2068493cf9b7SVladimir Murzin 	AMU_AMEVTYPER0_EL0(15),
20694fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(0),
20704fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(1),
20714fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(2),
20724fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(3),
20734fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(4),
20744fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(5),
20754fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(6),
20764fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(7),
20774fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(8),
20784fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(9),
20794fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(10),
20804fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(11),
20814fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(12),
20824fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(13),
20834fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(14),
20844fcdf106SIonela Voinescu 	AMU_AMEVCNTR1_EL0(15),
2085493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(0),
2086493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(1),
2087493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(2),
2088493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(3),
2089493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(4),
2090493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(5),
2091493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(6),
2092493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(7),
2093493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(8),
2094493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(9),
2095493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(10),
2096493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(11),
2097493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(12),
2098493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(13),
2099493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(14),
2100493cf9b7SVladimir Murzin 	AMU_AMEVTYPER1_EL0(15),
21014fcdf106SIonela Voinescu 
2102c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2103c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
210484135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
210584135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
210684135d3dSAndre Przywara 	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
2107c9a3c58fSJintack Lim 
2108051ff581SShannon Zhao 	/* PMEVCNTRn_EL0 */
2109051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(0),
2110051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(1),
2111051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(2),
2112051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(3),
2113051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(4),
2114051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(5),
2115051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(6),
2116051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(7),
2117051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(8),
2118051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(9),
2119051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(10),
2120051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(11),
2121051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(12),
2122051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(13),
2123051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(14),
2124051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(15),
2125051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(16),
2126051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(17),
2127051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(18),
2128051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(19),
2129051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(20),
2130051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(21),
2131051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(22),
2132051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(23),
2133051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(24),
2134051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(25),
2135051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(26),
2136051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(27),
2137051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(28),
2138051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(29),
2139051ff581SShannon Zhao 	PMU_PMEVCNTR_EL0(30),
21409feb21acSShannon Zhao 	/* PMEVTYPERn_EL0 */
21419feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(0),
21429feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(1),
21439feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(2),
21449feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(3),
21459feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(4),
21469feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(5),
21479feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(6),
21489feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(7),
21499feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(8),
21509feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(9),
21519feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(10),
21529feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(11),
21539feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(12),
21549feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(13),
21559feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(14),
21569feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(15),
21579feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(16),
21589feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(17),
21599feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(18),
21609feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(19),
21619feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(20),
21629feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(21),
21639feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(22),
21649feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(23),
21659feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(24),
21669feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(25),
21679feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(26),
21689feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(27),
21699feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(28),
21709feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(29),
21719feb21acSShannon Zhao 	PMU_PMEVTYPER_EL0(30),
2172174ed3e4SMark Rutland 	/*
2173174ed3e4SMark Rutland 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
21749feb21acSShannon Zhao 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
21759feb21acSShannon Zhao 	 */
217611663111SMarc Zyngier 	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
217711663111SMarc Zyngier 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2178051ff581SShannon Zhao 
21796ff9dc23SJintack Lim 	EL2_REG(VPIDR_EL2, access_rw, reset_unknown, 0),
21806ff9dc23SJintack Lim 	EL2_REG(VMPIDR_EL2, access_rw, reset_unknown, 0),
21816ff9dc23SJintack Lim 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
21826ff9dc23SJintack Lim 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
21836ff9dc23SJintack Lim 	EL2_REG(HCR_EL2, access_rw, reset_val, 0),
21846ff9dc23SJintack Lim 	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
21856ff9dc23SJintack Lim 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT),
21866ff9dc23SJintack Lim 	EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
21876ff9dc23SJintack Lim 	EL2_REG(HACR_EL2, access_rw, reset_val, 0),
21886ff9dc23SJintack Lim 
21896ff9dc23SJintack Lim 	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
21906ff9dc23SJintack Lim 	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
21916ff9dc23SJintack Lim 	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
21926ff9dc23SJintack Lim 	EL2_REG(VTTBR_EL2, access_rw, reset_val, 0),
21936ff9dc23SJintack Lim 	EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
21946ff9dc23SJintack Lim 
2195851050a5SMark Rutland 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
21966ff9dc23SJintack Lim 	EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
21976ff9dc23SJintack Lim 	EL2_REG(ELR_EL2, access_rw, reset_val, 0),
21986ff9dc23SJintack Lim 	{ SYS_DESC(SYS_SP_EL1), access_sp_el1 },
21996ff9dc23SJintack Lim 
2200851050a5SMark Rutland 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
22016ff9dc23SJintack Lim 	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
22026ff9dc23SJintack Lim 	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
22036ff9dc23SJintack Lim 	EL2_REG(ESR_EL2, access_rw, reset_val, 0),
2204c88b0936SDave Martin 	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
22056ff9dc23SJintack Lim 
22066ff9dc23SJintack Lim 	EL2_REG(FAR_EL2, access_rw, reset_val, 0),
22076ff9dc23SJintack Lim 	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
22086ff9dc23SJintack Lim 
22096ff9dc23SJintack Lim 	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
22106ff9dc23SJintack Lim 	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
22116ff9dc23SJintack Lim 
22126ff9dc23SJintack Lim 	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
22136ff9dc23SJintack Lim 	EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
22146ff9dc23SJintack Lim 	{ SYS_DESC(SYS_RMR_EL2), trap_undef },
22156ff9dc23SJintack Lim 
22166ff9dc23SJintack Lim 	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
22176ff9dc23SJintack Lim 	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
22186ff9dc23SJintack Lim 
22196ff9dc23SJintack Lim 	EL2_REG(CNTVOFF_EL2, access_rw, reset_val, 0),
22206ff9dc23SJintack Lim 	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
22216ff9dc23SJintack Lim 
2222280b748eSJintack Lim 	EL12_REG(SCTLR, access_vm_reg, reset_val, 0x00C50078),
2223280b748eSJintack Lim 	EL12_REG(CPACR, access_rw, reset_val, 0),
2224280b748eSJintack Lim 	EL12_REG(TTBR0, access_vm_reg, reset_unknown, 0),
2225280b748eSJintack Lim 	EL12_REG(TTBR1, access_vm_reg, reset_unknown, 0),
2226280b748eSJintack Lim 	EL12_REG(TCR, access_vm_reg, reset_val, 0),
2227280b748eSJintack Lim 	{ SYS_DESC(SYS_SPSR_EL12), access_spsr },
2228280b748eSJintack Lim 	{ SYS_DESC(SYS_ELR_EL12), access_elr },
2229280b748eSJintack Lim 	EL12_REG(AFSR0, access_vm_reg, reset_unknown, 0),
2230280b748eSJintack Lim 	EL12_REG(AFSR1, access_vm_reg, reset_unknown, 0),
2231280b748eSJintack Lim 	EL12_REG(ESR, access_vm_reg, reset_unknown, 0),
2232280b748eSJintack Lim 	EL12_REG(FAR, access_vm_reg, reset_unknown, 0),
2233280b748eSJintack Lim 	EL12_REG(MAIR, access_vm_reg, reset_unknown, 0),
2234280b748eSJintack Lim 	EL12_REG(AMAIR, access_vm_reg, reset_amair_el1, 0),
2235280b748eSJintack Lim 	EL12_REG(VBAR, access_rw, reset_val, 0),
2236280b748eSJintack Lim 	EL12_REG(CONTEXTIDR, access_vm_reg, reset_val, 0),
2237280b748eSJintack Lim 	EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2238280b748eSJintack Lim 
22396ff9dc23SJintack Lim 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
224062a89c44SMarc Zyngier };
224162a89c44SMarc Zyngier 
22428c358b29SAlexandru Elisei static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
22433fec037dSPavel Fedin 			struct sys_reg_params *p,
2244bdfb4b38SMarc Zyngier 			const struct sys_reg_desc *r)
2245bdfb4b38SMarc Zyngier {
2246bdfb4b38SMarc Zyngier 	if (p->is_write) {
2247bdfb4b38SMarc Zyngier 		return ignore_write(vcpu, p);
2248bdfb4b38SMarc Zyngier 	} else {
224946823dd1SDave Martin 		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
225046823dd1SDave Martin 		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
225155adc08dSMark Brown 		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);
2252bdfb4b38SMarc Zyngier 
2253fcf37b38SMark Brown 		p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
2254fcf37b38SMark Brown 			     (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
2255fcf37b38SMark Brown 			     (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
2256bea7e97fSMarc Zyngier 			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
2257bdfb4b38SMarc Zyngier 		return true;
2258bdfb4b38SMarc Zyngier 	}
2259bdfb4b38SMarc Zyngier }
2260bdfb4b38SMarc Zyngier 
22611da42c34SMarc Zyngier /*
22621da42c34SMarc Zyngier  * AArch32 debug register mappings
226384e690bfSAlex Bennée  *
226484e690bfSAlex Bennée  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
226584e690bfSAlex Bennée  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
226684e690bfSAlex Bennée  *
22671da42c34SMarc Zyngier  * None of the other registers share their location, so treat them as
22681da42c34SMarc Zyngier  * if they were 64bit.
226984e690bfSAlex Bennée  */
2270bdfb4b38SMarc Zyngier #define DBG_BCR_BVR_WCR_WVR(n)						      \
2271bdfb4b38SMarc Zyngier 	/* DBGBVRn */							      \
22721da42c34SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
2273bdfb4b38SMarc Zyngier 	/* DBGBCRn */							      \
227484e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
2275bdfb4b38SMarc Zyngier 	/* DBGWVRn */							      \
227684e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
2277bdfb4b38SMarc Zyngier 	/* DBGWCRn */							      \
227884e690bfSAlex Bennée 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
2279bdfb4b38SMarc Zyngier 
2280bdfb4b38SMarc Zyngier #define DBGBXVR(n)							      \
22811da42c34SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
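/*
 * Illustration only, not a table entry: with the mapping above, an
 * AArch32 guest writing DBGBVR1 (MCR p14, 0, Rt, c0, c1, 4) and
 * DBGBXVR1 (MCR p14, 0, Rt, c1, c1, 1) updates bits [31:0] and
 * [63:32] of the single 64-bit DBGBVR1_EL1 shadow respectively; the
 * AA32(LO)/AA32(HI) tags tell the common access code which half of
 * the 64-bit register a given 32-bit access targets.
 */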
2282bdfb4b38SMarc Zyngier 
2283bdfb4b38SMarc Zyngier /*
2284bdfb4b38SMarc Zyngier  * Trapped cp14 registers. We generally ignore most of the external
2285bdfb4b38SMarc Zyngier  * debug, on the principle that they don't really make sense to a
228684e690bfSAlex Bennée  * guest. Revisit this one day, should this principle change.
2287bdfb4b38SMarc Zyngier  */
228872564016SMarc Zyngier static const struct sys_reg_desc cp14_regs[] = {
22898c358b29SAlexandru Elisei 	/* DBGDIDR */
22908c358b29SAlexandru Elisei 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
2291bdfb4b38SMarc Zyngier 	/* DBGDTRRXext */
2292bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
2293bdfb4b38SMarc Zyngier 
2294bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(0),
2295bdfb4b38SMarc Zyngier 	/* DBGDSCRint */
2296bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
2297bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(1),
2298bdfb4b38SMarc Zyngier 	/* DBGDCCINT */
22991da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
2300bdfb4b38SMarc Zyngier 	/* DBGDSCRext */
23011da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
2302bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(2),
2303bdfb4b38SMarc Zyngier 	/* DBGDTR[RT]Xint */
2304bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
2305bdfb4b38SMarc Zyngier 	/* DBGDTR[RT]Xext */
2306bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
2307bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(3),
2308bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(4),
2309bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(5),
2310bdfb4b38SMarc Zyngier 	/* DBGWFAR */
2311bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
2312bdfb4b38SMarc Zyngier 	/* DBGOSECCR */
2313bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
2314bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(6),
2315bdfb4b38SMarc Zyngier 	/* DBGVCR */
23161da42c34SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
2317bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(7),
2318bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(8),
2319bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(9),
2320bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(10),
2321bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(11),
2322bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(12),
2323bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(13),
2324bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(14),
2325bdfb4b38SMarc Zyngier 	DBG_BCR_BVR_WCR_WVR(15),
2326bdfb4b38SMarc Zyngier 
2327bdfb4b38SMarc Zyngier 	/* DBGDRAR (32bit) */
2328bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
2329bdfb4b38SMarc Zyngier 
2330bdfb4b38SMarc Zyngier 	DBGBXVR(0),
2331bdfb4b38SMarc Zyngier 	/* DBGOSLAR */
2332f24adc65SOliver Upton 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
2333bdfb4b38SMarc Zyngier 	DBGBXVR(1),
2334bdfb4b38SMarc Zyngier 	/* DBGOSLSR */
2335d42e2671SOliver Upton 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
2336bdfb4b38SMarc Zyngier 	DBGBXVR(2),
2337bdfb4b38SMarc Zyngier 	DBGBXVR(3),
2338bdfb4b38SMarc Zyngier 	/* DBGOSDLR */
2339bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
2340bdfb4b38SMarc Zyngier 	DBGBXVR(4),
2341bdfb4b38SMarc Zyngier 	/* DBGPRCR */
2342bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
2343bdfb4b38SMarc Zyngier 	DBGBXVR(5),
2344bdfb4b38SMarc Zyngier 	DBGBXVR(6),
2345bdfb4b38SMarc Zyngier 	DBGBXVR(7),
2346bdfb4b38SMarc Zyngier 	DBGBXVR(8),
2347bdfb4b38SMarc Zyngier 	DBGBXVR(9),
2348bdfb4b38SMarc Zyngier 	DBGBXVR(10),
2349bdfb4b38SMarc Zyngier 	DBGBXVR(11),
2350bdfb4b38SMarc Zyngier 	DBGBXVR(12),
2351bdfb4b38SMarc Zyngier 	DBGBXVR(13),
2352bdfb4b38SMarc Zyngier 	DBGBXVR(14),
2353bdfb4b38SMarc Zyngier 	DBGBXVR(15),
2354bdfb4b38SMarc Zyngier 
2355bdfb4b38SMarc Zyngier 	/* DBGDSAR (32bit) */
2356bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
2357bdfb4b38SMarc Zyngier 
2358bdfb4b38SMarc Zyngier 	/* DBGDEVID2 */
2359bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
2360bdfb4b38SMarc Zyngier 	/* DBGDEVID1 */
2361bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
2362bdfb4b38SMarc Zyngier 	/* DBGDEVID */
2363bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
2364bdfb4b38SMarc Zyngier 	/* DBGCLAIMSET */
2365bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
2366bdfb4b38SMarc Zyngier 	/* DBGCLAIMCLR */
2367bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
2368bdfb4b38SMarc Zyngier 	/* DBGAUTHSTATUS */
2369bdfb4b38SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
237072564016SMarc Zyngier };
237172564016SMarc Zyngier 
2372a9866ba0SMarc Zyngier /* Trapped cp14 64bit registers */
2373a9866ba0SMarc Zyngier static const struct sys_reg_desc cp14_64_regs[] = {
2374bdfb4b38SMarc Zyngier 	/* DBGDRAR (64bit) */
2375bdfb4b38SMarc Zyngier 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
2376bdfb4b38SMarc Zyngier 
2377bdfb4b38SMarc Zyngier 	/* DBGDSAR (64bit) */
2378bdfb4b38SMarc Zyngier 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
2379a9866ba0SMarc Zyngier };
2380a9866ba0SMarc Zyngier 
2381a9e192cdSAlexandru Elisei #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
2382a9e192cdSAlexandru Elisei 	AA32(_map),							\
2383a9e192cdSAlexandru Elisei 	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
2384a9e192cdSAlexandru Elisei 	.visibility = pmu_visibility
2385a9e192cdSAlexandru Elisei 
2386051ff581SShannon Zhao /* Macro to expand the PMEVCNTRn register */
2387051ff581SShannon Zhao #define PMU_PMEVCNTR(n)							\
2388a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2389a9e192cdSAlexandru Elisei 	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2390a9e192cdSAlexandru Elisei 	  .access = access_pmu_evcntr }
2391051ff581SShannon Zhao 
23929feb21acSShannon Zhao /* Macro to expand the PMEVTYPERn register */
23939feb21acSShannon Zhao #define PMU_PMEVTYPER(n)						\
2394a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
2395a9e192cdSAlexandru Elisei 	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
2396a9e192cdSAlexandru Elisei 	  .access = access_pmu_evtyper }
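/*
 * Worked example of the encodings generated by the two macros above,
 * for illustration only: PMU_PMEVCNTR(20) expands to Op1=0,
 * CRn=0b1110 (c14), CRm=0b1010 (c10), Op2=0b100, i.e. the AArch32
 * PMEVCNTR20 encoding; PMU_PMEVTYPER(20) differs only in its CRm base
 * (0b1100, c12, instead of 0b1000, c8), yielding CRm=0b1110 (c14) for
 * the same n.
 */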
23974d44923bSMarc Zyngier /*
23984d44923bSMarc Zyngier  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
23994d44923bSMarc Zyngier  * depending on the way they are accessed (as a 32bit or a 64bit
24004d44923bSMarc Zyngier  * register).
24014d44923bSMarc Zyngier  */
240262a89c44SMarc Zyngier static const struct sys_reg_desc cp15_regs[] = {
2403f7f2b15cSArd Biesheuvel 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
2404b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
2405b1ea1d76SMarc Zyngier 	/* ACTLR */
2406b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
2407b1ea1d76SMarc Zyngier 	/* ACTLR2 */
2408b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
2409b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2410b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
2411b1ea1d76SMarc Zyngier 	/* TTBCR */
2412b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
2413b1ea1d76SMarc Zyngier 	/* TTBCR2 */
2414b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
2415b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
2416b1ea1d76SMarc Zyngier 	/* DFSR */
2417b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
2418b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
2419b1ea1d76SMarc Zyngier 	/* ADFSR */
2420b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
2421b1ea1d76SMarc Zyngier 	/* AIFSR */
2422b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
2423b1ea1d76SMarc Zyngier 	/* DFAR */
2424b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
2425b1ea1d76SMarc Zyngier 	/* IFAR */
2426b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
24274d44923bSMarc Zyngier 
242862a89c44SMarc Zyngier 	/*
242962a89c44SMarc Zyngier 	 * DC{C,I,CI}SW operations:
243062a89c44SMarc Zyngier 	 */
243162a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
243262a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
243362a89c44SMarc Zyngier 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
24344d44923bSMarc Zyngier 
24357609c125SMarc Zyngier 	/* PMU */
2436a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2437a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2438a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2439a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2440a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2441a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2442a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
2443a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
2444a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2445a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2446a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2447a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2448a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2449a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2450a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2451a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
2452a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
245346081078SMarc Zyngier 	/* PMMIR */
2454a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
24554d44923bSMarc Zyngier 
2456b1ea1d76SMarc Zyngier 	/* PRRR/MAIR0 */
2457b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2458b1ea1d76SMarc Zyngier 	/* NMRR/MAIR1 */
2459b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2460b1ea1d76SMarc Zyngier 	/* AMAIR0 */
2461b1ea1d76SMarc Zyngier 	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2462b1ea1d76SMarc Zyngier 	/* AMAIR1 */
2463b1ea1d76SMarc Zyngier 	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
2464db7dedd0SChristoffer Dall 
2465db7dedd0SChristoffer Dall 	/* ICC_SRE */
2466f7f6f2d9SVladimir Murzin 	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
2467db7dedd0SChristoffer Dall 
2468b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
2469051ff581SShannon Zhao 
247084135d3dSAndre Przywara 	/* Arch Timers */
247184135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
247284135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
2473eac137b4SJérémy Fanguède 
2474051ff581SShannon Zhao 	/* PMEVCNTRn */
2475051ff581SShannon Zhao 	PMU_PMEVCNTR(0),
2476051ff581SShannon Zhao 	PMU_PMEVCNTR(1),
2477051ff581SShannon Zhao 	PMU_PMEVCNTR(2),
2478051ff581SShannon Zhao 	PMU_PMEVCNTR(3),
2479051ff581SShannon Zhao 	PMU_PMEVCNTR(4),
2480051ff581SShannon Zhao 	PMU_PMEVCNTR(5),
2481051ff581SShannon Zhao 	PMU_PMEVCNTR(6),
2482051ff581SShannon Zhao 	PMU_PMEVCNTR(7),
2483051ff581SShannon Zhao 	PMU_PMEVCNTR(8),
2484051ff581SShannon Zhao 	PMU_PMEVCNTR(9),
2485051ff581SShannon Zhao 	PMU_PMEVCNTR(10),
2486051ff581SShannon Zhao 	PMU_PMEVCNTR(11),
2487051ff581SShannon Zhao 	PMU_PMEVCNTR(12),
2488051ff581SShannon Zhao 	PMU_PMEVCNTR(13),
2489051ff581SShannon Zhao 	PMU_PMEVCNTR(14),
2490051ff581SShannon Zhao 	PMU_PMEVCNTR(15),
2491051ff581SShannon Zhao 	PMU_PMEVCNTR(16),
2492051ff581SShannon Zhao 	PMU_PMEVCNTR(17),
2493051ff581SShannon Zhao 	PMU_PMEVCNTR(18),
2494051ff581SShannon Zhao 	PMU_PMEVCNTR(19),
2495051ff581SShannon Zhao 	PMU_PMEVCNTR(20),
2496051ff581SShannon Zhao 	PMU_PMEVCNTR(21),
2497051ff581SShannon Zhao 	PMU_PMEVCNTR(22),
2498051ff581SShannon Zhao 	PMU_PMEVCNTR(23),
2499051ff581SShannon Zhao 	PMU_PMEVCNTR(24),
2500051ff581SShannon Zhao 	PMU_PMEVCNTR(25),
2501051ff581SShannon Zhao 	PMU_PMEVCNTR(26),
2502051ff581SShannon Zhao 	PMU_PMEVCNTR(27),
2503051ff581SShannon Zhao 	PMU_PMEVCNTR(28),
2504051ff581SShannon Zhao 	PMU_PMEVCNTR(29),
2505051ff581SShannon Zhao 	PMU_PMEVCNTR(30),
25069feb21acSShannon Zhao 	/* PMEVTYPERn */
25079feb21acSShannon Zhao 	PMU_PMEVTYPER(0),
25089feb21acSShannon Zhao 	PMU_PMEVTYPER(1),
25099feb21acSShannon Zhao 	PMU_PMEVTYPER(2),
25109feb21acSShannon Zhao 	PMU_PMEVTYPER(3),
25119feb21acSShannon Zhao 	PMU_PMEVTYPER(4),
25129feb21acSShannon Zhao 	PMU_PMEVTYPER(5),
25139feb21acSShannon Zhao 	PMU_PMEVTYPER(6),
25149feb21acSShannon Zhao 	PMU_PMEVTYPER(7),
25159feb21acSShannon Zhao 	PMU_PMEVTYPER(8),
25169feb21acSShannon Zhao 	PMU_PMEVTYPER(9),
25179feb21acSShannon Zhao 	PMU_PMEVTYPER(10),
25189feb21acSShannon Zhao 	PMU_PMEVTYPER(11),
25199feb21acSShannon Zhao 	PMU_PMEVTYPER(12),
25209feb21acSShannon Zhao 	PMU_PMEVTYPER(13),
25219feb21acSShannon Zhao 	PMU_PMEVTYPER(14),
25229feb21acSShannon Zhao 	PMU_PMEVTYPER(15),
25239feb21acSShannon Zhao 	PMU_PMEVTYPER(16),
25249feb21acSShannon Zhao 	PMU_PMEVTYPER(17),
25259feb21acSShannon Zhao 	PMU_PMEVTYPER(18),
25269feb21acSShannon Zhao 	PMU_PMEVTYPER(19),
25279feb21acSShannon Zhao 	PMU_PMEVTYPER(20),
25289feb21acSShannon Zhao 	PMU_PMEVTYPER(21),
25299feb21acSShannon Zhao 	PMU_PMEVTYPER(22),
25309feb21acSShannon Zhao 	PMU_PMEVTYPER(23),
25319feb21acSShannon Zhao 	PMU_PMEVTYPER(24),
25329feb21acSShannon Zhao 	PMU_PMEVTYPER(25),
25339feb21acSShannon Zhao 	PMU_PMEVTYPER(26),
25349feb21acSShannon Zhao 	PMU_PMEVTYPER(27),
25359feb21acSShannon Zhao 	PMU_PMEVTYPER(28),
25369feb21acSShannon Zhao 	PMU_PMEVTYPER(29),
25379feb21acSShannon Zhao 	PMU_PMEVTYPER(30),
25389feb21acSShannon Zhao 	/* PMCCFILTR */
2539a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
2540f7f2b15cSArd Biesheuvel 
2541f7f2b15cSArd Biesheuvel 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
2542f7f2b15cSArd Biesheuvel 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
2543bf48040cSAkihiko Odaki 
2544bf48040cSAkihiko Odaki 	/* CCSIDR2 */
2545bf48040cSAkihiko Odaki 	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
2546bf48040cSAkihiko Odaki 
2547b1ea1d76SMarc Zyngier 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
2548a9866ba0SMarc Zyngier };
2549a9866ba0SMarc Zyngier 
2550a9866ba0SMarc Zyngier static const struct sys_reg_desc cp15_64_regs[] = {
2551b1ea1d76SMarc Zyngier 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2552a9e192cdSAlexandru Elisei 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
255303bd646dSMarc Zyngier 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
2554c605ee24SMarc Zyngier 	{ SYS_DESC(SYS_AARCH32_CNTPCT),	      access_arch_timer },
2555b1ea1d76SMarc Zyngier 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
255603bd646dSMarc Zyngier 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
255703bd646dSMarc Zyngier 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
255884135d3dSAndre Przywara 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
2559a6610435SMarc Zyngier 	{ SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
25607c8c5e6aSMarc Zyngier };
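/*
 * Illustration of the TTBR double encoding described above cp15_regs
 * (example only): a 32-bit "MCR p15, 0, Rt, c2, c0, 0" matches the
 * cp15_regs entry with CRn(2)/CRm(0) and is routed to TTBR0_EL1,
 * while a 64-bit "MCRR p15, 0, Rt, Rt2, c2" matches the cp15_64_regs
 * entry with Op1(0)/CRm(2) and targets the same TTBR0_EL1 backing
 * register.
 */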
25617c8c5e6aSMarc Zyngier 
2562f1f0c0cfSAlexandru Elisei static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
2563bb44a8dbSMarc Zyngier 			       bool is_32)
2564bb44a8dbSMarc Zyngier {
2565bb44a8dbSMarc Zyngier 	unsigned int i;
2566bb44a8dbSMarc Zyngier 
2567bb44a8dbSMarc Zyngier 	for (i = 0; i < n; i++) {
2568bb44a8dbSMarc Zyngier 		if (!is_32 && table[i].reg && !table[i].reset) {
2569325031d4SAlexandru Elisei 			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
2570f1f0c0cfSAlexandru Elisei 			return false;
2571bb44a8dbSMarc Zyngier 		}
2572bb44a8dbSMarc Zyngier 
2573bb44a8dbSMarc Zyngier 		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
2574325031d4SAlexandru Elisei 			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
2575f1f0c0cfSAlexandru Elisei 			return false;
2576bb44a8dbSMarc Zyngier 		}
2577bb44a8dbSMarc Zyngier 	}
2578bb44a8dbSMarc Zyngier 
2579f1f0c0cfSAlexandru Elisei 	return true;
2580bb44a8dbSMarc Zyngier }
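/*
 * As the checks above imply, a 64-bit table entry with backing
 * storage (.reg set) must also provide a .reset handler, and every
 * table must be sorted by ascending (Op0, Op1, CRn, CRm, Op2)
 * encoding, presumably so that find_reg() can binary-search it.
 * Violations are reported via kvm_err() and the table is rejected.
 */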
2581bb44a8dbSMarc Zyngier 
258274cc7e0cSTianjia Zhang int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
258362a89c44SMarc Zyngier {
258462a89c44SMarc Zyngier 	kvm_inject_undefined(vcpu);
258562a89c44SMarc Zyngier 	return 1;
258662a89c44SMarc Zyngier }
258762a89c44SMarc Zyngier 
2588e70b9522SMarc Zyngier static void perform_access(struct kvm_vcpu *vcpu,
2589e70b9522SMarc Zyngier 			   struct sys_reg_params *params,
2590e70b9522SMarc Zyngier 			   const struct sys_reg_desc *r)
2591e70b9522SMarc Zyngier {
2592599d79dcSMarc Zyngier 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
2593599d79dcSMarc Zyngier 
25947f34e409SDave Martin 	/* Check for regs disabled by runtime config */
259501fe5aceSAndrew Jones 	if (sysreg_hidden(vcpu, r)) {
25967f34e409SDave Martin 		kvm_inject_undefined(vcpu);
25977f34e409SDave Martin 		return;
25987f34e409SDave Martin 	}
25997f34e409SDave Martin 
2600e70b9522SMarc Zyngier 	/*
2601e70b9522SMarc Zyngier 	 * Not having an accessor means that we have configured a trap
2602e70b9522SMarc Zyngier 	 * that we don't know how to handle. This certainly qualifies
2603e70b9522SMarc Zyngier 	 * as a gross bug that should be fixed right away.
2604e70b9522SMarc Zyngier 	 */
2605e70b9522SMarc Zyngier 	BUG_ON(!r->access);
2606e70b9522SMarc Zyngier 
2607e70b9522SMarc Zyngier 	/* Skip the trapped instruction when the handler tells us to */
2608e70b9522SMarc Zyngier 	if (likely(r->access(vcpu, params, r)))
2609cdb5e02eSMarc Zyngier 		kvm_incr_pc(vcpu);
2610e70b9522SMarc Zyngier }
2611e70b9522SMarc Zyngier 
261272564016SMarc Zyngier /*
261372564016SMarc Zyngier  * emulate_cp --  tries to match a sys_reg access in a handling table, and
261472564016SMarc Zyngier  *                call the corresponding trap handler.
261572564016SMarc Zyngier  *
 * @vcpu: the vCPU pointer
261672564016SMarc Zyngier  * @params: pointer to the descriptor of the access
261772564016SMarc Zyngier  * @table: array of trap descriptors
261872564016SMarc Zyngier  * @num: size of the trap descriptor array
261972564016SMarc Zyngier  *
2620001bb819SOliver Upton  * Return true if the access has been handled, false if not.
262172564016SMarc Zyngier  */
2622001bb819SOliver Upton static bool emulate_cp(struct kvm_vcpu *vcpu,
26233fec037dSPavel Fedin 		       struct sys_reg_params *params,
262472564016SMarc Zyngier 		       const struct sys_reg_desc *table,
262572564016SMarc Zyngier 		       size_t num)
262662a89c44SMarc Zyngier {
262772564016SMarc Zyngier 	const struct sys_reg_desc *r;
262862a89c44SMarc Zyngier 
262972564016SMarc Zyngier 	if (!table)
2630001bb819SOliver Upton 		return false;	/* Not handled */
263162a89c44SMarc Zyngier 
263262a89c44SMarc Zyngier 	r = find_reg(params, table, num);
263362a89c44SMarc Zyngier 
263472564016SMarc Zyngier 	if (r) {
2635e70b9522SMarc Zyngier 		perform_access(vcpu, params, r);
2636001bb819SOliver Upton 		return true;
263772564016SMarc Zyngier 	}
263872564016SMarc Zyngier 
263972564016SMarc Zyngier 	/* Not handled */
2640001bb819SOliver Upton 	return false;
264172564016SMarc Zyngier }
264272564016SMarc Zyngier 
264372564016SMarc Zyngier static void unhandled_cp_access(struct kvm_vcpu *vcpu,
264472564016SMarc Zyngier 				struct sys_reg_params *params)
264572564016SMarc Zyngier {
26463a949f4cSGavin Shan 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
264740c4f8d2SDan Carpenter 	int cp = -1;
264872564016SMarc Zyngier 
26493a949f4cSGavin Shan 	switch (esr_ec) {
2650c6d01a94SMark Rutland 	case ESR_ELx_EC_CP15_32:
2651c6d01a94SMark Rutland 	case ESR_ELx_EC_CP15_64:
265272564016SMarc Zyngier 		cp = 15;
265372564016SMarc Zyngier 		break;
2654c6d01a94SMark Rutland 	case ESR_ELx_EC_CP14_MR:
2655c6d01a94SMark Rutland 	case ESR_ELx_EC_CP14_64:
265672564016SMarc Zyngier 		cp = 14;
265772564016SMarc Zyngier 		break;
265872564016SMarc Zyngier 	default:
265940c4f8d2SDan Carpenter 		WARN_ON(1);
266072564016SMarc Zyngier 	}
266172564016SMarc Zyngier 
2662bf4b96bbSMark Rutland 	print_sys_reg_msg(params,
2663bf4b96bbSMark Rutland 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
2664d1878af3SMark Rutland 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
266562a89c44SMarc Zyngier 	kvm_inject_undefined(vcpu);
266662a89c44SMarc Zyngier }
266762a89c44SMarc Zyngier 
266862a89c44SMarc Zyngier /**
26697769db90SShannon Zhao  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
267062a89c44SMarc Zyngier  * @vcpu: The VCPU pointer
 * @global: pointer to the table of trap descriptors to search
 * @nr_global: number of entries in @global
267262a89c44SMarc Zyngier  */
267372564016SMarc Zyngier static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
267472564016SMarc Zyngier 			    const struct sys_reg_desc *global,
2675dcaffa7bSJames Morse 			    size_t nr_global)
267662a89c44SMarc Zyngier {
267762a89c44SMarc Zyngier 	struct sys_reg_params params;
26780b12620fSAlexandru Elisei 	u64 esr = kvm_vcpu_get_esr(vcpu);
2679c667186fSMarc Zyngier 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
26803a949f4cSGavin Shan 	int Rt2 = (esr >> 10) & 0x1f;
268162a89c44SMarc Zyngier 
26823a949f4cSGavin Shan 	params.CRm = (esr >> 1) & 0xf;
26833a949f4cSGavin Shan 	params.is_write = ((esr & 1) == 0);
268462a89c44SMarc Zyngier 
268562a89c44SMarc Zyngier 	params.Op0 = 0;
26863a949f4cSGavin Shan 	params.Op1 = (esr >> 16) & 0xf;
268762a89c44SMarc Zyngier 	params.Op2 = 0;
268862a89c44SMarc Zyngier 	params.CRn = 0;
268962a89c44SMarc Zyngier 
269062a89c44SMarc Zyngier 	/*
26912ec5be3dSPavel Fedin 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
269262a89c44SMarc Zyngier 	 * backends between AArch32 and AArch64, we get away with it.
269362a89c44SMarc Zyngier 	 */
269462a89c44SMarc Zyngier 	if (params.is_write) {
26952ec5be3dSPavel Fedin 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
26962ec5be3dSPavel Fedin 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
269762a89c44SMarc Zyngier 	}
269862a89c44SMarc Zyngier 
2699b6b7a806SMarc Zyngier 	/*
2700dcaffa7bSJames Morse 	 * If the table contains a handler, let it handle the access. For
2701b6b7a806SMarc Zyngier 	 * a read, the 64-bit result is split back into Rt and Rt2 below
2702b6b7a806SMarc Zyngier 	 * before returning with success.
2703b6b7a806SMarc Zyngier 	 */
2704001bb819SOliver Upton 	if (emulate_cp(vcpu, &params, global, nr_global)) {
27052ec5be3dSPavel Fedin 		/* Split up the value between registers for the read side */
270662a89c44SMarc Zyngier 		if (!params.is_write) {
27072ec5be3dSPavel Fedin 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
27082ec5be3dSPavel Fedin 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
270962a89c44SMarc Zyngier 		}
271062a89c44SMarc Zyngier 
271162a89c44SMarc Zyngier 		return 1;
271262a89c44SMarc Zyngier 	}
271362a89c44SMarc Zyngier 
2714b6b7a806SMarc Zyngier 	unhandled_cp_access(vcpu, &params);
2715b6b7a806SMarc Zyngier 	return 1;
2716b6b7a806SMarc Zyngier }
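/*
 * Worked example of the decoding above (illustration only): for a
 * guest "MRRC p15, 0, r2, r3, c14" (a CNTPCT read), the ESR carries
 * Op1 in [19:16], Rt2 in [14:10], Rt in [9:5], CRm in [4:1] and the
 * direction in bit 0, so we end up with Op1=0, Rt=2, Rt2=3, CRm=14
 * and is_write=false, and the 64-bit counter value is split back
 * into r2/r3 on the way out.
 */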
2717b6b7a806SMarc Zyngier 
2718e6519766SOliver Upton static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
2719e6519766SOliver Upton 
27209369bc5cSOliver Upton /*
27219369bc5cSOliver Upton  * The CP10 ID registers are architecturally mapped to AArch64 feature
27229369bc5cSOliver Upton  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
27239369bc5cSOliver Upton  * from AArch32.
27249369bc5cSOliver Upton  */
2725ee87a9bdSMarc Zyngier static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
27269369bc5cSOliver Upton {
27279369bc5cSOliver Upton 	u8 reg_id = (esr >> 10) & 0xf;
27289369bc5cSOliver Upton 	bool valid;
27299369bc5cSOliver Upton 
27309369bc5cSOliver Upton 	params->is_write = ((esr & 1) == 0);
27319369bc5cSOliver Upton 	params->Op0 = 3;
27329369bc5cSOliver Upton 	params->Op1 = 0;
27339369bc5cSOliver Upton 	params->CRn = 0;
27349369bc5cSOliver Upton 	params->CRm = 3;
27359369bc5cSOliver Upton 
27369369bc5cSOliver Upton 	/* CP10 ID registers are read-only */
27379369bc5cSOliver Upton 	valid = !params->is_write;
27389369bc5cSOliver Upton 
27399369bc5cSOliver Upton 	switch (reg_id) {
27409369bc5cSOliver Upton 	/* MVFR0 */
27419369bc5cSOliver Upton 	case 0b0111:
27429369bc5cSOliver Upton 		params->Op2 = 0;
27439369bc5cSOliver Upton 		break;
27449369bc5cSOliver Upton 	/* MVFR1 */
27459369bc5cSOliver Upton 	case 0b0110:
27469369bc5cSOliver Upton 		params->Op2 = 1;
27479369bc5cSOliver Upton 		break;
27489369bc5cSOliver Upton 	/* MVFR2 */
27499369bc5cSOliver Upton 	case 0b0101:
27509369bc5cSOliver Upton 		params->Op2 = 2;
27519369bc5cSOliver Upton 		break;
27529369bc5cSOliver Upton 	default:
27539369bc5cSOliver Upton 		valid = false;
27549369bc5cSOliver Upton 	}
27559369bc5cSOliver Upton 
27569369bc5cSOliver Upton 	if (valid)
27579369bc5cSOliver Upton 		return true;
27589369bc5cSOliver Upton 
27599369bc5cSOliver Upton 	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
27609369bc5cSOliver Upton 		      params->is_write ? "write" : "read", reg_id);
27619369bc5cSOliver Upton 	return false;
27629369bc5cSOliver Upton }
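/*
 * Example of the rerouting above (illustration only): a guest
 * "VMRS r0, MVFR1" traps with reg_id == 0b0110, which is turned into
 * Op0=3, Op1=0, CRn=0, CRm=3, Op2=1, i.e. the encoding of MVFR1_EL1,
 * so the generic AArch64 sys_reg emulation can service the access.
 */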
27639369bc5cSOliver Upton 
27649369bc5cSOliver Upton /**
27659369bc5cSOliver Upton  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
27669369bc5cSOliver Upton  *			  VFP Register' from AArch32.
27679369bc5cSOliver Upton  * @vcpu: The vCPU pointer
27689369bc5cSOliver Upton  *
27699369bc5cSOliver Upton  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
27709369bc5cSOliver Upton  * Work out the correct AArch64 system register encoding and reroute to the
27719369bc5cSOliver Upton  * AArch64 system register emulation.
27729369bc5cSOliver Upton  */
27739369bc5cSOliver Upton int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
27749369bc5cSOliver Upton {
27759369bc5cSOliver Upton 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2776ee87a9bdSMarc Zyngier 	u64 esr = kvm_vcpu_get_esr(vcpu);
27779369bc5cSOliver Upton 	struct sys_reg_params params;
27789369bc5cSOliver Upton 
27799369bc5cSOliver Upton 	/* UNDEF on any unhandled register access */
27809369bc5cSOliver Upton 	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
27819369bc5cSOliver Upton 		kvm_inject_undefined(vcpu);
27829369bc5cSOliver Upton 		return 1;
27839369bc5cSOliver Upton 	}
27849369bc5cSOliver Upton 
27859369bc5cSOliver Upton 	if (emulate_sys_reg(vcpu, &params))
27869369bc5cSOliver Upton 		vcpu_set_reg(vcpu, Rt, params.regval);
27879369bc5cSOliver Upton 
27889369bc5cSOliver Upton 	return 1;
27899369bc5cSOliver Upton }
27909369bc5cSOliver Upton 
2791e6519766SOliver Upton /**
2792e6519766SOliver Upton  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
2793e6519766SOliver Upton  *			       CRn=0, which corresponds to the AArch32 feature
2794e6519766SOliver Upton  *			       registers.
2795e6519766SOliver Upton  * @vcpu: the vCPU pointer
2796e6519766SOliver Upton  * @params: the system register access parameters.
2797e6519766SOliver Upton  *
2798e6519766SOliver Upton  * Our cp15 system register tables do not enumerate the AArch32 feature
2799e6519766SOliver Upton  * registers. Conveniently, our AArch64 table does, and the AArch32 system
2800e6519766SOliver Upton  * register encoding can be trivially remapped into the AArch64 for the feature
2801e6519766SOliver Upton  * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
2802e6519766SOliver Upton  *
2803e6519766SOliver Upton  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
2804e6519766SOliver Upton  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
2805e6519766SOliver Upton  * range are either UNKNOWN or RES0. Rerouting remains architectural as we
2806e6519766SOliver Upton  * treat undefined registers in this range as RAZ.
2807e6519766SOliver Upton  */
2808e6519766SOliver Upton static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
2809e6519766SOliver Upton 				   struct sys_reg_params *params)
2810e6519766SOliver Upton {
2811e6519766SOliver Upton 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
2812e6519766SOliver Upton 
2813e6519766SOliver Upton 	/* Treat impossible writes to RO registers as UNDEFINED */
2814e6519766SOliver Upton 	if (params->is_write) {
2815e6519766SOliver Upton 		unhandled_cp_access(vcpu, params);
2816e6519766SOliver Upton 		return 1;
2817e6519766SOliver Upton 	}
2818e6519766SOliver Upton 
2819e6519766SOliver Upton 	params->Op0 = 3;
2820e6519766SOliver Upton 
2821e6519766SOliver Upton 	/*
2822e6519766SOliver Upton 	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
2823e6519766SOliver Upton 	 * Avoid conflicting with future expansion of AArch64 feature registers
2824e6519766SOliver Upton 	 * and simply treat them as RAZ here.
2825e6519766SOliver Upton 	 */
2826e6519766SOliver Upton 	if (params->CRm > 3)
2827e6519766SOliver Upton 		params->regval = 0;
2828e6519766SOliver Upton 	else if (!emulate_sys_reg(vcpu, params))
2829e6519766SOliver Upton 		return 1;
2830e6519766SOliver Upton 
2831e6519766SOliver Upton 	vcpu_set_reg(vcpu, Rt, params->regval);
2832e6519766SOliver Upton 	return 1;
2833e6519766SOliver Upton }
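/*
 * Example of the remapping above (illustration only): a guest
 * "MRC p15, 0, r0, c0, c1, 0" (ID_PFR0) arrives here with Op1=0,
 * CRn=0, CRm=1, Op2=0; appending Op0=3 yields the AArch64 ID_PFR0_EL1
 * encoding, which the sys_reg_descs table then satisfies.
 */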
2834e6519766SOliver Upton 
283562a89c44SMarc Zyngier /**
28367769db90SShannon Zhao  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
283762a89c44SMarc Zyngier  * @vcpu: The VCPU pointer
 * @params: pointer to the decoded access parameters
 * @global: pointer to the table of trap descriptors to search
 * @nr_global: number of entries in @global
283962a89c44SMarc Zyngier  */
284072564016SMarc Zyngier static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
2841e6519766SOliver Upton 			    struct sys_reg_params *params,
284272564016SMarc Zyngier 			    const struct sys_reg_desc *global,
2843dcaffa7bSJames Morse 			    size_t nr_global)
284462a89c44SMarc Zyngier {
2845c667186fSMarc Zyngier 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
284662a89c44SMarc Zyngier 
2847e6519766SOliver Upton 	params->regval = vcpu_get_reg(vcpu, Rt);
284862a89c44SMarc Zyngier 
2849e6519766SOliver Upton 	if (emulate_cp(vcpu, params, global, nr_global)) {
2850e6519766SOliver Upton 		if (!params->is_write)
2851e6519766SOliver Upton 			vcpu_set_reg(vcpu, Rt, params->regval);
285262a89c44SMarc Zyngier 		return 1;
28532ec5be3dSPavel Fedin 	}
285472564016SMarc Zyngier 
2855e6519766SOliver Upton 	unhandled_cp_access(vcpu, params);
285672564016SMarc Zyngier 	return 1;
285772564016SMarc Zyngier }
285872564016SMarc Zyngier 
285974cc7e0cSTianjia Zhang int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
286072564016SMarc Zyngier {
2861dcaffa7bSJames Morse 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
286272564016SMarc Zyngier }
286372564016SMarc Zyngier 
286474cc7e0cSTianjia Zhang int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
286572564016SMarc Zyngier {
2866e6519766SOliver Upton 	struct sys_reg_params params;
2867e6519766SOliver Upton 
2868e6519766SOliver Upton 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2869e6519766SOliver Upton 
2870e6519766SOliver Upton 	/*
2871e6519766SOliver Upton 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
2872e6519766SOliver Upton 	 * system register table. Registers in the ID range where CRm=0 are
2873e6519766SOliver Upton 	 * excluded from this scheme as they do not trivially map into AArch64
2874e6519766SOliver Upton 	 * system register encodings.
2875e6519766SOliver Upton 	 */
2876e6519766SOliver Upton 	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
2877e6519766SOliver Upton 		return kvm_emulate_cp15_id_reg(vcpu, &params);
2878e6519766SOliver Upton 
2879e6519766SOliver Upton 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
288072564016SMarc Zyngier }
288172564016SMarc Zyngier 
288274cc7e0cSTianjia Zhang int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
288372564016SMarc Zyngier {
2884dcaffa7bSJames Morse 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
288572564016SMarc Zyngier }
288672564016SMarc Zyngier 
288774cc7e0cSTianjia Zhang int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
288872564016SMarc Zyngier {
2889e6519766SOliver Upton 	struct sys_reg_params params;
2890e6519766SOliver Upton 
2891e6519766SOliver Upton 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
2892e6519766SOliver Upton 
2893e6519766SOliver Upton 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
289462a89c44SMarc Zyngier }
289562a89c44SMarc Zyngier 
289654ad68b7SMark Rutland static bool is_imp_def_sys_reg(struct sys_reg_params *params)
289754ad68b7SMark Rutland {
289854ad68b7SMark Rutland 	/* See ARM DDI 0487E.a, section D12.3.2 */
289954ad68b7SMark Rutland 	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
290054ad68b7SMark Rutland }
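/*
 * The CRn mask above works because the IMPLEMENTATION DEFINED sys_reg
 * space uses CRn == 11 (0b1011) or CRn == 15 (0b1111), and those are
 * the only two CRn values whose AND with 0b1011 equals 0b1011.
 */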
290154ad68b7SMark Rutland 
290228eda7b5SOliver Upton /**
290328eda7b5SOliver Upton  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
290428eda7b5SOliver Upton  * @vcpu: The VCPU pointer
290528eda7b5SOliver Upton  * @params: Decoded system register parameters
290628eda7b5SOliver Upton  *
290728eda7b5SOliver Upton  * Return: true if the system register access was successful, false otherwise.
290828eda7b5SOliver Upton  */
290928eda7b5SOliver Upton static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
29103fec037dSPavel Fedin 			   struct sys_reg_params *params)
29117c8c5e6aSMarc Zyngier {
2912dcaffa7bSJames Morse 	const struct sys_reg_desc *r;
29137c8c5e6aSMarc Zyngier 
29147c8c5e6aSMarc Zyngier 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
29157c8c5e6aSMarc Zyngier 
29167c8c5e6aSMarc Zyngier 	if (likely(r)) {
2917e70b9522SMarc Zyngier 		perform_access(vcpu, params, r);
291828eda7b5SOliver Upton 		return true;
291928eda7b5SOliver Upton 	}
292028eda7b5SOliver Upton 
292128eda7b5SOliver Upton 	if (is_imp_def_sys_reg(params)) {
292254ad68b7SMark Rutland 		kvm_inject_undefined(vcpu);
29237c8c5e6aSMarc Zyngier 	} else {
2924bf4b96bbSMark Rutland 		print_sys_reg_msg(params,
2925bf4b96bbSMark Rutland 				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
2926d1878af3SMark Rutland 				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
29277c8c5e6aSMarc Zyngier 		kvm_inject_undefined(vcpu);
2928e70b9522SMarc Zyngier 	}
292928eda7b5SOliver Upton 	return false;
29307c8c5e6aSMarc Zyngier }
29317c8c5e6aSMarc Zyngier 
2932750ed566SJames Morse /**
2933750ed566SJames Morse  * kvm_reset_sys_regs - sets system registers to reset value
2934750ed566SJames Morse  * @vcpu: The VCPU pointer
2935750ed566SJames Morse  *
2936750ed566SJames Morse  * This function walks the sys_reg_descs table above and sets the registers
2937750ed566SJames Morse  * on the virtual CPU struct to their architecturally defined reset values.
2938750ed566SJames Morse  */
2939750ed566SJames Morse void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
29407c8c5e6aSMarc Zyngier {
29417c8c5e6aSMarc Zyngier 	unsigned long i;
29427c8c5e6aSMarc Zyngier 
2943750ed566SJames Morse 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
2944750ed566SJames Morse 		if (sys_reg_descs[i].reset)
2945750ed566SJames Morse 			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
29467c8c5e6aSMarc Zyngier }
29477c8c5e6aSMarc Zyngier 
29487c8c5e6aSMarc Zyngier /**
29497c8c5e6aSMarc Zyngier  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
29507c8c5e6aSMarc Zyngier  * @vcpu: The VCPU pointer
29517c8c5e6aSMarc Zyngier  */
295274cc7e0cSTianjia Zhang int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
29537c8c5e6aSMarc Zyngier {
29547c8c5e6aSMarc Zyngier 	struct sys_reg_params params;
29553a949f4cSGavin Shan 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
2956c667186fSMarc Zyngier 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
29577c8c5e6aSMarc Zyngier 
2958eef8c85aSAlex Bennée 	trace_kvm_handle_sys_reg(esr);
2959eef8c85aSAlex Bennée 
2960f76f89e2SFuad Tabba 	params = esr_sys64_to_params(esr);
29612ec5be3dSPavel Fedin 	params.regval = vcpu_get_reg(vcpu, Rt);
29627c8c5e6aSMarc Zyngier 
296328eda7b5SOliver Upton 	if (!emulate_sys_reg(vcpu, &params))
296428eda7b5SOliver Upton 		return 1;
29652ec5be3dSPavel Fedin 
29662ec5be3dSPavel Fedin 	if (!params.is_write)
29672ec5be3dSPavel Fedin 		vcpu_set_reg(vcpu, Rt, params.regval);
296828eda7b5SOliver Upton 	return 1;
29697c8c5e6aSMarc Zyngier }
29707c8c5e6aSMarc Zyngier 
29717c8c5e6aSMarc Zyngier /******************************************************************************
29727c8c5e6aSMarc Zyngier  * Userspace API
29737c8c5e6aSMarc Zyngier  *****************************************************************************/
29747c8c5e6aSMarc Zyngier 
29757c8c5e6aSMarc Zyngier static bool index_to_params(u64 id, struct sys_reg_params *params)
29767c8c5e6aSMarc Zyngier {
29777c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_SIZE_MASK) {
29787c8c5e6aSMarc Zyngier 	case KVM_REG_SIZE_U64:
29797c8c5e6aSMarc Zyngier 		/* Any unused index bits mean it's not valid. */
29807c8c5e6aSMarc Zyngier 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
29817c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM_COPROC_MASK
29827c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
29837c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
29847c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
29857c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
29867c8c5e6aSMarc Zyngier 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
29877c8c5e6aSMarc Zyngier 			return false;
29887c8c5e6aSMarc Zyngier 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
29897c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
29907c8c5e6aSMarc Zyngier 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
29917c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
29927c8c5e6aSMarc Zyngier 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
29937c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
29947c8c5e6aSMarc Zyngier 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
29957c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
29967c8c5e6aSMarc Zyngier 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
29977c8c5e6aSMarc Zyngier 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
29987c8c5e6aSMarc Zyngier 		return true;
29997c8c5e6aSMarc Zyngier 	default:
30007c8c5e6aSMarc Zyngier 		return false;
30017c8c5e6aSMarc Zyngier 	}
30027c8c5e6aSMarc Zyngier }
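/*
 * Sketch of how userspace builds such an index (symbolic only, the
 * actual shift/mask values live in the uapi headers): for MPIDR_EL1
 * (Op0=3, Op1=0, CRn=0, CRm=0, Op2=5) the KVM_{GET,SET}_ONE_REG id is
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *	(3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *	(5 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)
 *
 * which mirrors what sys_reg_to_index() below computes from a
 * sys_reg_desc.
 */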
30037c8c5e6aSMarc Zyngier 
3004da8d120fSMarc Zyngier const struct sys_reg_desc *get_reg_by_id(u64 id,
30054b927b94SVijaya Kumar K 					 const struct sys_reg_desc table[],
30064b927b94SVijaya Kumar K 					 unsigned int num)
30074b927b94SVijaya Kumar K {
3008da8d120fSMarc Zyngier 	struct sys_reg_params params;
3009da8d120fSMarc Zyngier 
3010da8d120fSMarc Zyngier 	if (!index_to_params(id, &params))
30114b927b94SVijaya Kumar K 		return NULL;
30124b927b94SVijaya Kumar K 
3013da8d120fSMarc Zyngier 	return find_reg(&params, table, num);
30144b927b94SVijaya Kumar K }
30154b927b94SVijaya Kumar K 
30167c8c5e6aSMarc Zyngier /* Decode an index value, and find the sys_reg_desc entry. */
3017ba23aec9SMarc Zyngier static const struct sys_reg_desc *
3018ba23aec9SMarc Zyngier id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
3019ba23aec9SMarc Zyngier 		   const struct sys_reg_desc table[], unsigned int num)
3020ba23aec9SMarc Zyngier 
30217c8c5e6aSMarc Zyngier {
3022dcaffa7bSJames Morse 	const struct sys_reg_desc *r;
30237c8c5e6aSMarc Zyngier 
30247c8c5e6aSMarc Zyngier 	/* We only do sys_reg for now. */
30257c8c5e6aSMarc Zyngier 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
30267c8c5e6aSMarc Zyngier 		return NULL;
30277c8c5e6aSMarc Zyngier 
3028ba23aec9SMarc Zyngier 	r = get_reg_by_id(id, table, num);
30297c8c5e6aSMarc Zyngier 
303093390c0aSDave Martin 	/* Not saved in the sys_reg array and not otherwise accessible? */
3031ba23aec9SMarc Zyngier 	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
30327c8c5e6aSMarc Zyngier 		r = NULL;
30337c8c5e6aSMarc Zyngier 
30347c8c5e6aSMarc Zyngier 	return r;
30357c8c5e6aSMarc Zyngier }
30367c8c5e6aSMarc Zyngier 
30377c8c5e6aSMarc Zyngier /*
30387c8c5e6aSMarc Zyngier  * These are the invariant sys_reg registers: we let the guest see the
30397c8c5e6aSMarc Zyngier  * host versions of these, so they're part of the guest state.
30407c8c5e6aSMarc Zyngier  *
30417c8c5e6aSMarc Zyngier  * A future CPU may provide a mechanism to present different values to
30427c8c5e6aSMarc Zyngier  * the guest, or a future kvm may trap them.
30437c8c5e6aSMarc Zyngier  */
30447c8c5e6aSMarc Zyngier 
30457c8c5e6aSMarc Zyngier #define FUNCTION_INVARIANT(reg)						\
30467c8c5e6aSMarc Zyngier 	static void get_##reg(struct kvm_vcpu *v,			\
30477c8c5e6aSMarc Zyngier 			      const struct sys_reg_desc *r)		\
30487c8c5e6aSMarc Zyngier 	{								\
30491f3d8699SMark Rutland 		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
30507c8c5e6aSMarc Zyngier 	}
30517c8c5e6aSMarc Zyngier 
30527c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(midr_el1)
30537c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(revidr_el1)
30547c8c5e6aSMarc Zyngier FUNCTION_INVARIANT(aidr_el1)
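/*
 * For reference, FUNCTION_INVARIANT(midr_el1) above expands to a
 * get_midr_el1() helper that stores read_sysreg(midr_el1) into the
 * descriptor's ->val field, which is how the invariant_sys_regs
 * table below gets its values at init time.
 */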
30557c8c5e6aSMarc Zyngier 
3056f7f2b15cSArd Biesheuvel static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
3057f7f2b15cSArd Biesheuvel {
3058f7f2b15cSArd Biesheuvel 	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
3059f7f2b15cSArd Biesheuvel }
3060f7f2b15cSArd Biesheuvel 
30617c8c5e6aSMarc Zyngier /* ->val is filled in by kvm_sys_reg_table_init() */
30628d20bd63SSean Christopherson static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
30630d449541SMark Rutland 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
30640d449541SMark Rutland 	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
30650d449541SMark Rutland 	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
30660d449541SMark Rutland 	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
30677c8c5e6aSMarc Zyngier };
30687c8c5e6aSMarc Zyngier 
30695a420ed9SMarc Zyngier static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
30707c8c5e6aSMarc Zyngier {
30717c8c5e6aSMarc Zyngier 	const struct sys_reg_desc *r;
30727c8c5e6aSMarc Zyngier 
3073da8d120fSMarc Zyngier 	r = get_reg_by_id(id, invariant_sys_regs,
30744b927b94SVijaya Kumar K 			  ARRAY_SIZE(invariant_sys_regs));
30757c8c5e6aSMarc Zyngier 	if (!r)
30767c8c5e6aSMarc Zyngier 		return -ENOENT;
30777c8c5e6aSMarc Zyngier 
30785a420ed9SMarc Zyngier 	return put_user(r->val, uaddr);
30797c8c5e6aSMarc Zyngier }
30807c8c5e6aSMarc Zyngier 
30815a420ed9SMarc Zyngier static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
30827c8c5e6aSMarc Zyngier {
30837c8c5e6aSMarc Zyngier 	const struct sys_reg_desc *r;
30845a420ed9SMarc Zyngier 	u64 val;
30857c8c5e6aSMarc Zyngier 
3086da8d120fSMarc Zyngier 	r = get_reg_by_id(id, invariant_sys_regs,
30874b927b94SVijaya Kumar K 			  ARRAY_SIZE(invariant_sys_regs));
30887c8c5e6aSMarc Zyngier 	if (!r)
30897c8c5e6aSMarc Zyngier 		return -ENOENT;
30907c8c5e6aSMarc Zyngier 
30915a420ed9SMarc Zyngier 	if (get_user(val, uaddr))
30925a420ed9SMarc Zyngier 		return -EFAULT;
30937c8c5e6aSMarc Zyngier 
30947c8c5e6aSMarc Zyngier 	/* This is what we mean by invariant: you can't change it. */
30957c8c5e6aSMarc Zyngier 	if (r->val != val)
30967c8c5e6aSMarc Zyngier 		return -EINVAL;
30977c8c5e6aSMarc Zyngier 
30987c8c5e6aSMarc Zyngier 	return 0;
30997c8c5e6aSMarc Zyngier }
31007c8c5e6aSMarc Zyngier 
31017af0c253SAkihiko Odaki static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
31027c8c5e6aSMarc Zyngier {
31037c8c5e6aSMarc Zyngier 	u32 val;
31047c8c5e6aSMarc Zyngier 	u32 __user *uval = uaddr;
31057c8c5e6aSMarc Zyngier 
31067c8c5e6aSMarc Zyngier 	/* Fail if we have unknown bits set. */
31077c8c5e6aSMarc Zyngier 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
31087c8c5e6aSMarc Zyngier 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
31097c8c5e6aSMarc Zyngier 		return -ENOENT;
31107c8c5e6aSMarc Zyngier 
31117c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
31127c8c5e6aSMarc Zyngier 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
31137c8c5e6aSMarc Zyngier 		if (KVM_REG_SIZE(id) != 4)
31147c8c5e6aSMarc Zyngier 			return -ENOENT;
31157c8c5e6aSMarc Zyngier 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
31167c8c5e6aSMarc Zyngier 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
31177af0c253SAkihiko Odaki 		if (val >= CSSELR_MAX)
31187c8c5e6aSMarc Zyngier 			return -ENOENT;
31197c8c5e6aSMarc Zyngier 
31207af0c253SAkihiko Odaki 		return put_user(get_ccsidr(vcpu, val), uval);
31217c8c5e6aSMarc Zyngier 	default:
31227c8c5e6aSMarc Zyngier 		return -ENOENT;
31237c8c5e6aSMarc Zyngier 	}
31247c8c5e6aSMarc Zyngier }
31257c8c5e6aSMarc Zyngier 
31267af0c253SAkihiko Odaki static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
31277c8c5e6aSMarc Zyngier {
31287c8c5e6aSMarc Zyngier 	u32 val, newval;
31297c8c5e6aSMarc Zyngier 	u32 __user *uval = uaddr;
31307c8c5e6aSMarc Zyngier 
31317c8c5e6aSMarc Zyngier 	/* Fail if we have unknown bits set. */
31327c8c5e6aSMarc Zyngier 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
31337c8c5e6aSMarc Zyngier 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
31347c8c5e6aSMarc Zyngier 		return -ENOENT;
31357c8c5e6aSMarc Zyngier 
31367c8c5e6aSMarc Zyngier 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
31377c8c5e6aSMarc Zyngier 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
31387c8c5e6aSMarc Zyngier 		if (KVM_REG_SIZE(id) != 4)
31397c8c5e6aSMarc Zyngier 			return -ENOENT;
31407c8c5e6aSMarc Zyngier 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
31417c8c5e6aSMarc Zyngier 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
31427af0c253SAkihiko Odaki 		if (val >= CSSELR_MAX)
31437c8c5e6aSMarc Zyngier 			return -ENOENT;
31447c8c5e6aSMarc Zyngier 
31457c8c5e6aSMarc Zyngier 		if (get_user(newval, uval))
31467c8c5e6aSMarc Zyngier 			return -EFAULT;
31477c8c5e6aSMarc Zyngier 
31487af0c253SAkihiko Odaki 		return set_ccsidr(vcpu, val, newval);
31497c8c5e6aSMarc Zyngier 	default:
31507c8c5e6aSMarc Zyngier 		return -ENOENT;
31517c8c5e6aSMarc Zyngier 	}
31527c8c5e6aSMarc Zyngier }
31537c8c5e6aSMarc Zyngier 
3154ba23aec9SMarc Zyngier int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3155ba23aec9SMarc Zyngier 			 const struct sys_reg_desc table[], unsigned int num)
3156ba23aec9SMarc Zyngier {
3157978ceeb3SMarc Zyngier 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3158ba23aec9SMarc Zyngier 	const struct sys_reg_desc *r;
3159978ceeb3SMarc Zyngier 	u64 val;
3160978ceeb3SMarc Zyngier 	int ret;
3161ba23aec9SMarc Zyngier 
3162ba23aec9SMarc Zyngier 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3163e6b367dbSMarc Zyngier 	if (!r || sysreg_hidden_user(vcpu, r))
3164ba23aec9SMarc Zyngier 		return -ENOENT;
3165ba23aec9SMarc Zyngier 
3166978ceeb3SMarc Zyngier 	if (r->get_user) {
3167978ceeb3SMarc Zyngier 		ret = (r->get_user)(vcpu, r, &val);
3168978ceeb3SMarc Zyngier 	} else {
3169978ceeb3SMarc Zyngier 		val = __vcpu_sys_reg(vcpu, r->reg);
3170978ceeb3SMarc Zyngier 		ret = 0;
3171978ceeb3SMarc Zyngier 	}
3172ba23aec9SMarc Zyngier 
3173978ceeb3SMarc Zyngier 	if (!ret)
3174978ceeb3SMarc Zyngier 		ret = put_user(val, uaddr);
3175978ceeb3SMarc Zyngier 
3176978ceeb3SMarc Zyngier 	return ret;
3177ba23aec9SMarc Zyngier }
3178ba23aec9SMarc Zyngier 
31797c8c5e6aSMarc Zyngier int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
31807c8c5e6aSMarc Zyngier {
31817c8c5e6aSMarc Zyngier 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
31821deeffb5SMarc Zyngier 	int err;
31837c8c5e6aSMarc Zyngier 
31847c8c5e6aSMarc Zyngier 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
31857af0c253SAkihiko Odaki 		return demux_c15_get(vcpu, reg->id, uaddr);
31867c8c5e6aSMarc Zyngier 
31871deeffb5SMarc Zyngier 	err = get_invariant_sys_reg(reg->id, uaddr);
31881deeffb5SMarc Zyngier 	if (err != -ENOENT)
31891deeffb5SMarc Zyngier 		return err;
31907c8c5e6aSMarc Zyngier 
3191ba23aec9SMarc Zyngier 	return kvm_sys_reg_get_user(vcpu, reg,
3192ba23aec9SMarc Zyngier 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3193ba23aec9SMarc Zyngier }
31947c8c5e6aSMarc Zyngier 
3195ba23aec9SMarc Zyngier int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3196ba23aec9SMarc Zyngier 			 const struct sys_reg_desc table[], unsigned int num)
3197ba23aec9SMarc Zyngier {
3198978ceeb3SMarc Zyngier 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
3199ba23aec9SMarc Zyngier 	const struct sys_reg_desc *r;
3200978ceeb3SMarc Zyngier 	u64 val;
3201978ceeb3SMarc Zyngier 	int ret;
3202978ceeb3SMarc Zyngier 
3203978ceeb3SMarc Zyngier 	if (get_user(val, uaddr))
3204978ceeb3SMarc Zyngier 		return -EFAULT;
3205ba23aec9SMarc Zyngier 
3206ba23aec9SMarc Zyngier 	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
3207e6b367dbSMarc Zyngier 	if (!r || sysreg_hidden_user(vcpu, r))
32087f34e409SDave Martin 		return -ENOENT;
32097f34e409SDave Martin 
32104de06e4cSOliver Upton 	if (sysreg_user_write_ignore(vcpu, r))
32114de06e4cSOliver Upton 		return 0;
32124de06e4cSOliver Upton 
3213978ceeb3SMarc Zyngier 	if (r->set_user) {
3214978ceeb3SMarc Zyngier 		ret = (r->set_user)(vcpu, r, val);
3215978ceeb3SMarc Zyngier 	} else {
3216978ceeb3SMarc Zyngier 		__vcpu_sys_reg(vcpu, r->reg) = val;
3217978ceeb3SMarc Zyngier 		ret = 0;
3218978ceeb3SMarc Zyngier 	}
321984e690bfSAlex Bennée 
3220978ceeb3SMarc Zyngier 	return ret;
32217c8c5e6aSMarc Zyngier }
32227c8c5e6aSMarc Zyngier 
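/*
 * Top-level handler for setting one register from userspace: demuxed and
 * invariant registers are tried first, everything else goes through the
 * main sys_reg_descs table.
 */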
32237c8c5e6aSMarc Zyngier int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
32247c8c5e6aSMarc Zyngier {
32257c8c5e6aSMarc Zyngier 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
32261deeffb5SMarc Zyngier 	int err;
32277c8c5e6aSMarc Zyngier 
32287c8c5e6aSMarc Zyngier 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
32297af0c253SAkihiko Odaki 		return demux_c15_set(vcpu, reg->id, uaddr);
32307c8c5e6aSMarc Zyngier 
32311deeffb5SMarc Zyngier 	err = set_invariant_sys_reg(reg->id, uaddr);
32321deeffb5SMarc Zyngier 	if (err != -ENOENT)
32331deeffb5SMarc Zyngier 		return err;
32347c8c5e6aSMarc Zyngier 
3235ba23aec9SMarc Zyngier 	return kvm_sys_reg_set_user(vcpu, reg,
3236ba23aec9SMarc Zyngier 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
32377c8c5e6aSMarc Zyngier }
32387c8c5e6aSMarc Zyngier 
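/* Every CSSELR value gets a demuxed CCSIDR entry exposed to userspace. */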
32397c8c5e6aSMarc Zyngier static unsigned int num_demux_regs(void)
32407c8c5e6aSMarc Zyngier {
32417af0c253SAkihiko Odaki 	return CSSELR_MAX;
32427c8c5e6aSMarc Zyngier }
32437c8c5e6aSMarc Zyngier 
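/*
 * Emit the register indices for the demuxed CCSIDR space, one 32-bit
 * register per CSSELR value.
 */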
32447c8c5e6aSMarc Zyngier static int write_demux_regids(u64 __user *uindices)
32457c8c5e6aSMarc Zyngier {
3246efd48ceaSAlex Bennée 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
32477c8c5e6aSMarc Zyngier 	unsigned int i;
32487c8c5e6aSMarc Zyngier 
32497c8c5e6aSMarc Zyngier 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
32507c8c5e6aSMarc Zyngier 	for (i = 0; i < CSSELR_MAX; i++) {
32517c8c5e6aSMarc Zyngier 		if (put_user(val | i, uindices))
32527c8c5e6aSMarc Zyngier 			return -EFAULT;
32537c8c5e6aSMarc Zyngier 		uindices++;
32547c8c5e6aSMarc Zyngier 	}
32557c8c5e6aSMarc Zyngier 	return 0;
32567c8c5e6aSMarc Zyngier }
32577c8c5e6aSMarc Zyngier 
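/*
 * Encode a sys_reg_desc as the 64-bit index userspace uses to name it:
 * Op0, Op1, CRn, CRm and Op2 packed into a KVM_REG_ARM64_SYSREG id.
 */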
32587c8c5e6aSMarc Zyngier static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
32597c8c5e6aSMarc Zyngier {
32607c8c5e6aSMarc Zyngier 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
32617c8c5e6aSMarc Zyngier 		KVM_REG_ARM64_SYSREG |
32627c8c5e6aSMarc Zyngier 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
32637c8c5e6aSMarc Zyngier 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
32647c8c5e6aSMarc Zyngier 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
32657c8c5e6aSMarc Zyngier 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
32667c8c5e6aSMarc Zyngier 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
32677c8c5e6aSMarc Zyngier }
32687c8c5e6aSMarc Zyngier 
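/*
 * Copy a register index out to userspace. A NULL destination means the
 * caller is only counting registers, so report success without writing.
 */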
32697c8c5e6aSMarc Zyngier static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
32707c8c5e6aSMarc Zyngier {
32717c8c5e6aSMarc Zyngier 	if (!*uind)
32727c8c5e6aSMarc Zyngier 		return true;
32737c8c5e6aSMarc Zyngier 
32747c8c5e6aSMarc Zyngier 	if (put_user(sys_reg_to_index(reg), *uind))
32757c8c5e6aSMarc Zyngier 		return false;
32767c8c5e6aSMarc Zyngier 
32777c8c5e6aSMarc Zyngier 	(*uind)++;
32787c8c5e6aSMarc Zyngier 	return true;
32797c8c5e6aSMarc Zyngier }
32807c8c5e6aSMarc Zyngier 
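/*
 * Consider a single descriptor for the userspace-visible list: skip
 * registers that have neither storage nor a ->get_user accessor, as well
 * as registers hidden from userspace, and count everything else.
 */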
32817f34e409SDave Martin static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
32827f34e409SDave Martin 			    const struct sys_reg_desc *rd,
328393390c0aSDave Martin 			    u64 __user **uind,
328493390c0aSDave Martin 			    unsigned int *total)
328593390c0aSDave Martin {
328693390c0aSDave Martin 	/*
328793390c0aSDave Martin 	 * Ignore registers we trap but don't save,
328893390c0aSDave Martin 	 * and for which no custom user accessor is provided.
328993390c0aSDave Martin 	 */
329093390c0aSDave Martin 	if (!(rd->reg || rd->get_user))
329193390c0aSDave Martin 		return 0;
329293390c0aSDave Martin 
3293e6b367dbSMarc Zyngier 	if (sysreg_hidden_user(vcpu, rd))
32947f34e409SDave Martin 		return 0;
32957f34e409SDave Martin 
329693390c0aSDave Martin 	if (!copy_reg_to_user(rd, uind))
329793390c0aSDave Martin 		return -EFAULT;
329893390c0aSDave Martin 
329993390c0aSDave Martin 	(*total)++;
330093390c0aSDave Martin 	return 0;
330193390c0aSDave Martin }
330293390c0aSDave Martin 
33037c8c5e6aSMarc Zyngier /* Assumes an ordered, duplicate-free table; see kvm_sys_reg_table_init(). */
33047c8c5e6aSMarc Zyngier static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
33057c8c5e6aSMarc Zyngier {
3306dcaffa7bSJames Morse 	const struct sys_reg_desc *i2, *end2;
33077c8c5e6aSMarc Zyngier 	unsigned int total = 0;
330893390c0aSDave Martin 	int err;
33097c8c5e6aSMarc Zyngier 
33107c8c5e6aSMarc Zyngier 	i2 = sys_reg_descs;
33117c8c5e6aSMarc Zyngier 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
33127c8c5e6aSMarc Zyngier 
3313dcaffa7bSJames Morse 	while (i2 != end2) {
3314dcaffa7bSJames Morse 		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
331593390c0aSDave Martin 		if (err)
331693390c0aSDave Martin 			return err;
33177c8c5e6aSMarc Zyngier 	}
33187c8c5e6aSMarc Zyngier 	return total;
33197c8c5e6aSMarc Zyngier }
33207c8c5e6aSMarc Zyngier 
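/*
 * Number of register indices reported to userspace: the invariant
 * registers, the demuxed cache registers, plus whatever walk_sys_regs()
 * counts for this vCPU (the NULL pointer makes it count without copying).
 */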
33217c8c5e6aSMarc Zyngier unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
33227c8c5e6aSMarc Zyngier {
33237c8c5e6aSMarc Zyngier 	return ARRAY_SIZE(invariant_sys_regs)
33247c8c5e6aSMarc Zyngier 		+ num_demux_regs()
33257c8c5e6aSMarc Zyngier 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
33267c8c5e6aSMarc Zyngier }
33277c8c5e6aSMarc Zyngier 
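/*
 * Fill userspace's buffer with every register index: invariant registers
 * first, then the walked sys_reg_descs entries, then the demuxed cache
 * registers.
 */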
33287c8c5e6aSMarc Zyngier int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
33297c8c5e6aSMarc Zyngier {
33307c8c5e6aSMarc Zyngier 	unsigned int i;
33317c8c5e6aSMarc Zyngier 	int err;
33327c8c5e6aSMarc Zyngier 
33337c8c5e6aSMarc Zyngier 	/* Start by giving userspace the invariant registers' indices. */
33347c8c5e6aSMarc Zyngier 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
33357c8c5e6aSMarc Zyngier 		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
33367c8c5e6aSMarc Zyngier 			return -EFAULT;
33377c8c5e6aSMarc Zyngier 		uindices++;
33387c8c5e6aSMarc Zyngier 	}
33397c8c5e6aSMarc Zyngier 
33407c8c5e6aSMarc Zyngier 	err = walk_sys_regs(vcpu, uindices);
33417c8c5e6aSMarc Zyngier 	if (err < 0)
33427c8c5e6aSMarc Zyngier 		return err;
33437c8c5e6aSMarc Zyngier 	uindices += err;
33447c8c5e6aSMarc Zyngier 
33457c8c5e6aSMarc Zyngier 	return write_demux_regids(uindices);
33467c8c5e6aSMarc Zyngier }
33477c8c5e6aSMarc Zyngier 
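/*
 * Init-time sanity check: every descriptor table must be sorted and free
 * of duplicates, after which the invariant registers are initialised via
 * their reset handlers.
 */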
33488d20bd63SSean Christopherson int __init kvm_sys_reg_table_init(void)
33497c8c5e6aSMarc Zyngier {
3350f1f0c0cfSAlexandru Elisei 	bool valid = true;
33517c8c5e6aSMarc Zyngier 	unsigned int i;
33527c8c5e6aSMarc Zyngier 
33537c8c5e6aSMarc Zyngier 	/* Make sure tables are unique and in order. */
3354f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
3355f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
3356f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
3357f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
3358f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
3359f1f0c0cfSAlexandru Elisei 	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
3360f1f0c0cfSAlexandru Elisei 
3361f1f0c0cfSAlexandru Elisei 	if (!valid)
3362f1f0c0cfSAlexandru Elisei 		return -EINVAL;
33637c8c5e6aSMarc Zyngier 
33647c8c5e6aSMarc Zyngier 	/* We abuse the reset function to overwrite the table itself. */
33657c8c5e6aSMarc Zyngier 	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
33667c8c5e6aSMarc Zyngier 		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
33677c8c5e6aSMarc Zyngier 
3368f1f0c0cfSAlexandru Elisei 	return 0;
33697c8c5e6aSMarc Zyngier }
3370