1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 27c8c5e6aSMarc Zyngier /* 37c8c5e6aSMarc Zyngier * Copyright (C) 2012,2013 - ARM Ltd 47c8c5e6aSMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com> 57c8c5e6aSMarc Zyngier * 67c8c5e6aSMarc Zyngier * Derived from arch/arm/kvm/coproc.c: 77c8c5e6aSMarc Zyngier * Copyright (C) 2012 - Virtual Open Systems and Columbia University 87c8c5e6aSMarc Zyngier * Authors: Rusty Russell <rusty@rustcorp.com.au> 97c8c5e6aSMarc Zyngier * Christoffer Dall <c.dall@virtualopensystems.com> 107c8c5e6aSMarc Zyngier */ 117c8c5e6aSMarc Zyngier 12c8857935SMarc Zyngier #include <linux/bitfield.h> 13623eefa8SMarc Zyngier #include <linux/bsearch.h> 147c8c5e6aSMarc Zyngier #include <linux/kvm_host.h> 15c6d01a94SMark Rutland #include <linux/mm.h> 1607d79fe7SDave Martin #include <linux/printk.h> 177c8c5e6aSMarc Zyngier #include <linux/uaccess.h> 18c6d01a94SMark Rutland 197c8c5e6aSMarc Zyngier #include <asm/cacheflush.h> 207c8c5e6aSMarc Zyngier #include <asm/cputype.h> 210c557ed4SMarc Zyngier #include <asm/debug-monitors.h> 22c6d01a94SMark Rutland #include <asm/esr.h> 23c6d01a94SMark Rutland #include <asm/kvm_arm.h> 24c6d01a94SMark Rutland #include <asm/kvm_emulate.h> 25d47533daSChristoffer Dall #include <asm/kvm_hyp.h> 26c6d01a94SMark Rutland #include <asm/kvm_mmu.h> 27ab946834SShannon Zhao #include <asm/perf_event.h> 281f3d8699SMark Rutland #include <asm/sysreg.h> 29c6d01a94SMark Rutland 307c8c5e6aSMarc Zyngier #include <trace/events/kvm.h> 317c8c5e6aSMarc Zyngier 327c8c5e6aSMarc Zyngier #include "sys_regs.h" 337c8c5e6aSMarc Zyngier 34eef8c85aSAlex Bennée #include "trace.h" 35eef8c85aSAlex Bennée 367c8c5e6aSMarc Zyngier /* 3762a89c44SMarc Zyngier * For AArch32, we only take care of what is being trapped. Anything 3862a89c44SMarc Zyngier * that has to do with init and userspace access has to go via the 3962a89c44SMarc Zyngier * 64bit interface. 
407c8c5e6aSMarc Zyngier */ 417c8c5e6aSMarc Zyngier 42f24adc65SOliver Upton static u64 sys_reg_to_index(const struct sys_reg_desc *reg); 43f24adc65SOliver Upton 447b5b4df1SMarc Zyngier static bool read_from_write_only(struct kvm_vcpu *vcpu, 45e7f1d1eeSMarc Zyngier struct sys_reg_params *params, 46e7f1d1eeSMarc Zyngier const struct sys_reg_desc *r) 477b5b4df1SMarc Zyngier { 487b5b4df1SMarc Zyngier WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n"); 497b5b4df1SMarc Zyngier print_sys_reg_instr(params); 507b5b4df1SMarc Zyngier kvm_inject_undefined(vcpu); 517b5b4df1SMarc Zyngier return false; 527b5b4df1SMarc Zyngier } 537b5b4df1SMarc Zyngier 547b1dba1fSMarc Zyngier static bool write_to_read_only(struct kvm_vcpu *vcpu, 557b1dba1fSMarc Zyngier struct sys_reg_params *params, 567b1dba1fSMarc Zyngier const struct sys_reg_desc *r) 577b1dba1fSMarc Zyngier { 587b1dba1fSMarc Zyngier WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n"); 597b1dba1fSMarc Zyngier print_sys_reg_instr(params); 607b1dba1fSMarc Zyngier kvm_inject_undefined(vcpu); 617b1dba1fSMarc Zyngier return false; 627b1dba1fSMarc Zyngier } 637b1dba1fSMarc Zyngier 647ea90bddSMarc Zyngier u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg) 657ea90bddSMarc Zyngier { 667ea90bddSMarc Zyngier u64 val = 0x8badf00d8badf00d; 677ea90bddSMarc Zyngier 6830b6ab45SMarc Zyngier if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) && 697ea90bddSMarc Zyngier __vcpu_read_sys_reg_from_cpu(reg, &val)) 707ea90bddSMarc Zyngier return val; 717ea90bddSMarc Zyngier 727ea90bddSMarc Zyngier return __vcpu_sys_reg(vcpu, reg); 737ea90bddSMarc Zyngier } 747ea90bddSMarc Zyngier 757ea90bddSMarc Zyngier void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) 767ea90bddSMarc Zyngier { 7730b6ab45SMarc Zyngier if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) && 787ea90bddSMarc Zyngier __vcpu_write_sys_reg_to_cpu(val, reg)) 797ea90bddSMarc Zyngier return; 807ea90bddSMarc Zyngier 81d47533daSChristoffer Dall __vcpu_sys_reg(vcpu, reg) = 
val; 82d47533daSChristoffer Dall } 83d47533daSChristoffer Dall 847c8c5e6aSMarc Zyngier /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ 857c8c5e6aSMarc Zyngier static u32 cache_levels; 867c8c5e6aSMarc Zyngier 877c8c5e6aSMarc Zyngier /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ 88c73a4416SAndrew Jones #define CSSELR_MAX 14 897c8c5e6aSMarc Zyngier 907c8c5e6aSMarc Zyngier /* Which cache CCSIDR represents depends on CSSELR value. */ 917c8c5e6aSMarc Zyngier static u32 get_ccsidr(u32 csselr) 927c8c5e6aSMarc Zyngier { 937c8c5e6aSMarc Zyngier u32 ccsidr; 947c8c5e6aSMarc Zyngier 957c8c5e6aSMarc Zyngier /* Make sure noone else changes CSSELR during this! */ 967c8c5e6aSMarc Zyngier local_irq_disable(); 971f3d8699SMark Rutland write_sysreg(csselr, csselr_el1); 987c8c5e6aSMarc Zyngier isb(); 991f3d8699SMark Rutland ccsidr = read_sysreg(ccsidr_el1); 1007c8c5e6aSMarc Zyngier local_irq_enable(); 1017c8c5e6aSMarc Zyngier 1027c8c5e6aSMarc Zyngier return ccsidr; 1037c8c5e6aSMarc Zyngier } 1047c8c5e6aSMarc Zyngier 1053c1e7165SMarc Zyngier /* 1063c1e7165SMarc Zyngier * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 1073c1e7165SMarc Zyngier */ 1087c8c5e6aSMarc Zyngier static bool access_dcsw(struct kvm_vcpu *vcpu, 1093fec037dSPavel Fedin struct sys_reg_params *p, 1107c8c5e6aSMarc Zyngier const struct sys_reg_desc *r) 1117c8c5e6aSMarc Zyngier { 1127c8c5e6aSMarc Zyngier if (!p->is_write) 113e7f1d1eeSMarc Zyngier return read_from_write_only(vcpu, p, r); 1147c8c5e6aSMarc Zyngier 11509605e94SMarc Zyngier /* 11609605e94SMarc Zyngier * Only track S/W ops if we don't have FWB. It still indicates 11709605e94SMarc Zyngier * that the guest is a bit broken (S/W operations should only 11809605e94SMarc Zyngier * be done by firmware, knowing that there is only a single 11909605e94SMarc Zyngier * CPU left in the system, and certainly not from non-secure 12009605e94SMarc Zyngier * software). 
12109605e94SMarc Zyngier */ 12209605e94SMarc Zyngier if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) 1233c1e7165SMarc Zyngier kvm_set_way_flush(vcpu); 12409605e94SMarc Zyngier 1257c8c5e6aSMarc Zyngier return true; 1267c8c5e6aSMarc Zyngier } 1277c8c5e6aSMarc Zyngier 128b1ea1d76SMarc Zyngier static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift) 129b1ea1d76SMarc Zyngier { 130b1ea1d76SMarc Zyngier switch (r->aarch32_map) { 131b1ea1d76SMarc Zyngier case AA32_LO: 132b1ea1d76SMarc Zyngier *mask = GENMASK_ULL(31, 0); 133b1ea1d76SMarc Zyngier *shift = 0; 134b1ea1d76SMarc Zyngier break; 135b1ea1d76SMarc Zyngier case AA32_HI: 136b1ea1d76SMarc Zyngier *mask = GENMASK_ULL(63, 32); 137b1ea1d76SMarc Zyngier *shift = 32; 138b1ea1d76SMarc Zyngier break; 139b1ea1d76SMarc Zyngier default: 140b1ea1d76SMarc Zyngier *mask = GENMASK_ULL(63, 0); 141b1ea1d76SMarc Zyngier *shift = 0; 142b1ea1d76SMarc Zyngier break; 143b1ea1d76SMarc Zyngier } 144b1ea1d76SMarc Zyngier } 145b1ea1d76SMarc Zyngier 1467c8c5e6aSMarc Zyngier /* 1474d44923bSMarc Zyngier * Generic accessor for VM registers. Only called as long as HCR_TVM 1483c1e7165SMarc Zyngier * is set. If the guest enables the MMU, we stop trapping the VM 1493c1e7165SMarc Zyngier * sys_regs and leave it in complete control of the caches. 
1504d44923bSMarc Zyngier */ 1514d44923bSMarc Zyngier static bool access_vm_reg(struct kvm_vcpu *vcpu, 1523fec037dSPavel Fedin struct sys_reg_params *p, 1534d44923bSMarc Zyngier const struct sys_reg_desc *r) 1544d44923bSMarc Zyngier { 1553c1e7165SMarc Zyngier bool was_enabled = vcpu_has_cache_enabled(vcpu); 156b1ea1d76SMarc Zyngier u64 val, mask, shift; 1574d44923bSMarc Zyngier 1584d44923bSMarc Zyngier BUG_ON(!p->is_write); 1594d44923bSMarc Zyngier 160b1ea1d76SMarc Zyngier get_access_mask(r, &mask, &shift); 16152f6c4f0SChristoffer Dall 162b1ea1d76SMarc Zyngier if (~mask) { 163b1ea1d76SMarc Zyngier val = vcpu_read_sys_reg(vcpu, r->reg); 164b1ea1d76SMarc Zyngier val &= ~mask; 165dedf97e8SMarc Zyngier } else { 166b1ea1d76SMarc Zyngier val = 0; 167dedf97e8SMarc Zyngier } 168b1ea1d76SMarc Zyngier 169b1ea1d76SMarc Zyngier val |= (p->regval & (mask >> shift)) << shift; 170b1ea1d76SMarc Zyngier vcpu_write_sys_reg(vcpu, val, r->reg); 171f0a3eaffSVictor Kamensky 1723c1e7165SMarc Zyngier kvm_toggle_cache(vcpu, was_enabled); 1734d44923bSMarc Zyngier return true; 1744d44923bSMarc Zyngier } 1754d44923bSMarc Zyngier 176af473829SJames Morse static bool access_actlr(struct kvm_vcpu *vcpu, 177af473829SJames Morse struct sys_reg_params *p, 178af473829SJames Morse const struct sys_reg_desc *r) 179af473829SJames Morse { 180b1ea1d76SMarc Zyngier u64 mask, shift; 181b1ea1d76SMarc Zyngier 182af473829SJames Morse if (p->is_write) 183af473829SJames Morse return ignore_write(vcpu, p); 184af473829SJames Morse 185b1ea1d76SMarc Zyngier get_access_mask(r, &mask, &shift); 186b1ea1d76SMarc Zyngier p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift; 187af473829SJames Morse 188af473829SJames Morse return true; 189af473829SJames Morse } 190af473829SJames Morse 1916d52f35aSAndre Przywara /* 1926d52f35aSAndre Przywara * Trap handler for the GICv3 SGI generation system register. 1936d52f35aSAndre Przywara * Forward the request to the VGIC emulation. 
1946d52f35aSAndre Przywara * The cp15_64 code makes sure this automatically works 1956d52f35aSAndre Przywara * for both AArch64 and AArch32 accesses. 1966d52f35aSAndre Przywara */ 1976d52f35aSAndre Przywara static bool access_gic_sgi(struct kvm_vcpu *vcpu, 1983fec037dSPavel Fedin struct sys_reg_params *p, 1996d52f35aSAndre Przywara const struct sys_reg_desc *r) 2006d52f35aSAndre Przywara { 20103bd646dSMarc Zyngier bool g1; 20203bd646dSMarc Zyngier 2036d52f35aSAndre Przywara if (!p->is_write) 204e7f1d1eeSMarc Zyngier return read_from_write_only(vcpu, p, r); 2056d52f35aSAndre Przywara 20603bd646dSMarc Zyngier /* 20703bd646dSMarc Zyngier * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates 20803bd646dSMarc Zyngier * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group, 20903bd646dSMarc Zyngier * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively 21003bd646dSMarc Zyngier * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure 21103bd646dSMarc Zyngier * group. 
21203bd646dSMarc Zyngier */ 21350f30453SMarc Zyngier if (p->Op0 == 0) { /* AArch32 */ 21403bd646dSMarc Zyngier switch (p->Op1) { 21503bd646dSMarc Zyngier default: /* Keep GCC quiet */ 21603bd646dSMarc Zyngier case 0: /* ICC_SGI1R */ 21703bd646dSMarc Zyngier g1 = true; 21803bd646dSMarc Zyngier break; 21903bd646dSMarc Zyngier case 1: /* ICC_ASGI1R */ 22003bd646dSMarc Zyngier case 2: /* ICC_SGI0R */ 22103bd646dSMarc Zyngier g1 = false; 22203bd646dSMarc Zyngier break; 22303bd646dSMarc Zyngier } 22450f30453SMarc Zyngier } else { /* AArch64 */ 22503bd646dSMarc Zyngier switch (p->Op2) { 22603bd646dSMarc Zyngier default: /* Keep GCC quiet */ 22703bd646dSMarc Zyngier case 5: /* ICC_SGI1R_EL1 */ 22803bd646dSMarc Zyngier g1 = true; 22903bd646dSMarc Zyngier break; 23003bd646dSMarc Zyngier case 6: /* ICC_ASGI1R_EL1 */ 23103bd646dSMarc Zyngier case 7: /* ICC_SGI0R_EL1 */ 23203bd646dSMarc Zyngier g1 = false; 23303bd646dSMarc Zyngier break; 23403bd646dSMarc Zyngier } 23503bd646dSMarc Zyngier } 23603bd646dSMarc Zyngier 23703bd646dSMarc Zyngier vgic_v3_dispatch_sgi(vcpu, p->regval, g1); 2386d52f35aSAndre Przywara 2396d52f35aSAndre Przywara return true; 2406d52f35aSAndre Przywara } 2416d52f35aSAndre Przywara 242b34f2bcbSMarc Zyngier static bool access_gic_sre(struct kvm_vcpu *vcpu, 243b34f2bcbSMarc Zyngier struct sys_reg_params *p, 244b34f2bcbSMarc Zyngier const struct sys_reg_desc *r) 245b34f2bcbSMarc Zyngier { 246b34f2bcbSMarc Zyngier if (p->is_write) 247b34f2bcbSMarc Zyngier return ignore_write(vcpu, p); 248b34f2bcbSMarc Zyngier 249b34f2bcbSMarc Zyngier p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; 250b34f2bcbSMarc Zyngier return true; 251b34f2bcbSMarc Zyngier } 252b34f2bcbSMarc Zyngier 2537609c125SMarc Zyngier static bool trap_raz_wi(struct kvm_vcpu *vcpu, 2543fec037dSPavel Fedin struct sys_reg_params *p, 2557c8c5e6aSMarc Zyngier const struct sys_reg_desc *r) 2567c8c5e6aSMarc Zyngier { 2577c8c5e6aSMarc Zyngier if (p->is_write) 2587c8c5e6aSMarc Zyngier return 
ignore_write(vcpu, p); 2597c8c5e6aSMarc Zyngier else 2607c8c5e6aSMarc Zyngier return read_zero(vcpu, p); 2617c8c5e6aSMarc Zyngier } 2627c8c5e6aSMarc Zyngier 26322925521SMarc Zyngier /* 26422925521SMarc Zyngier * ARMv8.1 mandates at least a trivial LORegion implementation, where all the 26522925521SMarc Zyngier * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 26622925521SMarc Zyngier * system, these registers should UNDEF. LORID_EL1 being a RO register, we 26722925521SMarc Zyngier * treat it separately. 26822925521SMarc Zyngier */ 26922925521SMarc Zyngier static bool trap_loregion(struct kvm_vcpu *vcpu, 270cc33c4e2SMark Rutland struct sys_reg_params *p, 271cc33c4e2SMark Rutland const struct sys_reg_desc *r) 272cc33c4e2SMark Rutland { 27322925521SMarc Zyngier u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); 2747ba8b438SAlexandru Elisei u32 sr = reg_to_encoding(r); 27522925521SMarc Zyngier 27622925521SMarc Zyngier if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { 277cc33c4e2SMark Rutland kvm_inject_undefined(vcpu); 278cc33c4e2SMark Rutland return false; 279cc33c4e2SMark Rutland } 280cc33c4e2SMark Rutland 28122925521SMarc Zyngier if (p->is_write && sr == SYS_LORID_EL1) 28222925521SMarc Zyngier return write_to_read_only(vcpu, p, r); 28322925521SMarc Zyngier 28422925521SMarc Zyngier return trap_raz_wi(vcpu, p, r); 28522925521SMarc Zyngier } 28622925521SMarc Zyngier 287f24adc65SOliver Upton static bool trap_oslar_el1(struct kvm_vcpu *vcpu, 288f24adc65SOliver Upton struct sys_reg_params *p, 289f24adc65SOliver Upton const struct sys_reg_desc *r) 290f24adc65SOliver Upton { 291f24adc65SOliver Upton u64 oslsr; 292f24adc65SOliver Upton 293f24adc65SOliver Upton if (!p->is_write) 294f24adc65SOliver Upton return read_from_write_only(vcpu, p, r); 295f24adc65SOliver Upton 296f24adc65SOliver Upton /* Forward the OSLK bit to OSLSR */ 297f24adc65SOliver Upton oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK; 298f24adc65SOliver Upton if 
(p->regval & SYS_OSLAR_OSLK) 299f24adc65SOliver Upton oslsr |= SYS_OSLSR_OSLK; 300f24adc65SOliver Upton 301f24adc65SOliver Upton __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr; 302f24adc65SOliver Upton return true; 303f24adc65SOliver Upton } 304f24adc65SOliver Upton 3050c557ed4SMarc Zyngier static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 3063fec037dSPavel Fedin struct sys_reg_params *p, 3070c557ed4SMarc Zyngier const struct sys_reg_desc *r) 3080c557ed4SMarc Zyngier { 309d42e2671SOliver Upton if (p->is_write) 310e2ffceaaSOliver Upton return write_to_read_only(vcpu, p, r); 311d42e2671SOliver Upton 312d42e2671SOliver Upton p->regval = __vcpu_sys_reg(vcpu, r->reg); 3130c557ed4SMarc Zyngier return true; 3140c557ed4SMarc Zyngier } 315d42e2671SOliver Upton 316d42e2671SOliver Upton static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 317978ceeb3SMarc Zyngier u64 val) 318d42e2671SOliver Upton { 319f24adc65SOliver Upton /* 320f24adc65SOliver Upton * The only modifiable bit is the OSLK bit. Refuse the write if 321f24adc65SOliver Upton * userspace attempts to change any other bit in the register. 
322f24adc65SOliver Upton */ 323f24adc65SOliver Upton if ((val ^ rd->val) & ~SYS_OSLSR_OSLK) 324d42e2671SOliver Upton return -EINVAL; 325d42e2671SOliver Upton 326f24adc65SOliver Upton __vcpu_sys_reg(vcpu, rd->reg) = val; 327d42e2671SOliver Upton return 0; 3280c557ed4SMarc Zyngier } 3290c557ed4SMarc Zyngier 3300c557ed4SMarc Zyngier static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, 3313fec037dSPavel Fedin struct sys_reg_params *p, 3320c557ed4SMarc Zyngier const struct sys_reg_desc *r) 3330c557ed4SMarc Zyngier { 3340c557ed4SMarc Zyngier if (p->is_write) { 3350c557ed4SMarc Zyngier return ignore_write(vcpu, p); 3360c557ed4SMarc Zyngier } else { 3371f3d8699SMark Rutland p->regval = read_sysreg(dbgauthstatus_el1); 3380c557ed4SMarc Zyngier return true; 3390c557ed4SMarc Zyngier } 3400c557ed4SMarc Zyngier } 3410c557ed4SMarc Zyngier 3420c557ed4SMarc Zyngier /* 3430c557ed4SMarc Zyngier * We want to avoid world-switching all the DBG registers all the 3440c557ed4SMarc Zyngier * time: 3450c557ed4SMarc Zyngier * 3460c557ed4SMarc Zyngier * - If we've touched any debug register, it is likely that we're 3470c557ed4SMarc Zyngier * going to touch more of them. It then makes sense to disable the 3480c557ed4SMarc Zyngier * traps and start doing the save/restore dance 3490c557ed4SMarc Zyngier * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is 3500c557ed4SMarc Zyngier * then mandatory to save/restore the registers, as the guest 3510c557ed4SMarc Zyngier * depends on them. 3520c557ed4SMarc Zyngier * 3530c557ed4SMarc Zyngier * For this, we use a DIRTY bit, indicating the guest has modified the 3540c557ed4SMarc Zyngier * debug registers, used as follow: 3550c557ed4SMarc Zyngier * 3560c557ed4SMarc Zyngier * On guest entry: 3570c557ed4SMarc Zyngier * - If the dirty bit is set (because we're coming back from trapping), 3580c557ed4SMarc Zyngier * disable the traps, save host registers, restore guest registers. 
3590c557ed4SMarc Zyngier * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), 3600c557ed4SMarc Zyngier * set the dirty bit, disable the traps, save host registers, 3610c557ed4SMarc Zyngier * restore guest registers. 3620c557ed4SMarc Zyngier * - Otherwise, enable the traps 3630c557ed4SMarc Zyngier * 3640c557ed4SMarc Zyngier * On guest exit: 3650c557ed4SMarc Zyngier * - If the dirty bit is set, save guest registers, restore host 3660c557ed4SMarc Zyngier * registers and clear the dirty bit. This ensure that the host can 3670c557ed4SMarc Zyngier * now use the debug registers. 3680c557ed4SMarc Zyngier */ 3690c557ed4SMarc Zyngier static bool trap_debug_regs(struct kvm_vcpu *vcpu, 3703fec037dSPavel Fedin struct sys_reg_params *p, 3710c557ed4SMarc Zyngier const struct sys_reg_desc *r) 3720c557ed4SMarc Zyngier { 3730c557ed4SMarc Zyngier if (p->is_write) { 3748d404c4cSChristoffer Dall vcpu_write_sys_reg(vcpu, p->regval, r->reg); 375b1da4908SMarc Zyngier vcpu_set_flag(vcpu, DEBUG_DIRTY); 3760c557ed4SMarc Zyngier } else { 3778d404c4cSChristoffer Dall p->regval = vcpu_read_sys_reg(vcpu, r->reg); 3780c557ed4SMarc Zyngier } 3790c557ed4SMarc Zyngier 3802ec5be3dSPavel Fedin trace_trap_reg(__func__, r->reg, p->is_write, p->regval); 381eef8c85aSAlex Bennée 3820c557ed4SMarc Zyngier return true; 3830c557ed4SMarc Zyngier } 3840c557ed4SMarc Zyngier 38584e690bfSAlex Bennée /* 38684e690bfSAlex Bennée * reg_to_dbg/dbg_to_reg 38784e690bfSAlex Bennée * 38884e690bfSAlex Bennée * A 32 bit write to a debug register leave top bits alone 38984e690bfSAlex Bennée * A 32 bit read from a debug register only returns the bottom bits 39084e690bfSAlex Bennée * 391b1da4908SMarc Zyngier * All writes will set the DEBUG_DIRTY flag to ensure the hyp code 392b1da4908SMarc Zyngier * switches between host and guest values in future. 
39384e690bfSAlex Bennée */ 394281243cbSMarc Zyngier static void reg_to_dbg(struct kvm_vcpu *vcpu, 3953fec037dSPavel Fedin struct sys_reg_params *p, 3961da42c34SMarc Zyngier const struct sys_reg_desc *rd, 39784e690bfSAlex Bennée u64 *dbg_reg) 39884e690bfSAlex Bennée { 3991da42c34SMarc Zyngier u64 mask, shift, val; 40084e690bfSAlex Bennée 4011da42c34SMarc Zyngier get_access_mask(rd, &mask, &shift); 40284e690bfSAlex Bennée 4031da42c34SMarc Zyngier val = *dbg_reg; 4041da42c34SMarc Zyngier val &= ~mask; 4051da42c34SMarc Zyngier val |= (p->regval & (mask >> shift)) << shift; 40684e690bfSAlex Bennée *dbg_reg = val; 4071da42c34SMarc Zyngier 408b1da4908SMarc Zyngier vcpu_set_flag(vcpu, DEBUG_DIRTY); 40984e690bfSAlex Bennée } 41084e690bfSAlex Bennée 411281243cbSMarc Zyngier static void dbg_to_reg(struct kvm_vcpu *vcpu, 4123fec037dSPavel Fedin struct sys_reg_params *p, 4131da42c34SMarc Zyngier const struct sys_reg_desc *rd, 41484e690bfSAlex Bennée u64 *dbg_reg) 41584e690bfSAlex Bennée { 4161da42c34SMarc Zyngier u64 mask, shift; 4171da42c34SMarc Zyngier 4181da42c34SMarc Zyngier get_access_mask(rd, &mask, &shift); 4191da42c34SMarc Zyngier p->regval = (*dbg_reg & mask) >> shift; 42084e690bfSAlex Bennée } 42184e690bfSAlex Bennée 422281243cbSMarc Zyngier static bool trap_bvr(struct kvm_vcpu *vcpu, 4233fec037dSPavel Fedin struct sys_reg_params *p, 42484e690bfSAlex Bennée const struct sys_reg_desc *rd) 42584e690bfSAlex Bennée { 426cb853dedSMarc Zyngier u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; 42784e690bfSAlex Bennée 42884e690bfSAlex Bennée if (p->is_write) 4291da42c34SMarc Zyngier reg_to_dbg(vcpu, p, rd, dbg_reg); 43084e690bfSAlex Bennée else 4311da42c34SMarc Zyngier dbg_to_reg(vcpu, p, rd, dbg_reg); 43284e690bfSAlex Bennée 433cb853dedSMarc Zyngier trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); 434eef8c85aSAlex Bennée 43584e690bfSAlex Bennée return true; 43684e690bfSAlex Bennée } 43784e690bfSAlex Bennée 43884e690bfSAlex Bennée static int 
set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 439978ceeb3SMarc Zyngier u64 val) 44084e690bfSAlex Bennée { 441978ceeb3SMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val; 44284e690bfSAlex Bennée return 0; 44384e690bfSAlex Bennée } 44484e690bfSAlex Bennée 44584e690bfSAlex Bennée static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 446978ceeb3SMarc Zyngier u64 *val) 44784e690bfSAlex Bennée { 448978ceeb3SMarc Zyngier *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; 44984e690bfSAlex Bennée return 0; 45084e690bfSAlex Bennée } 45184e690bfSAlex Bennée 452281243cbSMarc Zyngier static void reset_bvr(struct kvm_vcpu *vcpu, 45384e690bfSAlex Bennée const struct sys_reg_desc *rd) 45484e690bfSAlex Bennée { 455cb853dedSMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val; 45684e690bfSAlex Bennée } 45784e690bfSAlex Bennée 458281243cbSMarc Zyngier static bool trap_bcr(struct kvm_vcpu *vcpu, 4593fec037dSPavel Fedin struct sys_reg_params *p, 46084e690bfSAlex Bennée const struct sys_reg_desc *rd) 46184e690bfSAlex Bennée { 462cb853dedSMarc Zyngier u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; 46384e690bfSAlex Bennée 46484e690bfSAlex Bennée if (p->is_write) 4651da42c34SMarc Zyngier reg_to_dbg(vcpu, p, rd, dbg_reg); 46684e690bfSAlex Bennée else 4671da42c34SMarc Zyngier dbg_to_reg(vcpu, p, rd, dbg_reg); 46884e690bfSAlex Bennée 469cb853dedSMarc Zyngier trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); 470eef8c85aSAlex Bennée 47184e690bfSAlex Bennée return true; 47284e690bfSAlex Bennée } 47384e690bfSAlex Bennée 47484e690bfSAlex Bennée static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 475978ceeb3SMarc Zyngier u64 val) 47684e690bfSAlex Bennée { 477978ceeb3SMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val; 47884e690bfSAlex Bennée return 0; 47984e690bfSAlex Bennée } 48084e690bfSAlex Bennée 48184e690bfSAlex Bennée static int get_bcr(struct kvm_vcpu *vcpu, const struct 
sys_reg_desc *rd, 482978ceeb3SMarc Zyngier u64 *val) 48384e690bfSAlex Bennée { 484978ceeb3SMarc Zyngier *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; 48584e690bfSAlex Bennée return 0; 48684e690bfSAlex Bennée } 48784e690bfSAlex Bennée 488281243cbSMarc Zyngier static void reset_bcr(struct kvm_vcpu *vcpu, 48984e690bfSAlex Bennée const struct sys_reg_desc *rd) 49084e690bfSAlex Bennée { 491cb853dedSMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val; 49284e690bfSAlex Bennée } 49384e690bfSAlex Bennée 494281243cbSMarc Zyngier static bool trap_wvr(struct kvm_vcpu *vcpu, 4953fec037dSPavel Fedin struct sys_reg_params *p, 49684e690bfSAlex Bennée const struct sys_reg_desc *rd) 49784e690bfSAlex Bennée { 498cb853dedSMarc Zyngier u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; 49984e690bfSAlex Bennée 50084e690bfSAlex Bennée if (p->is_write) 5011da42c34SMarc Zyngier reg_to_dbg(vcpu, p, rd, dbg_reg); 50284e690bfSAlex Bennée else 5031da42c34SMarc Zyngier dbg_to_reg(vcpu, p, rd, dbg_reg); 50484e690bfSAlex Bennée 505cb853dedSMarc Zyngier trace_trap_reg(__func__, rd->CRm, p->is_write, 506cb853dedSMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]); 507eef8c85aSAlex Bennée 50884e690bfSAlex Bennée return true; 50984e690bfSAlex Bennée } 51084e690bfSAlex Bennée 51184e690bfSAlex Bennée static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 512978ceeb3SMarc Zyngier u64 val) 51384e690bfSAlex Bennée { 514978ceeb3SMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val; 51584e690bfSAlex Bennée return 0; 51684e690bfSAlex Bennée } 51784e690bfSAlex Bennée 51884e690bfSAlex Bennée static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 519978ceeb3SMarc Zyngier u64 *val) 52084e690bfSAlex Bennée { 521978ceeb3SMarc Zyngier *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; 52284e690bfSAlex Bennée return 0; 52384e690bfSAlex Bennée } 52484e690bfSAlex Bennée 525281243cbSMarc Zyngier static void reset_wvr(struct 
kvm_vcpu *vcpu, 52684e690bfSAlex Bennée const struct sys_reg_desc *rd) 52784e690bfSAlex Bennée { 528cb853dedSMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val; 52984e690bfSAlex Bennée } 53084e690bfSAlex Bennée 531281243cbSMarc Zyngier static bool trap_wcr(struct kvm_vcpu *vcpu, 5323fec037dSPavel Fedin struct sys_reg_params *p, 53384e690bfSAlex Bennée const struct sys_reg_desc *rd) 53484e690bfSAlex Bennée { 535cb853dedSMarc Zyngier u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; 53684e690bfSAlex Bennée 53784e690bfSAlex Bennée if (p->is_write) 5381da42c34SMarc Zyngier reg_to_dbg(vcpu, p, rd, dbg_reg); 53984e690bfSAlex Bennée else 5401da42c34SMarc Zyngier dbg_to_reg(vcpu, p, rd, dbg_reg); 54184e690bfSAlex Bennée 542cb853dedSMarc Zyngier trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); 543eef8c85aSAlex Bennée 54484e690bfSAlex Bennée return true; 54584e690bfSAlex Bennée } 54684e690bfSAlex Bennée 54784e690bfSAlex Bennée static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 548978ceeb3SMarc Zyngier u64 val) 54984e690bfSAlex Bennée { 550978ceeb3SMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val; 55184e690bfSAlex Bennée return 0; 55284e690bfSAlex Bennée } 55384e690bfSAlex Bennée 55484e690bfSAlex Bennée static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 555978ceeb3SMarc Zyngier u64 *val) 55684e690bfSAlex Bennée { 557978ceeb3SMarc Zyngier *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; 55884e690bfSAlex Bennée return 0; 55984e690bfSAlex Bennée } 56084e690bfSAlex Bennée 561281243cbSMarc Zyngier static void reset_wcr(struct kvm_vcpu *vcpu, 56284e690bfSAlex Bennée const struct sys_reg_desc *rd) 56384e690bfSAlex Bennée { 564cb853dedSMarc Zyngier vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val; 56584e690bfSAlex Bennée } 56684e690bfSAlex Bennée 5677c8c5e6aSMarc Zyngier static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 5687c8c5e6aSMarc Zyngier { 
5698d404c4cSChristoffer Dall u64 amair = read_sysreg(amair_el1); 5708d404c4cSChristoffer Dall vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); 5717c8c5e6aSMarc Zyngier } 5727c8c5e6aSMarc Zyngier 573af473829SJames Morse static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 574af473829SJames Morse { 575af473829SJames Morse u64 actlr = read_sysreg(actlr_el1); 576af473829SJames Morse vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1); 577af473829SJames Morse } 578af473829SJames Morse 5797c8c5e6aSMarc Zyngier static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 5807c8c5e6aSMarc Zyngier { 5814429fc64SAndre Przywara u64 mpidr; 5824429fc64SAndre Przywara 5837c8c5e6aSMarc Zyngier /* 5844429fc64SAndre Przywara * Map the vcpu_id into the first three affinity level fields of 5854429fc64SAndre Przywara * the MPIDR. We limit the number of VCPUs in level 0 due to a 5864429fc64SAndre Przywara * limitation to 16 CPUs in that level in the ICC_SGIxR registers 5874429fc64SAndre Przywara * of the GICv3 to be able to address each CPU directly when 5884429fc64SAndre Przywara * sending IPIs. 
5897c8c5e6aSMarc Zyngier */ 5904429fc64SAndre Przywara mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); 5914429fc64SAndre Przywara mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); 5924429fc64SAndre Przywara mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); 5938d404c4cSChristoffer Dall vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); 5947c8c5e6aSMarc Zyngier } 5957c8c5e6aSMarc Zyngier 59611663111SMarc Zyngier static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, 59711663111SMarc Zyngier const struct sys_reg_desc *r) 59811663111SMarc Zyngier { 59911663111SMarc Zyngier if (kvm_vcpu_has_pmu(vcpu)) 60011663111SMarc Zyngier return 0; 60111663111SMarc Zyngier 60211663111SMarc Zyngier return REG_HIDDEN; 60311663111SMarc Zyngier } 60411663111SMarc Zyngier 6050ab410a9SMarc Zyngier static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 6060ab410a9SMarc Zyngier { 6070ab410a9SMarc Zyngier u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX); 6080ab410a9SMarc Zyngier 6090ab410a9SMarc Zyngier /* No PMU available, any PMU reg may UNDEF... 
*/ 6100ab410a9SMarc Zyngier if (!kvm_arm_support_pmu_v3()) 6110ab410a9SMarc Zyngier return; 6120ab410a9SMarc Zyngier 6130ab410a9SMarc Zyngier n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT; 6140ab410a9SMarc Zyngier n &= ARMV8_PMU_PMCR_N_MASK; 6150ab410a9SMarc Zyngier if (n) 6160ab410a9SMarc Zyngier mask |= GENMASK(n - 1, 0); 6170ab410a9SMarc Zyngier 6180ab410a9SMarc Zyngier reset_unknown(vcpu, r); 6190ab410a9SMarc Zyngier __vcpu_sys_reg(vcpu, r->reg) &= mask; 6200ab410a9SMarc Zyngier } 6210ab410a9SMarc Zyngier 6220ab410a9SMarc Zyngier static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 6230ab410a9SMarc Zyngier { 6240ab410a9SMarc Zyngier reset_unknown(vcpu, r); 6250ab410a9SMarc Zyngier __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0); 6260ab410a9SMarc Zyngier } 6270ab410a9SMarc Zyngier 6280ab410a9SMarc Zyngier static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 6290ab410a9SMarc Zyngier { 6300ab410a9SMarc Zyngier reset_unknown(vcpu, r); 6310ab410a9SMarc Zyngier __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK; 6320ab410a9SMarc Zyngier } 6330ab410a9SMarc Zyngier 6340ab410a9SMarc Zyngier static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 6350ab410a9SMarc Zyngier { 6360ab410a9SMarc Zyngier reset_unknown(vcpu, r); 6370ab410a9SMarc Zyngier __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK; 6380ab410a9SMarc Zyngier } 6390ab410a9SMarc Zyngier 640ab946834SShannon Zhao static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 641ab946834SShannon Zhao { 642ab946834SShannon Zhao u64 pmcr, val; 643ab946834SShannon Zhao 6442a5f1b67SMarc Zyngier /* No PMU available, PMCR_EL0 may UNDEF... 
*/ 6452a5f1b67SMarc Zyngier if (!kvm_arm_support_pmu_v3()) 6462a5f1b67SMarc Zyngier return; 6472a5f1b67SMarc Zyngier 6481f3d8699SMark Rutland pmcr = read_sysreg(pmcr_el0); 6491f3d8699SMark Rutland /* 6501f3d8699SMark Rutland * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN 651ab946834SShannon Zhao * except PMCR.E resetting to zero. 652ab946834SShannon Zhao */ 653ab946834SShannon Zhao val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) 654ab946834SShannon Zhao | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); 655f3c6efc7SOliver Upton if (!kvm_supports_32bit_el0()) 6566f163714SMarc Zyngier val |= ARMV8_PMU_PMCR_LC; 65703fdfb26SMarc Zyngier __vcpu_sys_reg(vcpu, r->reg) = val; 658ab946834SShannon Zhao } 659ab946834SShannon Zhao 6606c007036SMarc Zyngier static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) 661d692b8adSShannon Zhao { 6628d404c4cSChristoffer Dall u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0); 6637ded92e2SMarc Zyngier bool enabled = (reg & flags) || vcpu_mode_priv(vcpu); 664d692b8adSShannon Zhao 66524d5950fSMarc Zyngier if (!enabled) 66624d5950fSMarc Zyngier kvm_inject_undefined(vcpu); 66724d5950fSMarc Zyngier 6686c007036SMarc Zyngier return !enabled; 6696c007036SMarc Zyngier } 6706c007036SMarc Zyngier 6716c007036SMarc Zyngier static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) 6726c007036SMarc Zyngier { 6736c007036SMarc Zyngier return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN); 674d692b8adSShannon Zhao } 675d692b8adSShannon Zhao 676d692b8adSShannon Zhao static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) 677d692b8adSShannon Zhao { 6786c007036SMarc Zyngier return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN); 679d692b8adSShannon Zhao } 680d692b8adSShannon Zhao 681d692b8adSShannon Zhao static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) 682d692b8adSShannon Zhao { 6836c007036SMarc Zyngier return check_pmu_access_disabled(vcpu, 
ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN); 684d692b8adSShannon Zhao } 685d692b8adSShannon Zhao 686d692b8adSShannon Zhao static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) 687d692b8adSShannon Zhao { 6886c007036SMarc Zyngier return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN); 689d692b8adSShannon Zhao } 690d692b8adSShannon Zhao 691ab946834SShannon Zhao static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 692ab946834SShannon Zhao const struct sys_reg_desc *r) 693ab946834SShannon Zhao { 694ab946834SShannon Zhao u64 val; 695ab946834SShannon Zhao 696d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 697d692b8adSShannon Zhao return false; 698d692b8adSShannon Zhao 699ab946834SShannon Zhao if (p->is_write) { 700ab946834SShannon Zhao /* Only update writeable bits of PMCR */ 7018d404c4cSChristoffer Dall val = __vcpu_sys_reg(vcpu, PMCR_EL0); 702ab946834SShannon Zhao val &= ~ARMV8_PMU_PMCR_MASK; 703ab946834SShannon Zhao val |= p->regval & ARMV8_PMU_PMCR_MASK; 704f3c6efc7SOliver Upton if (!kvm_supports_32bit_el0()) 7056f163714SMarc Zyngier val |= ARMV8_PMU_PMCR_LC; 7068d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMCR_EL0) = val; 70776993739SShannon Zhao kvm_pmu_handle_pmcr(vcpu, val); 708435e53fbSAndrew Murray kvm_vcpu_pmu_restore_guest(vcpu); 709ab946834SShannon Zhao } else { 710ab946834SShannon Zhao /* PMCR.P & PMCR.C are RAZ */ 7118d404c4cSChristoffer Dall val = __vcpu_sys_reg(vcpu, PMCR_EL0) 712ab946834SShannon Zhao & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); 713ab946834SShannon Zhao p->regval = val; 714ab946834SShannon Zhao } 715ab946834SShannon Zhao 716ab946834SShannon Zhao return true; 717ab946834SShannon Zhao } 718ab946834SShannon Zhao 7193965c3ceSShannon Zhao static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 7203965c3ceSShannon Zhao const struct sys_reg_desc *r) 7213965c3ceSShannon Zhao { 722d692b8adSShannon Zhao if (pmu_access_event_counter_el0_disabled(vcpu)) 
723d692b8adSShannon Zhao return false; 724d692b8adSShannon Zhao 7253965c3ceSShannon Zhao if (p->is_write) 7268d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; 7273965c3ceSShannon Zhao else 7283965c3ceSShannon Zhao /* return PMSELR.SEL field */ 7298d404c4cSChristoffer Dall p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) 7303965c3ceSShannon Zhao & ARMV8_PMU_COUNTER_MASK; 7313965c3ceSShannon Zhao 7323965c3ceSShannon Zhao return true; 7333965c3ceSShannon Zhao } 7343965c3ceSShannon Zhao 735a86b5505SShannon Zhao static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 736a86b5505SShannon Zhao const struct sys_reg_desc *r) 737a86b5505SShannon Zhao { 73899b6a401SMarc Zyngier u64 pmceid, mask, shift; 739a86b5505SShannon Zhao 740a86b5505SShannon Zhao BUG_ON(p->is_write); 741a86b5505SShannon Zhao 742d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 743d692b8adSShannon Zhao return false; 744d692b8adSShannon Zhao 74599b6a401SMarc Zyngier get_access_mask(r, &mask, &shift); 74699b6a401SMarc Zyngier 74788865becSMarc Zyngier pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1)); 74899b6a401SMarc Zyngier pmceid &= mask; 74999b6a401SMarc Zyngier pmceid >>= shift; 750a86b5505SShannon Zhao 751a86b5505SShannon Zhao p->regval = pmceid; 752a86b5505SShannon Zhao 753a86b5505SShannon Zhao return true; 754a86b5505SShannon Zhao } 755a86b5505SShannon Zhao 756051ff581SShannon Zhao static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) 757051ff581SShannon Zhao { 758051ff581SShannon Zhao u64 pmcr, val; 759051ff581SShannon Zhao 7608d404c4cSChristoffer Dall pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); 761051ff581SShannon Zhao val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; 76224d5950fSMarc Zyngier if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { 76324d5950fSMarc Zyngier kvm_inject_undefined(vcpu); 764051ff581SShannon Zhao return false; 76524d5950fSMarc Zyngier } 766051ff581SShannon Zhao 767051ff581SShannon Zhao return true; 
768051ff581SShannon Zhao } 769051ff581SShannon Zhao 770051ff581SShannon Zhao static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, 771051ff581SShannon Zhao struct sys_reg_params *p, 772051ff581SShannon Zhao const struct sys_reg_desc *r) 773051ff581SShannon Zhao { 774a3da9358SMarc Zyngier u64 idx = ~0UL; 775051ff581SShannon Zhao 776051ff581SShannon Zhao if (r->CRn == 9 && r->CRm == 13) { 777051ff581SShannon Zhao if (r->Op2 == 2) { 778051ff581SShannon Zhao /* PMXEVCNTR_EL0 */ 779d692b8adSShannon Zhao if (pmu_access_event_counter_el0_disabled(vcpu)) 780d692b8adSShannon Zhao return false; 781d692b8adSShannon Zhao 7828d404c4cSChristoffer Dall idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) 783051ff581SShannon Zhao & ARMV8_PMU_COUNTER_MASK; 784051ff581SShannon Zhao } else if (r->Op2 == 0) { 785051ff581SShannon Zhao /* PMCCNTR_EL0 */ 786d692b8adSShannon Zhao if (pmu_access_cycle_counter_el0_disabled(vcpu)) 787d692b8adSShannon Zhao return false; 788d692b8adSShannon Zhao 789051ff581SShannon Zhao idx = ARMV8_PMU_CYCLE_IDX; 790051ff581SShannon Zhao } 7919e3f7a29SWei Huang } else if (r->CRn == 0 && r->CRm == 9) { 7929e3f7a29SWei Huang /* PMCCNTR */ 7939e3f7a29SWei Huang if (pmu_access_event_counter_el0_disabled(vcpu)) 7949e3f7a29SWei Huang return false; 7959e3f7a29SWei Huang 7969e3f7a29SWei Huang idx = ARMV8_PMU_CYCLE_IDX; 797051ff581SShannon Zhao } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 798051ff581SShannon Zhao /* PMEVCNTRn_EL0 */ 799d692b8adSShannon Zhao if (pmu_access_event_counter_el0_disabled(vcpu)) 800d692b8adSShannon Zhao return false; 801d692b8adSShannon Zhao 802051ff581SShannon Zhao idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 803051ff581SShannon Zhao } 804051ff581SShannon Zhao 805a3da9358SMarc Zyngier /* Catch any decoding mistake */ 806a3da9358SMarc Zyngier WARN_ON(idx == ~0UL); 807a3da9358SMarc Zyngier 808051ff581SShannon Zhao if (!pmu_counter_idx_valid(vcpu, idx)) 809051ff581SShannon Zhao return false; 810051ff581SShannon Zhao 811d692b8adSShannon Zhao if (p->is_write) 
{ 812d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 813d692b8adSShannon Zhao return false; 814d692b8adSShannon Zhao 815051ff581SShannon Zhao kvm_pmu_set_counter_value(vcpu, idx, p->regval); 816d692b8adSShannon Zhao } else { 817051ff581SShannon Zhao p->regval = kvm_pmu_get_counter_value(vcpu, idx); 818d692b8adSShannon Zhao } 819051ff581SShannon Zhao 820051ff581SShannon Zhao return true; 821051ff581SShannon Zhao } 822051ff581SShannon Zhao 8239feb21acSShannon Zhao static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 8249feb21acSShannon Zhao const struct sys_reg_desc *r) 8259feb21acSShannon Zhao { 8269feb21acSShannon Zhao u64 idx, reg; 8279feb21acSShannon Zhao 828d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 829d692b8adSShannon Zhao return false; 830d692b8adSShannon Zhao 8319feb21acSShannon Zhao if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { 8329feb21acSShannon Zhao /* PMXEVTYPER_EL0 */ 8338d404c4cSChristoffer Dall idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; 8349feb21acSShannon Zhao reg = PMEVTYPER0_EL0 + idx; 8359feb21acSShannon Zhao } else if (r->CRn == 14 && (r->CRm & 12) == 12) { 8369feb21acSShannon Zhao idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 8379feb21acSShannon Zhao if (idx == ARMV8_PMU_CYCLE_IDX) 8389feb21acSShannon Zhao reg = PMCCFILTR_EL0; 8399feb21acSShannon Zhao else 8409feb21acSShannon Zhao /* PMEVTYPERn_EL0 */ 8419feb21acSShannon Zhao reg = PMEVTYPER0_EL0 + idx; 8429feb21acSShannon Zhao } else { 8439feb21acSShannon Zhao BUG(); 8449feb21acSShannon Zhao } 8459feb21acSShannon Zhao 8469feb21acSShannon Zhao if (!pmu_counter_idx_valid(vcpu, idx)) 8479feb21acSShannon Zhao return false; 8489feb21acSShannon Zhao 8499feb21acSShannon Zhao if (p->is_write) { 8509feb21acSShannon Zhao kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); 8518d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; 852435e53fbSAndrew Murray kvm_vcpu_pmu_restore_guest(vcpu); 
8539feb21acSShannon Zhao } else { 8548d404c4cSChristoffer Dall p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; 8559feb21acSShannon Zhao } 8569feb21acSShannon Zhao 8579feb21acSShannon Zhao return true; 8589feb21acSShannon Zhao } 8599feb21acSShannon Zhao 86096b0eebcSShannon Zhao static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 86196b0eebcSShannon Zhao const struct sys_reg_desc *r) 86296b0eebcSShannon Zhao { 86396b0eebcSShannon Zhao u64 val, mask; 86496b0eebcSShannon Zhao 865d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 866d692b8adSShannon Zhao return false; 867d692b8adSShannon Zhao 86896b0eebcSShannon Zhao mask = kvm_pmu_valid_counter_mask(vcpu); 86996b0eebcSShannon Zhao if (p->is_write) { 87096b0eebcSShannon Zhao val = p->regval & mask; 87196b0eebcSShannon Zhao if (r->Op2 & 0x1) { 87296b0eebcSShannon Zhao /* accessing PMCNTENSET_EL0 */ 8738d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; 874418e5ca8SAndrew Murray kvm_pmu_enable_counter_mask(vcpu, val); 875435e53fbSAndrew Murray kvm_vcpu_pmu_restore_guest(vcpu); 87696b0eebcSShannon Zhao } else { 87796b0eebcSShannon Zhao /* accessing PMCNTENCLR_EL0 */ 8788d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; 879418e5ca8SAndrew Murray kvm_pmu_disable_counter_mask(vcpu, val); 88096b0eebcSShannon Zhao } 88196b0eebcSShannon Zhao } else { 882f5eff400SMarc Zyngier p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); 88396b0eebcSShannon Zhao } 88496b0eebcSShannon Zhao 88596b0eebcSShannon Zhao return true; 88696b0eebcSShannon Zhao } 88796b0eebcSShannon Zhao 8889db52c78SShannon Zhao static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 8899db52c78SShannon Zhao const struct sys_reg_desc *r) 8909db52c78SShannon Zhao { 8919db52c78SShannon Zhao u64 mask = kvm_pmu_valid_counter_mask(vcpu); 8929db52c78SShannon Zhao 893b0737e99SMarc Zyngier if (check_pmu_access_disabled(vcpu, 0)) 894d692b8adSShannon Zhao return false; 
895d692b8adSShannon Zhao 8969db52c78SShannon Zhao if (p->is_write) { 8979db52c78SShannon Zhao u64 val = p->regval & mask; 8989db52c78SShannon Zhao 8999db52c78SShannon Zhao if (r->Op2 & 0x1) 9009db52c78SShannon Zhao /* accessing PMINTENSET_EL1 */ 9018d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; 9029db52c78SShannon Zhao else 9039db52c78SShannon Zhao /* accessing PMINTENCLR_EL1 */ 9048d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; 9059db52c78SShannon Zhao } else { 906f5eff400SMarc Zyngier p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1); 9079db52c78SShannon Zhao } 9089db52c78SShannon Zhao 9099db52c78SShannon Zhao return true; 9109db52c78SShannon Zhao } 9119db52c78SShannon Zhao 91276d883c4SShannon Zhao static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 91376d883c4SShannon Zhao const struct sys_reg_desc *r) 91476d883c4SShannon Zhao { 91576d883c4SShannon Zhao u64 mask = kvm_pmu_valid_counter_mask(vcpu); 91676d883c4SShannon Zhao 917d692b8adSShannon Zhao if (pmu_access_el0_disabled(vcpu)) 918d692b8adSShannon Zhao return false; 919d692b8adSShannon Zhao 92076d883c4SShannon Zhao if (p->is_write) { 92176d883c4SShannon Zhao if (r->CRm & 0x2) 92276d883c4SShannon Zhao /* accessing PMOVSSET_EL0 */ 9238d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); 92476d883c4SShannon Zhao else 92576d883c4SShannon Zhao /* accessing PMOVSCLR_EL0 */ 9268d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); 92776d883c4SShannon Zhao } else { 928f5eff400SMarc Zyngier p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); 92976d883c4SShannon Zhao } 93076d883c4SShannon Zhao 93176d883c4SShannon Zhao return true; 93276d883c4SShannon Zhao } 93376d883c4SShannon Zhao 9347a0adc70SShannon Zhao static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 9357a0adc70SShannon Zhao const struct sys_reg_desc *r) 9367a0adc70SShannon Zhao { 9377a0adc70SShannon Zhao u64 mask; 
9387a0adc70SShannon Zhao 939e0443230SMarc Zyngier if (!p->is_write) 940e7f1d1eeSMarc Zyngier return read_from_write_only(vcpu, p, r); 941e0443230SMarc Zyngier 942d692b8adSShannon Zhao if (pmu_write_swinc_el0_disabled(vcpu)) 943d692b8adSShannon Zhao return false; 944d692b8adSShannon Zhao 9457a0adc70SShannon Zhao mask = kvm_pmu_valid_counter_mask(vcpu); 9467a0adc70SShannon Zhao kvm_pmu_software_increment(vcpu, p->regval & mask); 9477a0adc70SShannon Zhao return true; 9487a0adc70SShannon Zhao } 9497a0adc70SShannon Zhao 950d692b8adSShannon Zhao static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 951d692b8adSShannon Zhao const struct sys_reg_desc *r) 952d692b8adSShannon Zhao { 953d692b8adSShannon Zhao if (p->is_write) { 9549008c235SMarc Zyngier if (!vcpu_mode_priv(vcpu)) { 9559008c235SMarc Zyngier kvm_inject_undefined(vcpu); 956d692b8adSShannon Zhao return false; 9579008c235SMarc Zyngier } 958d692b8adSShannon Zhao 9598d404c4cSChristoffer Dall __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = 9608d404c4cSChristoffer Dall p->regval & ARMV8_PMU_USERENR_MASK; 961d692b8adSShannon Zhao } else { 9628d404c4cSChristoffer Dall p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) 963d692b8adSShannon Zhao & ARMV8_PMU_USERENR_MASK; 964d692b8adSShannon Zhao } 965d692b8adSShannon Zhao 966d692b8adSShannon Zhao return true; 967d692b8adSShannon Zhao } 968d692b8adSShannon Zhao 9690c557ed4SMarc Zyngier /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ 9700c557ed4SMarc Zyngier #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ 971ee1b64e6SMark Rutland { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ 97203fdfb26SMarc Zyngier trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \ 973ee1b64e6SMark Rutland { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ 97403fdfb26SMarc Zyngier trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \ 975ee1b64e6SMark Rutland { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ 97603fdfb26SMarc Zyngier trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \ 977ee1b64e6SMark Rutland { 
SYS_DESC(SYS_DBGWCRn_EL1(n)), \ 97803fdfb26SMarc Zyngier trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr } 9790c557ed4SMarc Zyngier 98011663111SMarc Zyngier #define PMU_SYS_REG(r) \ 9810ab410a9SMarc Zyngier SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility 98211663111SMarc Zyngier 983051ff581SShannon Zhao /* Macro to expand the PMEVCNTRn_EL0 register */ 984051ff581SShannon Zhao #define PMU_PMEVCNTR_EL0(n) \ 98511663111SMarc Zyngier { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \ 9860ab410a9SMarc Zyngier .reset = reset_pmevcntr, \ 98711663111SMarc Zyngier .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), } 988051ff581SShannon Zhao 9899feb21acSShannon Zhao /* Macro to expand the PMEVTYPERn_EL0 register */ 9909feb21acSShannon Zhao #define PMU_PMEVTYPER_EL0(n) \ 99111663111SMarc Zyngier { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \ 9920ab410a9SMarc Zyngier .reset = reset_pmevtyper, \ 99311663111SMarc Zyngier .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), } 9949feb21acSShannon Zhao 995338b1793SMarc Zyngier static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 9964fcdf106SIonela Voinescu const struct sys_reg_desc *r) 9974fcdf106SIonela Voinescu { 9984fcdf106SIonela Voinescu kvm_inject_undefined(vcpu); 9994fcdf106SIonela Voinescu 10004fcdf106SIonela Voinescu return false; 10014fcdf106SIonela Voinescu } 10024fcdf106SIonela Voinescu 10034fcdf106SIonela Voinescu /* Macro to expand the AMU counter and type registers*/ 1004338b1793SMarc Zyngier #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access } 1005338b1793SMarc Zyngier #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access } 1006338b1793SMarc Zyngier #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access } 1007338b1793SMarc Zyngier #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access } 1008384b40caSMark Rutland 1009384b40caSMark Rutland static unsigned int ptrauth_visibility(const struct kvm_vcpu 
*vcpu, 1010384b40caSMark Rutland const struct sys_reg_desc *rd) 1011384b40caSMark Rutland { 101201fe5aceSAndrew Jones return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN; 1013384b40caSMark Rutland } 1014384b40caSMark Rutland 1015338b1793SMarc Zyngier /* 1016338b1793SMarc Zyngier * If we land here on a PtrAuth access, that is because we didn't 1017338b1793SMarc Zyngier * fixup the access on exit by allowing the PtrAuth sysregs. The only 1018338b1793SMarc Zyngier * way this happens is when the guest does not have PtrAuth support 1019338b1793SMarc Zyngier * enabled. 1020338b1793SMarc Zyngier */ 1021384b40caSMark Rutland #define __PTRAUTH_KEY(k) \ 1022338b1793SMarc Zyngier { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \ 1023384b40caSMark Rutland .visibility = ptrauth_visibility} 1024384b40caSMark Rutland 1025384b40caSMark Rutland #define PTRAUTH_KEY(k) \ 1026384b40caSMark Rutland __PTRAUTH_KEY(k ## KEYLO_EL1), \ 1027384b40caSMark Rutland __PTRAUTH_KEY(k ## KEYHI_EL1) 1028384b40caSMark Rutland 102984135d3dSAndre Przywara static bool access_arch_timer(struct kvm_vcpu *vcpu, 1030c9a3c58fSJintack Lim struct sys_reg_params *p, 1031c9a3c58fSJintack Lim const struct sys_reg_desc *r) 1032c9a3c58fSJintack Lim { 103384135d3dSAndre Przywara enum kvm_arch_timers tmr; 103484135d3dSAndre Przywara enum kvm_arch_timer_regs treg; 103584135d3dSAndre Przywara u64 reg = reg_to_encoding(r); 10367b6b4631SJintack Lim 103784135d3dSAndre Przywara switch (reg) { 103884135d3dSAndre Przywara case SYS_CNTP_TVAL_EL0: 103984135d3dSAndre Przywara case SYS_AARCH32_CNTP_TVAL: 104084135d3dSAndre Przywara tmr = TIMER_PTIMER; 104184135d3dSAndre Przywara treg = TIMER_REG_TVAL; 104284135d3dSAndre Przywara break; 104384135d3dSAndre Przywara case SYS_CNTP_CTL_EL0: 104484135d3dSAndre Przywara case SYS_AARCH32_CNTP_CTL: 104584135d3dSAndre Przywara tmr = TIMER_PTIMER; 104684135d3dSAndre Przywara treg = TIMER_REG_CTL; 104784135d3dSAndre Przywara break; 104884135d3dSAndre Przywara case SYS_CNTP_CVAL_EL0: 
104984135d3dSAndre Przywara case SYS_AARCH32_CNTP_CVAL: 105084135d3dSAndre Przywara tmr = TIMER_PTIMER; 105184135d3dSAndre Przywara treg = TIMER_REG_CVAL; 105284135d3dSAndre Przywara break; 105384135d3dSAndre Przywara default: 105484135d3dSAndre Przywara BUG(); 1055c1b135afSChristoffer Dall } 10567b6b4631SJintack Lim 1057c1b135afSChristoffer Dall if (p->is_write) 105884135d3dSAndre Przywara kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval); 1059c1b135afSChristoffer Dall else 106084135d3dSAndre Przywara p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg); 10617b6b4631SJintack Lim 1062c9a3c58fSJintack Lim return true; 1063c9a3c58fSJintack Lim } 1064c9a3c58fSJintack Lim 106593390c0aSDave Martin /* Read a sanitised cpufeature ID register by sys_reg_desc */ 1066*cdd5036dSOliver Upton static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r) 106793390c0aSDave Martin { 10687ba8b438SAlexandru Elisei u32 id = reg_to_encoding(r); 106900d5101bSAlexandru Elisei u64 val; 107000d5101bSAlexandru Elisei 1071*cdd5036dSOliver Upton if (sysreg_visible_as_raz(vcpu, r)) 107200d5101bSAlexandru Elisei return 0; 107300d5101bSAlexandru Elisei 107400d5101bSAlexandru Elisei val = read_sanitised_ftr_reg(id); 107593390c0aSDave Martin 1076c8857935SMarc Zyngier switch (id) { 1077c8857935SMarc Zyngier case SYS_ID_AA64PFR0_EL1: 10784fcdf106SIonela Voinescu if (!vcpu_has_sve(vcpu)) 1079f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE); 1080f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU); 1081f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2); 1082f76f89e2SFuad Tabba val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2); 1083f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3); 1084f76f89e2SFuad Tabba val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3); 10855163373aSMarc Zyngier if (kvm_vgic_global_state.type == VGIC_V3) { 1086562e530fSMarc 
Zyngier val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC); 1087562e530fSMarc Zyngier val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1); 1088562e530fSMarc Zyngier } 1089c8857935SMarc Zyngier break; 1090c8857935SMarc Zyngier case SYS_ID_AA64PFR1_EL1: 109116dd1fbbSFuad Tabba if (!kvm_has_mte(vcpu->kvm)) 1092f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE); 109390807748SMark Brown 109490807748SMark Brown val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME); 1095c8857935SMarc Zyngier break; 1096c8857935SMarc Zyngier case SYS_ID_AA64ISAR1_EL1: 1097c8857935SMarc Zyngier if (!vcpu_has_ptrauth(vcpu)) 1098aa50479bSMark Brown val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | 1099aa50479bSMark Brown ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | 1100aa50479bSMark Brown ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | 1101aa50479bSMark Brown ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); 1102c8857935SMarc Zyngier break; 1103def8c222SVladimir Murzin case SYS_ID_AA64ISAR2_EL1: 1104def8c222SVladimir Murzin if (!vcpu_has_ptrauth(vcpu)) 1105b2d71f27SMark Brown val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 1106b2d71f27SMark Brown ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 110706e0b802SMarc Zyngier if (!cpus_have_final_cap(ARM64_HAS_WFXT)) 1108b2d71f27SMark Brown val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); 1109def8c222SVladimir Murzin break; 1110c8857935SMarc Zyngier case SYS_ID_AA64DFR0_EL1: 111194893fc9SMarc Zyngier /* Limit debug to ARMv8.0 */ 1112f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER); 1113f76f89e2SFuad Tabba val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6); 111446081078SMarc Zyngier /* Limit guests to PMUv3 for ARMv8.4 */ 1115c854188eSAndrew Murray val = cpuid_feature_cap_perfmon_field(val, 1116c854188eSAndrew Murray ID_AA64DFR0_PMUVER_SHIFT, 111746081078SMarc Zyngier kvm_vcpu_has_pmu(vcpu) ? 
ID_AA64DFR0_PMUVER_8_4 : 0); 111896f4f680SAlexandru Elisei /* Hide SPE from guests */ 1119f76f89e2SFuad Tabba val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER); 1120c8857935SMarc Zyngier break; 1121c8857935SMarc Zyngier case SYS_ID_DFR0_EL1: 112246081078SMarc Zyngier /* Limit guests to PMUv3 for ARMv8.4 */ 1123c854188eSAndrew Murray val = cpuid_feature_cap_perfmon_field(val, 1124c854188eSAndrew Murray ID_DFR0_PERFMON_SHIFT, 112546081078SMarc Zyngier kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0); 1126c8857935SMarc Zyngier break; 112707d79fe7SDave Martin } 112807d79fe7SDave Martin 112907d79fe7SDave Martin return val; 113093390c0aSDave Martin } 113193390c0aSDave Martin 1132912dee57SAndrew Jones static unsigned int id_visibility(const struct kvm_vcpu *vcpu, 1133912dee57SAndrew Jones const struct sys_reg_desc *r) 1134912dee57SAndrew Jones { 11357ba8b438SAlexandru Elisei u32 id = reg_to_encoding(r); 1136c512298eSAndrew Jones 1137c512298eSAndrew Jones switch (id) { 1138c512298eSAndrew Jones case SYS_ID_AA64ZFR0_EL1: 1139c512298eSAndrew Jones if (!vcpu_has_sve(vcpu)) 1140c512298eSAndrew Jones return REG_RAZ; 1141c512298eSAndrew Jones break; 1142c512298eSAndrew Jones } 1143c512298eSAndrew Jones 1144912dee57SAndrew Jones return 0; 1145912dee57SAndrew Jones } 1146912dee57SAndrew Jones 114734b4d203SOliver Upton static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, 114834b4d203SOliver Upton const struct sys_reg_desc *r) 114934b4d203SOliver Upton { 115034b4d203SOliver Upton return REG_RAZ; 115134b4d203SOliver Upton } 115234b4d203SOliver Upton 115393390c0aSDave Martin /* cpufeature ID register access trap handlers */ 115493390c0aSDave Martin 115593390c0aSDave Martin static bool access_id_reg(struct kvm_vcpu *vcpu, 115693390c0aSDave Martin struct sys_reg_params *p, 115793390c0aSDave Martin const struct sys_reg_desc *r) 115893390c0aSDave Martin { 11594782ccc8SOliver Upton if (p->is_write) 11604782ccc8SOliver Upton return write_to_read_only(vcpu, p, r); 
11614782ccc8SOliver Upton 1162*cdd5036dSOliver Upton p->regval = read_id_reg(vcpu, r); 11634782ccc8SOliver Upton return true; 116493390c0aSDave Martin } 116593390c0aSDave Martin 116673433762SDave Martin /* Visibility overrides for SVE-specific control registers */ 116773433762SDave Martin static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, 116873433762SDave Martin const struct sys_reg_desc *rd) 116973433762SDave Martin { 117073433762SDave Martin if (vcpu_has_sve(vcpu)) 117173433762SDave Martin return 0; 117273433762SDave Martin 117301fe5aceSAndrew Jones return REG_HIDDEN; 117473433762SDave Martin } 117573433762SDave Martin 117623711a5eSMarc Zyngier static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, 117723711a5eSMarc Zyngier const struct sys_reg_desc *rd, 1178978ceeb3SMarc Zyngier u64 val) 117923711a5eSMarc Zyngier { 11804f1df628SMarc Zyngier u8 csv2, csv3; 118123711a5eSMarc Zyngier 118223711a5eSMarc Zyngier /* 118323711a5eSMarc Zyngier * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as 118423711a5eSMarc Zyngier * it doesn't promise more than what is actually provided (the 118523711a5eSMarc Zyngier * guest could otherwise be covered in ectoplasmic residue). 
118623711a5eSMarc Zyngier */ 118723711a5eSMarc Zyngier csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT); 118823711a5eSMarc Zyngier if (csv2 > 1 || 118923711a5eSMarc Zyngier (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED)) 119023711a5eSMarc Zyngier return -EINVAL; 119123711a5eSMarc Zyngier 11924f1df628SMarc Zyngier /* Same thing for CSV3 */ 11934f1df628SMarc Zyngier csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV3_SHIFT); 11944f1df628SMarc Zyngier if (csv3 > 1 || 11954f1df628SMarc Zyngier (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED)) 11964f1df628SMarc Zyngier return -EINVAL; 11974f1df628SMarc Zyngier 11984f1df628SMarc Zyngier /* We can only differ with CSV[23], and anything else is an error */ 1199*cdd5036dSOliver Upton val ^= read_id_reg(vcpu, rd); 12004f1df628SMarc Zyngier val &= ~((0xFUL << ID_AA64PFR0_CSV2_SHIFT) | 12014f1df628SMarc Zyngier (0xFUL << ID_AA64PFR0_CSV3_SHIFT)); 120223711a5eSMarc Zyngier if (val) 120323711a5eSMarc Zyngier return -EINVAL; 120423711a5eSMarc Zyngier 120523711a5eSMarc Zyngier vcpu->kvm->arch.pfr0_csv2 = csv2; 12064f1df628SMarc Zyngier vcpu->kvm->arch.pfr0_csv3 = csv3; 120723711a5eSMarc Zyngier 120823711a5eSMarc Zyngier return 0; 120923711a5eSMarc Zyngier } 121023711a5eSMarc Zyngier 121193390c0aSDave Martin /* 121293390c0aSDave Martin * cpufeature ID register user accessors 121393390c0aSDave Martin * 121493390c0aSDave Martin * For now, these registers are immutable for userspace, so no values 121593390c0aSDave Martin * are stored, and for set_id_reg() we don't allow the effective value 121693390c0aSDave Martin * to be changed. 
121793390c0aSDave Martin */ 121893390c0aSDave Martin static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1219978ceeb3SMarc Zyngier u64 *val) 122093390c0aSDave Martin { 1221*cdd5036dSOliver Upton *val = read_id_reg(vcpu, rd); 12224782ccc8SOliver Upton return 0; 122393390c0aSDave Martin } 122493390c0aSDave Martin 122593390c0aSDave Martin static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1226978ceeb3SMarc Zyngier u64 val) 122793390c0aSDave Martin { 12284782ccc8SOliver Upton /* This is what we mean by invariant: you can't change it. */ 1229*cdd5036dSOliver Upton if (val != read_id_reg(vcpu, rd)) 12304782ccc8SOliver Upton return -EINVAL; 12314782ccc8SOliver Upton 12324782ccc8SOliver Upton return 0; 123393390c0aSDave Martin } 123493390c0aSDave Martin 12355a430976SAlexandru Elisei static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1236978ceeb3SMarc Zyngier u64 *val) 12375a430976SAlexandru Elisei { 1238978ceeb3SMarc Zyngier *val = 0; 1239978ceeb3SMarc Zyngier return 0; 12405a430976SAlexandru Elisei } 12415a430976SAlexandru Elisei 12427a3ba309SMarc Zyngier static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, 1243978ceeb3SMarc Zyngier u64 val) 12447a3ba309SMarc Zyngier { 12457a3ba309SMarc Zyngier return 0; 12467a3ba309SMarc Zyngier } 12477a3ba309SMarc Zyngier 1248f7f2b15cSArd Biesheuvel static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1249f7f2b15cSArd Biesheuvel const struct sys_reg_desc *r) 1250f7f2b15cSArd Biesheuvel { 1251f7f2b15cSArd Biesheuvel if (p->is_write) 1252f7f2b15cSArd Biesheuvel return write_to_read_only(vcpu, p, r); 1253f7f2b15cSArd Biesheuvel 1254f7f2b15cSArd Biesheuvel p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0); 1255f7f2b15cSArd Biesheuvel return true; 1256f7f2b15cSArd Biesheuvel } 1257f7f2b15cSArd Biesheuvel 1258f7f2b15cSArd Biesheuvel static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1259f7f2b15cSArd 
Biesheuvel const struct sys_reg_desc *r) 1260f7f2b15cSArd Biesheuvel { 1261f7f2b15cSArd Biesheuvel if (p->is_write) 1262f7f2b15cSArd Biesheuvel return write_to_read_only(vcpu, p, r); 1263f7f2b15cSArd Biesheuvel 1264f7f2b15cSArd Biesheuvel p->regval = read_sysreg(clidr_el1); 1265f7f2b15cSArd Biesheuvel return true; 1266f7f2b15cSArd Biesheuvel } 1267f7f2b15cSArd Biesheuvel 1268f7f2b15cSArd Biesheuvel static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1269f7f2b15cSArd Biesheuvel const struct sys_reg_desc *r) 1270f7f2b15cSArd Biesheuvel { 12717c582bf4SJames Morse int reg = r->reg; 12727c582bf4SJames Morse 1273f7f2b15cSArd Biesheuvel if (p->is_write) 12747c582bf4SJames Morse vcpu_write_sys_reg(vcpu, p->regval, reg); 1275f7f2b15cSArd Biesheuvel else 12767c582bf4SJames Morse p->regval = vcpu_read_sys_reg(vcpu, reg); 1277f7f2b15cSArd Biesheuvel return true; 1278f7f2b15cSArd Biesheuvel } 1279f7f2b15cSArd Biesheuvel 1280f7f2b15cSArd Biesheuvel static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, 1281f7f2b15cSArd Biesheuvel const struct sys_reg_desc *r) 1282f7f2b15cSArd Biesheuvel { 1283f7f2b15cSArd Biesheuvel u32 csselr; 1284f7f2b15cSArd Biesheuvel 1285f7f2b15cSArd Biesheuvel if (p->is_write) 1286f7f2b15cSArd Biesheuvel return write_to_read_only(vcpu, p, r); 1287f7f2b15cSArd Biesheuvel 1288f7f2b15cSArd Biesheuvel csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); 1289f7f2b15cSArd Biesheuvel p->regval = get_ccsidr(csselr); 1290793acf87SArd Biesheuvel 1291793acf87SArd Biesheuvel /* 1292793acf87SArd Biesheuvel * Guests should not be doing cache operations by set/way at all, and 1293793acf87SArd Biesheuvel * for this reason, we trap them and attempt to infer the intent, so 1294793acf87SArd Biesheuvel * that we can flush the entire guest's address space at the appropriate 1295793acf87SArd Biesheuvel * time. 
1296793acf87SArd Biesheuvel * To prevent this trapping from causing performance problems, let's 1297793acf87SArd Biesheuvel * expose the geometry of all data and unified caches (which are 1298793acf87SArd Biesheuvel * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way. 1299793acf87SArd Biesheuvel * [If guests should attempt to infer aliasing properties from the 1300793acf87SArd Biesheuvel * geometry (which is not permitted by the architecture), they would 1301793acf87SArd Biesheuvel * only do so for virtually indexed caches.] 1302793acf87SArd Biesheuvel */ 1303793acf87SArd Biesheuvel if (!(csselr & 1)) // data or unified cache 1304793acf87SArd Biesheuvel p->regval &= ~GENMASK(27, 3); 1305f7f2b15cSArd Biesheuvel return true; 1306f7f2b15cSArd Biesheuvel } 1307f7f2b15cSArd Biesheuvel 1308e1f358b5SSteven Price static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, 1309e1f358b5SSteven Price const struct sys_reg_desc *rd) 1310e1f358b5SSteven Price { 1311673638f4SSteven Price if (kvm_has_mte(vcpu->kvm)) 1312673638f4SSteven Price return 0; 1313673638f4SSteven Price 1314e1f358b5SSteven Price return REG_HIDDEN; 1315e1f358b5SSteven Price } 1316e1f358b5SSteven Price 1317e1f358b5SSteven Price #define MTE_REG(name) { \ 1318e1f358b5SSteven Price SYS_DESC(SYS_##name), \ 1319e1f358b5SSteven Price .access = undef_access, \ 1320e1f358b5SSteven Price .reset = reset_unknown, \ 1321e1f358b5SSteven Price .reg = name, \ 1322e1f358b5SSteven Price .visibility = mte_visibility, \ 1323e1f358b5SSteven Price } 1324e1f358b5SSteven Price 132593390c0aSDave Martin /* sys_reg_desc initialiser for known cpufeature ID registers */ 132693390c0aSDave Martin #define ID_SANITISED(name) { \ 132793390c0aSDave Martin SYS_DESC(SYS_##name), \ 132893390c0aSDave Martin .access = access_id_reg, \ 132993390c0aSDave Martin .get_user = get_id_reg, \ 133093390c0aSDave Martin .set_user = set_id_reg, \ 1331912dee57SAndrew Jones .visibility = id_visibility, \ 133293390c0aSDave Martin } 

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 * (lookup relies on this ordering -- do not reorder entries).
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
		SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_SANITISED(ID_ISAR6_EL1),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_SANITISED(ID_PFR2_EL1),
	ID_HIDDEN(ID_DFR1_EL1),
	ID_SANITISED(ID_MMFR5_EL1),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_SANITISED(ID_AA64ZFR0_EL1),
	ID_HIDDEN(ID_AA64SMFR0_EL1),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_SANITISED(ID_AA64ISAR2_EL1),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),

	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	PTRAUTH_KEY(APIA),
	PTRAUTH_KEY(APIB),
	PTRAUTH_KEY(APDA),
	PTRAUTH_KEY(APDB),
	PTRAUTH_KEY(APGA),

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	/* Statistical profiling (SPE) is not exposed to guests: UNDEF */
	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
	/* PMBIDR_EL1 is not trapped */

	/* Note: both SET and CLR accessors share the PMINTENSET_EL1 shadow */
	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },

	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
	  .reset = reset_pmcr, .reg = PMCR_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
	 */
	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
	  .get_user = get_raz_reg, .set_user = set_wi_reg,
	  .access = access_pmswinc, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMSELR_EL0),
	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
	  .access = access_pmceid, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
	  .access = access_pmu_evtyper, .reset = NULL },
	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
	  .access = access_pmu_evcntr, .reset = NULL },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },

	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

	/* Activity monitors (AMU) are not exposed to guests: UNDEF */
	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
	AMU_AMEVCNTR0_EL0(0),
	AMU_AMEVCNTR0_EL0(1),
	AMU_AMEVCNTR0_EL0(2),
	AMU_AMEVCNTR0_EL0(3),
	AMU_AMEVCNTR0_EL0(4),
	AMU_AMEVCNTR0_EL0(5),
	AMU_AMEVCNTR0_EL0(6),
	AMU_AMEVCNTR0_EL0(7),
	AMU_AMEVCNTR0_EL0(8),
	AMU_AMEVCNTR0_EL0(9),
	AMU_AMEVCNTR0_EL0(10),
	AMU_AMEVCNTR0_EL0(11),
	AMU_AMEVCNTR0_EL0(12),
	AMU_AMEVCNTR0_EL0(13),
	AMU_AMEVCNTR0_EL0(14),
	AMU_AMEVCNTR0_EL0(15),
	AMU_AMEVTYPER0_EL0(0),
	AMU_AMEVTYPER0_EL0(1),
	AMU_AMEVTYPER0_EL0(2),
	AMU_AMEVTYPER0_EL0(3),
	AMU_AMEVTYPER0_EL0(4),
	AMU_AMEVTYPER0_EL0(5),
	AMU_AMEVTYPER0_EL0(6),
	AMU_AMEVTYPER0_EL0(7),
	AMU_AMEVTYPER0_EL0(8),
	AMU_AMEVTYPER0_EL0(9),
	AMU_AMEVTYPER0_EL0(10),
	AMU_AMEVTYPER0_EL0(11),
	AMU_AMEVTYPER0_EL0(12),
	AMU_AMEVTYPER0_EL0(13),
	AMU_AMEVTYPER0_EL0(14),
	AMU_AMEVTYPER0_EL0(15),
	AMU_AMEVCNTR1_EL0(0),
	AMU_AMEVCNTR1_EL0(1),
	AMU_AMEVCNTR1_EL0(2),
	AMU_AMEVCNTR1_EL0(3),
	AMU_AMEVCNTR1_EL0(4),
	AMU_AMEVCNTR1_EL0(5),
	AMU_AMEVCNTR1_EL0(6),
	AMU_AMEVCNTR1_EL0(7),
	AMU_AMEVCNTR1_EL0(8),
	AMU_AMEVCNTR1_EL0(9),
	AMU_AMEVCNTR1_EL0(10),
	AMU_AMEVCNTR1_EL0(11),
	AMU_AMEVCNTR1_EL0(12),
	AMU_AMEVCNTR1_EL0(13),
	AMU_AMEVCNTR1_EL0(14),
	AMU_AMEVCNTR1_EL0(15),
	AMU_AMEVTYPER1_EL0(0),
	AMU_AMEVTYPER1_EL0(1),
	AMU_AMEVTYPER1_EL0(2),
	AMU_AMEVTYPER1_EL0(3),
	AMU_AMEVTYPER1_EL0(4),
	AMU_AMEVTYPER1_EL0(5),
	AMU_AMEVTYPER1_EL0(6),
	AMU_AMEVTYPER1_EL0(7),
	AMU_AMEVTYPER1_EL0(8),
	AMU_AMEVTYPER1_EL0(9),
	AMU_AMEVTYPER1_EL0(10),
	AMU_AMEVTYPER1_EL0(11),
	AMU_AMEVTYPER1_EL0(12),
	AMU_AMEVTYPER1_EL0(13),
	AMU_AMEVTYPER1_EL0(14),
	AMU_AMEVTYPER1_EL0(15),

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

/*
 * Synthesize a DBGDIDR value for AArch32 guests from the sanitised
 * AArch64 debug/PFR ID registers: breakpoint/watchpoint/context counts
 * come from ID_AA64DFR0_EL1, the SE bit from EL3 presence in
 * ID_AA64PFR0_EL1. Writes are ignored.
 * NOTE(review): the remaining constant bits (6 << 16 etc.) presumably
 * encode the debug architecture version fields -- confirm against the
 * Arm ARM DBGDIDR layout.
 */
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)						      \
	/* DBGBVRn */							      \
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	      \
	/* DBGWVRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	      \
	/* DBGWCRn */							      \
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							      \
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Common initialiser fragment for CP15 PMU registers: encodes the
 * coproc register coordinates and gates visibility on the vPMU.
 */
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)	\
	AA32(_map),					\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),	\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,			\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),		\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)					\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,			\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),		\
	  .access = access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 *
 * NOTE: like cp14_regs, this table must stay sorted by encoding;
 * check_sysreg_table() enforces the ordering at init time.
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
	/* ACTLR */
	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
	/* ACTLR2 */
	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
	/* TTBCR */
	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
	/* TTBCR2 */
	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
	/* DFSR */
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
	/* ADFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
	/* AIFSR */
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
	/* DFAR — shares the low half of FAR_EL1 with IFAR in the high half */
	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
	/* IFAR */
	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
	{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
	/* PMMIR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};

/* Trapped cp15 64bit (mcrr/mrrc) registers; sorted like cp15_regs. */
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

/*
 * Sanity-check a trap table: 64bit entries backed by a shadow register
 * must provide a reset hook, and all entries must be sorted so that
 * find_reg() can binary-search them.
 */
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;
	for (i = 0; i < n; i++) {
		/* 64bit tables: a backing register without a reset hook is a bug */
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		/* Entries must be strictly increasing for binary search */
		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

/* LDC/STC to cp14 is not supported: inject an UNDEF into the guest. */
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Run the accessor for a matched register, injecting an UNDEF if the
 * register is hidden by runtime configuration, and skipping the trapping
 * instruction when the accessor asks for it.
 */
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

/*
 * Report an unsupported CP14/CP15 access (deriving the coprocessor number
 * from the trapping exception class) and inject an UNDEF into the guest.
 */
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		/* Not reachable for the ECs this helper is called from */
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: table of trap descriptors to match the access against
 * @nr_global: number of entries in @global
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;	/* second GPR, ESR bits [14:10] */

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * from AArch32.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}

/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 * VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: decoded system register access parameters
 * @global: table of trap descriptors to match the access against
 * @nr_global: number of entries in @global
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		/* Only reads propagate a value back to the guest register */
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}

/* Op0==3 with CRn in {11,15} is the IMPLEMENTATION DEFINED encoding space */
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}

/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		/* IMPDEF registers UNDEF silently (no diagnostic) */
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}
	return false;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
2475750ed566SJames Morse */ 2476750ed566SJames Morse void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) 24777c8c5e6aSMarc Zyngier { 24787c8c5e6aSMarc Zyngier unsigned long i; 24797c8c5e6aSMarc Zyngier 2480750ed566SJames Morse for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) 2481750ed566SJames Morse if (sys_reg_descs[i].reset) 2482750ed566SJames Morse sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]); 24837c8c5e6aSMarc Zyngier } 24847c8c5e6aSMarc Zyngier 24857c8c5e6aSMarc Zyngier /** 24867c8c5e6aSMarc Zyngier * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access 24877c8c5e6aSMarc Zyngier * @vcpu: The VCPU pointer 24887c8c5e6aSMarc Zyngier */ 248974cc7e0cSTianjia Zhang int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) 24907c8c5e6aSMarc Zyngier { 24917c8c5e6aSMarc Zyngier struct sys_reg_params params; 24923a949f4cSGavin Shan unsigned long esr = kvm_vcpu_get_esr(vcpu); 2493c667186fSMarc Zyngier int Rt = kvm_vcpu_sys_get_rt(vcpu); 24947c8c5e6aSMarc Zyngier 2495eef8c85aSAlex Bennée trace_kvm_handle_sys_reg(esr); 2496eef8c85aSAlex Bennée 2497f76f89e2SFuad Tabba params = esr_sys64_to_params(esr); 24982ec5be3dSPavel Fedin params.regval = vcpu_get_reg(vcpu, Rt); 24997c8c5e6aSMarc Zyngier 250028eda7b5SOliver Upton if (!emulate_sys_reg(vcpu, ¶ms)) 250128eda7b5SOliver Upton return 1; 25022ec5be3dSPavel Fedin 25032ec5be3dSPavel Fedin if (!params.is_write) 25042ec5be3dSPavel Fedin vcpu_set_reg(vcpu, Rt, params.regval); 250528eda7b5SOliver Upton return 1; 25067c8c5e6aSMarc Zyngier } 25077c8c5e6aSMarc Zyngier 25087c8c5e6aSMarc Zyngier /****************************************************************************** 25097c8c5e6aSMarc Zyngier * Userspace API 25107c8c5e6aSMarc Zyngier *****************************************************************************/ 25117c8c5e6aSMarc Zyngier 25127c8c5e6aSMarc Zyngier static bool index_to_params(u64 id, struct sys_reg_params *params) 25137c8c5e6aSMarc Zyngier { 25147c8c5e6aSMarc Zyngier switch (id & KVM_REG_SIZE_MASK) { 
25157c8c5e6aSMarc Zyngier case KVM_REG_SIZE_U64: 25167c8c5e6aSMarc Zyngier /* Any unused index bits means it's not valid. */ 25177c8c5e6aSMarc Zyngier if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 25187c8c5e6aSMarc Zyngier | KVM_REG_ARM_COPROC_MASK 25197c8c5e6aSMarc Zyngier | KVM_REG_ARM64_SYSREG_OP0_MASK 25207c8c5e6aSMarc Zyngier | KVM_REG_ARM64_SYSREG_OP1_MASK 25217c8c5e6aSMarc Zyngier | KVM_REG_ARM64_SYSREG_CRN_MASK 25227c8c5e6aSMarc Zyngier | KVM_REG_ARM64_SYSREG_CRM_MASK 25237c8c5e6aSMarc Zyngier | KVM_REG_ARM64_SYSREG_OP2_MASK)) 25247c8c5e6aSMarc Zyngier return false; 25257c8c5e6aSMarc Zyngier params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) 25267c8c5e6aSMarc Zyngier >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); 25277c8c5e6aSMarc Zyngier params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) 25287c8c5e6aSMarc Zyngier >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); 25297c8c5e6aSMarc Zyngier params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) 25307c8c5e6aSMarc Zyngier >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); 25317c8c5e6aSMarc Zyngier params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) 25327c8c5e6aSMarc Zyngier >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); 25337c8c5e6aSMarc Zyngier params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) 25347c8c5e6aSMarc Zyngier >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); 25357c8c5e6aSMarc Zyngier return true; 25367c8c5e6aSMarc Zyngier default: 25377c8c5e6aSMarc Zyngier return false; 25387c8c5e6aSMarc Zyngier } 25397c8c5e6aSMarc Zyngier } 25407c8c5e6aSMarc Zyngier 2541da8d120fSMarc Zyngier const struct sys_reg_desc *get_reg_by_id(u64 id, 25424b927b94SVijaya Kumar K const struct sys_reg_desc table[], 25434b927b94SVijaya Kumar K unsigned int num) 25444b927b94SVijaya Kumar K { 2545da8d120fSMarc Zyngier struct sys_reg_params params; 2546da8d120fSMarc Zyngier 2547da8d120fSMarc Zyngier if (!index_to_params(id, ¶ms)) 25484b927b94SVijaya Kumar K return NULL; 25494b927b94SVijaya Kumar K 2550da8d120fSMarc Zyngier return find_reg(¶ms, table, num); 25514b927b94SVijaya Kumar K } 

/*
 * Decode an index value, and find the sys_reg_desc entry.
 * Returns NULL if the register is not accessible from userspace: either
 * it is not backed by vcpu state and has no custom accessor, or it is
 * hidden for this vcpu's feature configuration.
 */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

/*
 * Generates a get_<reg>() helper that snapshots the host's value of
 * <reg> into the (const-cast) descriptor's ->val field.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* CTR_EL0 uses the system-wide sanitised value, not the raw host one. */
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

/* Copy an invariant register's cached value out to userspace. */
static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

/* "Set" an invariant register: only succeeds if the value is unchanged. */
static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level.
 */
	level = (val >> 1);
	/* cache_levels: file-scope bitmap of 3-bit Ctype fields, built in
	 * kvm_sys_reg_table_init() from the host CLIDR. */
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

/* Read a demuxed (CCSIDR) register out to userspace as a 32-bit value. */
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set.
 */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		/* The demux value encodes the CSSELR cache selector. */
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

/* Write a demuxed (CCSIDR) register: invariant, so only identical values pass. */
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set.
 */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

/*
 * Fetch one register from @table for userspace: a custom ->get_user
 * accessor takes precedence over the vcpu's shadow sys_reg array.
 */
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r)
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val =
__vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

/*
 * KVM_GET_ONE_REG entry point: try the demux space, then the invariant
 * registers, and finally the main sys_reg_descs table.
 */
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	/* -ENOENT means "not an invariant register", so keep looking. */
	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

/*
 * Store one register from userspace into @table state: a custom
 * ->set_user accessor takes precedence over the vcpu's shadow array.
 */
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r)
		return -ENOENT;

	if (r->set_user) {
		ret =
(!is_valid_cache(i)) 28237c8c5e6aSMarc Zyngier continue; 28247c8c5e6aSMarc Zyngier if (put_user(val | i, uindices)) 28257c8c5e6aSMarc Zyngier return -EFAULT; 28267c8c5e6aSMarc Zyngier uindices++; 28277c8c5e6aSMarc Zyngier } 28287c8c5e6aSMarc Zyngier return 0; 28297c8c5e6aSMarc Zyngier } 28307c8c5e6aSMarc Zyngier 28317c8c5e6aSMarc Zyngier static u64 sys_reg_to_index(const struct sys_reg_desc *reg) 28327c8c5e6aSMarc Zyngier { 28337c8c5e6aSMarc Zyngier return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 28347c8c5e6aSMarc Zyngier KVM_REG_ARM64_SYSREG | 28357c8c5e6aSMarc Zyngier (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | 28367c8c5e6aSMarc Zyngier (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | 28377c8c5e6aSMarc Zyngier (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | 28387c8c5e6aSMarc Zyngier (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | 28397c8c5e6aSMarc Zyngier (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); 28407c8c5e6aSMarc Zyngier } 28417c8c5e6aSMarc Zyngier 28427c8c5e6aSMarc Zyngier static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) 28437c8c5e6aSMarc Zyngier { 28447c8c5e6aSMarc Zyngier if (!*uind) 28457c8c5e6aSMarc Zyngier return true; 28467c8c5e6aSMarc Zyngier 28477c8c5e6aSMarc Zyngier if (put_user(sys_reg_to_index(reg), *uind)) 28487c8c5e6aSMarc Zyngier return false; 28497c8c5e6aSMarc Zyngier 28507c8c5e6aSMarc Zyngier (*uind)++; 28517c8c5e6aSMarc Zyngier return true; 28527c8c5e6aSMarc Zyngier } 28537c8c5e6aSMarc Zyngier 28547f34e409SDave Martin static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, 28557f34e409SDave Martin const struct sys_reg_desc *rd, 285693390c0aSDave Martin u64 __user **uind, 285793390c0aSDave Martin unsigned int *total) 285893390c0aSDave Martin { 285993390c0aSDave Martin /* 286093390c0aSDave Martin * Ignore registers we trap but don't save, 286193390c0aSDave Martin * and for which no custom user accessor is provided. 
286293390c0aSDave Martin */ 286393390c0aSDave Martin if (!(rd->reg || rd->get_user)) 286493390c0aSDave Martin return 0; 286593390c0aSDave Martin 286601fe5aceSAndrew Jones if (sysreg_hidden(vcpu, rd)) 28677f34e409SDave Martin return 0; 28687f34e409SDave Martin 286993390c0aSDave Martin if (!copy_reg_to_user(rd, uind)) 287093390c0aSDave Martin return -EFAULT; 287193390c0aSDave Martin 287293390c0aSDave Martin (*total)++; 287393390c0aSDave Martin return 0; 287493390c0aSDave Martin } 287593390c0aSDave Martin 28767c8c5e6aSMarc Zyngier /* Assumed ordered tables, see kvm_sys_reg_table_init. */ 28777c8c5e6aSMarc Zyngier static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) 28787c8c5e6aSMarc Zyngier { 2879dcaffa7bSJames Morse const struct sys_reg_desc *i2, *end2; 28807c8c5e6aSMarc Zyngier unsigned int total = 0; 288193390c0aSDave Martin int err; 28827c8c5e6aSMarc Zyngier 28837c8c5e6aSMarc Zyngier i2 = sys_reg_descs; 28847c8c5e6aSMarc Zyngier end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); 28857c8c5e6aSMarc Zyngier 2886dcaffa7bSJames Morse while (i2 != end2) { 2887dcaffa7bSJames Morse err = walk_one_sys_reg(vcpu, i2++, &uind, &total); 288893390c0aSDave Martin if (err) 288993390c0aSDave Martin return err; 28907c8c5e6aSMarc Zyngier } 28917c8c5e6aSMarc Zyngier return total; 28927c8c5e6aSMarc Zyngier } 28937c8c5e6aSMarc Zyngier 28947c8c5e6aSMarc Zyngier unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) 28957c8c5e6aSMarc Zyngier { 28967c8c5e6aSMarc Zyngier return ARRAY_SIZE(invariant_sys_regs) 28977c8c5e6aSMarc Zyngier + num_demux_regs() 28987c8c5e6aSMarc Zyngier + walk_sys_regs(vcpu, (u64 __user *)NULL); 28997c8c5e6aSMarc Zyngier } 29007c8c5e6aSMarc Zyngier 29017c8c5e6aSMarc Zyngier int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 29027c8c5e6aSMarc Zyngier { 29037c8c5e6aSMarc Zyngier unsigned int i; 29047c8c5e6aSMarc Zyngier int err; 29057c8c5e6aSMarc Zyngier 29067c8c5e6aSMarc Zyngier /* Then give them all the invariant 
registers' indices. */ 29077c8c5e6aSMarc Zyngier for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { 29087c8c5e6aSMarc Zyngier if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) 29097c8c5e6aSMarc Zyngier return -EFAULT; 29107c8c5e6aSMarc Zyngier uindices++; 29117c8c5e6aSMarc Zyngier } 29127c8c5e6aSMarc Zyngier 29137c8c5e6aSMarc Zyngier err = walk_sys_regs(vcpu, uindices); 29147c8c5e6aSMarc Zyngier if (err < 0) 29157c8c5e6aSMarc Zyngier return err; 29167c8c5e6aSMarc Zyngier uindices += err; 29177c8c5e6aSMarc Zyngier 29187c8c5e6aSMarc Zyngier return write_demux_regids(uindices); 29197c8c5e6aSMarc Zyngier } 29207c8c5e6aSMarc Zyngier 2921f1f0c0cfSAlexandru Elisei int kvm_sys_reg_table_init(void) 29227c8c5e6aSMarc Zyngier { 2923f1f0c0cfSAlexandru Elisei bool valid = true; 29247c8c5e6aSMarc Zyngier unsigned int i; 29257c8c5e6aSMarc Zyngier struct sys_reg_desc clidr; 29267c8c5e6aSMarc Zyngier 29277c8c5e6aSMarc Zyngier /* Make sure tables are unique and in order. */ 2928f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false); 2929f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true); 2930f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true); 2931f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true); 2932f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true); 2933f1f0c0cfSAlexandru Elisei valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false); 2934f1f0c0cfSAlexandru Elisei 2935f1f0c0cfSAlexandru Elisei if (!valid) 2936f1f0c0cfSAlexandru Elisei return -EINVAL; 29377c8c5e6aSMarc Zyngier 29387c8c5e6aSMarc Zyngier /* We abuse the reset function to overwrite the table itself. 
*/ 29397c8c5e6aSMarc Zyngier for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) 29407c8c5e6aSMarc Zyngier invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); 29417c8c5e6aSMarc Zyngier 29427c8c5e6aSMarc Zyngier /* 29437c8c5e6aSMarc Zyngier * CLIDR format is awkward, so clean it up. See ARM B4.1.20: 29447c8c5e6aSMarc Zyngier * 29457c8c5e6aSMarc Zyngier * If software reads the Cache Type fields from Ctype1 29467c8c5e6aSMarc Zyngier * upwards, once it has seen a value of 0b000, no caches 29477c8c5e6aSMarc Zyngier * exist at further-out levels of the hierarchy. So, for 29487c8c5e6aSMarc Zyngier * example, if Ctype3 is the first Cache Type field with a 29497c8c5e6aSMarc Zyngier * value of 0b000, the values of Ctype4 to Ctype7 must be 29507c8c5e6aSMarc Zyngier * ignored. 29517c8c5e6aSMarc Zyngier */ 29527c8c5e6aSMarc Zyngier get_clidr_el1(NULL, &clidr); /* Ugly... */ 29537c8c5e6aSMarc Zyngier cache_levels = clidr.val; 29547c8c5e6aSMarc Zyngier for (i = 0; i < 7; i++) 29557c8c5e6aSMarc Zyngier if (((cache_levels >> (i*3)) & 7) == 0) 29567c8c5e6aSMarc Zyngier break; 29577c8c5e6aSMarc Zyngier /* Clear all higher bits. */ 29587c8c5e6aSMarc Zyngier cache_levels &= (1 << (i*3))-1; 2959f1f0c0cfSAlexandru Elisei 2960f1f0c0cfSAlexandru Elisei return 0; 29617c8c5e6aSMarc Zyngier } 2962