Lines Matching: "lock", "detect", "precision", "6", "ns", "enable"
6 * SPDX-License-Identifier: GPL-2.0-or-later
14 #include "cpu-features.h"
15 #include "exec/helper-proto.h"
16 #include "qemu/main-loop.h"
20 #include "qemu/qemu-print.h"
21 #include "exec/exec-all.h"
24 #include "sysemu/cpu-timers.h"
28 #include "qemu/guest-random.h"
30 #include "semihosting/common-semi.h"
41 assert(ri->fieldoffset); in raw_read()
51 assert(ri->fieldoffset); in raw_write()
61 return (char *)env + ri->fieldoffset; in raw_ptr()
67 if (ri->type & ARM_CP_CONST) { in read_raw_cp_reg()
68 return ri->resetvalue; in read_raw_cp_reg()
69 } else if (ri->raw_readfn) { in read_raw_cp_reg()
70 return ri->raw_readfn(env, ri); in read_raw_cp_reg()
71 } else if (ri->readfn) { in read_raw_cp_reg()
72 return ri->readfn(env, ri); in read_raw_cp_reg()
83 * Note that constant registers are treated as write-ignored; the in write_raw_cp_reg()
87 if (ri->type & ARM_CP_CONST) { in write_raw_cp_reg()
89 } else if (ri->raw_writefn) { in write_raw_cp_reg()
90 ri->raw_writefn(env, ri, v); in write_raw_cp_reg()
91 } else if (ri->writefn) { in write_raw_cp_reg()
92 ri->writefn(env, ri, v); in write_raw_cp_reg()
112 if ((ri->type & ARM_CP_CONST) || in raw_accessors_invalid()
113 ri->fieldoffset || in raw_accessors_invalid()
114 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { in raw_accessors_invalid()
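
The dispatch above follows a fixed order: constant registers return their reset value, a dedicated raw accessor wins over the normal one, and otherwise the state field is read directly. A minimal standalone sketch of that order, with simplified stand-in types (not QEMU's ARMCPRegInfo):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct Reg {
        int      is_const;                 /* stands in for ARM_CP_CONST */
        uint64_t resetvalue;
        uint64_t (*raw_readfn)(void *env, const struct Reg *ri);
        uint64_t (*readfn)(void *env, const struct Reg *ri);
        size_t   fieldoffset;
    } Reg;

    static uint64_t read_raw(void *env, const Reg *ri)
    {
        if (ri->is_const) {
            return ri->resetvalue;           /* constant register */
        } else if (ri->raw_readfn) {
            return ri->raw_readfn(env, ri);  /* dedicated raw accessor */
        } else if (ri->readfn) {
            return ri->readfn(env, ri);      /* fall back to normal accessor */
        }
        return *(uint64_t *)((char *)env + ri->fieldoffset);  /* direct field */
    }
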
122 /* Write the coprocessor state from cpu->env to the (index,value) list. */ in write_cpustate_to_list()
126 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_cpustate_to_list()
127 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_cpustate_to_list()
131 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_cpustate_to_list()
136 if (ri->type & ARM_CP_NO_RAW) { in write_cpustate_to_list()
140 newval = read_raw_cp_reg(&cpu->env, ri); in write_cpustate_to_list()
143 * Only sync if the previous list->cpustate sync succeeded. in write_cpustate_to_list()
148 uint64_t oldval = cpu->cpreg_values[i]; in write_cpustate_to_list()
154 write_raw_cp_reg(&cpu->env, ri, oldval); in write_cpustate_to_list()
155 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { in write_cpustate_to_list()
159 write_raw_cp_reg(&cpu->env, ri, newval); in write_cpustate_to_list()
161 cpu->cpreg_values[i] = newval; in write_cpustate_to_list()
171 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_list_to_cpustate()
172 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_list_to_cpustate()
173 uint64_t v = cpu->cpreg_values[i]; in write_list_to_cpustate()
176 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_list_to_cpustate()
181 if (ri->type & ARM_CP_NO_RAW) { in write_list_to_cpustate()
186 * (to catch read-only registers and partially read-only in write_list_to_cpustate()
189 write_raw_cp_reg(&cpu->env, ri, v); in write_list_to_cpustate()
190 if (read_raw_cp_reg(&cpu->env, ri) != v) { in write_list_to_cpustate()
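
The write-then-read-back step above is the whole verification: if the read does not return what was written, some bits were ignored, which flags read-only or partially read-only registers. A hedged sketch, reusing the Reg stand-in from the earlier sketch and assuming a matching write_raw():

    static int sync_one(void *env, const Reg *ri, uint64_t v)
    {
        write_raw(env, ri, v);     /* attempt the raw write */
        if (read_raw(env, ri) != v) {
            return 0;              /* some bits ignored: RO or partial */
        }
        return 1;                  /* value stuck: sync succeeded */
    }
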
201 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in add_cpreg_to_list()
203 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in add_cpreg_to_list()
204 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); in add_cpreg_to_list()
206 cpu->cpreg_array_len++; in add_cpreg_to_list()
215 ri = g_hash_table_lookup(cpu->cp_regs, key); in count_cpreg()
217 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in count_cpreg()
218 cpu->cpreg_array_len++; in count_cpreg()
231 return -1; in cpreg_key_compare()
245 keys = g_hash_table_get_keys(cpu->cp_regs); in init_cpreg_list()
248 cpu->cpreg_array_len = 0; in init_cpreg_list()
252 arraylen = cpu->cpreg_array_len; in init_cpreg_list()
253 cpu->cpreg_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
254 cpu->cpreg_values = g_new(uint64_t, arraylen); in init_cpreg_list()
255 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
256 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); in init_cpreg_list()
257 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; in init_cpreg_list()
258 cpu->cpreg_array_len = 0; in init_cpreg_list()
262 assert(cpu->cpreg_array_len == arraylen); in init_cpreg_list()
273 return env->pstate & PSTATE_PAN; in arm_pan_enabled()
275 return env->uncached_cpsr & CPSR_PAN; in arm_pan_enabled()
280 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
294 * Some secure-only AArch32 registers trap to EL3 if used from
295 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
307 if (env->cp15.scr_el3 & SCR_EEL2) { in access_trap_aa32s_el1()
312 /* This will be EL1 NS and EL2 NS, which just UNDEF */ in access_trap_aa32s_el1()
329 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { in access_tpm()
449 * writes, so only needs to apply to NS PL1&0, not S PL1&0. in alle1_tlbmask()
493 * Non-IS variants of TLB operations are upgraded to
628 * Define the secure and non-secure FCSE identifier CP registers
631 * v8 EL1 version of the register so the non-secure instance stands alone.
644 * Define the secure and non-secure context identifier CP registers
647 * non-secure case, the 32-bit register will have reset and migration
648 * disabled during registration as it is handled by the 64-bit instance.
698 * Not all pre-v6 cores implemented this WFI, so this is slightly
699 * over-broad.
707 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
728 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
737 * the unified TLB ops but also the dside/iside/inner-shareable variants.
782 * VFPv3 and upwards with NEON implement 32 double precision in cpacr_write()
783 * registers (D0-D31). in cpacr_write()
786 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ in cpacr_write()
794 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_write()
798 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_write()
800 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); in cpacr_write()
803 env->cp15.cpacr_el1 = value; in cpacr_write()
809 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_read()
812 uint64_t value = env->cp15.cpacr_el1; in cpacr_read()
815 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_read()
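
The masking in cpacr_write/cpacr_read above implements a RAZ/WI view: with AArch32 EL3, Non-secure state and NSACR.CP10 == 0, the cp10/cp11 enable fields (CPACR[23:20]) keep their stored value on writes and read as zero. A sketch of the write half, assuming that field placement:

    #include <stdint.h>

    static uint32_t mask_cpacr_write(uint32_t stored, uint32_t value,
                                     int nsacr_cp10)
    {
        if (!nsacr_cp10) {
            uint32_t mask = 0xf << 20;                  /* cp10 + cp11 fields */
            value = (value & ~mask) | (stored & mask);  /* writes ignored */
        }
        return value;
    }
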
837 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { in cpacr_access()
841 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cpacr_access()
854 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cptr_access()
867 * We need to break the TB after ISB to execute self-modifying code
877 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
886 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
930 return -1; in swinc_ns_per()
993 return -1; in zero_event_ns_per()
1052 * Empty supported_event_map and cpu->pmceid[01] before adding supported in pmu_init()
1058 cpu->pmceid0 = 0; in pmu_init()
1059 cpu->pmceid1 = 0; in pmu_init()
1063 assert(cnt->number <= MAX_EVENT_ID); in pmu_init()
1065 assert(cnt->number <= 0x3f); in pmu_init()
1067 if (cnt->supported(&cpu->env)) { in pmu_init()
1068 supported_event_map[cnt->number] = i; in pmu_init()
1069 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); in pmu_init()
1070 if (cnt->number & 0x20) { in pmu_init()
1071 cpu->pmceid1 |= event_mask; in pmu_init()
1073 cpu->pmceid0 |= event_mask; in pmu_init()
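
The bit juggling above builds the two 32-bit PMCEID views: event numbers 0..31 land in PMCEID0 and 32..63 in PMCEID1, with the low five bits of the number selecting the bit position. A standalone sketch:

    #include <stdint.h>

    static void mark_supported(uint64_t *pmceid0, uint64_t *pmceid1,
                               unsigned number /* 0..63 */)
    {
        uint64_t bit = 1ULL << (number & 0x1f);  /* low 5 bits: bit index */
        if (number & 0x20) {                     /* bit 5 picks the register */
            *pmceid1 |= bit;
        } else {
            *pmceid0 |= bit;
        }
    }
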
1101 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { in pmreg_access()
1107 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { in pmreg_access()
1121 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 in pmreg_access_xevcntr()
1136 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 in pmreg_access_swinc()
1151 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { in pmreg_access_selr()
1165 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 in pmreg_access_ccntr()
1197 * We might be called for M-profile cores where MDCR_EL2 doesn't in pmu_counter_enabled()
1198 * exist and arm_mdcr_el2_eff() will assert, so this early-exit check in pmu_counter_enabled()
1210 e = env->cp15.c9_pmcr & PMCRE; in pmu_counter_enabled()
1214 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); in pmu_counter_enabled()
1221 prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); in pmu_counter_enabled()
1230 prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; in pmu_counter_enabled()
1233 prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); in pmu_counter_enabled()
1242 filter = env->cp15.pmccfiltr_el0; in pmu_counter_enabled()
1244 filter = env->cp15.c14_pmevtyper[counter]; in pmu_counter_enabled()
1282 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && in pmu_update_irq()
1283 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); in pmu_update_irq()
1291 * controlled by PMCR.D, but if PMCR.LC is set to enable the long in pmccntr_clockdiv_enabled()
1292 * (64-bit) cycle counter PMCR.D has no effect. in pmccntr_clockdiv_enabled()
1294 return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; in pmccntr_clockdiv_enabled()
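
The (PMCRD | PMCRLC) == PMCRD test above is the compact form of "divider on only when D is set and LC is clear". A sketch using the architectural bit positions (PMCR.D is bit 3, PMCR.LC is bit 6):

    #include <stdint.h>

    #define PMCRD  (1u << 3)
    #define PMCRLC (1u << 6)

    static uint64_t effective_cycles(uint64_t cycles, uint32_t pmcr)
    {
        if ((pmcr & (PMCRD | PMCRLC)) == PMCRD) {
            return cycles / 64;   /* divider active */
        }
        return cycles;            /* LC set (or D clear): full rate */
    }
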
1313 bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; in pmevcntr_is_64_bit()
1314 int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; in pmevcntr_is_64_bit()
1320 return env->cp15.c9_pmcr & PMCRLP; in pmevcntr_is_64_bit()
1324 * Ensure c15_ccnt is the guest-visible count so that operations such as
1326 * etc. can be done logically. This is essentially a no-op if the counter is
1339 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; in pmccntr_op_start()
1341 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? in pmccntr_op_start()
1343 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { in pmccntr_op_start()
1344 env->cp15.c9_pmovsr |= (1ULL << 31); in pmccntr_op_start()
1348 env->cp15.c15_ccnt = new_pmccntr; in pmccntr_op_start()
1350 env->cp15.c15_ccnt_delta = cycles; in pmccntr_op_start()
1355 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1363 uint64_t remaining_cycles = -env->cp15.c15_ccnt; in pmccntr_op_finish()
1364 if (!(env->cp15.c9_pmcr & PMCRLC)) { in pmccntr_op_finish()
1375 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); in pmccntr_op_finish()
1380 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; in pmccntr_op_finish()
1384 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; in pmccntr_op_finish()
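
The start/finish pair above converts between two representations: while "started", c15_ccnt holds the guest-visible count and the delta holds the raw total; on finish the delta is refolded so later starts can recompute the visible count, honoring any guest writes in between. A simplified sketch that ignores the enable check and clock divider:

    #include <stdint.h>

    typedef struct { uint64_t ccnt; uint64_t delta; } Counter;

    /* While stopped, the invariant is: visible = raw_total - delta. */
    static void op_start(Counter *c, uint64_t raw_total)
    {
        c->ccnt  = raw_total - c->delta;  /* materialize visible count */
        c->delta = raw_total;             /* stash the raw total */
    }

    /* Guest reads/writes of c->ccnt happen between start and finish. */
    static void op_finish(Counter *c)
    {
        c->delta -= c->ccnt;              /* refold: raw total - visible */
    }
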
1391 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; in pmevcntr_op_start()
1399 uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; in pmevcntr_op_start()
1403 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { in pmevcntr_op_start()
1404 env->cp15.c9_pmovsr |= (1 << counter); in pmevcntr_op_start()
1407 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; in pmevcntr_op_start()
1409 env->cp15.c14_pmevcntr_delta[counter] = count; in pmevcntr_op_start()
1416 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; in pmevcntr_op_finish()
1418 uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); in pmevcntr_op_finish()
1432 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); in pmevcntr_op_finish()
1437 env->cp15.c14_pmevcntr_delta[counter] -= in pmevcntr_op_finish()
1438 env->cp15.c14_pmevcntr[counter]; in pmevcntr_op_finish()
1462 pmu_op_start(&cpu->env); in pmu_pre_el_change()
1467 pmu_op_finish(&cpu->env); in pmu_post_el_change()
1477 * has the effect of setting the cpu->pmu_timer to the next earliest time a in arm_pmu_timer_cb()
1480 pmu_op_start(&cpu->env); in arm_pmu_timer_cb()
1481 pmu_op_finish(&cpu->env); in arm_pmu_timer_cb()
1491 env->cp15.c15_ccnt = 0; in pmcr_write()
1497 env->cp15.c14_pmevcntr[i] = 0; in pmcr_write()
1501 env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; in pmcr_write()
1502 env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); in pmcr_write()
1509 uint64_t pmcr = env->cp15.c9_pmcr; in pmcr_read()
1517 pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; in pmcr_read()
1535 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { in pmswinc_write()
1539 * Detect if this write causes an overflow since we can't predict in pmswinc_write()
1542 new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; in pmswinc_write()
1547 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { in pmswinc_write()
1548 env->cp15.c9_pmovsr |= (1 << i); in pmswinc_write()
1552 env->cp15.c14_pmevcntr[i] = new_pmswinc; in pmswinc_write()
1563 ret = env->cp15.c15_ccnt; in pmccntr_read()
1577 env->cp15.c9_pmselr = value & 0x1f; in pmselr_write()
1584 env->cp15.c15_ccnt = value; in pmccntr_write()
1600 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; in pmccfiltr_write()
1609 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | in pmccfiltr_write_a32()
1617 return env->cp15.pmccfiltr_el0 & PMCCFILTR; in pmccfiltr_read_a32()
1625 env->cp15.c9_pmcnten |= value; in pmcntenset_write()
1634 env->cp15.c9_pmcnten &= ~value; in pmcntenclr_write()
1642 env->cp15.c9_pmovsr &= ~value; in pmovsr_write()
1650 env->cp15.c9_pmovsr |= value; in pmovsset_write()
1668 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & in pmevtyper_write()
1677 env->cp15.c14_pmevcntr_delta[counter] = count; in pmevtyper_write()
1680 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; in pmevtyper_write()
1694 return env->cp15.pmccfiltr_el0; in pmevtyper_read()
1696 return env->cp15.c14_pmevtyper[counter]; in pmevtyper_read()
1709 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_writefn()
1716 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_rawwrite()
1717 env->cp15.c14_pmevtyper[counter] = value; in pmevtyper_rawwrite()
1731 env->cp15.c14_pmevcntr_delta[counter] = in pmevtyper_rawwrite()
1738 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevtyper_readfn()
1745 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); in pmxevtyper_write()
1750 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); in pmxevtyper_read()
1762 env->cp15.c14_pmevcntr[counter] = value; in pmevcntr_write()
1777 ret = env->cp15.c14_pmevcntr[counter]; in pmevcntr_read()
1796 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_writefn()
1802 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_readfn()
1809 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_rawwrite()
1811 env->cp15.c14_pmevcntr[counter] = value; in pmevcntr_rawwrite()
1817 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); in pmevcntr_rawread()
1819 return env->cp15.c14_pmevcntr[counter]; in pmevcntr_rawread()
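
The expression repeated in every accessor above decodes which of the event counters is addressed: two CRm bits form the high part of the index and three opc2 bits the low part. A sketch:

    static unsigned pmev_index(unsigned crm, unsigned opc2)
    {
        return ((crm & 3) << 3) | (opc2 & 7);   /* counter 0..31 */
    }
    /* e.g. crm = 2, opc2 = 5  ->  counter 21 */
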
1825 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); in pmxevcntr_write()
1830 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); in pmxevcntr_read()
1837 env->cp15.c9_pmuserenr = value & 0xf; in pmuserenr_write()
1839 env->cp15.c9_pmuserenr = value & 1; in pmuserenr_write()
1848 env->cp15.c9_pminten |= value; in pmintenset_write()
1856 env->cp15.c9_pminten &= ~value; in pmintenclr_write()
1905 /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */ in scr_write()
1955 /* Clear all-context RES0 bits. */ in scr_write()
1957 changed = env->cp15.scr_el3 ^ value; in scr_write()
1958 env->cp15.scr_el3 = value; in scr_write()
1961 * If SCR_EL3.{NS,NSE} changes, i.e. change of security state, in scr_write()
1978 * scr_write will set the RES1 bits on an AArch64-only CPU. in scr_reset()
1979 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. in scr_reset()
2005 ri->secure & ARM_CP_SECSTATE_S); in ccsidr_read()
2007 return cpu->ccsidr[index]; in ccsidr_read()
2024 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { in isr_read()
2027 if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { in isr_read()
2032 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { in isr_read()
2036 if (cs->interrupt_request & CPU_INTERRUPT_NMI) { in isr_read()
2043 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { in isr_read()
2046 if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { in isr_read()
2051 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { in isr_read()
2057 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { in isr_read()
2295 * MAIR can just read-as-written because we don't implement caches
2306 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2310 * For non-long-descriptor page tables these are PRRR and NMRR;
2311 * regardless they still act as reads-as-written for QEMU.
2314 * MAIR0/1 are defined separately from their 64-bit counterpart which
2345 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2348 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2351 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2408 env->teecr = value; in teecr_write()
2419 (env->cp15.hstr_el2 & HSTR_TTEE)) { in teecr_access()
2428 if (arm_current_el(env) == 0 && (env->teecr & 1)) { in teehbr_access()
2435 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2439 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2484 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; in arm_gt_cntfrq_reset()
2504 cntkctl = env->cp15.cnthctl_el2; in gt_cntfrq_access()
2506 cntkctl = env->cp15.c14_cntkctl; in gt_cntfrq_access()
2513 if (!isread && ri->state == ARM_CP_STATE_AA32 && in gt_cntfrq_access()
2515 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ in gt_cntfrq_access()
2542 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) in gt_counter_access()
2547 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { in gt_counter_access()
2555 ? !extract32(env->cp15.cnthctl_el2, 10, 1) in gt_counter_access()
2556 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { in gt_counter_access()
2560 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { in gt_counter_access()
2580 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) in gt_timer_access()
2588 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { in gt_timer_access()
2597 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { in gt_timer_access()
2602 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { in gt_timer_access()
2608 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { in gt_timer_access()
2660 if (!(env->cp15.scr_el3 & SCR_ST)) { in gt_stimer_access()
2703 if (env->cp15.scr_el3 & SCR_EEL2) { in gt_sel2timer_access()
2722 CPUARMState *env = &cpu->env; in gt_update_irq()
2723 uint64_t cnthctl = env->cp15.cnthctl_el2; in gt_update_irq()
2726 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; in gt_update_irq()
2738 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); in gt_update_irq()
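
The (ctl & 6) == 4 test above reads two CNTx_CTL bits at once: bit 2 (ISTATUS) must be set and bit 1 (IMASK) clear for the interrupt line to assert. Spelled out:

    static int timer_irq_line(unsigned ctl)
    {
        return (ctl & 6) == 4;   /* ISTATUS == 1 && IMASK == 0 */
    }
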
2755 if ((env->cp15.scr_el3 & SCR_ECVEN) && in gt_phys_raw_cnt_offset()
2756 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && in gt_phys_raw_cnt_offset()
2759 return env->cp15.cntpoff_el2; in gt_phys_raw_cnt_offset()
2780 return env->cp15.cntvoff_el2; in gt_indirect_access_timer_offset()
2799 * This isn't exactly the same as the indirect-access offset, in gt_direct_access_timer_offset()
2828 return env->cp15.cntvoff_el2; in gt_direct_access_timer_offset()
2842 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; in gt_recalc_timer()
2844 if (gt->ctl & 1) { in gt_recalc_timer()
2849 uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx); in gt_recalc_timer()
2850 uint64_t count = gt_get_countervalue(&cpu->env); in gt_recalc_timer()
2852 int istatus = count - offset >= gt->cval; in gt_recalc_timer()
2855 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); in gt_recalc_timer()
2859 * Next transition is when (count - offset) rolls back over to 0. in gt_recalc_timer()
2872 * Next transition is when (count - offset) == cval, i.e. in gt_recalc_timer()
2877 if (uadd64_overflow(gt->cval, offset, &nexttick)) { in gt_recalc_timer()
2883 * signed-64-bit range of a QEMUTimer -- in this case we just in gt_recalc_timer()
2888 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); in gt_recalc_timer()
2890 timer_mod(cpu->gt_timer[timeridx], nexttick); in gt_recalc_timer()
2895 gt->ctl &= ~4; in gt_recalc_timer()
2896 timer_del(cpu->gt_timer[timeridx]); in gt_recalc_timer()
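
The deadline computed above is the absolute count at which (count - offset) == cval, i.e. cval + offset, clamped when the unsigned sum overflows past what a signed 64-bit QEMUTimer deadline can hold. A sketch of just that arithmetic (the real code also converts ticks to nanoseconds):

    #include <stdint.h>

    static uint64_t next_deadline(uint64_t cval, uint64_t offset)
    {
        uint64_t nexttick = cval + offset;
        if (nexttick < cval) {      /* unsigned wrap: effectively never */
            nexttick = INT64_MAX;   /* clamp, as the code above does */
        }
        return nexttick;
    }
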
2907 timer_del(cpu->gt_timer[timeridx]); in gt_timer_reset()
2913 return gt_get_countervalue(env) - offset; in gt_cnt_read()
2919 return gt_get_countervalue(env) - offset; in gt_virt_cnt_read()
2927 env->cp15.c14_timer[timeridx].cval = value; in gt_cval_write()
2933 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - in do_tval_read()
2934 (gt_get_countervalue(env) - offset)); in do_tval_read()
2949 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + in do_tval_write()
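
do_tval_read/do_tval_write above encode the architectural TVAL/CVAL relation: TVAL is the signed 32-bit distance from the offset-adjusted count to CVAL. A sketch of both directions:

    #include <stdint.h>

    static uint32_t tval_read(uint64_t cval, uint64_t count, uint64_t offset)
    {
        return (uint32_t)(cval - (count - offset));
    }

    static uint64_t tval_write(uint32_t tval, uint64_t count, uint64_t offset)
    {
        return count - offset + (int32_t)tval;  /* sign-extend, then add */
    }
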
2968 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; in gt_ctl_write()
2971 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); in gt_ctl_write()
2973 /* Enable toggled */ in gt_ctl_write()
3041 return env->cp15.c14_timer[timeridx].cval; in gt_phys_redir_cval_read()
3069 return env->cp15.c14_timer[timeridx].ctl; in gt_phys_redir_ctl_read()
3096 * to re-detect that it's this register. in gt_virt_tval_read()
3099 return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2); in gt_virt_tval_read()
3106 do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2); in gt_virt_tval_write()
3119 uint32_t oldval = env->cp15.cnthctl_el2; in gt_cnthctl_write()
3172 return env->cp15.c14_timer[timeridx].cval; in gt_virt_redir_cval_read()
3200 return env->cp15.c14_timer[timeridx].ctl; in gt_virt_redir_ctl_read()
3401 * Note that CNTFRQ is purely reads-as-written for the benefit
3423 /* per-timer control */
3569 * Secure timer -- this is actually restricted to only EL3
3570 * and configurably Secure-EL1 via the accessfn.
3599 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3609 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3630 !(env->cp15.scr_el3 & SCR_ECVEN)) { in gt_cntpoff_access()
3648 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
3657 * In user-mode most of the generic timer registers are inaccessible
3666 * Currently we have no support for QEMUTimer in linux-user so we in gt_virt_cnt_read()
3676 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3693 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3713 /* get_phys_addr() isn't present for user-mode-only targets */
3718 if (ri->opc2 & 4) { in ats_access()
3722 * They are simply UNDEF if executed from NS EL1. in ats_access()
3727 if (env->cp15.scr_el3 & SCR_EEL2) { in ats_access()
3742 * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC in par_el1_shareability()
3743 * memory -- see pseudocode PAREncodeShareability(). in par_el1_shareability()
3745 if (((res->cacheattrs.attrs & 0xf0) == 0) || in par_el1_shareability()
3746 res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) { in par_el1_shareability()
3749 return res->cacheattrs.shareability; in par_el1_shareability()
3791 * executed from NS EL1. If this is a synchronous external abort in do_ats_write()
3797 (env->cp15.scr_el3 & SCR_EA)) { in do_ats_write()
3800 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; in do_ats_write()
3802 env->cp15.hpfar_el2 |= HPFAR_NS; in do_ats_write()
3829 fsc = extract32(fsr, 0, 6); in do_ats_write()
3840 env->exception.vaddress = value; in do_ats_write()
3841 env->exception.fsr = fsr; in do_ats_write()
3852 * 32-bit or the 64-bit PAR format in do_ats_write()
3856 * * The Non-secure TTBCR.EAE bit is set to 1 in do_ats_write()
3869 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); in do_ats_write()
3877 /* Create a 64-bit PAR */ in do_ats_write()
3882 par64 |= (1 << 9); /* NS */ in do_ats_write()
3902 * Convert it to a 32-bit PAR. in do_ats_write()
3913 par64 |= (1 << 9); /* NS */ in do_ats_write()
3918 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | in do_ats_write()
3929 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats_write()
3935 switch (ri->opc2 & 6) { in ats_write()
3940 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write()
3947 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ in ats_write()
3950 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write()
3967 g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ in ats_write()
3982 case 6: in ats_write()
4004 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats1h_write()
4023 * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can in at_e012_access()
4028 if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) { in at_e012_access()
4038 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { in at_s1e2_access()
4057 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; in ats_write64()
4064 switch (ri->opc2 & 6) { in ats_write64()
4066 switch (ri->opc1) { in ats_write64()
4068 if (ri->crm == 9 && arm_pan_enabled(env)) { in ats_write64()
4078 case 6: /* AT S1E3R, AT S1E3W */ in ats_write64()
4092 case 6: /* AT S12E0R, AT S12E0W */ in ats_write64()
4100 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); in ats_write64()
4141 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); in pmsav5_data_ap_write()
4146 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); in pmsav5_data_ap_read()
4152 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); in pmsav5_insn_ap_write()
4157 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); in pmsav5_insn_ap_read()
4168 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_read()
4182 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_write()
4183 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav7_write()
4191 uint32_t nrgs = cpu->pmsav7_dregion; in pmsav7_rgnr_write()
4208 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prbar_write()
4209 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prbar_write()
4214 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prbar_read()
4222 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prlar_write()
4223 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prlar_write()
4228 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prlar_read()
4240 if (value >= cpu->pmsav7_dregion) { in prselr_write()
4244 env->pmsav7.rnr[M_REG_NS] = value; in prselr_write()
4252 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprbar_write()
4253 env->pmsav8.hprbar[env->pmsav8.hprselr] = value; in hprbar_write()
4258 return env->pmsav8.hprbar[env->pmsav8.hprselr]; in hprbar_read()
4266 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprlar_write()
4267 env->pmsav8.hprlar[env->pmsav8.hprselr] = value; in hprlar_write()
4272 return env->pmsav8.hprlar[env->pmsav8.hprselr]; in hprlar_read()
4283 int rmax = MIN(cpu->pmsav8r_hdregion, 32); in hprenr_write()
4286 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprenr_write()
4291 env->pmsav8.hprlar[n] = deposit32( in hprenr_write()
4292 env->pmsav8.hprlar[n], 0, 1, bit); in hprenr_write()
4303 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { in hprenr_read()
4304 if (env->pmsav8.hprlar[n] & 0x1) { in hprenr_read()
4320 if (value >= cpu->pmsav8r_hdregion) { in hprselr_write()
4324 env->pmsav8.hprselr = value; in hprselr_write()
4331 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_write()
4332 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_write()
4334 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav8r_regn_write()
4336 if (ri->opc1 & 4) { in pmsav8r_regn_write()
4337 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_write()
4340 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
4341 env->pmsav8.hprlar[index] = value; in pmsav8r_regn_write()
4343 env->pmsav8.hprbar[index] = value; in pmsav8r_regn_write()
4346 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_write()
4349 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
4350 env->pmsav8.rlar[M_REG_NS][index] = value; in pmsav8r_regn_write()
4352 env->pmsav8.rbar[M_REG_NS][index] = value; in pmsav8r_regn_write()
4360 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_read()
4361 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_read()
4363 if (ri->opc1 & 4) { in pmsav8r_regn_read()
4364 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_read()
4367 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
4368 return env->pmsav8.hprlar[index]; in pmsav8r_regn_read()
4370 return env->pmsav8.hprbar[index]; in pmsav8r_regn_read()
4373 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_read()
4376 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
4377 return env->pmsav8.rlar[M_REG_NS][index]; in pmsav8r_regn_read()
4379 return env->pmsav8.rbar[M_REG_NS][index]; in pmsav8r_regn_read()
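
The index expression shared by pmsav8r_regn_write/read above packs a 5-bit region number out of the encoding: one opc0 bit on top, three CRm bits in the middle, one opc2 bit at the bottom. A sketch:

    static unsigned pmsav8r_index(unsigned opc0, unsigned crm, unsigned opc2)
    {
        return ((opc0 & 1) << 4) | ((crm & 7) << 1) | ((opc2 >> 2) & 1);
    }
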
4386 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
4391 .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
4396 .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
4401 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
4405 .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
4409 .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
4414 .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
4422 * because the PMSAv7 is also used by M-profile CPUs, which do
4425 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4430 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4435 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4440 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4471 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4474 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4477 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4480 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4483 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4486 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4489 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4491 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4492 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4505 * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when in vmsa_ttbcr_write()
4506 * using Long-descriptor translation table format in vmsa_ttbcr_write()
4513 * Short-descriptor translation table format. in vmsa_ttbcr_write()
4544 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ in vmsa_ttbr_write()
4597 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4602 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4667 env->cp15.c15_ticonfig = value & 0xe7; in omap_ticonfig_write()
4669 env->cp15.c0_cpuid = (value & (1 << 5)) ? in omap_ticonfig_write()
4676 env->cp15.c15_threadid = value & 0xffff; in omap_threadid_write()
4682 /* Wait-for-interrupt (deprecated) */ in omap_wfi_write()
4693 env->cp15.c15_i_max = 0x000; in omap_cachemaint_write()
4694 env->cp15.c15_i_min = 0xff0; in omap_cachemaint_write()
4740 env->cp15.c15_cpar = value & 0x3fff; in xscale_cpar_write()
4753 * XScale specific cache-lockdown: since we have no cache we NOP these
4773 * implementation of this implementation-defined space.
4786 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4799 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4813 * The cache test-and-clean instructions always return (1 << 30)
4837 return env->cp15.vpidr_el2; in midr_read()
4845 uint64_t mpidr = cpu->mp_affinity; in mpidr_read_val()
4850 * Cores which are uniprocessor (non-coherent) in mpidr_read_val()
4852 * bit 30. (For instance, Cortex-R5). in mpidr_read_val()
4854 if (cpu->mp_is_up) { in mpidr_read_val()
4866 return env->cp15.vmpidr_el2; in mpidr_read()
4935 env->daif = value & PSTATE_DAIF; in aa64_daif_write()
4940 return env->pstate & PSTATE_PAN; in aa64_pan_read()
4946 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); in aa64_pan_write()
4958 return env->pstate & PSTATE_UAO; in aa64_uao_read()
4964 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); in aa64_uao_write()
4976 return env->pstate & PSTATE_DIT; in aa64_dit_read()
4982 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); in aa64_dit_write()
4994 return env->pstate & PSTATE_SSBS; in aa64_ssbs_read()
5000 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); in aa64_ssbs_write()
5005 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
5066 * Page D4-1736 (DDI0487A.b)
5240 * flush-last-level-only. in tlbi_aa64_vae2_write()
5256 * flush-last-level-only. in tlbi_aa64_vae3_write()
5282 * since we don't support flush-for-specific-ASID-only or in tlbi_aa64_vae1_write()
5283 * flush-last-level-only. in tlbi_aa64_vae1_write()
5322 * The MSB of value is the NS field, which only applies if SEL2 in ipas2e1_tlbmask()
5323 * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). in ipas2e1_tlbmask()
5454 * since we don't support flush-for-specific-ASID-only or in tlbi_aa64_rvae1_write()
5455 * flush-last-level-only. in tlbi_aa64_rvae1_write()
5470 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer in tlbi_aa64_rvae1is_write()
5484 * since we don't support flush-for-specific-ASID-only or in tlbi_aa64_rvae2_write()
5485 * flush-last-level-only. in tlbi_aa64_rvae2_write()
5501 * since we don't support flush-for-specific-ASID-only, in tlbi_aa64_rvae2is_write()
5502 * flush-last-level-only or inner/outer shareable specific flushes. in tlbi_aa64_rvae2is_write()
5516 * since we don't support flush-for-specific-ASID-only or in tlbi_aa64_rvae3_write()
5517 * flush-last-level-only. in tlbi_aa64_rvae3_write()
5530 * since we don't support flush-for-specific-ASID-only, in tlbi_aa64_rvae3is_write()
5531 * flush-last-level-only or inner/outer specific flushes. in tlbi_aa64_rvae3is_write()
5562 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { in aa64_zva_access()
5566 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { in aa64_zva_access()
5589 return cpu->dcz_blocksize | dzp_bit; in aa64_dczid_read()
5595 if (!(env->pstate & PSTATE_SP)) { in sp_el0_access()
5607 return env->pstate & PSTATE_SP; in spsel_read()
5620 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { in sctlr_write()
5627 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { in sctlr_write()
5628 if (ri->opc1 == 6) { /* SCTLR_EL3 */ in sctlr_write()
5646 /* This may enable/disable the MMU, so do a TLB flush. */ in sctlr_write()
5649 if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { in sctlr_write()
5668 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; in mdcr_el3_write()
5673 env->cp15.mdcr_el3 = value; in mdcr_el3_write()
5694 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; in mdcr_el2_write()
5699 env->cp15.mdcr_el2 = value; in mdcr_el2_write()
5720 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5724 * Since the executable region is never written to we cannot detect code
5736 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1; in ic_ivau_write()
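
The mask above comes straight from CTR_EL0: IminLine (bits [3:0]) is log2 of the instruction-cache line size in words, so the line size in bytes is 4 << IminLine; DminLine (bits [19:16], used the same way by dccvap_writefn further down) covers the data side. A sketch:

    #include <stdint.h>

    static uint64_t icache_line_bytes(uint32_t ctr)
    {
        return 4u << (ctr & 0xf);           /* 4 << IminLine */
    }

    static uint64_t dcache_line_bytes(uint32_t ctr)
    {
        return 4u << ((ctr >> 16) & 0xf);   /* 4 << DminLine */
    }
    /* e.g. IminLine = 4 -> 64-byte lines -> address mask 0x3f */
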
5750 * Minimal set of EL0-visible registers. This will need to be expanded
5779 /* Avoid overhead of an access check that always passes in user-mode */
5815 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5820 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5920 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5936 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5970 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5979 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5983 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
6033 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
6039 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
6043 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
6045 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
6116 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
6158 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { in do_hcr_write()
6215 * HCR_PTW forbids certain page-table setups in do_hcr_write()
6221 if ((env->cp15.hcr_el2 ^ value) & in do_hcr_write()
6225 env->cp15.hcr_el2 = value; in do_hcr_write()
6257 value = deposit64(env->cp15.hcr_el2, 32, 32, value); in hcr_writehigh()
6265 value = deposit64(env->cp15.hcr_el2, 0, 32, value); in hcr_writelow()
6276 uint64_t ret = env->cp15.hcr_el2; in arm_hcr_el2_eff_secstate()
6283 * current Security state". This is ARMv8.4-SecEL2 speak for in arm_hcr_el2_eff_secstate()
6284 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). in arm_hcr_el2_eff_secstate()
6287 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves in arm_hcr_el2_eff_secstate()
6290 * on a per-field basis. In current QEMU, this is condition in arm_hcr_el2_eff_secstate()
6308 * These bits are up-to-date as of ARMv8.6. in arm_hcr_el2_eff_secstate()
6319 /* These bits are up-to-date as of ARMv8.6. */ in arm_hcr_el2_eff_secstate()
6367 if ((env->cp15.hcr_el2 & mask) != mask) { in el_is_in_host()
6397 env->cp15.hcrx_el2 = value & valid_mask; in hcrx_write()
6422 && !(env->cp15.scr_el3 & SCR_HXEN)) { in access_hxen()
6445 * For the moment, we treat the EL2-disabled case as taking in arm_hcrx_el2_eff()
6446 * priority over the HXEn-disabled case. This is true for the only in arm_hcrx_el2_eff()
6459 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { in arm_hcrx_el2_eff()
6462 return env->cp15.hcrx_el2; in arm_hcrx_el2_eff()
6469 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_write()
6473 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_write()
6475 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); in cptr_el2_write()
6477 env->cp15.cptr_el[2] = value; in cptr_el2_write()
6483 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_read()
6486 uint64_t value = env->cp15.cptr_el[2]; in cptr_el2_read()
6489 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_read()
6521 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
6525 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
6539 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
6589 .cp = 15, .opc1 = 6, .crm = 2,
6662 * Unlike the other EL2-related AT operations, these must
6679 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
6680 * to behave as if SCR.NS was 1.
6713 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
6731 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6735 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6765 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
6770 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
6830 * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. in nsacr_access()
6837 if (env->cp15.scr_el3 & SCR_EEL2) { in nsacr_access()
6842 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ in nsacr_access()
6851 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
6860 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
6872 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
6876 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
6883 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
6887 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
6890 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
6894 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
6898 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
6903 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
6907 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
6911 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
6915 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
6919 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
6923 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
6927 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
6931 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
6935 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
6939 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
6943 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
6968 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) { in access_el1nvpct()
6980 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) { in access_el1nvvct()
6999 ri = ri->opaque; in el2_e2h_read()
7000 readfn = ri->readfn; in el2_e2h_read()
7002 readfn = ri->orig_readfn; in el2_e2h_read()
7017 ri = ri->opaque; in el2_e2h_write()
7018 writefn = ri->writefn; in el2_e2h_write()
7020 writefn = ri->orig_writefn; in el2_e2h_write()
7031 return ri->orig_readfn(env, ri->opaque); in el2_e2h_e12_read()
7038 return ri->orig_writefn(env, ri->opaque, value); in el2_e2h_e12_write()
7058 if (ri->orig_accessfn) { in el2_e2h_e12_access()
7059 return ri->orig_accessfn(env, ri->opaque, isread); in el2_e2h_e12_access()
7096 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), in define_arm_vh_e2h_redirects_aliases()
7116 { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6), in define_arm_vh_e2h_redirects_aliases()
7119 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), in define_arm_vh_e2h_redirects_aliases()
7126 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
7127 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
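
The K(op0, op1, crn, crm, op2) keys in the table above pack one system-register encoding into a single hash-table key. The layout below mirrors the KVM AArch64 sysreg encoding that QEMU's CP_REG_ARM64_SYSREG_* masks describe, but treat the exact shifts as an assumption of this sketch:

    static unsigned sysreg_key(unsigned op0, unsigned op1,
                               unsigned crn, unsigned crm, unsigned op2)
    {
        /* assumed shifts: op0 @14, op1 @11, crn @7, crm @3, op2 @0 */
        return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;
    }
    /* e.g. sysreg_key(3, 0, 6, 0, 0) stands in for K(3, 0, 6, 0, 0) */
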
7138 if (a->feature && !a->feature(&cpu->isar)) { in define_arm_vh_e2h_redirects_aliases()
7142 src_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
7143 (gpointer)(uintptr_t)a->src_key); in define_arm_vh_e2h_redirects_aliases()
7144 dst_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
7145 (gpointer)(uintptr_t)a->dst_key); in define_arm_vh_e2h_redirects_aliases()
7149 /* Cross-compare names to detect typos in the keys. */ in define_arm_vh_e2h_redirects_aliases()
7150 g_assert(strcmp(src_reg->name, a->src_name) == 0); in define_arm_vh_e2h_redirects_aliases()
7151 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); in define_arm_vh_e2h_redirects_aliases()
7154 g_assert(src_reg->opaque == NULL); in define_arm_vh_e2h_redirects_aliases()
7159 new_reg->name = a->new_name; in define_arm_vh_e2h_redirects_aliases()
7160 new_reg->type |= ARM_CP_ALIAS; in define_arm_vh_e2h_redirects_aliases()
7162 new_reg->access &= PL2_RW | PL3_RW; in define_arm_vh_e2h_redirects_aliases()
7164 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) in define_arm_vh_e2h_redirects_aliases()
7166 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) in define_arm_vh_e2h_redirects_aliases()
7168 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) in define_arm_vh_e2h_redirects_aliases()
7170 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) in define_arm_vh_e2h_redirects_aliases()
7172 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) in define_arm_vh_e2h_redirects_aliases()
7174 new_reg->opaque = src_reg; in define_arm_vh_e2h_redirects_aliases()
7175 new_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
7176 new_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
7177 new_reg->orig_accessfn = src_reg->accessfn; in define_arm_vh_e2h_redirects_aliases()
7178 if (!new_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
7179 new_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
7181 if (!new_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
7182 new_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
7184 new_reg->readfn = el2_e2h_e12_read; in define_arm_vh_e2h_redirects_aliases()
7185 new_reg->writefn = el2_e2h_e12_write; in define_arm_vh_e2h_redirects_aliases()
7186 new_reg->accessfn = el2_e2h_e12_access; in define_arm_vh_e2h_redirects_aliases()
7193 if (new_reg->nv2_redirect_offset) { in define_arm_vh_e2h_redirects_aliases()
7194 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); in define_arm_vh_e2h_redirects_aliases()
7195 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; in define_arm_vh_e2h_redirects_aliases()
7196 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; in define_arm_vh_e2h_redirects_aliases()
7199 ok = g_hash_table_insert(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
7200 (gpointer)(uintptr_t)a->new_key, new_reg); in define_arm_vh_e2h_redirects_aliases()
7203 src_reg->opaque = dst_reg; in define_arm_vh_e2h_redirects_aliases()
7204 src_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
7205 src_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
7206 if (!src_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
7207 src_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
7209 if (!src_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
7210 src_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
7212 src_reg->readfn = el2_e2h_read; in define_arm_vh_e2h_redirects_aliases()
7213 src_reg->writefn = el2_e2h_write; in define_arm_vh_e2h_redirects_aliases()
7228 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { in ctr_el0_access()
7232 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { in ctr_el0_access()
7263 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { in access_terr()
7274 return env->cp15.vdisr_el2; in disr_read()
7276 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_read()
7279 return env->cp15.disr_el1; in disr_read()
7287 env->cp15.vdisr_el2 = val; in disr_write()
7290 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_write()
7293 env->cp15.disr_el1 = val; in disr_write()
7315 * These registers have fine-grained trap bits, but UNDEF-to-EL1
7316 * is higher priority than FGT-to-EL2 so we do not need to list them
7353 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { in sve_exception_el()
7367 if (env->cp15.hcr_el2 & HCR_E2H) { in sve_exception_el()
7368 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { in sve_exception_el()
7370 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sve_exception_el()
7379 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { in sve_exception_el()
7387 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { in sve_exception_el()
7402 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { in sme_exception_el()
7416 if (env->cp15.hcr_el2 & HCR_E2H) { in sme_exception_el()
7417 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { in sme_exception_el()
7419 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sme_exception_el()
7428 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { in sme_exception_el()
7436 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in sme_exception_el()
7449 uint64_t *cr = env->vfp.zcr_el; in sve_vqm1_for_el_sm()
7450 uint32_t map = cpu->sve_vq.map; in sve_vqm1_for_el_sm()
7451 uint32_t len = ARM_MAX_VQ - 1; in sve_vqm1_for_el_sm()
7454 cr = env->vfp.smcr_el; in sve_vqm1_for_el_sm()
7455 map = cpu->sme_vq.map; in sve_vqm1_for_el_sm()
7470 return 31 - clz32(map); in sve_vqm1_for_el_sm()
7473 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */ in sve_vqm1_for_el_sm()
7475 return ctz32(cpu->sme_vq.map); in sve_vqm1_for_el_sm()
7480 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); in sve_vqm1_for_el()
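
The clz32/ctz32 uses above probe the supported-vector-length bitmap: bit (vq - 1) is set for each supported quantum, so the highest set bit is the largest VQ - 1 and the lowest set bit the smallest. A sketch using the GCC/Clang builtins that QEMU's helpers wrap (map must be non-zero):

    #include <stdint.h>

    static unsigned highest_vqm1(uint32_t map)
    {
        return 31 - __builtin_clz(map);   /* largest supported VQ - 1 */
    }

    static unsigned lowest_vqm1(uint32_t map)
    {
        return __builtin_ctz(map);        /* smallest supported VQ - 1 */
    }
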
7517 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
7538 && !(env->cp15.scr_el3 & SCR_ENTP2)) { in access_tpidr2()
7550 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smprimap()
7561 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smpri()
7570 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); in arm_reset_sve_state()
7572 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); in arm_reset_sve_state()
7578 uint64_t change = (env->svcr ^ new) & mask; in aarch64_set_svcr()
7583 env->svcr ^= change; in aarch64_set_svcr()
7592 * SetPSTATE_ZA zeros on enable and disable. We can zero this only in aarch64_set_svcr()
7593 * on enable: while disabled, the storage is inaccessible and the in aarch64_set_svcr()
7598 memset(env->zarray, 0, sizeof(env->zarray)); in aarch64_set_svcr()
7609 aarch64_set_svcr(env, value, -1); in svcr_write()
7627 * apply the narrower SVL to the Zregs and Pregs -- see the comment in smcr_write()
7648 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
7654 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
7659 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
7664 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
7704 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); in gpccr_write()
7709 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ, in gpccr_reset()
7710 env_archcpu(env)->reset_l0gptsz); in gpccr_reset()
7723 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
7727 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
7730 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
7733 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
7737 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
7747 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
7751 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
7755 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
7761 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
7768 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT); in aa64_allint_write()
7773 return env->pstate & PSTATE_ALLINT; in aa64_allint_read()
7804 unsigned int i, pmcrn = pmu_num_counters(&cpu->env); in define_pmu_regs()
7822 .resetvalue = cpu->isar.reset_pmcr_el0, in define_pmu_regs()
7875 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, in define_pmu_regs()
7880 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, in define_pmu_regs()
7887 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, in define_pmu_regs()
7906 uint64_t pfr1 = cpu->isar.id_pfr1; in id_pfr1_read()
7908 if (env->gicv3state) { in id_pfr1_read()
7917 uint64_t pfr0 = cpu->isar.id_aa64pfr0; in id_aa64pfr0_read()
7919 if (env->gicv3state) { in id_aa64pfr0_read()
7938 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { in access_lor_ns()
7948 /* UNDEF if SCR_EL3.NS == 0 */ in access_lor_other()
7955 * A trivial implementation of ARMv8.1-LOR leaves all of these
8000 !(env->cp15.scr_el3 & SCR_APK)) { in access_pauth()
8101 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
8106 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
8111 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
8116 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
8125 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
8141 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
8153 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
8157 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
8161 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
8165 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
8169 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
8173 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
8177 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
8181 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
8234 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
8250 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
8254 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
8258 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
8269 env->NF = env->CF = env->VF = 0, env->ZF = 1; in rndr_readfn()
8275 * timed-out indication to the guest. There is no reason in rndr_readfn()
8280 ri->name, error_get_pretty(err)); in rndr_readfn()
8283 env->ZF = 0; /* NZCF = 0100 */ in rndr_readfn()
8289 /* We do not support re-seeding, so the two registers operate the same. */
8306 /* CTR_EL0 System register -> DminLine, bits [19:16] */ in dccvap_writefn()
8307 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); in dccvap_writefn()
8309 uint64_t vaddr = vaddr_in & ~(dline_size - 1); in dccvap_writefn()
8321 /* RCU lock is already being held */ in dccvap_writefn()
8373 !(env->cp15.scr_el3 & SCR_ATA)) { in access_mte()
8413 !(env->cp15.scr_el3 & SCR_ATA)) { in access_tfsr_el2()
8421 return env->pstate & PSTATE_TCO; in tco_read()
8426 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); in tco_write()
8431 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
8435 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
8441 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
8445 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
8453 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
8461 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
8466 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
8470 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
8475 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
8483 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
8491 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
8547 /* Avoid overhead of an access check that always passes in user-mode */
8556 /* Avoid overhead of an access check that always passes in user-mode */
8570 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { in access_scxtnum()
8576 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { in access_scxtnum()
8584 && !(env->cp15.scr_el3 & SCR_ENSCXT)) { in access_scxtnum()
8619 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
8628 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) { in access_fgt()
8656 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
8667 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything in vncr_write()
8668 * about the RESS bits at the top -- we choose the "generate an EL2 in vncr_write()
8670 * the ptw.c code detect the resulting invalid address). in vncr_write()
8672 env->cp15.vncr_el2 = value & ~0xfffULL; in vncr_write()
8788 (env->cp15.hstr_el2 & HSTR_TJDBX)) { in access_joscr_jmcr()
8910 * is non-zero, which is never for ARMv7, optionally in ARMv8
8912 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
8930 CPUARMState *env = &cpu->env; in register_cp_regs_for_features()
8952 .resetvalue = cpu->isar.id_pfr0 }, in register_cp_regs_for_features()
8963 .resetvalue = cpu->isar.id_pfr1, in register_cp_regs_for_features()
8975 .resetvalue = cpu->isar.id_dfr0 }, in register_cp_regs_for_features()
8980 .resetvalue = cpu->id_afr0 }, in register_cp_regs_for_features()
8985 .resetvalue = cpu->isar.id_mmfr0 }, in register_cp_regs_for_features()
8990 .resetvalue = cpu->isar.id_mmfr1 }, in register_cp_regs_for_features()
8992 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, in register_cp_regs_for_features()
8995 .resetvalue = cpu->isar.id_mmfr2 }, in register_cp_regs_for_features()
9000 .resetvalue = cpu->isar.id_mmfr3 }, in register_cp_regs_for_features()
9005 .resetvalue = cpu->isar.id_isar0 }, in register_cp_regs_for_features()
9010 .resetvalue = cpu->isar.id_isar1 }, in register_cp_regs_for_features()
9015 .resetvalue = cpu->isar.id_isar2 }, in register_cp_regs_for_features()
9020 .resetvalue = cpu->isar.id_isar3 }, in register_cp_regs_for_features()
9025 .resetvalue = cpu->isar.id_isar4 }, in register_cp_regs_for_features()
9030 .resetvalue = cpu->isar.id_isar5 }, in register_cp_regs_for_features()
9032 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, in register_cp_regs_for_features()
9035 .resetvalue = cpu->isar.id_mmfr4 }, in register_cp_regs_for_features()
9040 .resetvalue = cpu->isar.id_isar6 }, in register_cp_regs_for_features()
9064 .resetvalue = cpu->clidr in register_cp_regs_for_features()
9095 .resetvalue = cpu->isar.id_aa64pfr0 in register_cp_regs_for_features()
9107 .resetvalue = cpu->isar.id_aa64pfr1}, in register_cp_regs_for_features()
9122 .resetvalue = cpu->isar.id_aa64zfr0 }, in register_cp_regs_for_features()
9127 .resetvalue = cpu->isar.id_aa64smfr0 }, in register_cp_regs_for_features()
9129 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, in register_cp_regs_for_features()
9142 .resetvalue = cpu->isar.id_aa64dfr0 }, in register_cp_regs_for_features()
9147 .resetvalue = cpu->isar.id_aa64dfr1 }, in register_cp_regs_for_features()
9162 .resetvalue = cpu->id_aa64afr0 }, in register_cp_regs_for_features()
9167 .resetvalue = cpu->id_aa64afr1 }, in register_cp_regs_for_features()
9169 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, in register_cp_regs_for_features()
9179 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, in register_cp_regs_for_features()
9182 .resetvalue = cpu->isar.id_aa64isar0 }, in register_cp_regs_for_features()
9184 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, in register_cp_regs_for_features()
9187 .resetvalue = cpu->isar.id_aa64isar1 }, in register_cp_regs_for_features()
9189 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, in register_cp_regs_for_features()
9192 .resetvalue = cpu->isar.id_aa64isar2 }, in register_cp_regs_for_features()
9194 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, in register_cp_regs_for_features()
9199 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, in register_cp_regs_for_features()
9204 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, in register_cp_regs_for_features()
9209 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, in register_cp_regs_for_features()
9214 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, in register_cp_regs_for_features()
9222 .resetvalue = cpu->isar.id_aa64mmfr0 }, in register_cp_regs_for_features()
9227 .resetvalue = cpu->isar.id_aa64mmfr1 }, in register_cp_regs_for_features()
9232 .resetvalue = cpu->isar.id_aa64mmfr2 }, in register_cp_regs_for_features()
9237 .resetvalue = cpu->isar.id_aa64mmfr3 }, in register_cp_regs_for_features()
9249 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, in register_cp_regs_for_features()
9262 .resetvalue = cpu->isar.mvfr0 }, in register_cp_regs_for_features()
9267 .resetvalue = cpu->isar.mvfr1 }, in register_cp_regs_for_features()
9272 .resetvalue = cpu->isar.mvfr2 }, in register_cp_regs_for_features()
9297 * being filled with AArch64-view-of-AArch32-ID-register in register_cp_regs_for_features()
9309 .resetvalue = cpu->isar.id_pfr2 }, in register_cp_regs_for_features()
9314 .resetvalue = cpu->isar.id_dfr1 }, in register_cp_regs_for_features()
9316 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, in register_cp_regs_for_features()
9319 .resetvalue = cpu->isar.id_mmfr5 }, in register_cp_regs_for_features()
9326 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, in register_cp_regs_for_features()
9329 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, in register_cp_regs_for_features()
9331 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, in register_cp_regs_for_features()
9334 .resetvalue = cpu->pmceid0 }, in register_cp_regs_for_features()
9339 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, in register_cp_regs_for_features()
9344 .resetvalue = cpu->pmceid1 }, in register_cp_regs_for_features()
9475 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. in register_cp_regs_for_features()
9476 * For pre-v8 cores there are RAZ patterns for these in in register_cp_regs_for_features()
9479 * to also cover c0, 0, c{8-15}, {0-7}. in register_cp_regs_for_features()
9481 * c4-c7 is where the AArch64 ID registers live (and we've in register_cp_regs_for_features()
9482 * already defined those in v8_idregs[]), and c8-c15 are not in register_cp_regs_for_features()
9512 .resetvalue = cpu->midr, in register_cp_regs_for_features()
9517 .access = PL2_RW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
9581 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, in register_cp_regs_for_features()
9585 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2, in register_cp_regs_for_features()
9592 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, in register_cp_regs_for_features()
9596 .resetvalue = cpu->reset_sctlr }, in register_cp_regs_for_features()
9605 * reads as constant 0xc00 from NS EL1 and NS EL2 in register_cp_regs_for_features()
9606 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 in register_cp_regs_for_features()
9608 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 in register_cp_regs_for_features()
9653 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ in register_cp_regs_for_features()
9688 * When LPAE exists this 32-bit PAR register is an alias of the in register_cp_regs_for_features()
9689 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[] in register_cp_regs_for_features()
9725 * cp15 crn=0 to be writes-ignored, whereas for other cores they should in register_cp_regs_for_features()
9726 * be read-only (ie write causes UNDEF exception). in register_cp_regs_for_features()
9731 * Pre-v8 MIDR space. in register_cp_regs_for_features()
9742 .access = PL1_R, .resetvalue = cpu->midr, in register_cp_regs_for_features()
9758 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, in register_cp_regs_for_features()
9767 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
9774 .access = PL1_R, .resetvalue = cpu->midr }, in register_cp_regs_for_features()
9776 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, in register_cp_regs_for_features()
9780 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, in register_cp_regs_for_features()
9785 .access = PL1_R, .resetvalue = cpu->midr in register_cp_regs_for_features()
9788 /* These are common to v8 and pre-v8 */ in register_cp_regs_for_features()
9792 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
9797 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
9798 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ in register_cp_regs_for_features()
9818 .resetvalue = cpu->pmsav7_dregion << 8 in register_cp_regs_for_features()
9825 .resetvalue = cpu->pmsav8r_hdregion in register_cp_regs_for_features()
9884 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { in register_cp_regs_for_features()
9892 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, in register_cp_regs_for_features()
9904 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, in register_cp_regs_for_features()
9914 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { in register_cp_regs_for_features()
9923 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, in register_cp_regs_for_features()
9935 .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, in register_cp_regs_for_features()
9970 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, in register_cp_regs_for_features()
9976 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, in register_cp_regs_for_features()
9988 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. in register_cp_regs_for_features()
9990 * (1) older 32-bit only cores have a simple 32-bit CBAR in register_cp_regs_for_features()
9991 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a in register_cp_regs_for_features()
9992 * 32-bit register visible to AArch32 at a different encoding in register_cp_regs_for_features()
9994 * be able to squash a 64-bit address into the 32-bit view. in register_cp_regs_for_features()
9996 * in future if we support AArch32-only configs of some of the in register_cp_regs_for_features()
10002 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) in register_cp_regs_for_features()
10003 | extract64(cpu->reset_cbar, 32, 12); in register_cp_regs_for_features()
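/*
 * Illustrative worked value (annotation; the reset_cbar value is
 * hypothetical): for reset_cbar = 0x0000004012340000ULL the expression
 * above keeps PA bits [31:18] in place and packs PA bits [43:32] into
 * the low 12 bits of the 32-bit view:
 *   extract64(x, 18, 14) << 18  ->  0x12340000
 *   extract64(x, 32, 12)        ->  0x040
 *   cbar32                      ->  0x12340040
 */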
10012 .access = PL1_R, .resetvalue = cpu->reset_cbar }, in register_cp_regs_for_features()
10021 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, in register_cp_regs_for_features()
10059 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, in register_cp_regs_for_features()
10065 * arch/arm/mach-pxa/sleep.S expects two instructions following in register_cp_regs_for_features()
10066 * an MMU enable to execute from cache. Imitate this behaviour. in register_cp_regs_for_features()
10163 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize, in register_cp_regs_for_features()
10226 CPUARMState *env = &cpu->env; in add_cpreg_to_hashtable()
10229 bool is64 = r->type & ARM_CP_64BIT; in add_cpreg_to_hashtable()
10230 bool ns = secstate & ARM_CP_SECSTATE_NS; in add_cpreg_to_hashtable() local
10231 int cp = r->cp; in add_cpreg_to_hashtable()
10238 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
10241 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); in add_cpreg_to_hashtable()
10246 * cp == 0 as equivalent to the value for "standard guest-visible in add_cpreg_to_hashtable()
10248 * in their AArch64 view (the .cp value may be non-zero for the in add_cpreg_to_hashtable()
10251 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
10254 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); in add_cpreg_to_hashtable()
10261 if (!(r->type & ARM_CP_OVERRIDE)) { in add_cpreg_to_hashtable()
10262 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); in add_cpreg_to_hashtable()
10264 assert(oldreg->type & ARM_CP_OVERRIDE); in add_cpreg_to_hashtable()
10279 int min_el = ctz32(r->access) / 2; in add_cpreg_to_hashtable()
10281 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { in add_cpreg_to_hashtable()
10284 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); in add_cpreg_to_hashtable()
10289 if ((r->access & max_el) == 0) { in add_cpreg_to_hashtable()
10298 r2->name = memcpy(r2 + 1, name, name_len); in add_cpreg_to_hashtable()
10304 r2->cp = cp; in add_cpreg_to_hashtable()
10305 r2->crm = crm; in add_cpreg_to_hashtable()
10306 r2->opc1 = opc1; in add_cpreg_to_hashtable()
10307 r2->opc2 = opc2; in add_cpreg_to_hashtable()
10308 r2->state = state; in add_cpreg_to_hashtable()
10309 r2->secure = secstate; in add_cpreg_to_hashtable()
10311 r2->opaque = opaque; in add_cpreg_to_hashtable()
10316 int old_special = r2->type & ARM_CP_SPECIAL_MASK; in add_cpreg_to_hashtable()
10323 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; in add_cpreg_to_hashtable()
10326 * special cases like VPIDR_EL2 which have a constant non-zero in add_cpreg_to_hashtable()
10329 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { in add_cpreg_to_hashtable()
10330 r2->resetvalue = 0; in add_cpreg_to_hashtable()
10337 r2->readfn = NULL; in add_cpreg_to_hashtable()
10338 r2->writefn = NULL; in add_cpreg_to_hashtable()
10339 r2->raw_readfn = NULL; in add_cpreg_to_hashtable()
10340 r2->raw_writefn = NULL; in add_cpreg_to_hashtable()
10341 r2->resetfn = NULL; in add_cpreg_to_hashtable()
10342 r2->fieldoffset = 0; in add_cpreg_to_hashtable()
10343 r2->bank_fieldoffsets[0] = 0; in add_cpreg_to_hashtable()
10344 r2->bank_fieldoffsets[1] = 0; in add_cpreg_to_hashtable()
10346 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; in add_cpreg_to_hashtable()
10354 r2->fieldoffset = r->bank_fieldoffsets[ns]; in add_cpreg_to_hashtable()
10360 * reset the 32-bit instance in certain cases: in add_cpreg_to_hashtable()
10362 * 1) If the register has both 32-bit and 64-bit instances in add_cpreg_to_hashtable()
10363 * then we can count on the 64-bit instance taking care in add_cpreg_to_hashtable()
10364 * of the non-secure bank. in add_cpreg_to_hashtable()
10365 * 2) If ARMv8 is enabled then we can count on a 64-bit in add_cpreg_to_hashtable()
10367 * that separate 32 and 64-bit definitions are provided. in add_cpreg_to_hashtable()
10369 if ((r->state == ARM_CP_STATE_BOTH && ns) || in add_cpreg_to_hashtable()
10370 (arm_feature(env, ARM_FEATURE_V8) && !ns)) { in add_cpreg_to_hashtable()
10371 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
10373 } else if ((secstate != r->secure) && !ns) { in add_cpreg_to_hashtable()
10376 * migration of the non-secure instance. in add_cpreg_to_hashtable()
10378 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
10382 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { in add_cpreg_to_hashtable()
10383 r2->fieldoffset += sizeof(uint32_t); in add_cpreg_to_hashtable()
10393 * never migratable and not even raw-accessible. in add_cpreg_to_hashtable()
10395 if (r2->type & ARM_CP_SPECIAL_MASK) { in add_cpreg_to_hashtable()
10396 r2->type |= ARM_CP_NO_RAW; in add_cpreg_to_hashtable()
10398 if (((r->crm == CP_ANY) && crm != 0) || in add_cpreg_to_hashtable()
10399 ((r->opc1 == CP_ANY) && opc1 != 0) || in add_cpreg_to_hashtable()
10400 ((r->opc2 == CP_ANY) && opc2 != 0)) { in add_cpreg_to_hashtable()
10401 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; in add_cpreg_to_hashtable()
10409 if (!(r2->type & ARM_CP_NO_RAW)) { in add_cpreg_to_hashtable()
10413 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); in add_cpreg_to_hashtable()
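/*
 * Illustrative sketch (annotation, not part of helper.c): once a
 * register has been inserted, it is found again by rebuilding the same
 * key from the decoded instruction fields. lookup_cp15_example is a
 * hypothetical helper; crn/crm/opc1/opc2 stand for those fields.
 */
static const ARMCPRegInfo *lookup_cp15_example(ARMCPU *cpu, int crn, int crm,
                                               int opc1, int opc2)
{
    /* AArch32, 32-bit register, non-secure bank */
    uint32_t key = ENCODE_CP_REG(15, false, true, crn, crm, opc1, opc2);

    /* Returns NULL if no such register was defined (access UNDEFs). */
    return get_arm_cp_reginfo(cpu->cp_regs, key);
}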
10430 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard in define_one_arm_cp_reg_with_opaque()
10439 * Only registers visible in AArch64 may set r->opc0; opc0 cannot in define_one_arm_cp_reg_with_opaque()
10445 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; in define_one_arm_cp_reg_with_opaque()
10446 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; in define_one_arm_cp_reg_with_opaque()
10447 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; in define_one_arm_cp_reg_with_opaque()
10448 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; in define_one_arm_cp_reg_with_opaque()
10449 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; in define_one_arm_cp_reg_with_opaque()
10450 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; in define_one_arm_cp_reg_with_opaque()
10454 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); in define_one_arm_cp_reg_with_opaque()
10456 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); in define_one_arm_cp_reg_with_opaque()
10458 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); in define_one_arm_cp_reg_with_opaque()
10461 * (M-profile or v7A-and-earlier only) for implementation defined in define_one_arm_cp_reg_with_opaque()
10467 switch (r->state) { in define_one_arm_cp_reg_with_opaque()
10470 if (r->cp == 0) { in define_one_arm_cp_reg_with_opaque()
10475 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && in define_one_arm_cp_reg_with_opaque()
10476 !arm_feature(&cpu->env, ARM_FEATURE_M)) { in define_one_arm_cp_reg_with_opaque()
10477 assert(r->cp >= 14 && r->cp <= 15); in define_one_arm_cp_reg_with_opaque()
10479 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); in define_one_arm_cp_reg_with_opaque()
10483 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); in define_one_arm_cp_reg_with_opaque()
10495 if (r->state != ARM_CP_STATE_AA32) { in define_one_arm_cp_reg_with_opaque()
10497 switch (r->opc1) { in define_one_arm_cp_reg_with_opaque()
10515 case 6: in define_one_arm_cp_reg_with_opaque()
10524 /* broken reginfo with out-of-range opc1 */ in define_one_arm_cp_reg_with_opaque()
10528 assert((r->access & ~mask) == 0); in define_one_arm_cp_reg_with_opaque()
10535 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { in define_one_arm_cp_reg_with_opaque()
10536 if (r->access & PL3_R) { in define_one_arm_cp_reg_with_opaque()
10537 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
10538 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
10539 r->readfn); in define_one_arm_cp_reg_with_opaque()
10541 if (r->access & PL3_W) { in define_one_arm_cp_reg_with_opaque()
10542 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
10543 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
10544 r->writefn); in define_one_arm_cp_reg_with_opaque()
10553 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { in define_one_arm_cp_reg_with_opaque()
10559 * (same for secure and non-secure world) or banked. in define_one_arm_cp_reg_with_opaque()
10563 switch (r->secure) { in define_one_arm_cp_reg_with_opaque()
10567 r->secure, crm, opc1, opc2, in define_one_arm_cp_reg_with_opaque()
10568 r->name); in define_one_arm_cp_reg_with_opaque()
10571 name = g_strdup_printf("%s_S", r->name); in define_one_arm_cp_reg_with_opaque()
10578 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
10585 * AArch64 registers get mapped to non-secure instance in define_one_arm_cp_reg_with_opaque()
10590 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
10613 * user-space cannot alter any values and dynamic values pertaining to
10624 if (m->is_glob) { in modify_arm_cp_regs_with_len()
10625 pat = g_pattern_spec_new(m->name); in modify_arm_cp_regs_with_len()
10630 if (pat && g_pattern_match_string(pat, r->name)) { in modify_arm_cp_regs_with_len()
10631 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
10632 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
10633 r->resetvalue = 0; in modify_arm_cp_regs_with_len()
10635 } else if (strcmp(r->name, m->name) == 0) { in modify_arm_cp_regs_with_len()
10636 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
10637 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
10638 r->resetvalue &= m->exported_bits; in modify_arm_cp_regs_with_len()
10639 r->resetvalue |= m->fixed_bits; in modify_arm_cp_regs_with_len()
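/*
 * Illustrative sketch (annotation, not part of helper.c): an example
 * modifier table of the kind walked above, assuming the element type is
 * ARMCPRegUserSpaceInfo with the m->name / m->is_glob / m->exported_bits
 * / m->fixed_bits fields used in the loop. The mask and value below are
 * hypothetical. A glob entry makes every matching register RAZ; an
 * exact-name entry can export selected reset bits and force others.
 */
static const ARMCPRegUserSpaceInfo example_userspace_mods[] = {
    /* glob entry: every matching register becomes constant 0 */
    { .name = "ID_AA64MMFR*", .is_glob = true },
    /* exact entry: keep some reset bits, force others (hypothetical) */
    { .name = "ID_AA64PFR0_EL1",
      .exported_bits = 0x000f000f00ff0000ULL,
      .fixed_bits    = 0x0000000000000011ULL },
};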
10657 /* Helper coprocessor write function for write-ignore registers */ in arm_cp_write_ignore()
10662 /* Helper coprocessor read function for read-as-zero registers */ in arm_cp_read_zero()
10668 /* Helper coprocessor reset function for do-nothing-on-reset registers */ in arm_cp_reset_ignore()
10681 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || in bad_mode_switch()
10697 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) in bad_mode_switch()
10700 * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR in bad_mode_switch()
10704 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && in bad_mode_switch()
10721 ZF = (env->ZF == 0); in cpsr_read()
10722 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | in cpsr_read()
10723 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) in cpsr_read()
10724 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) in cpsr_read()
10725 | ((env->condexec_bits & 0xfc) << 8) in cpsr_read()
10726 | (env->GE << 16) | (env->daif & CPSR_AIF); in cpsr_read()
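/*
 * Illustrative note (annotation): QEMU keeps the AArch32 flags in a
 * lazy form, which cpsr_read() above folds back into the architectural
 * positions N=31, Z=30, C=29, V=28, Q=27. E.g. after a result of zero
 * with a carry-out: env->ZF == 0 (so Z reads as 1), bit 31 of env->NF
 * is the N flag, env->CF == 1, and bit 31 of env->VF supplies V.
 */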
10737 env->ZF = (~val) & CPSR_Z; in cpsr_write()
10738 env->NF = val; in cpsr_write()
10739 env->CF = (val >> 29) & 1; in cpsr_write()
10740 env->VF = (val << 3) & 0x80000000; in cpsr_write()
10743 env->QF = ((val & CPSR_Q) != 0); in cpsr_write()
10746 env->thumb = ((val & CPSR_T) != 0); in cpsr_write()
10749 env->condexec_bits &= ~3; in cpsr_write()
10750 env->condexec_bits |= (val >> 25) & 3; in cpsr_write()
10753 env->condexec_bits &= 3; in cpsr_write()
10754 env->condexec_bits |= (val >> 8) & 0xfc; in cpsr_write()
10757 env->GE = (val >> 16) & 0xf; in cpsr_write()
10763 * whether non-secure software is allowed to change the CPSR_F and CPSR_A in cpsr_write()
10774 changed_daif = (env->daif ^ val) & mask; in cpsr_write()
10779 * abort exceptions from a non-secure state. in cpsr_write()
10781 if (!(env->cp15.scr_el3 & SCR_AW)) { in cpsr_write()
10784 "non-secure world with SCR.AW bit clear\n"); in cpsr_write()
10792 * exceptions from a non-secure state. in cpsr_write()
10794 if (!(env->cp15.scr_el3 & SCR_FW)) { in cpsr_write()
10797 "non-secure world with SCR.FW bit clear\n"); in cpsr_write()
10802 * Check whether non-maskable FIQ (NMFI) support is enabled. in cpsr_write()
10809 "Ignoring attempt to enable CPSR_F flag " in cpsr_write()
10810 "(non-maskable FIQ [NMFI] support enabled)\n"); in cpsr_write()
10816 env->daif &= ~(CPSR_AIF & mask); in cpsr_write()
10817 env->daif |= val & CPSR_AIF & mask; in cpsr_write()
10820 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { in cpsr_write()
10821 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { in cpsr_write()
10848 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
10855 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
10856 aarch32_mode_name(val), env->regs[15]); in cpsr_write()
10861 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); in cpsr_write()
10896 old_mode = env->uncached_cpsr & CPSR_M; in switch_mode()
10902 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
10903 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); in switch_mode()
10905 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
10906 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); in switch_mode()
10910 env->banked_r13[i] = env->regs[13]; in switch_mode()
10911 env->banked_spsr[i] = env->spsr; in switch_mode()
10914 env->regs[13] = env->banked_r13[i]; in switch_mode()
10915 env->spsr = env->banked_spsr[i]; in switch_mode()
10917 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; in switch_mode()
10918 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; in switch_mode()
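/*
 * Illustrative note (annotation): only r8-r12 need the memcpy swap
 * above, and only when FIQ mode is entered or left; r13/r14 (and SPSR)
 * are banked per mode through banked_r13[]/banked_r14[], so e.g. a
 * USR -> IRQ switch leaves r8-r12 alone and only swaps sp/lr.
 */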
10924 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
10926 * The below multi-dimensional table is used for looking up the target
10933 * | | | | | +--- Current EL
10934 * | | | | +------ Non-secure(0)/Secure(1)
10935 * | | | +--------- HCR mask override
10936 * | | +------------ SCR exec state control
10937 * | +--------------- SCR mask override
10938 * +------------------ 32-bit(0)/64-bit(1) EL3
10941 * 0-3 = EL0-EL3
10942 * -1 = Cannot occur
10956 * BIT IRQ IMO Non-secure Secure
10960 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
10961 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
10962 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
10963 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
10964 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
10965 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
10966 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
10967 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
10968 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
10969 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
10970 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
10971 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
10972 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
10973 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
10974 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
10975 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
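/*
 * Illustrative worked lookup (annotation): per the bit diagram above
 * the table is indexed as
 * target_el_table[is64][scr][rw][hcr][secure][cur_el]. For a physical
 * IRQ from non-secure EL0 with a 32-bit EL3 (is64 = 0), SCR.IRQ = 0
 * (scr = 0), SCR.RW = 0 (rw = 0) and HCR.IMO = 1 (hcr = 1), row
 * "0 0 0 1" gives { 2, 2, 2, -1 } for non-secure, so the EL0 entry
 * selects target EL2 (IMO routes IRQs to the hypervisor).
 */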
10994 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); in arm_phys_excp_target_el()
11008 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); in arm_phys_excp_target_el()
11012 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); in arm_phys_excp_target_el()
11016 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); in arm_phys_excp_target_el()
11027 /* Perform a table-lookup for the target EL given the current state */ in arm_phys_excp_target_el()
11037 int idx = cs->exception_index; in arm_log_exception()
11079 idx, exc, cs->cpu_index); in arm_log_exception()
11091 uint32_t mode = env->uncached_cpsr & CPSR_M; in aarch64_sync_32_to_64()
11095 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
11099 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. in aarch64_sync_32_to_64()
11104 env->xregs[i] = env->usr_regs[i - 8]; in aarch64_sync_32_to_64()
11108 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
11113 * Registers x13-x23 are the various mode SP and FP registers. Registers in aarch64_sync_32_to_64()
11118 env->xregs[13] = env->regs[13]; in aarch64_sync_32_to_64()
11119 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
11121 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
11124 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
11126 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
11131 env->xregs[15] = env->regs[13]; in aarch64_sync_32_to_64()
11133 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; in aarch64_sync_32_to_64()
11137 env->xregs[16] = env->regs[14]; in aarch64_sync_32_to_64()
11138 env->xregs[17] = env->regs[13]; in aarch64_sync_32_to_64()
11140 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
11141 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
11145 env->xregs[18] = env->regs[14]; in aarch64_sync_32_to_64()
11146 env->xregs[19] = env->regs[13]; in aarch64_sync_32_to_64()
11148 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
11149 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
11153 env->xregs[20] = env->regs[14]; in aarch64_sync_32_to_64()
11154 env->xregs[21] = env->regs[13]; in aarch64_sync_32_to_64()
11156 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
11157 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
11161 env->xregs[22] = env->regs[14]; in aarch64_sync_32_to_64()
11162 env->xregs[23] = env->regs[13]; in aarch64_sync_32_to_64()
11164 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
11165 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
11169 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ in aarch64_sync_32_to_64()
11170 * mode, then we can copy from r8-r14. Otherwise, we copy from the in aarch64_sync_32_to_64()
11171 * FIQ bank for r8-r14. in aarch64_sync_32_to_64()
11175 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ in aarch64_sync_32_to_64()
11179 env->xregs[i] = env->fiq_regs[i - 24]; in aarch64_sync_32_to_64()
11181 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
11182 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
11185 env->pc = env->regs[15]; in aarch64_sync_32_to_64()
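/*
 * Summary of the mapping implemented above (annotation):
 *   x0-x7   <- r0-r7            x8-x12  <- USR r8-r12
 *   x13/x14 <- USR sp/lr        x15     <- HYP sp
 *   x16/x17 <- IRQ lr/sp        x18/x19 <- SVC lr/sp
 *   x20/x21 <- ABT lr/sp        x22/x23 <- UND lr/sp
 *   x24-x30 <- FIQ r8-r14       pc      <- r15
 */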
11196 uint32_t mode = env->uncached_cpsr & CPSR_M; in aarch64_sync_64_to_32()
11200 env->regs[i] = env->xregs[i]; in aarch64_sync_64_to_32()
11204 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. in aarch64_sync_64_to_32()
11205 * Otherwise, we copy x8-x12 into the banked user regs. in aarch64_sync_64_to_32()
11209 env->usr_regs[i - 8] = env->xregs[i]; in aarch64_sync_64_to_32()
11213 env->regs[i] = env->xregs[i]; in aarch64_sync_64_to_32()
11224 env->regs[13] = env->xregs[13]; in aarch64_sync_64_to_32()
11225 env->regs[14] = env->xregs[14]; in aarch64_sync_64_to_32()
11227 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; in aarch64_sync_64_to_32()
11234 env->regs[14] = env->xregs[14]; in aarch64_sync_64_to_32()
11236 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; in aarch64_sync_64_to_32()
11241 env->regs[13] = env->xregs[15]; in aarch64_sync_64_to_32()
11243 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; in aarch64_sync_64_to_32()
11247 env->regs[14] = env->xregs[16]; in aarch64_sync_64_to_32()
11248 env->regs[13] = env->xregs[17]; in aarch64_sync_64_to_32()
11250 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; in aarch64_sync_64_to_32()
11251 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; in aarch64_sync_64_to_32()
11255 env->regs[14] = env->xregs[18]; in aarch64_sync_64_to_32()
11256 env->regs[13] = env->xregs[19]; in aarch64_sync_64_to_32()
11258 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; in aarch64_sync_64_to_32()
11259 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; in aarch64_sync_64_to_32()
11263 env->regs[14] = env->xregs[20]; in aarch64_sync_64_to_32()
11264 env->regs[13] = env->xregs[21]; in aarch64_sync_64_to_32()
11266 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; in aarch64_sync_64_to_32()
11267 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; in aarch64_sync_64_to_32()
11271 env->regs[14] = env->xregs[22]; in aarch64_sync_64_to_32()
11272 env->regs[13] = env->xregs[23]; in aarch64_sync_64_to_32()
11274 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; in aarch64_sync_64_to_32()
11275 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; in aarch64_sync_64_to_32()
11279 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ in aarch64_sync_64_to_32()
11280 * mode, then we can copy to r8-r14. Otherwise, we copy to the in aarch64_sync_64_to_32()
11281 * FIQ bank for r8-r14. in aarch64_sync_64_to_32()
11285 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ in aarch64_sync_64_to_32()
11289 env->fiq_regs[i - 24] = env->xregs[i]; in aarch64_sync_64_to_32()
11291 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; in aarch64_sync_64_to_32()
11292 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; in aarch64_sync_64_to_32()
11295 env->regs[15] = env->pc; in aarch64_sync_64_to_32()
11309 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. in take_aarch32_exception()
11311 env->pstate &= ~PSTATE_SS; in take_aarch32_exception()
11312 env->spsr = cpsr_read(env); in take_aarch32_exception()
11314 env->condexec_bits = 0; in take_aarch32_exception()
11316 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; in take_aarch32_exception()
11322 env->uncached_cpsr &= ~CPSR_E; in take_aarch32_exception()
11323 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { in take_aarch32_exception()
11324 env->uncached_cpsr |= CPSR_E; in take_aarch32_exception()
11327 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); in take_aarch32_exception()
11328 env->daif |= mask; in take_aarch32_exception()
11331 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) { in take_aarch32_exception()
11332 env->uncached_cpsr |= CPSR_SSBS; in take_aarch32_exception()
11334 env->uncached_cpsr &= ~CPSR_SSBS; in take_aarch32_exception()
11339 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; in take_aarch32_exception()
11340 env->elr_el[2] = env->regs[15]; in take_aarch32_exception()
11347 /* ... the target is EL3, from non-secure state. */ in take_aarch32_exception()
11348 env->uncached_cpsr &= ~CPSR_PAN; in take_aarch32_exception()
11355 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { in take_aarch32_exception()
11356 env->uncached_cpsr |= CPSR_PAN; in take_aarch32_exception()
11366 env->thumb = in take_aarch32_exception()
11369 env->regs[14] = env->regs[15] + offset; in take_aarch32_exception()
11371 env->regs[15] = newpc; in take_aarch32_exception()
11393 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch32_hyp()
11395 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch32_hyp()
11405 env->cp15.ifar_s = env->exception.vaddress; in arm_cpu_do_interrupt_aarch32_hyp()
11407 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32_hyp()
11411 env->cp15.dfar_s = env->exception.vaddress; in arm_cpu_do_interrupt_aarch32_hyp()
11413 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32_hyp()
11429 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch32_hyp()
11432 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { in arm_cpu_do_interrupt_aarch32_hyp()
11435 * QEMU syndrome values are v8-style. v7 has the IL bit in arm_cpu_do_interrupt_aarch32_hyp()
11439 if (cs->exception_index == EXCP_PREFETCH_ABORT || in arm_cpu_do_interrupt_aarch32_hyp()
11440 (cs->exception_index == EXCP_DATA_ABORT && in arm_cpu_do_interrupt_aarch32_hyp()
11441 !(env->exception.syndrome & ARM_EL_ISV)) || in arm_cpu_do_interrupt_aarch32_hyp()
11442 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { in arm_cpu_do_interrupt_aarch32_hyp()
11443 env->exception.syndrome &= ~ARM_EL_IL; in arm_cpu_do_interrupt_aarch32_hyp()
11446 env->cp15.esr_el[2] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch32_hyp()
11454 if (!(env->cp15.scr_el3 & SCR_EA)) { in arm_cpu_do_interrupt_aarch32_hyp()
11457 if (!(env->cp15.scr_el3 & SCR_IRQ)) { in arm_cpu_do_interrupt_aarch32_hyp()
11460 if (!(env->cp15.scr_el3 & SCR_FIQ)) { in arm_cpu_do_interrupt_aarch32_hyp()
11464 addr += env->cp15.hvbar; in arm_cpu_do_interrupt_aarch32_hyp()
11472 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch32()
11480 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch32()
11501 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); in arm_cpu_do_interrupt_aarch32()
11504 if (env->exception.target_el == 2) { in arm_cpu_do_interrupt_aarch32()
11506 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch32()
11511 env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2, in arm_cpu_do_interrupt_aarch32()
11515 env->exception.syndrome = syn_set_ec(env->exception.syndrome, in arm_cpu_do_interrupt_aarch32()
11519 env->exception.syndrome = syn_set_ec(env->exception.syndrome, in arm_cpu_do_interrupt_aarch32()
11527 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch32()
11532 if (env->thumb) { in arm_cpu_do_interrupt_aarch32()
11548 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
11549 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
11551 env->exception.fsr, (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
11558 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
11559 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
11561 env->exception.fsr, in arm_cpu_do_interrupt_aarch32()
11562 (uint32_t)env->exception.vaddress); in arm_cpu_do_interrupt_aarch32()
11574 if (env->cp15.scr_el3 & SCR_IRQ) { in arm_cpu_do_interrupt_aarch32()
11585 if (env->cp15.scr_el3 & SCR_FIQ) { in arm_cpu_do_interrupt_aarch32()
11615 env->exception.fsr = arm_fi_to_lfsc(&fi); in arm_cpu_do_interrupt_aarch32()
11617 env->exception.fsr = arm_fi_to_sfsc(&fi); in arm_cpu_do_interrupt_aarch32()
11619 env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000; in arm_cpu_do_interrupt_aarch32()
11620 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
11622 env->exception.fsr); in arm_cpu_do_interrupt_aarch32()
11640 if (env->thumb) { in arm_cpu_do_interrupt_aarch32()
11647 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch32()
11652 addr += env->cp15.mvbar; in arm_cpu_do_interrupt_aarch32()
11660 * This register is only followed in non-monitor mode, and is banked. in arm_cpu_do_interrupt_aarch32()
11666 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { in arm_cpu_do_interrupt_aarch32()
11667 env->cp15.scr_el3 &= ~SCR_NS; in arm_cpu_do_interrupt_aarch32()
11680 int mode = env->uncached_cpsr & CPSR_M; in aarch64_regnum()
11743 ret |= env->pstate & PSTATE_SS; in cpsr_read_for_spsr_elx()
11777 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt_aarch64()
11778 unsigned int new_el = env->exception.target_el; in arm_cpu_do_interrupt_aarch64()
11779 target_ulong addr = env->cp15.vbar_el[new_el]; in arm_cpu_do_interrupt_aarch64()
11803 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; in arm_cpu_do_interrupt_aarch64()
11828 switch (cs->exception_index) { in arm_cpu_do_interrupt_aarch64()
11831 env->cp15.mfar_el3); in arm_cpu_do_interrupt_aarch64()
11839 if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) && in arm_cpu_do_interrupt_aarch64()
11840 syndrome_is_sync_extabt(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch64()
11843 env->cp15.far_el[new_el] = env->exception.vaddress; in arm_cpu_do_interrupt_aarch64()
11845 env->cp15.far_el[new_el]); in arm_cpu_do_interrupt_aarch64()
11853 switch (syn_get_ec(env->exception.syndrome)) { in arm_cpu_do_interrupt_aarch64()
11861 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); in arm_cpu_do_interrupt_aarch64()
11870 * number. Notice that we read a 4-bit AArch32 register number and in arm_cpu_do_interrupt_aarch64()
11871 * write back a 5-bit AArch64 one. in arm_cpu_do_interrupt_aarch64()
11873 rt = extract32(env->exception.syndrome, 5, 4); in arm_cpu_do_interrupt_aarch64()
11875 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
11881 rt = extract32(env->exception.syndrome, 5, 4); in arm_cpu_do_interrupt_aarch64()
11883 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
11885 rt = extract32(env->exception.syndrome, 10, 4); in arm_cpu_do_interrupt_aarch64()
11887 env->exception.syndrome = deposit32(env->exception.syndrome, in arm_cpu_do_interrupt_aarch64()
11891 env->cp15.esr_el[new_el] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch64()
11907 env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff); in arm_cpu_do_interrupt_aarch64()
11908 env->cp15.esr_el[new_el] = env->exception.syndrome; in arm_cpu_do_interrupt_aarch64()
11911 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); in arm_cpu_do_interrupt_aarch64()
11917 env->elr_el[new_el] = env->pc; in arm_cpu_do_interrupt_aarch64()
11934 env->elr_el[new_el] = env->regs[15]; in arm_cpu_do_interrupt_aarch64()
11938 env->condexec_bits = 0; in arm_cpu_do_interrupt_aarch64()
11940 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; in arm_cpu_do_interrupt_aarch64()
11944 env->elr_el[new_el]); in arm_cpu_do_interrupt_aarch64()
11960 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { in arm_cpu_do_interrupt_aarch64()
11971 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { in arm_cpu_do_interrupt_aarch64()
11979 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) { in arm_cpu_do_interrupt_aarch64()
11987 env->aarch64 = true; in arm_cpu_do_interrupt_aarch64()
11994 env->pc = addr; in arm_cpu_do_interrupt_aarch64()
11997 new_el, env->pc, pstate_read(env)); in arm_cpu_do_interrupt_aarch64()
12011 CPUARMState *env = &cpu->env; in tcg_handle_semihosting()
12016 env->xregs[0]); in tcg_handle_semihosting()
12018 env->pc += 4; in tcg_handle_semihosting()
12022 env->regs[0]); in tcg_handle_semihosting()
12024 env->regs[15] += env->thumb ? 2 : 4; in tcg_handle_semihosting()
12032 * to the AArch64-entry or AArch32-entry function depending on the
12036 * and KVM to re-inject guest debug exceptions, and to
12037 * inject a Synchronous-External-Abort.
12042 CPUARMState *env = &cpu->env; in arm_cpu_do_interrupt()
12043 unsigned int new_el = env->exception.target_el; in arm_cpu_do_interrupt()
12051 && !excp_is_internal(cs->exception_index)) { in arm_cpu_do_interrupt()
12053 syn_get_ec(env->exception.syndrome), in arm_cpu_do_interrupt()
12054 env->exception.syndrome); in arm_cpu_do_interrupt()
12057 if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) { in arm_cpu_do_interrupt()
12069 if (cs->exception_index == EXCP_SEMIHOST) { in arm_cpu_do_interrupt()
12078 * cs->interrupt_request. in arm_cpu_do_interrupt()
12084 assert(!excp_is_internal(cs->exception_index)); in arm_cpu_do_interrupt()
12094 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; in arm_cpu_do_interrupt()
12116 return env->cp15.sctlr_el[el]; in arm_sctlr()
12250 tsz = extract32(tcr, 0, 6); in aa64_va_parameters()
12273 tsz = extract32(tcr, 0, 6); in aa64_va_parameters()
12280 tsz = extract32(tcr, 16, 6); in aa64_va_parameters()
12301 max_tsz = 48 - (gran == Gran64K); in aa64_va_parameters()
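/*
 * Illustrative note (annotation): TnSZ encodes an input region of
 * 64 - TnSZ virtual address bits, so the largest TnSZ accepted here,
 * 48 (47 for the 64K granule), corresponds to a minimum input region
 * of 16 (17) VA bits.
 */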
12386 /* Perform 16-bit signed saturating addition. */
12402 /* Perform 8-bit signed saturating addition. */
12418 /* Perform 16-bit signed saturating subtraction. */
12423 res = a - b; in sub16_sat()
12434 /* Perform 8-bit signed saturating subtraction. */
12439 res = a - b; in sub8_sat()
12472 return a - b; in sub16_usat()
12491 return a - b; in sub8_usat()
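/*
 * Illustrative worked values (annotation): the signed helpers clamp to
 * [-0x8000, 0x7fff] (16-bit) or [-0x80, 0x7f] (8-bit), so e.g.
 * sub16_sat(0x8000, 0x0001) stays at 0x8000 (-32768) instead of
 * wrapping; the unsigned subtracting helpers clamp at zero, e.g.
 * sub16_usat(1, 2) -> 0.
 */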
12524 #define SUB16(a, b, n) SARITH16(a, b, n, -)
12526 #define SUB8(a, b, n) SARITH8(a, b, n, -)
12551 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12559 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12574 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12578 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12587 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12591 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
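/*
 * Illustrative worked value (annotation): the halving macros widen to
 * 32 bits before subtracting, so the intermediate cannot overflow,
 * e.g. for signed 16-bit lanes a = 0x7fff, b = 0x8000:
 * (32767 - (-32768)) >> 1 = 32767 = 0x7fff.
 */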
12599 return a - b; in do_usad()
12601 return b - a; in do_usad()
12663 * Return the exception level to which FP-disabled exceptions should
12681 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { in fp_exception_el()
12685 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { in fp_exception_el()
12686 if (!extract32(env->v7m.nsacr, 10, 1)) { in fp_exception_el()
12705 int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN); in fp_exception_el()
12728 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode in fp_exception_el()
12729 * to control non-secure access to the FPU. It doesn't have any in fp_exception_el()
12734 if (!extract32(env->cp15.nsacr, 10, 1)) { in fp_exception_el()
12746 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) { in fp_exception_el()
12757 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) { in fp_exception_el()
12764 if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) { in fp_exception_el()
12812 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); in arm_mmu_idx_el()
12815 /* See ARM pseudo-function ELIsInHost. */ in arm_mmu_idx_el()
12873 * NOTE: if you change this logic, the "recalculate s->mve_no_pred" in mve_no_pred()
12876 * We do not include the effect of the ECI bits here -- they are in mve_no_pred()
12884 if (env->v7m.vpr) { in mve_no_pred()
12887 if (env->v7m.ltpsize < 4) { in mve_no_pred()
12899 flags = env->hflags; in cpu_get_tb_cpu_state()
12902 *pc = env->pc; in cpu_get_tb_cpu_state()
12904 DP_TBFLAG_A64(flags, BTYPE, env->btype); in cpu_get_tb_cpu_state()
12907 *pc = env->regs[15]; in cpu_get_tb_cpu_state()
12911 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) in cpu_get_tb_cpu_state()
12912 != env->v7m.secure) { in cpu_get_tb_cpu_state()
12916 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && in cpu_get_tb_cpu_state()
12917 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || in cpu_get_tb_cpu_state()
12918 (env->v7m.secure && in cpu_get_tb_cpu_state()
12919 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { in cpu_get_tb_cpu_state()
12928 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; in cpu_get_tb_cpu_state()
12929 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { in cpu_get_tb_cpu_state()
12939 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. in cpu_get_tb_cpu_state()
12942 DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); in cpu_get_tb_cpu_state()
12944 DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); in cpu_get_tb_cpu_state()
12945 DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); in cpu_get_tb_cpu_state()
12947 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { in cpu_get_tb_cpu_state()
12952 DP_TBFLAG_AM32(flags, THUMB, env->thumb); in cpu_get_tb_cpu_state()
12953 DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); in cpu_get_tb_cpu_state()
12961 * 1 0 Active-pending in cpu_get_tb_cpu_state()
12962 * 1 1 Active-not-pending in cpu_get_tb_cpu_state()
12965 if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { in cpu_get_tb_cpu_state()
12994 assert(vq <= env_archcpu(env)->sve_max_vq); in aarch64_sve_narrow_vq()
12998 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); in aarch64_sve_narrow_vq()
13004 pmask = ~(-1ULL << (16 * (vq & 3))); in aarch64_sve_narrow_vq()
13008 env->vfp.pregs[i].p[j] &= pmask; in aarch64_sve_narrow_vq()
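/*
 * Illustrative worked value (annotation): SVE predicates hold one bit
 * per vector byte, i.e. 16 predicate bits per 128-bit quadword, so for
 * vq = 1 the mask above is pmask = ~(-1ULL << 16) = 0xffff, keeping
 * only the first quadword's worth of each 64-bit predicate word.
 */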
13057 sm = FIELD_EX64(env->svcr, SVCR, SM); in aarch64_sve_change_el()
13069 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). in aarch64_sve_change_el()
13070 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition in aarch64_sve_change_el()
13071 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that in aarch64_sve_change_el()
13073 * vq0->vq0 transition between EL0->EL1. in aarch64_sve_change_el()
13094 return arm_secure_to_space(env->v7m.secure); in arm_security_space()
13099 * defined, in which case QEMU defaults to non-secure. in arm_security_space()
13107 if (extract32(env->pstate, 2, 2) == 3) { in arm_security_space()
13115 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { in arm_security_space()
13129 * defined, in which case QEMU defaults to non-secure. in arm_security_space_below_el3()
13136 * Note NSE cannot be set without RME, and NSE & !NS is Reserved. in arm_security_space_below_el3()
13137 * Ignoring NSE when !NS retains consistency without having to in arm_security_space_below_el3()
13140 if (!(env->cp15.scr_el3 & SCR_NS)) { in arm_security_space_below_el3()
13142 } else if (env->cp15.scr_el3 & SCR_NSE) { in arm_security_space_below_el3()