Lines matching full-text tokens "no", "cs", "readback"

6  * SPDX-License-Identifier: GPL-2.0-or-later
14 #include "cpu-features.h"
15 #include "exec/page-protection.h"
16 #include "exec/mmap-lock.h"
17 #include "qemu/main-loop.h"
20 #include "qemu/qemu-print.h"
22 #include "exec/translation-block.h"
24 #include "system/cpu-timers.h"
29 #include "qemu/guest-random.h"
33 #include "semihosting/common-semi.h"
39 #include "exec/helper-proto.h.inc"
45 assert(ri->fieldoffset); in raw_read()
55 assert(ri->fieldoffset); in raw_write()
65 return (char *)env + ri->fieldoffset; in raw_ptr()
71 if (ri->type & ARM_CP_CONST) { in read_raw_cp_reg()
72 return ri->resetvalue; in read_raw_cp_reg()
73 } else if (ri->raw_readfn) { in read_raw_cp_reg()
74 return ri->raw_readfn(env, ri); in read_raw_cp_reg()
75 } else if (ri->readfn) { in read_raw_cp_reg()
76 return ri->readfn(env, ri); in read_raw_cp_reg()
87 * Note that constant registers are treated as write-ignored; the in write_raw_cp_reg()
88 * caller should check for success by whether a readback gives the in write_raw_cp_reg()
91 if (ri->type & ARM_CP_CONST) { in write_raw_cp_reg()
93 } else if (ri->raw_writefn) { in write_raw_cp_reg()
94 ri->raw_writefn(env, ri, v); in write_raw_cp_reg()
95 } else if (ri->writefn) { in write_raw_cp_reg()
96 ri->writefn(env, ri, v); in write_raw_cp_reg()
116 if ((ri->type & ARM_CP_CONST) || in raw_accessors_invalid()
117 ri->fieldoffset || in raw_accessors_invalid()
118 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { in raw_accessors_invalid()
126 /* Write the coprocessor state from cpu->env to the (index,value) list. */ in write_cpustate_to_list()
130 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_cpustate_to_list()
131 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_cpustate_to_list()
135 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_cpustate_to_list()
140 if (ri->type & ARM_CP_NO_RAW) { in write_cpustate_to_list()
144 newval = read_raw_cp_reg(&cpu->env, ri); in write_cpustate_to_list()
147 * Only sync if the previous list->cpustate sync succeeded. in write_cpustate_to_list()
152 uint64_t oldval = cpu->cpreg_values[i]; in write_cpustate_to_list()
158 write_raw_cp_reg(&cpu->env, ri, oldval); in write_cpustate_to_list()
159 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { in write_cpustate_to_list()
163 write_raw_cp_reg(&cpu->env, ri, newval); in write_cpustate_to_list()
165 cpu->cpreg_values[i] = newval; in write_cpustate_to_list()
175 for (i = 0; i < cpu->cpreg_array_len; i++) { in write_list_to_cpustate()
176 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); in write_list_to_cpustate()
177 uint64_t v = cpu->cpreg_values[i]; in write_list_to_cpustate()
180 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in write_list_to_cpustate()
185 if (ri->type & ARM_CP_NO_RAW) { in write_list_to_cpustate()
190 * (to catch read-only registers and partially read-only in write_list_to_cpustate()
193 write_raw_cp_reg(&cpu->env, ri, v); in write_list_to_cpustate()
194 if (read_raw_cp_reg(&cpu->env, ri) != v) { in write_list_to_cpustate()
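The write/readback pattern visible in both sync functions above exists because write_raw_cp_reg() returns void: constant and read-only registers silently ignore writes, so the only way to detect failure is to read the value back. A minimal self-contained sketch of that pattern, with hypothetical reg_write()/reg_read() helpers standing in for the raw cp-reg accessors:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t reg_state;
    static bool reg_is_const;           /* models ARM_CP_CONST */

    static void reg_write(uint64_t v)
    {
        if (!reg_is_const) {            /* constant regs are write-ignored */
            reg_state = v;
        }
    }

    static uint64_t reg_read(void)
    {
        return reg_is_const ? 42 : reg_state;  /* constants read a fixed value */
    }

    static bool sync_one_value(uint64_t v)
    {
        reg_write(v);
        return reg_read() == v;         /* false: the register rejected it */
    }

    int main(void)
    {
        reg_is_const = true;
        return sync_one_value(7) ? 1 : 0;   /* const reg ignores the write: exit 0 */
    }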
205 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); in add_cpreg_to_list()
207 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in add_cpreg_to_list()
208 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); in add_cpreg_to_list()
210 cpu->cpreg_array_len++; in add_cpreg_to_list()
219 ri = g_hash_table_lookup(cpu->cp_regs, key); in count_cpreg()
221 if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { in count_cpreg()
222 cpu->cpreg_array_len++; in count_cpreg()
235 return -1; in cpreg_key_compare()
249 keys = g_hash_table_get_keys(cpu->cp_regs); in init_cpreg_list()
252 cpu->cpreg_array_len = 0; in init_cpreg_list()
256 arraylen = cpu->cpreg_array_len; in init_cpreg_list()
257 cpu->cpreg_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
258 cpu->cpreg_values = g_new(uint64_t, arraylen); in init_cpreg_list()
259 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); in init_cpreg_list()
260 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); in init_cpreg_list()
261 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; in init_cpreg_list()
262 cpu->cpreg_array_len = 0; in init_cpreg_list()
266 assert(cpu->cpreg_array_len == arraylen); in init_cpreg_list()
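init_cpreg_list() sizes and fills the arrays with a classic two-pass scheme: count_cpreg() bumps cpreg_array_len once per syncable register, the arrays are allocated at that size, then add_cpreg_to_list() re-counts while filling, and the closing assert checks that both passes agreed. A condensed sketch of that shape (reg_is_syncable() is a hypothetical stand-in for the ARM_CP_NO_RAW/ARM_CP_ALIAS filter):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    static bool reg_is_syncable(uint32_t key)
    {
        return (key & 1) == 0;          /* hypothetical filter */
    }

    int main(void)
    {
        const uint32_t keys[] = { 2, 3, 4, 7, 8 };
        const size_t nkeys = sizeof(keys) / sizeof(keys[0]);
        size_t arraylen = 0, fill = 0;

        for (size_t i = 0; i < nkeys; i++) {        /* pass 1: count */
            arraylen += reg_is_syncable(keys[i]);
        }
        uint64_t *indexes = calloc(arraylen, sizeof(*indexes));

        for (size_t i = 0; i < nkeys; i++) {        /* pass 2: fill */
            if (reg_is_syncable(keys[i])) {
                indexes[fill++] = keys[i];
            }
        }
        assert(fill == arraylen);                   /* both passes must agree */
        free(indexes);
        return 0;
    }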
277 return env->pstate & PSTATE_PAN; in arm_pan_enabled()
279 return env->uncached_cpsr & CPSR_PAN; in arm_pan_enabled()
298 * Some secure-only AArch32 registers trap to EL3 if used from
299 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
311 if (env->cp15.scr_el3 & SCR_EEL2) { in access_trap_aa32s_el1()
385 * For PMSA it is purely a process ID and no action is needed. in contextidr_write()
411 * Define the secure and non-secure FCSE identifier CP registers
412 * separately because there is no secure bank in V8 (no _EL3). This allows
413 * the secure register to be properly reset and migrated. There is also no
414 * v8 EL1 version of the register so the non-secure instance stands alone.
427 * Define the secure and non-secure context identifier CP registers
428 * separately because there is no secure bank in V8 (no _EL3). This allows
430 * non-secure case, the 32-bit register will have reset and migration
431 * disabled during registration as it is handled by the 64-bit instance.
481 * Not all pre-v6 cores implemented this WFI, so this is slightly
482 * over-broad.
490 * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
511 * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
550 * registers (D0-D31). in cpacr_write()
553 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ in cpacr_write()
561 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_write()
565 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_write()
567 value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); in cpacr_write()
570 env->cp15.cpacr_el1 = value; in cpacr_write()
576 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 in cpacr_read()
579 uint64_t value = env->cp15.cpacr_el1; in cpacr_read()
582 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cpacr_read()
604 FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { in cpacr_access()
608 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cpacr_access()
621 FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { in cptr_access()
634 * We need to break the TB after ISB to execute self-modifying code
679 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 in vbar_write()
767 /* Clear all-context RES0 bits. */ in scr_write()
769 changed = env->cp15.scr_el3 ^ value; in scr_write()
770 env->cp15.scr_el3 = value; in scr_write()
790 * scr_write will set the RES1 bits on an AArch64-only CPU. in scr_reset()
791 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. in scr_reset()
817 ri->secure & ARM_CP_SECSTATE_S); in ccsidr_read()
819 return cpu->ccsidr[index]; in ccsidr_read()
830 CPUState *cs = env_cpu(env); in isr_read()
836 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { in isr_read()
839 if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { in isr_read()
844 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { in isr_read()
848 if (cs->interrupt_request & CPU_INTERRUPT_NMI) { in isr_read()
855 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { in isr_read()
858 if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { in isr_read()
863 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { in isr_read()
869 if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { in isr_read()
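isr_read() builds ISR_EL1 from the pending bits in cs->interrupt_request, with HCR_EL2.IMO/FMO/AMO deciding whether the virtual or the physical line is the one EL1 gets to see. A reduced sketch of the IRQ leg only, under that reading (struct pending is a hypothetical stand-in for the CPU_INTERRUPT_* flags):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CPSR_I (1u << 7)    /* ISR_EL1.I sits at the same bit position */

    struct pending { bool hard; bool virq; };

    static uint32_t isr_irq_bit(struct pending p, bool hcr_imo)
    {
        /* With IMO set, EL1 observes the virtual IRQ line, else the physical. */
        return (hcr_imo ? p.virq : p.hard) ? CPSR_I : 0;
    }

    int main(void)
    {
        struct pending p = { .hard = true, .virq = false };

        assert(isr_irq_bit(p, false) == CPSR_I);  /* physical IRQ visible */
        assert(isr_irq_bit(p, true) == 0);        /* IMO: only vIRQ counts */
        return 0;
    }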
942 * MAIR can just read-as-written because we don't implement caches
957 * For non-long-descriptor page tables these are PRRR and NMRR;
958 * regardless they still act as reads-as-written for QEMU.
961 * MAIR0/1 are defined separately from their 64-bit counterpart which
987 env->teecr = value; in teecr_write()
998 (env->cp15.hstr_el2 & HSTR_TTEE)) { in teecr_access()
1007 if (arm_current_el(env) == 0 && (env->teecr & 1)) { in teehbr_access()
1063 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; in arm_gt_cntfrq_reset()
1083 cntkctl = env->cp15.cnthctl_el2; in gt_cntfrq_access()
1085 cntkctl = env->cp15.c14_cntkctl; in gt_cntfrq_access()
1092 if (!isread && ri->state == ARM_CP_STATE_AA32 && in gt_cntfrq_access()
1094 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ in gt_cntfrq_access()
1121 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) in gt_counter_access()
1126 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { in gt_counter_access()
1134 ? !extract32(env->cp15.cnthctl_el2, 10, 1) in gt_counter_access()
1135 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { in gt_counter_access()
1139 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) { in gt_counter_access()
1159 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) in gt_timer_access()
1167 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { in gt_timer_access()
1176 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { in gt_timer_access()
1181 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { in gt_timer_access()
1187 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) { in gt_timer_access()
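All of these trap checks are plain fixed-width field probes: QEMU's extract32(x, start, len) returns (x >> start) & (2^len - 1), so extract32(c14_cntkctl, timeridx, 1) reads CNTKCTL.EL0PCTEN (bit 0) for the physical timer and EL0VCTEN (bit 1) for the virtual one. A local reimplementation with that worked example (valid for lengths below 32; the real helper asserts the full range):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & ((1u << length) - 1);
    }

    int main(void)
    {
        uint32_t cntkctl = 1u << 1;            /* EL0VCTEN set, EL0PCTEN clear */

        assert(extract32(cntkctl, 0, 1) == 0); /* physical counter: trapped */
        assert(extract32(cntkctl, 1, 1) == 1); /* virtual counter: allowed */
        return 0;
    }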
1239 if (!(env->cp15.scr_el3 & SCR_ST)) { in gt_stimer_access()
1282 if (env->cp15.scr_el3 & SCR_EEL2) { in gt_sel2timer_access()
1301 CPUARMState *env = &cpu->env; in gt_update_irq()
1302 uint64_t cnthctl = env->cp15.cnthctl_el2; in gt_update_irq()
1305 int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4; in gt_update_irq()
1317 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); in gt_update_irq()
1334 if ((env->cp15.scr_el3 & SCR_ECVEN) && in gt_phys_raw_cnt_offset()
1335 FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) && in gt_phys_raw_cnt_offset()
1338 return env->cp15.cntpoff_el2; in gt_phys_raw_cnt_offset()
1359 return env->cp15.cntvoff_el2; in gt_indirect_access_timer_offset()
1378 * This isn't exactly the same as the indirect-access offset, in gt_direct_access_timer_offset()
1407 return env->cp15.cntvoff_el2; in gt_direct_access_timer_offset()
1421 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; in gt_recalc_timer()
1423 if (gt->ctl & 1) { in gt_recalc_timer()
1428 uint64_t offset = gt_indirect_access_timer_offset(&cpu->env, timeridx); in gt_recalc_timer()
1429 uint64_t count = gt_get_countervalue(&cpu->env); in gt_recalc_timer()
1431 int istatus = count - offset >= gt->cval; in gt_recalc_timer()
1434 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); in gt_recalc_timer()
1438 * Next transition is when (count - offset) rolls back over to 0. in gt_recalc_timer()
1451 * Next transition is when (count - offset) == cval, i.e. in gt_recalc_timer()
1456 if (uadd64_overflow(gt->cval, offset, &nexttick)) { in gt_recalc_timer()
1462 * signed-64-bit range of a QEMUTimer -- in this case we just in gt_recalc_timer()
1467 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); in gt_recalc_timer()
1469 timer_mod(cpu->gt_timer[timeridx], nexttick); in gt_recalc_timer()
1474 gt->ctl &= ~4; in gt_recalc_timer()
1475 timer_del(cpu->gt_timer[timeridx]); in gt_recalc_timer()
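The recalculation above is all unsigned wraparound arithmetic: the output is asserted while (count - offset) >= cval, so the next change is either when (count - offset) wraps back to 0, or when count reaches cval + offset, and uadd64_overflow() catches the sum wrapping 64 bits. A minimal sketch of just that state/next-transition math, assuming a free-running 64-bit counter (the listing additionally converts ticks to a QEMUTimer ns deadline):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Portable stand-in for QEMU's uadd64_overflow(). */
    static bool uadd64_ovf(uint64_t a, uint64_t b, uint64_t *res)
    {
        *res = a + b;
        return *res < a;
    }

    /* Returns ISTATUS; *next is the counter value of the next change. */
    static bool timer_state(uint64_t count, uint64_t offset, uint64_t cval,
                            uint64_t *next)
    {
        bool istatus = count - offset >= cval;

        if (istatus) {
            *next = offset;             /* asserted until count-offset wraps */
        } else if (uadd64_ovf(cval, offset, next)) {
            *next = UINT64_MAX;         /* cval+offset wrapped: clamp far out */
        }
        return istatus;
    }

    int main(void)
    {
        uint64_t next;

        /* count-offset = 0x80 < cval: fires when count reaches offset+cval. */
        assert(!timer_state(0x1080, 0x1000, 0x200, &next));
        assert(next == 0x1200);
        return 0;
    }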
1486 timer_del(cpu->gt_timer[timeridx]); in gt_timer_reset()
1492 return gt_get_countervalue(env) - offset; in gt_cnt_read()
1498 return gt_get_countervalue(env) - offset; in gt_virt_cnt_read()
1506 env->cp15.c14_timer[timeridx].cval = value; in gt_cval_write()
1512 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - in do_tval_read()
1513 (gt_get_countervalue(env) - offset)); in do_tval_read()
1528 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + in do_tval_write()
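TVAL is the signed 32-bit distance CVAL - (count - offset), which makes do_tval_read() and do_tval_write() exact inverses: the read truncates the difference to 32 bits, and the write (whose sign-extension tail is elided above) rebuilds CVAL by sign-extending the written value onto the current (count - offset). A worked round-trip:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t tval_read(uint64_t cval, uint64_t count, uint64_t offset)
    {
        return (uint32_t)(cval - (count - offset));
    }

    static uint64_t tval_write(uint32_t tval, uint64_t count, uint64_t offset)
    {
        /* (int32_t) sign-extends: a negative TVAL puts CVAL in the past. */
        return count - offset + (int32_t)tval;
    }

    int main(void)
    {
        uint64_t count = 0x123456789abcull, offset = 0x1000;
        uint64_t cval = tval_write(100, count, offset);  /* fire in 100 ticks */

        assert(tval_read(cval, count, offset) == 100);
        assert(tval_read(cval, count + 40, offset) == 60);
        return 0;
    }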
1547 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; in gt_ctl_write()
1550 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); in gt_ctl_write()
1620 return env->cp15.c14_timer[timeridx].cval; in gt_phys_redir_cval_read()
1648 return env->cp15.c14_timer[timeridx].ctl; in gt_phys_redir_ctl_read()
1675 * to re-detect that it's this register. in gt_virt_tval_read()
1678 return do_tval_read(env, GTIMER_VIRT, env->cp15.cntvoff_el2); in gt_virt_tval_read()
1685 do_tval_write(env, GTIMER_VIRT, value, env->cp15.cntvoff_el2); in gt_virt_tval_write()
1698 uint32_t oldval = env->cp15.cnthctl_el2; in gt_cnthctl_write()
1751 return env->cp15.c14_timer[timeridx].cval; in gt_virt_redir_cval_read()
1779 return env->cp15.c14_timer[timeridx].ctl; in gt_virt_redir_ctl_read()
1980 * Note that CNTFRQ is purely reads-as-written for the benefit
2002 /* per-timer control */
2148 * Secure timer -- this is actually restricted to only EL3
2149 * and configurably Secure-EL1 via the accessfn.
2178 * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
2209 !(env->cp15.scr_el3 & SCR_ECVEN)) { in gt_cntpoff_access()
2236 * In user-mode most of the generic timer registers are inaccessible
2245 * Currently we have no support for QEMUTimer in linux-user so we in gt_virt_cnt_read()
2255 .access = PL0_R /* no PL1_RW in linux-user */,
2324 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); in pmsav5_data_ap_write()
2329 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); in pmsav5_data_ap_read()
2335 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); in pmsav5_insn_ap_write()
2340 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); in pmsav5_insn_ap_read()
2351 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_read()
2365 u32p += env->pmsav7.rnr[M_REG_NS]; in pmsav7_write()
2366 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav7_write()
2374 uint32_t nrgs = cpu->pmsav7_dregion; in pmsav7_rgnr_write()
2391 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prbar_write()
2392 env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prbar_write()
2397 return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prbar_read()
2405 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in prlar_write()
2406 env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; in prlar_write()
2411 return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; in prlar_read()
2423 if (value >= cpu->pmsav7_dregion) { in prselr_write()
2427 env->pmsav7.rnr[M_REG_NS] = value; in prselr_write()
2435 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprbar_write()
2436 env->pmsav8.hprbar[env->pmsav8.hprselr] = value; in hprbar_write()
2441 return env->pmsav8.hprbar[env->pmsav8.hprselr]; in hprbar_read()
2449 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprlar_write()
2450 env->pmsav8.hprlar[env->pmsav8.hprselr] = value; in hprlar_write()
2455 return env->pmsav8.hprlar[env->pmsav8.hprselr]; in hprlar_read()
2466 int rmax = MIN(cpu->pmsav8r_hdregion, 32); in hprenr_write()
2469 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in hprenr_write()
2474 env->pmsav8.hprlar[n] = deposit32( in hprenr_write()
2475 env->pmsav8.hprlar[n], 0, 1, bit); in hprenr_write()
2486 for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { in hprenr_read()
2487 if (env->pmsav8.hprlar[n] & 0x1) { in hprenr_read()
2503 if (value >= cpu->pmsav8r_hdregion) { in hprselr_write()
2507 env->pmsav8.hprselr = value; in hprselr_write()
2514 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_write()
2515 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_write()
2517 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ in pmsav8r_regn_write()
2519 if (ri->opc1 & 4) { in pmsav8r_regn_write()
2520 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_write()
2523 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
2524 env->pmsav8.hprlar[index] = value; in pmsav8r_regn_write()
2526 env->pmsav8.hprbar[index] = value; in pmsav8r_regn_write()
2529 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_write()
2532 if (ri->opc2 & 0x1) { in pmsav8r_regn_write()
2533 env->pmsav8.rlar[M_REG_NS][index] = value; in pmsav8r_regn_write()
2535 env->pmsav8.rbar[M_REG_NS][index] = value; in pmsav8r_regn_write()
2543 uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | in pmsav8r_regn_read()
2544 (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); in pmsav8r_regn_read()
2546 if (ri->opc1 & 4) { in pmsav8r_regn_read()
2547 if (index >= cpu->pmsav8r_hdregion) { in pmsav8r_regn_read()
2550 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
2551 return env->pmsav8.hprlar[index]; in pmsav8r_regn_read()
2553 return env->pmsav8.hprbar[index]; in pmsav8r_regn_read()
2556 if (index >= cpu->pmsav7_dregion) { in pmsav8r_regn_read()
2559 if (ri->opc2 & 0x1) { in pmsav8r_regn_read()
2560 return env->pmsav8.rlar[M_REG_NS][index]; in pmsav8r_regn_read()
2562 return env->pmsav8.rbar[M_REG_NS][index]; in pmsav8r_regn_read()
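Both accessors above decode the MPU region number straight out of the encoding: opc0 bit 0 supplies index bit 4, crm bits [2:0] supply index bits [3:1], opc2 bit 2 supplies index bit 0, and opc2 bit 0 (checked separately in the listing) picks RLAR versus RBAR. A worked decode under that reading:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32(uint32_t v, int start, int len)
    {
        return (v >> start) & ((1u << len) - 1);
    }

    static uint8_t pmsav8r_index(uint32_t opc0, uint32_t crm, uint32_t opc2)
    {
        return (extract32(opc0, 0, 1) << 4)     /* index bit 4 */
             | (extract32(crm, 0, 3) << 1)      /* index bits 3..1 */
             | extract32(opc2, 2, 1);           /* index bit 0 */
    }

    int main(void)
    {
        /* crm = 0b101 and opc2 bit 2 set decode to region 11. */
        assert(pmsav8r_index(0, 5, 4) == 11);
        return 0;
    }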
2605 * because the PMSAv7 is also used by M-profile CPUs, which do
2689 * using Long-descriptor translation table format in vmsa_ttbcr_write()
2696 * Short-descriptor translation table format. in vmsa_ttbcr_write()
2727 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ in vmsa_ttbr_write()
2759 CPUState *cs = CPU(cpu); in vttbr_write()
2766 tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); in vttbr_write()
2850 env->cp15.c15_ticonfig = value & 0xe7; in omap_ticonfig_write()
2852 env->cp15.c0_cpuid = (value & (1 << 5)) ? in omap_ticonfig_write()
2859 env->cp15.c15_threadid = value & 0xffff; in omap_threadid_write()
2865 /* Wait-for-interrupt (deprecated) */ in omap_wfi_write()
2876 env->cp15.c15_i_max = 0x000; in omap_cachemaint_write()
2877 env->cp15.c15_i_min = 0xff0; in omap_cachemaint_write()
2923 env->cp15.c15_cpar = value & 0x3fff; in xscale_cpar_write()
2936 * XScale specific cache-lockdown: since we have no cache we NOP these
2956 * implementation of this implementation-defined space.
2968 /* Cache status: RAZ because we have no cache so it's always clean */
2996 * The cache test-and-clean instructions always return (1 << 30)
2997 * to indicate that there are no dirty cache lines.
3020 return env->cp15.vpidr_el2; in midr_read()
3028 uint64_t mpidr = cpu->mp_affinity; in mpidr_read_val()
3033 * Cores which are uniprocessor (non-coherent) in mpidr_read_val()
3035 * bit 30. (For instance, Cortex-R5). in mpidr_read_val()
3037 if (cpu->mp_is_up) { in mpidr_read_val()
3049 return env->cp15.vmpidr_el2; in mpidr_read()
3118 env->daif = value & PSTATE_DAIF; in aa64_daif_write()
3123 return env->pstate & PSTATE_PAN; in aa64_pan_read()
3129 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); in aa64_pan_write()
3141 return env->pstate & PSTATE_UAO; in aa64_uao_read()
3147 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); in aa64_uao_write()
3159 return env->pstate & PSTATE_DIT; in aa64_dit_read()
3165 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); in aa64_dit_write()
3177 return env->pstate & PSTATE_SSBS; in aa64_ssbs_read()
3183 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); in aa64_ssbs_write()
3257 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { in aa64_zva_access()
3261 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { in aa64_zva_access()
3284 return cpu->dcz_blocksize | dzp_bit; in aa64_dczid_read()
3290 if (!(env->pstate & PSTATE_SP)) { in sp_el0_access()
3302 return env->pstate & PSTATE_SP; in spsel_read()
3315 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { in sctlr_write()
3316 /* M bit is RAZ/WI for PMSA with no MPU implemented */ in sctlr_write()
3322 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { in sctlr_write()
3323 if (ri->opc1 == 6) { /* SCTLR_EL3 */ in sctlr_write()
3344 if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { in sctlr_write()
3363 bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; in mdcr_el3_write()
3368 env->cp15.mdcr_el3 = value; in mdcr_el3_write()
3389 bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; in mdcr_el2_write()
3394 env->cp15.mdcr_el2 = value; in mdcr_el2_write()
3415 * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
3431 icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1; in ic_ivau_write()
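CTR_EL0.IminLine (bits [3:0]) is log2 of the I-cache line size in 4-byte words, so 4 << IminLine is the line size in bytes and subtracting 1 yields the alignment mask computed above. A worked example:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t icache_line_bytes(uint32_t ctr)
    {
        uint32_t iminline = ctr & 0xf;        /* CTR_EL0[3:0], log2(words) */
        return 4u << iminline;                /* words -> bytes */
    }

    int main(void)
    {
        uint32_t ctr = 0x4;                           /* IminLine = 4 */
        uint64_t mask = icache_line_bytes(ctr) - 1;   /* 64-byte lines */

        assert(mask == 63);
        assert((0x1234 & ~mask) == 0x1200);           /* align start down */
        return 0;
    }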
3445 * Minimal set of EL0-visible registers. This will need to be expanded
3474 /* Avoid overhead of an access check that always passes in user-mode */
3676 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { in do_hcr_write()
3739 * HCR_PTW forbids certain page-table setups in do_hcr_write()
3745 if ((env->cp15.hcr_el2 ^ value) & in do_hcr_write()
3749 env->cp15.hcr_el2 = value; in do_hcr_write()
3781 value = deposit64(env->cp15.hcr_el2, 32, 32, value); in hcr_writehigh()
3789 value = deposit64(env->cp15.hcr_el2, 0, 32, value); in hcr_writelow()
3795 /* hcr_write will set the RES1 bits on an AArch64-only CPU */ in hcr_reset()
3806 uint64_t ret = env->cp15.hcr_el2; in arm_hcr_el2_eff_secstate()
3812 * "This register has no effect if EL2 is not enabled in the in arm_hcr_el2_eff_secstate()
3813 * current Security state". This is ARMv8.4-SecEL2 speak for in arm_hcr_el2_eff_secstate()
3820 * on a per-field basis. In current QEMU, this is condition in arm_hcr_el2_eff_secstate()
3838 * These bits are up-to-date as of ARMv8.6. in arm_hcr_el2_eff_secstate()
3849 /* These bits are up-to-date as of ARMv8.6. */ in arm_hcr_el2_eff_secstate()
3897 if ((env->cp15.hcr_el2 & mask) != mask) { in el_is_in_host()
3930 env->cp15.hcrx_el2 = value & valid_mask; in hcrx_write()
3955 && !(env->cp15.scr_el3 & SCR_HXEN)) { in access_hxen()
3978 * For the moment, we treat the EL2-disabled case as taking in arm_hcrx_el2_eff()
3979 * priority over the HXEn-disabled case. This is true for the only in arm_hcrx_el2_eff()
3992 if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { in arm_hcrx_el2_eff()
3995 return env->cp15.hcrx_el2; in arm_hcrx_el2_eff()
4002 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_write()
4006 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_write()
4008 value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); in cptr_el2_write()
4010 env->cp15.cptr_el[2] = value; in cptr_el2_write()
4016 * For A-profile AArch32 EL3, if NSACR.CP10 in cptr_el2_read()
4019 uint64_t value = env->cp15.cptr_el[2]; in cptr_el2_read()
4022 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { in cptr_el2_read()
4120 /* no .writefn needed as this can't cause an ASID change */
4300 if (env->cp15.scr_el3 & SCR_EEL2) { in nsacr_access()
4341 /* no .writefn needed as this can't cause an ASID change */
4407 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) { in access_el1nvpct()
4419 if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) { in access_el1nvvct()
4438 ri = ri->opaque; in el2_e2h_read()
4439 readfn = ri->readfn; in el2_e2h_read()
4441 readfn = ri->orig_readfn; in el2_e2h_read()
4456 ri = ri->opaque; in el2_e2h_write()
4457 writefn = ri->writefn; in el2_e2h_write()
4459 writefn = ri->orig_writefn; in el2_e2h_write()
4470 return ri->orig_readfn(env, ri->opaque); in el2_e2h_e12_read()
4477 return ri->orig_writefn(env, ri->opaque, value); in el2_e2h_e12_write()
4497 if (ri->orig_accessfn) { in el2_e2h_e12_access()
4498 return ri->orig_accessfn(env, ri->opaque, isread); in el2_e2h_e12_access()
4565 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
4566 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ in define_arm_vh_e2h_redirects_aliases()
4577 if (a->feature && !a->feature(&cpu->isar)) { in define_arm_vh_e2h_redirects_aliases()
4581 src_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
4582 (gpointer)(uintptr_t)a->src_key); in define_arm_vh_e2h_redirects_aliases()
4583 dst_reg = g_hash_table_lookup(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
4584 (gpointer)(uintptr_t)a->dst_key); in define_arm_vh_e2h_redirects_aliases()
4588 /* Cross-compare names to detect typos in the keys. */ in define_arm_vh_e2h_redirects_aliases()
4589 g_assert(strcmp(src_reg->name, a->src_name) == 0); in define_arm_vh_e2h_redirects_aliases()
4590 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); in define_arm_vh_e2h_redirects_aliases()
4593 g_assert(src_reg->opaque == NULL); in define_arm_vh_e2h_redirects_aliases()
4598 new_reg->name = a->new_name; in define_arm_vh_e2h_redirects_aliases()
4599 new_reg->type |= ARM_CP_ALIAS; in define_arm_vh_e2h_redirects_aliases()
4601 new_reg->access &= PL2_RW | PL3_RW; in define_arm_vh_e2h_redirects_aliases()
4603 new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) in define_arm_vh_e2h_redirects_aliases()
4605 new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) in define_arm_vh_e2h_redirects_aliases()
4607 new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) in define_arm_vh_e2h_redirects_aliases()
4609 new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) in define_arm_vh_e2h_redirects_aliases()
4611 new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) in define_arm_vh_e2h_redirects_aliases()
4613 new_reg->opaque = src_reg; in define_arm_vh_e2h_redirects_aliases()
4614 new_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
4615 new_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
4616 new_reg->orig_accessfn = src_reg->accessfn; in define_arm_vh_e2h_redirects_aliases()
4617 if (!new_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
4618 new_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
4620 if (!new_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
4621 new_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
4623 new_reg->readfn = el2_e2h_e12_read; in define_arm_vh_e2h_redirects_aliases()
4624 new_reg->writefn = el2_e2h_e12_write; in define_arm_vh_e2h_redirects_aliases()
4625 new_reg->accessfn = el2_e2h_e12_access; in define_arm_vh_e2h_redirects_aliases()
4632 if (new_reg->nv2_redirect_offset) { in define_arm_vh_e2h_redirects_aliases()
4633 assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); in define_arm_vh_e2h_redirects_aliases()
4634 new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; in define_arm_vh_e2h_redirects_aliases()
4635 new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; in define_arm_vh_e2h_redirects_aliases()
4638 ok = g_hash_table_insert(cpu->cp_regs, in define_arm_vh_e2h_redirects_aliases()
4639 (gpointer)(uintptr_t)a->new_key, new_reg); in define_arm_vh_e2h_redirects_aliases()
4642 src_reg->opaque = dst_reg; in define_arm_vh_e2h_redirects_aliases()
4643 src_reg->orig_readfn = src_reg->readfn ?: raw_read; in define_arm_vh_e2h_redirects_aliases()
4644 src_reg->orig_writefn = src_reg->writefn ?: raw_write; in define_arm_vh_e2h_redirects_aliases()
4645 if (!src_reg->raw_readfn) { in define_arm_vh_e2h_redirects_aliases()
4646 src_reg->raw_readfn = raw_read; in define_arm_vh_e2h_redirects_aliases()
4648 if (!src_reg->raw_writefn) { in define_arm_vh_e2h_redirects_aliases()
4649 src_reg->raw_writefn = raw_write; in define_arm_vh_e2h_redirects_aliases()
4651 src_reg->readfn = el2_e2h_read; in define_arm_vh_e2h_redirects_aliases()
4652 src_reg->writefn = el2_e2h_write; in define_arm_vh_e2h_redirects_aliases()
4667 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { in ctr_el0_access()
4671 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { in ctr_el0_access()
4702 if (!arm_is_el3_or_mon(env) && (env->cp15.scr_el3 & SCR_TERR)) { in access_terr()
4713 return env->cp15.vdisr_el2; in disr_read()
4715 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_read()
4718 return env->cp15.disr_el1; in disr_read()
4726 env->cp15.vdisr_el2 = val; in disr_write()
4729 if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { in disr_write()
4732 env->cp15.disr_el1 = val; in disr_write()
4736 * Minimal RAS implementation with no Error Records.
4754 * These registers have fine-grained trap bits, but UNDEF-to-EL1
4755 * is higher priority than FGT-to-EL2 so we do not need to list them
4792 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { in sve_exception_el()
4806 if (env->cp15.hcr_el2 & HCR_E2H) { in sve_exception_el()
4807 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { in sve_exception_el()
4809 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sve_exception_el()
4818 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { in sve_exception_el()
4826 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { in sve_exception_el()
4841 switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { in sme_exception_el()
4855 if (env->cp15.hcr_el2 & HCR_E2H) { in sme_exception_el()
4856 switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { in sme_exception_el()
4858 if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { in sme_exception_el()
4867 if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { in sme_exception_el()
4875 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in sme_exception_el()
4888 uint64_t *cr = env->vfp.zcr_el; in sve_vqm1_for_el_sm()
4889 uint32_t map = cpu->sve_vq.map; in sve_vqm1_for_el_sm()
4890 uint32_t len = ARM_MAX_VQ - 1; in sve_vqm1_for_el_sm()
4893 cr = env->vfp.smcr_el; in sve_vqm1_for_el_sm()
4894 map = cpu->sme_vq.map; in sve_vqm1_for_el_sm()
4909 return 31 - clz32(map); in sve_vqm1_for_el_sm()
4912 /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */ in sve_vqm1_for_el_sm()
4914 return ctz32(cpu->sme_vq.map); in sve_vqm1_for_el_sm()
4919 return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); in sve_vqm1_for_el()
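The vq map bitmasks have bit N set when a vector length of N+1 quadwords is supported, so after masking by the requested ceiling, 31 - clz32(map) is the largest supported VQ-1 and ctz32(map) the smallest (relevant for Streaming SVE, where bit 0 need not be set). A worked sketch using the GCC/Clang builtins the QEMU wrappers sit on:

    #include <assert.h>
    #include <stdint.h>

    static int highest_vq_m1(uint32_t map) { return 31 - __builtin_clz(map); }
    static int lowest_vq_m1(uint32_t map)  { return __builtin_ctz(map); }

    int main(void)
    {
        uint32_t map = 0xb;                 /* VQ 1, 2 and 4: bits 0, 1, 3 */

        assert(highest_vq_m1(map) == 3);    /* largest: VQ 4 = 512 bits */
        assert(lowest_vq_m1(map) == 0);     /* smallest: VQ 1 = 128 bits */

        /* Constrain to len = 1 (VQ <= 2), the way the code above masks. */
        assert(highest_vq_m1(map & ((2u << 1) - 1)) == 1);
        return 0;
    }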
4976 && !(env->cp15.scr_el3 & SCR_ENTP2)) { in access_tpidr2()
4988 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smprimap()
4999 && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { in access_smpri()
5008 memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); in arm_reset_sve_state()
5010 memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); in arm_reset_sve_state()
5016 uint64_t change = (env->svcr ^ new) & mask; in aarch64_set_svcr()
5021 env->svcr ^= change; in aarch64_set_svcr()
5036 memset(&env->za_state, 0, sizeof(env->za_state)); in aarch64_set_svcr()
5047 aarch64_set_svcr(env, value, -1); in svcr_write()
5069 * apply the narrower SVL to the Zregs and Pregs -- see the comment in smcr_write()
5111 * SMPS = 0 (no streaming execution priority in QEMU)
5138 env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); in gpccr_write()
5143 env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ, in gpccr_reset()
5144 env_archcpu(env)->reset_l0gptsz); in gpccr_reset()
5172 env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT); in aa64_allint_write()
5177 return env->pstate & PSTATE_ALLINT; in aa64_allint_read()
5210 uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1); in id_pfr1_read()
5212 if (env->gicv3state) { in id_pfr1_read()
5221 uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0); in id_aa64pfr0_read()
5223 if (env->gicv3state) { in id_aa64pfr0_read()
5242 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { in access_lor_ns()
5259 * A trivial implementation of ARMv8.1-LOR leaves all of these
5303 !(env->cp15.scr_el3 & SCR_APK)) { in access_pauth()
5368 env->NF = env->CF = env->VF = 0, env->ZF = 1; in rndr_readfn()
5374 * timed-out indication to the guest. There is no reason in rndr_readfn()
5379 ri->name, error_get_pretty(err)); in rndr_readfn()
5382 env->ZF = 0; /* NZCV = 0100 */ in rndr_readfn()
5388 /* We do not support re-seeding, so the two registers operate the same. */
5405 /* CTR_EL0 System register -> DminLine, bits [19:16] */ in dccvap_writefn()
5406 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); in dccvap_writefn()
5408 uint64_t vaddr = vaddr_in & ~(dline_size - 1); in dccvap_writefn()
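Symmetrically to the I-cache case, CTR_EL0.DminLine (bits [19:16]) is log2 of the D-cache line size in words; dccvap_writefn() aligns the written address down to a line boundary before persisting that line. The same arithmetic, worked:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ctr = 4u << 16;                         /* DminLine = 4 */
        uint64_t dline_size = 4u << ((ctr >> 16) & 0xf); /* 64-byte lines */
        uint64_t vaddr_in = 0xdeadbeffull;
        uint64_t vaddr = vaddr_in & ~(dline_size - 1);   /* align down */

        assert(dline_size == 64);
        assert(vaddr == 0xdeadbec0ull);
        return 0;
    }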
5472 !(env->cp15.scr_el3 & SCR_ATA)) { in access_mte()
5512 !(env->cp15.scr_el3 & SCR_ATA)) { in access_tfsr_el2()
5520 return env->pstate & PSTATE_TCO; in tco_read()
5525 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); in tco_write()
5646 /* Avoid overhead of an access check that always passes in user-mode */
5655 /* Avoid overhead of an access check that always passes in user-mode */
5669 if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { in access_scxtnum()
5675 } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { in access_scxtnum()
5683 && !(env->cp15.scr_el3 & SCR_ENSCXT)) { in access_scxtnum()
5727 arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) { in access_fgt()
5766 * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything in vncr_write()
5767 * about the RESS bits at the top -- we choose the "generate an EL2 in vncr_write()
5771 env->cp15.vncr_el2 = value & ~0xfffULL; in vncr_write()
5885 (env->cp15.hstr_el2 & HSTR_TJDBX)) { in access_joscr_jmcr()
5981 * is non-zero, which is never the case for ARMv7 and only optional in ARMv8
6001 CPUARMState *env = &cpu->env; in register_cp_regs_for_features()
6002 ARMISARegisters *isar = &cpu->isar; in register_cp_regs_for_features()
6005 /* M profile has no coprocessor registers */ in register_cp_regs_for_features()
6334 .resetvalue = cpu->isar.mvfr0 }, in register_cp_regs_for_features()
6339 .resetvalue = cpu->isar.mvfr1 }, in register_cp_regs_for_features()
6344 .resetvalue = cpu->isar.mvfr2 }, in register_cp_regs_for_features()
6369 * being filled with AArch64-view-of-AArch32-ID-register in register_cp_regs_for_features()
6527 * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. in register_cp_regs_for_features()
6528 * For pre-v8 cores there are RAZ patterns for these in in register_cp_regs_for_features()
6531 * to also cover c0, 0, c{8-15}, {0-7}. in register_cp_regs_for_features()
6533 * c4-c7 is where the AArch64 ID registers live (and we've in register_cp_regs_for_features()
6534 * already defined those in v8_idregs[]), and c8-c15 are not in register_cp_regs_for_features()
6564 .resetvalue = cpu->midr, in register_cp_regs_for_features()
6569 .access = PL2_RW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
6648 .resetvalue = cpu->reset_sctlr }, in register_cp_regs_for_features()
6705 /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ in register_cp_regs_for_features()
6734 * When LPAE exists this 32-bit PAR register is an alias of the in register_cp_regs_for_features()
6735 * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[] in register_cp_regs_for_features()
6771 * cp15 crn=0 to be writes-ignored, whereas for other cores they should in register_cp_regs_for_features()
6772 * be read-only (ie write causes UNDEF exception). in register_cp_regs_for_features()
6777 * Pre-v8 MIDR space. in register_cp_regs_for_features()
6788 .access = PL1_R, .resetvalue = cpu->midr, in register_cp_regs_for_features()
6813 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, in register_cp_regs_for_features()
6820 .access = PL1_R, .resetvalue = cpu->midr }, in register_cp_regs_for_features()
6826 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, in register_cp_regs_for_features()
6831 .access = PL1_R, .resetvalue = cpu->midr in register_cp_regs_for_features()
6834 /* These are common to v8 and pre-v8 */ in register_cp_regs_for_features()
6838 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
6843 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, in register_cp_regs_for_features()
6844 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ in register_cp_regs_for_features()
6864 .resetvalue = cpu->pmsav7_dregion << 8 in register_cp_regs_for_features()
6871 .resetvalue = cpu->pmsav8r_hdregion in register_cp_regs_for_features()
6930 for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { in register_cp_regs_for_features()
6960 for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { in register_cp_regs_for_features()
7016 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, in register_cp_regs_for_features()
7034 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. in register_cp_regs_for_features()
7036 * (1) older 32-bit only cores have a simple 32-bit CBAR in register_cp_regs_for_features()
7037 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a in register_cp_regs_for_features()
7038 * 32-bit register visible to AArch32 at a different encoding in register_cp_regs_for_features()
7040 * be able to squash a 64-bit address into the 32-bit view. in register_cp_regs_for_features()
7042 * in future if we support AArch32-only configs of some of the in register_cp_regs_for_features()
7048 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) in register_cp_regs_for_features()
7049 | extract64(cpu->reset_cbar, 32, 12); in register_cp_regs_for_features()
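The 32-bit CBAR view packs a wider physical base into one word: address bits [31:18] land in CBAR[31:18] and bits [43:32] in CBAR[11:0], so bits [17:0] and anything above bit 43 are necessarily dropped. A sketch of the squash:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t extract64(uint64_t v, int start, int len)
    {
        return (v >> start) & ((1ull << len) - 1);
    }

    static uint32_t cbar32_pack(uint64_t cbar)
    {
        /* addr[31:18] -> CBAR[31:18], addr[43:32] -> CBAR[11:0]. */
        return (extract64(cbar, 18, 14) << 18) | extract64(cbar, 32, 12);
    }

    int main(void)
    {
        /* A >4GiB base: only bits [43:32] and [31:18] survive the squash. */
        assert(cbar32_pack(0x0000001f2e000000ull) == 0x2e00001fu);
        return 0;
    }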
7058 .access = PL1_R, .resetvalue = cpu->reset_cbar }, in register_cp_regs_for_features()
7067 .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, in register_cp_regs_for_features()
7105 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, in register_cp_regs_for_features()
7111 * arch/arm/mach-pxa/sleep.S expects two instructions following in register_cp_regs_for_features()
7194 .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize, in register_cp_regs_for_features()
7258 CPUARMState *env = &cpu->env; in add_cpreg_to_hashtable()
7261 bool is64 = r->type & ARM_CP_64BIT; in add_cpreg_to_hashtable()
7263 int cp = r->cp; in add_cpreg_to_hashtable()
7270 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
7273 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); in add_cpreg_to_hashtable()
7278 * cp == 0 as equivalent to the value for "standard guest-visible in add_cpreg_to_hashtable()
7280 * in their AArch64 view (the .cp value may be non-zero for the in add_cpreg_to_hashtable()
7283 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { in add_cpreg_to_hashtable()
7286 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); in add_cpreg_to_hashtable()
7293 if (!(r->type & ARM_CP_OVERRIDE)) { in add_cpreg_to_hashtable()
7294 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); in add_cpreg_to_hashtable()
7296 assert(oldreg->type & ARM_CP_OVERRIDE); in add_cpreg_to_hashtable()
7311 int min_el = ctz32(r->access) / 2; in add_cpreg_to_hashtable()
7313 if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { in add_cpreg_to_hashtable()
7316 make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); in add_cpreg_to_hashtable()
7321 if ((r->access & max_el) == 0) { in add_cpreg_to_hashtable()
7330 r2->name = memcpy(r2 + 1, name, name_len); in add_cpreg_to_hashtable()
7336 r2->cp = cp; in add_cpreg_to_hashtable()
7337 r2->crm = crm; in add_cpreg_to_hashtable()
7338 r2->opc1 = opc1; in add_cpreg_to_hashtable()
7339 r2->opc2 = opc2; in add_cpreg_to_hashtable()
7340 r2->state = state; in add_cpreg_to_hashtable()
7341 r2->secure = secstate; in add_cpreg_to_hashtable()
7343 r2->opaque = opaque; in add_cpreg_to_hashtable()
7348 int old_special = r2->type & ARM_CP_SPECIAL_MASK; in add_cpreg_to_hashtable()
7355 r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; in add_cpreg_to_hashtable()
7358 * special cases like VPIDR_EL2 which have a constant non-zero in add_cpreg_to_hashtable()
7361 if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { in add_cpreg_to_hashtable()
7362 r2->resetvalue = 0; in add_cpreg_to_hashtable()
7369 r2->readfn = NULL; in add_cpreg_to_hashtable()
7370 r2->writefn = NULL; in add_cpreg_to_hashtable()
7371 r2->raw_readfn = NULL; in add_cpreg_to_hashtable()
7372 r2->raw_writefn = NULL; in add_cpreg_to_hashtable()
7373 r2->resetfn = NULL; in add_cpreg_to_hashtable()
7374 r2->fieldoffset = 0; in add_cpreg_to_hashtable()
7375 r2->bank_fieldoffsets[0] = 0; in add_cpreg_to_hashtable()
7376 r2->bank_fieldoffsets[1] = 0; in add_cpreg_to_hashtable()
7378 bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; in add_cpreg_to_hashtable()
7386 r2->fieldoffset = r->bank_fieldoffsets[ns]; in add_cpreg_to_hashtable()
7392 * reset the 32-bit instance in certain cases: in add_cpreg_to_hashtable()
7394 * 1) If the register has both 32-bit and 64-bit instances in add_cpreg_to_hashtable()
7395 * then we can count on the 64-bit instance taking care in add_cpreg_to_hashtable()
7396 * of the non-secure bank. in add_cpreg_to_hashtable()
7397 * 2) If ARMv8 is enabled then we can count on a 64-bit in add_cpreg_to_hashtable()
7399 * that separate 32 and 64-bit definitions are provided. in add_cpreg_to_hashtable()
7401 if ((r->state == ARM_CP_STATE_BOTH && ns) || in add_cpreg_to_hashtable()
7403 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
7405 } else if ((secstate != r->secure) && !ns) { in add_cpreg_to_hashtable()
7408 * migration of the non-secure instance. in add_cpreg_to_hashtable()
7410 r2->type |= ARM_CP_ALIAS; in add_cpreg_to_hashtable()
7414 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { in add_cpreg_to_hashtable()
7415 r2->fieldoffset += sizeof(uint32_t); in add_cpreg_to_hashtable()
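That final fieldoffset += sizeof(uint32_t) is the big-endian-host correction: when a 32-bit AArch32 view aliases the low half of a 64-bit state field, the low word lives at byte offset +4 on a big-endian host. A minimal illustration of why:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint64_t reg64 = 0x1122334455667788ull;
        uint32_t low;
        size_t off = 0;   /* offset of the low 32 bits within the field */

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        off += sizeof(uint32_t);   /* same correction as r2->fieldoffset above */
    #endif
        memcpy(&low, (char *)&reg64 + off, sizeof(low));
        assert(low == 0x55667788u);
        return 0;
    }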
7425 * never migratable and not even raw-accessible. in add_cpreg_to_hashtable()
7427 if (r2->type & ARM_CP_SPECIAL_MASK) { in add_cpreg_to_hashtable()
7428 r2->type |= ARM_CP_NO_RAW; in add_cpreg_to_hashtable()
7430 if (((r->crm == CP_ANY) && crm != 0) || in add_cpreg_to_hashtable()
7431 ((r->opc1 == CP_ANY) && opc1 != 0) || in add_cpreg_to_hashtable()
7432 ((r->opc2 == CP_ANY) && opc2 != 0)) { in add_cpreg_to_hashtable()
7433 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; in add_cpreg_to_hashtable()
7441 if (!(r2->type & ARM_CP_NO_RAW)) { in add_cpreg_to_hashtable()
7445 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); in add_cpreg_to_hashtable()
7462 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard in define_one_arm_cp_reg_with_opaque()
7471 * Only registers visible in AArch64 may set r->opc0; opc0 cannot in define_one_arm_cp_reg_with_opaque()
7477 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; in define_one_arm_cp_reg_with_opaque()
7478 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; in define_one_arm_cp_reg_with_opaque()
7479 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; in define_one_arm_cp_reg_with_opaque()
7480 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; in define_one_arm_cp_reg_with_opaque()
7481 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; in define_one_arm_cp_reg_with_opaque()
7482 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; in define_one_arm_cp_reg_with_opaque()
7486 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); in define_one_arm_cp_reg_with_opaque()
7488 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); in define_one_arm_cp_reg_with_opaque()
7490 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); in define_one_arm_cp_reg_with_opaque()
7493 * (M-profile or v7A-and-earlier only) for implementation defined in define_one_arm_cp_reg_with_opaque()
7499 switch (r->state) { in define_one_arm_cp_reg_with_opaque()
7502 if (r->cp == 0) { in define_one_arm_cp_reg_with_opaque()
7507 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && in define_one_arm_cp_reg_with_opaque()
7508 !arm_feature(&cpu->env, ARM_FEATURE_M)) { in define_one_arm_cp_reg_with_opaque()
7509 assert(r->cp >= 14 && r->cp <= 15); in define_one_arm_cp_reg_with_opaque()
7511 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); in define_one_arm_cp_reg_with_opaque()
7515 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); in define_one_arm_cp_reg_with_opaque()
7527 if (r->state != ARM_CP_STATE_AA32) { in define_one_arm_cp_reg_with_opaque()
7529 switch (r->opc1) { in define_one_arm_cp_reg_with_opaque()
7556 /* broken reginfo with out-of-range opc1 */ in define_one_arm_cp_reg_with_opaque()
7560 assert((r->access & ~mask) == 0); in define_one_arm_cp_reg_with_opaque()
7567 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { in define_one_arm_cp_reg_with_opaque()
7568 if (r->access & PL3_R) { in define_one_arm_cp_reg_with_opaque()
7569 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
7570 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
7571 r->readfn); in define_one_arm_cp_reg_with_opaque()
7573 if (r->access & PL3_W) { in define_one_arm_cp_reg_with_opaque()
7574 assert((r->fieldoffset || in define_one_arm_cp_reg_with_opaque()
7575 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || in define_one_arm_cp_reg_with_opaque()
7576 r->writefn); in define_one_arm_cp_reg_with_opaque()
7585 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { in define_one_arm_cp_reg_with_opaque()
7588 if ((r->type & ARM_CP_ADD_TLBI_NXS) && in define_one_arm_cp_reg_with_opaque()
7594 * fine-grained trapping. Add the NXS insn here and in define_one_arm_cp_reg_with_opaque()
7601 g_autofree char *name = g_strdup_printf("%sNXS", r->name); in define_one_arm_cp_reg_with_opaque()
7616 * (same for secure and non-secure world) or banked. in define_one_arm_cp_reg_with_opaque()
7620 switch (r->secure) { in define_one_arm_cp_reg_with_opaque()
7624 r->secure, crm, opc1, opc2, in define_one_arm_cp_reg_with_opaque()
7625 r->name); in define_one_arm_cp_reg_with_opaque()
7628 name = g_strdup_printf("%s_S", r->name); in define_one_arm_cp_reg_with_opaque()
7635 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
7642 * AArch64 registers get mapped to non-secure instance in define_one_arm_cp_reg_with_opaque()
7647 crm, opc1, opc2, r->name); in define_one_arm_cp_reg_with_opaque()
7670 * user-space cannot alter any values and dynamic values pertaining to
7681 if (m->is_glob) { in modify_arm_cp_regs_with_len()
7682 pat = g_pattern_spec_new(m->name); in modify_arm_cp_regs_with_len()
7687 if (pat && g_pattern_match_string(pat, r->name)) { in modify_arm_cp_regs_with_len()
7688 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
7689 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
7690 r->resetvalue = 0; in modify_arm_cp_regs_with_len()
7692 } else if (strcmp(r->name, m->name) == 0) { in modify_arm_cp_regs_with_len()
7693 r->type = ARM_CP_CONST; in modify_arm_cp_regs_with_len()
7694 r->access = PL0U_R; in modify_arm_cp_regs_with_len()
7695 r->resetvalue &= m->exported_bits; in modify_arm_cp_regs_with_len()
7696 r->resetvalue |= m->fixed_bits; in modify_arm_cp_regs_with_len()
7714 /* Helper coprocessor write function for write-ignore registers */ in arm_cp_write_ignore()
7719 /* Helper coprocessor read function for read-as-zero registers */ in arm_cp_read_zero()
7725 /* Helper coprocessor reset function for do-nothing-on-reset registers */ in arm_cp_reset_ignore()
7738 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || in bad_mode_switch()
7754 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) in bad_mode_switch()
7761 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && in bad_mode_switch()
7778 ZF = (env->ZF == 0); in cpsr_read()
7779 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | in cpsr_read()
7780 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) in cpsr_read()
7781 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) in cpsr_read()
7782 | ((env->condexec_bits & 0xfc) << 8) in cpsr_read()
7783 | (env->GE << 16) | (env->daif & CPSR_AIF); in cpsr_read()
7794 env->ZF = (~val) & CPSR_Z; in cpsr_write()
7795 env->NF = val; in cpsr_write()
7796 env->CF = (val >> 29) & 1; in cpsr_write()
7797 env->VF = (val << 3) & 0x80000000; in cpsr_write()
7800 env->QF = ((val & CPSR_Q) != 0); in cpsr_write()
7803 env->thumb = ((val & CPSR_T) != 0); in cpsr_write()
7806 env->condexec_bits &= ~3; in cpsr_write()
7807 env->condexec_bits |= (val >> 25) & 3; in cpsr_write()
7810 env->condexec_bits &= 3; in cpsr_write()
7811 env->condexec_bits |= (val >> 8) & 0xfc; in cpsr_write()
7814 env->GE = (val >> 16) & 0xf; in cpsr_write()
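cpsr_read()/cpsr_write() convert between the architectural CPSR layout and QEMU's exploded flag storage: N is the sign bit of env->NF, Z is encoded as "set iff env->ZF == 0", C is bit 0 of env->CF, and V is the sign bit of env->VF. A round-trip sketch for just the NZCV nibble, mirroring the lines above:

    #include <assert.h>
    #include <stdint.h>

    struct flags { uint32_t NF, ZF, CF, VF; };   /* QEMU-style exploded NZCV */

    static uint32_t nzcv_read(const struct flags *f)
    {
        return (f->NF & 0x80000000u)                /* N: sign bit of NF */
             | ((uint32_t)(f->ZF == 0) << 30)       /* Z: set iff ZF == 0 */
             | (f->CF << 29)                        /* C: bit 0 of CF */
             | ((f->VF & 0x80000000u) >> 3);        /* V: sign bit of VF */
    }

    static void nzcv_write(struct flags *f, uint32_t val)
    {
        f->ZF = (~val) & (1u << 30);          /* nonzero ZF means Z clear */
        f->NF = val;                          /* sign bit carries N */
        f->CF = (val >> 29) & 1;
        f->VF = (val << 3) & 0x80000000u;     /* move V into the sign bit */
    }

    int main(void)
    {
        struct flags f;

        nzcv_write(&f, 0xb0000000u);          /* N=1 Z=0 C=1 V=1 */
        assert(nzcv_read(&f) == 0xb0000000u);
        return 0;
    }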
7820 * whether non-secure software is allowed to change the CPSR_F and CPSR_A in cpsr_write()
7831 changed_daif = (env->daif ^ val) & mask; in cpsr_write()
7836 * abort exceptions from a non-secure state. in cpsr_write()
7838 if (!(env->cp15.scr_el3 & SCR_AW)) { in cpsr_write()
7841 "non-secure world with SCR.AW bit clear\n"); in cpsr_write()
7849 * exceptions from a non-secure state. in cpsr_write()
7851 if (!(env->cp15.scr_el3 & SCR_FW)) { in cpsr_write()
7854 "non-secure world with SCR.FW bit clear\n"); in cpsr_write()
7859 * Check whether non-maskable FIQ (NMFI) support is enabled. in cpsr_write()
7867 "(non-maskable FIQ [NMFI] support enabled)\n"); in cpsr_write()
7873 env->daif &= ~(CPSR_AIF & mask); in cpsr_write()
7874 env->daif |= val & CPSR_AIF & mask; in cpsr_write()
7877 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { in cpsr_write()
7878 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { in cpsr_write()
7905 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
7912 aarch32_mode_name(env->uncached_cpsr), in cpsr_write()
7913 aarch32_mode_name(val), env->regs[15]); in cpsr_write()
7918 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); in cpsr_write()
7935 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, in arm_phys_excp_target_el()
7953 old_mode = env->uncached_cpsr & CPSR_M; in switch_mode()
7959 memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
7960 memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); in switch_mode()
7962 memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); in switch_mode()
7963 memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); in switch_mode()
7967 env->banked_r13[i] = env->regs[13]; in switch_mode()
7968 env->banked_spsr[i] = env->spsr; in switch_mode()
7971 env->regs[13] = env->banked_r13[i]; in switch_mode()
7972 env->spsr = env->banked_spsr[i]; in switch_mode()
7974 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; in switch_mode()
7975 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; in switch_mode()
7981 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7983 * The below multi-dimensional table is used for looking up the target
7990 * | | | | | +--- Current EL
7991 * | | | | +------ Non-secure(0)/Secure(1)
7992 * | | | +--------- HCR mask override
7993 * | | +------------ SCR exec state control
7994 * | +--------------- SCR mask override
7995 * +------------------ 32-bit(0)/64-bit(1) EL3
7998 * 0-3 = EL0-EL3
7999 * -1 = Cannot occur
8013 * BIT IRQ IMO Non-secure Secure
8017 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8018 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
8019 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
8020 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
8021 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8022 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
8023 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
8024 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
8025 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
8026 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},},
8027 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },},
8028 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},},
8029 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
8030 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
8031 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},
8032 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},},
8038 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, in arm_phys_excp_target_el()
8041 CPUARMState *env = cpu_env(cs); in arm_phys_excp_target_el()
8055 * is given by is64); or there is no EL2 or EL3, in which case in arm_phys_excp_target_el()
8065 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); in arm_phys_excp_target_el()
8069 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); in arm_phys_excp_target_el()
8073 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); in arm_phys_excp_target_el()
8084 /* Perform a table-lookup for the target EL given the current state */ in arm_phys_excp_target_el()
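The lookup itself is a single chained subscript once the five predicate bits are computed; the dimensions follow the legend above, outermost to innermost. A sketch of just the lookup shape (not the table contents):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Same dimensions as target_el_table above, outermost to innermost:
     * [EL3 is 64-bit][SCR mask][SCR exec state][HCR mask][secure][current EL].
     */
    static int lookup_target_el(const int8_t t[2][2][2][2][2][4],
                                bool is64, bool scr, bool rw, bool hcr,
                                bool secure, unsigned cur_el)
    {
        /* -1 entries mark combinations the architecture cannot produce. */
        return t[is64][scr][rw][hcr][secure][cur_el];
    }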
8092 void arm_log_exception(CPUState *cs) in arm_log_exception()
8094 int idx = cs->exception_index; in arm_log_exception()
8136 idx, exc, cs->cpu_index); in arm_log_exception()
8148 uint32_t mode = env->uncached_cpsr & CPSR_M; in aarch64_sync_32_to_64()
8152 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
8156 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. in aarch64_sync_32_to_64()
8161 env->xregs[i] = env->usr_regs[i - 8]; in aarch64_sync_32_to_64()
8165 env->xregs[i] = env->regs[i]; in aarch64_sync_32_to_64()
8170 * Registers x13-x23 are the various mode SP and LR registers. Registers in aarch64_sync_32_to_64()
8175 env->xregs[13] = env->regs[13]; in aarch64_sync_32_to_64()
8176 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
8178 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
8181 env->xregs[14] = env->regs[14]; in aarch64_sync_32_to_64()
8183 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; in aarch64_sync_32_to_64()
8188 env->xregs[15] = env->regs[13]; in aarch64_sync_32_to_64()
8190 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; in aarch64_sync_32_to_64()
8194 env->xregs[16] = env->regs[14]; in aarch64_sync_32_to_64()
8195 env->xregs[17] = env->regs[13]; in aarch64_sync_32_to_64()
8197 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
8198 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; in aarch64_sync_32_to_64()
8202 env->xregs[18] = env->regs[14]; in aarch64_sync_32_to_64()
8203 env->xregs[19] = env->regs[13]; in aarch64_sync_32_to_64()
8205 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
8206 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; in aarch64_sync_32_to_64()
8210 env->xregs[20] = env->regs[14]; in aarch64_sync_32_to_64()
8211 env->xregs[21] = env->regs[13]; in aarch64_sync_32_to_64()
8213 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
8214 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; in aarch64_sync_32_to_64()
8218 env->xregs[22] = env->regs[14]; in aarch64_sync_32_to_64()
8219 env->xregs[23] = env->regs[13]; in aarch64_sync_32_to_64()
8221 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
8222 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; in aarch64_sync_32_to_64()
8226 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ in aarch64_sync_32_to_64()
8227 * mode, then we can copy from r8-r14. Otherwise, we copy from the in aarch64_sync_32_to_64()
8228 * FIQ bank for r8-r14. in aarch64_sync_32_to_64()
8232 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ in aarch64_sync_32_to_64()
8236 env->xregs[i] = env->fiq_regs[i - 24]; in aarch64_sync_32_to_64()
8238 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
8239 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; in aarch64_sync_32_to_64()
8242 env->pc = env->regs[15]; in aarch64_sync_32_to_64()
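For reference, the block above implements a fixed mapping between AArch64 general registers and AArch32 banked registers; x24-x30 additionally take the FIQ bank of r8-r14, and the copy direction in each if/else pair depends on whether the CPU is currently in that mode. A condensed data view (the names are descriptive, not QEMU identifiers):

    /* Which banked AArch32 register feeds each of x13..x23. */
    struct xmap { int xreg; const char *banked; };

    static const struct xmap aa32_to_aa64[] = {
        { 13, "SP_usr" }, { 14, "LR_usr" }, { 15, "SP_hyp" },
        { 16, "LR_irq" }, { 17, "SP_irq" }, { 18, "LR_svc" },
        { 19, "SP_svc" }, { 20, "LR_abt" }, { 21, "SP_abt" },
        { 22, "LR_und" }, { 23, "SP_und" },
    };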
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * for the mode.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14
         * but shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}
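/*
 * Editor's note: both sync functions index banked_r13[]/banked_r14[] via
 * bank_number() and r14_bank_number(), defined elsewhere in the tree. A
 * rough, standalone illustration of that indexing follows; the BANK_* slot
 * values are assumptions for the sketch, not the QEMU definitions, while
 * the CPSR.M mode encodings are the architectural ones.
 */
enum { MODE_USR = 0x10, MODE_FIQ = 0x11, MODE_IRQ = 0x12, MODE_SVC = 0x13,
       MODE_MON = 0x16, MODE_ABT = 0x17, MODE_HYP = 0x1a, MODE_UND = 0x1b,
       MODE_SYS = 0x1f };
enum { BANK_USRSYS, BANK_SVC, BANK_ABT, BANK_UND,
       BANK_IRQ, BANK_FIQ, BANK_HYP, BANK_MON };

static int bank_number_sketch(int mode)
{
    switch (mode) {
    case MODE_USR: case MODE_SYS: return BANK_USRSYS;
    case MODE_SVC: return BANK_SVC;
    case MODE_ABT: return BANK_ABT;
    case MODE_UND: return BANK_UND;
    case MODE_IRQ: return BANK_IRQ;
    case MODE_FIQ: return BANK_FIQ;
    case MODE_HYP: return BANK_HYP;
    case MODE_MON: return BANK_MON;
    }
    return -1; /* invalid mode */
}
/*
 * r14_bank_number() differs only for HYP, which has no banked r14 of its
 * own and shares the USR slot; that is why both sync functions above
 * special-case HYP.
 */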
/* ... continuing in take_aarch32_exception(env, new_mode, mask, offset, newpc) ... */
    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
            env->uncached_cpsr |= CPSR_SSBS;
        } else {
            env->uncached_cpsr &= ~CPSR_SSBS;
        }
    }

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved, except when ... */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state. */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0. */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    /* ... */
}
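/*
 * Editor's note: the CPSR bookkeeping above is bit surgery on uncached_cpsr.
 * A condensed standalone sketch of the fixed part (endianness from SCTLR.EE,
 * clear IL and J, apply the A/I/F mask). Bit positions are the architectural
 * CPSR layout; note QEMU actually keeps A/I/F in env->daif rather than in
 * uncached_cpsr, so this is a simplification:
 */
#include <stdint.h>

#define B_CPSR_F   (1u << 6)
#define B_CPSR_I   (1u << 7)
#define B_CPSR_A   (1u << 8)
#define B_CPSR_E   (1u << 9)
#define B_CPSR_IL  (1u << 20)
#define B_CPSR_J   (1u << 24)

static uint32_t entry_cpsr_fixups(uint32_t cpsr, int sctlr_ee,
                                  uint32_t aif_mask)
{
    cpsr &= ~B_CPSR_E;
    if (sctlr_ee) {
        cpsr |= B_CPSR_E;      /* big-endian data accesses in the new mode */
    }
    cpsr &= ~(B_CPSR_IL | B_CPSR_J);
    return cpsr | aif_mask;    /* some of CPSR_A/I/F, per exception type */
}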
static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr, mask;

    switch (cs->exception_index) {
    /* ... */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    /* ... */
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }
    /* ... */

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}
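/*
 * Editor's note: the mask computation above follows one rule: an async
 * exception class stays maskable on entry to Hyp only if EL3 has not
 * claimed it for routing (the SCR.EA/IRQ/FIQ bit is clear). A standalone
 * sketch of the same decode; the bit positions are the architectural
 * SCR/CPSR ones:
 */
#include <stdint.h>

#define S_SCR_IRQ  (1u << 1)
#define S_SCR_FIQ  (1u << 2)
#define S_SCR_EA   (1u << 3)
#define S_CPSR_F   (1u << 6)
#define S_CPSR_I   (1u << 7)
#define S_CPSR_A   (1u << 8)

/* A/I/F bits to set when entering Hyp mode. */
static uint32_t hyp_entry_aif_mask(uint32_t scr_el3)
{
    uint32_t mask = 0;
    if (!(scr_el3 & S_SCR_EA)) {
        mask |= S_CPSR_A;
    }
    if (!(scr_el3 & S_SCR_IRQ)) {
        mask |= S_CPSR_I;
    }
    if (!(scr_el3 & S_SCR_FIQ)) {
        mask |= S_CPSR_F;
    }
    return mask;
}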
static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    /* ... the debug EC values map to their MOE encodings; default is 0 ... */
    }
    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        /* Debug exceptions are reported differently on AArch32 */
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_BREAKPOINT:
        case EC_BREAKPOINT_SAME_EL:
        case EC_AA32_BKPT:
        case EC_VECTORCATCH:
            env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
                                                     0, 0, 0x22);
            break;
        case EC_WATCHPOINT:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT);
            break;
        case EC_WATCHPOINT_SAME_EL:
            env->exception.syndrome = syn_set_ec(env->exception.syndrome,
                                                 EC_DATAABORT_SAME_EL);
            break;
        }
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    /* ... */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VSERR:
        {
            /* Reported as a data abort, but with the DFAR left UNKNOWN. */
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = arm_fi_to_lfsc(&fi);
            } else {
                env->exception.fsr = arm_fi_to_sfsc(&fi);
            }
            env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
            A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
            qemu_log_mask(CPU_LOG_INT, /* ... */
                          env->exception.fsr);
            /* ... */
        }
        break;
    /* ... */
        if (env->thumb) {
            offset = 2;
        } else {
            offset = 4;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors.  When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /*
         * ARM v7 architectures provide a vector base address register to
         * remap the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}
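/*
 * Editor's note: the vector base selection above condenses to three cases.
 * Standalone sketch (the helper name is invented; the constants mirror the
 * code above):
 */
#include <stdint.h>

static uint32_t a32_vector_addr(uint32_t offset, int is_mon,
                                int sctlr_v, uint32_t mvbar, uint32_t vbar)
{
    if (is_mon) {
        return offset + mvbar;          /* Monitor mode: MVBAR */
    } else if (sctlr_v) {
        return offset + 0xffff0000u;    /* high vectors: base not remappable */
    }
    return offset + vbar;               /* banked VBAR, bits [31:5] valid */
}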
/* ... in aarch64_regnum(): */
    int mode = env->uncached_cpsr & CPSR_M;
/* ... */

/* ... in cpsr_read_for_spsr_elx(): */
    ret |= env->pstate & PSTATE_SS;
/* ... */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    vaddr addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    int rt;
    /* ... */

    switch (cs->exception_index) {
    case EXCP_GPC:
        qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
                      env->cp15.mfar_el3);
        /* fall through */
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        /*
         * FEAT_DoubleFault allows synchronous external aborts taken to EL3
         * to be taken to the SError vector entrypoint.
         */
        if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
            syndrome_is_sync_extabt(env->exception.syndrome)) {
            addr += 0x180;
        }
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    /* ... the other synchronous exception types ... */
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /* Mask out the AArch32-only TA and coproc fields. */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC, convert the raw Rt
             * field from the insn to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP14RRTTRAP:
        case EC_CP15RRTTRAP:
            /* Similarly for MRRC/MCRR traps on the Rt and Rt2 fields. */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
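/*
 * Editor's note: extract32()/deposit32() are QEMU's bitfield helpers from
 * qemu/bitops.h. A standalone sketch with the same semantics makes the Rt
 * widening above easier to follow. Local names are used so as not to imply
 * this is the QEMU source, and the x19 pairing is an assumption taken from
 * the SVC-mode mapping earlier:
 */
#include <stdio.h>
#include <stdint.h>

/* Return 'length' bits of 'value' starting at 'start' (length 1..31 here). */
static uint32_t xtract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

/* Overwrite that bitfield with 'fieldval' instead. */
static uint32_t dposit32(uint32_t value, int start, int length,
                         uint32_t fieldval)
{
    uint32_t mask = (~0u >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* A syndrome whose 4-bit Rt field at bit 5 names r13; if the trap came
     * from SVC mode, r13 lives in x19, so the 5-bit AArch64 field gets 19. */
    uint32_t syndrome = dposit32(0, 5, 4, 13);
    uint32_t rt = xtract32(syndrome, 5, 4);       /* 13 */
    syndrome = dposit32(syndrome, 5, 5, 19);      /* widen to 5 bits */
    printf("rt=%u widened=%u\n", rt, xtract32(syndrome, 5, 5)); /* 13, 19 */
    return 0;
}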
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    case EXCP_VSERR:
        addr += 0x180;
        /* Construct the SError syndrome from IDS and ISS fields. */
        env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
        /* ... */
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        /* ... */
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);
    /* ... */
    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* ... */
        if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
            new_mode |= PSTATE_PAN;
        }
        /* ... */
    }
    if (cpu_isar_feature(aa64_ssbs, cpu)) {
        if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
            new_mode |= PSTATE_SSBS;
        } else {
            new_mode &= ~PSTATE_SSBS;
        }
    }
    if (cpu_isar_feature(aa64_nmi, cpu)) {
        if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
            new_mode |= PSTATE_ALLINT;
        } else {
            new_mode &= ~PSTATE_ALLINT;
        }
    }
    /* ... */
    env->aarch64 = true;
    aarch64_restore_sp(env, new_el);

    if (tcg_enabled()) {
        helper_rebuild_hflags_a64(env, new_el);
    }

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}
static void tcg_handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
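/*
 * Editor's note: the semihosting exception leaves the PC on the trapping
 * instruction, so after do_common_semihosting() the handler steps past it
 * by hand: always 4 bytes in AArch64, and 2 or 4 in AArch32 depending on
 * whether the CPU is executing Thumb code.
 */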
/*
 * Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    /* ... */

    arm_log_exception(cs);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
    if (tcg_enabled()) {
        if (cs->exception_index == EXCP_SEMIHOST) {
            tcg_handle_semihosting(cs);
            return;
        }
    }

    /*
     * Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(bql_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
/* ... in arm_sctlr(): */
    return env->cp15.sctlr_el[el];
}

/* ... in aa64_va_parameters(): */
    max_tsz = 48 - (gran == Gran64K);   /* i.e. 47 with the 64K granule */
/* ... */
/*
 * Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
/* ... in fp_exception_el(env, cur_el) ... */
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns act as UNDEF */
                return 3;
            }
        }
        return 0;
    }
    /* ... */

    /*
     * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     * (See the standalone decode sketch after this function.)
     */
    if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
        /* ... decode fpen against cur_el and the security state ... */
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if (!extract32(env->cp15.nsacr, 10, 1)) {
        /* FP insns act as UNDEF */
        return cur_el == 2 ? 2 : 1;
    }
    /* ... */

    switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
    /* ... as for CPACR.FPEN, but trapping to EL2 ... */
    }
    if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
        /* Trap all FP ops to EL2 */
        return 2;
    }

    if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
/* ... */
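/*
 * Editor's note: a standalone sketch (the function name is invented) of the
 * CPACR_EL1.FPEN decode described in the comment above; the real code also
 * honours HCR_EL2.{E2H,TGE} and the secure-state wrinkles elided here:
 */
#include <stdbool.h>

static bool fpen_traps_access(int fpen, int cur_el)
{
    switch (fpen) {
    case 1:
        return cur_el == 0;   /* trap only EL0 accesses */
    case 3:
        return false;         /* trap no accesses */
    default:
        return cur_el <= 1;   /* 0, 2: trap EL0 and EL1/PL1 accesses */
    }
}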
/* ... in arm_mmu_idx_el(): */
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost. */
/* ... */
/*
 * ... The intent of this is that no predicate bit beyond VQ is ever set.
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; i++) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
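/*
 * Editor's note: a worked example of the predicate-mask arithmetic above.
 * Each 128-bit quadword of vector state owns 16 predicate bits, so one
 * 64-bit word of a preg covers four quadwords and (vq & 3) counts the
 * quadwords kept in the first, partial word; later words get mask 0.
 * Standalone illustration:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    for (unsigned vq = 1; vq <= 8; vq++) {
        uint64_t pmask = 0;
        if (vq & 3) {
            pmask = ~(-1ULL << (16 * (vq & 3)));
        }
        /* vq=1 -> 0xffff, vq=2 -> 0xffffffff, vq=4 -> 0 (no partial word) */
        printf("vq=%u first-word pmask=0x%016llx\n",
               vq, (unsigned long long)pmask);
    }
    return 0;
}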
/* ... in aarch64_sve_change_el(): */
    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }
    /* ... */

    /*
     * Both AArch64.TakeException and AArch64.ExceptionReturn invoke
     * ResetSVEState when taking an exception from, or returning to,
     * AArch32 state when PSTATE.SM is enabled.
     */
    sm = FIELD_EX64(env->svcr, SVCR, SM);
    if (old_a64 != new_a64 && sm) {
        arm_reset_sve_state(env);
        return;
    }

    /*
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    /* ... */
ARMSecuritySpace arm_security_space(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_secure_to_space(env->v7m.secure);
    }

    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    /* Check for AArch64 EL3 or AArch32 Mon. */
    if (is_a64(env)) {
        if (extract32(env->pstate, 2, 2) == 3) {
            /* ... Root if FEAT_RME is implemented, else Secure ... */
        }
    } else {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            return ARMSS_Secure;
        }
    }

    return arm_security_space_below_el3(env);
}

ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
{
    /* ... */
    /*
     * If EL3 is not supported then the secure state is implementation
     * defined, in which case QEMU defaults to non-secure.
     */
    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        return ARMSS_NonSecure;
    }

    if (!(env->cp15.scr_el3 & SCR_NS)) {
        return ARMSS_Secure;
    } else if (env->cp15.scr_el3 & SCR_NSE) {
        return ARMSS_Realm;
    } else {
        return ARMSS_NonSecure;
    }
}
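/*
 * Editor's note: the SCR_EL3.{NSE,NS} decode above follows the FEAT_RME
 * encoding: {0,0} Secure, {0,1} Non-secure, {1,1} Realm, {1,0} reserved
 * (folded into Secure here, since the code ignores NSE when NS is clear).
 * Standalone sketch; the enum tags mirror QEMU's ARMSS_* names:
 */
#include <stdint.h>

enum sec_space { SS_SECURE, SS_NONSECURE, SS_ROOT, SS_REALM };

#define X_SCR_NS   (1ull << 0)    /* architectural bit positions */
#define X_SCR_NSE  (1ull << 62)

static enum sec_space space_below_el3(uint64_t scr_el3)
{
    if (!(scr_el3 & X_SCR_NS)) {
        return SS_SECURE;
    } else if (scr_el3 & X_SCR_NSE) {
        return SS_REALM;
    }
    return SS_NONSECURE;
}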