Lines matching +full:low +full:- +full:profile (results are from QEMU's target/arm/internals.h; each hit is prefixed with its line number in that file and suffixed with its enclosing function)

2  * QEMU ARM CPU -- internal functions and types
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
31 #include "accel/tcg/tb-cpu-state.h"
33 #include "tcg/tcg-gvec-desc.h"
36 #include "cpu-features.h"
50 return EX_TBFLAG_ANY(env->hflags, MMUIDX); in arm_env_mmu_index()
55 /* Return true if this exception number represents a QEMU-internal in excp_is_internal()
73 * We will use the back-compat value:
74 * - for QEMU CPU types added before we standardized on 1GHz
75 * - for versioned machine types with a version of 9.0 or earlier
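
The two bullets above describe QEMU's generic-timer frequency policy: newer CPU types get the standardized 1 GHz CNTFRQ, while older CPU definitions and versioned machines of 9.0 or earlier keep the historical default. A minimal sketch of that selection, assuming the back-compat value is 62.5 MHz (the old GTIMER_SCALE-derived default); the helper name and the 'legacy' flag are hypothetical, not QEMU's API:

    /* Illustrative sketch only: choose a CNTFRQ default under the
     * policy described above. The helper name and 'legacy' flag are
     * hypothetical; 62.5 MHz is the old back-compat default, 1 GHz
     * the standardized one. */
    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t default_cntfrq_hz(bool legacy)
    {
        return legacy ? 62500000u : 1000000000u;
    }
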
95 FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
183 /* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
267 /* We use a few fake FSR values for internal purposes in M profile.
268 * M profile cores don't have A/R format FSRs, but currently our
269 * get_phys_addr() code assumes A/R profile and reports failures via
271 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
274 * only for M profile and have no A/R equivalent, though, so we have
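
The comment fragments above explain the trick: M-profile cores have no A/R-format FSR, so get_phys_addr() reports failures using FSR encodings that have no A/R-profile meaning, and arm_v7m_cpu_do_interrupt() translates them into the right M-profile exception and status bit. A sketch of the idea; the names and values below are placeholders, not QEMU's actual internal markers:

    /* Hypothetical internal FSR markers in the style the comment
     * describes: encodings unused by A/R profiles, consumed only by
     * the M-profile exception-delivery code. Placeholder values. */
    #define TOY_FAKE_FSR_SFAULT   0xe  /* would map to a SecureFault */
    #define TOY_FAKE_FSR_NSC_EXEC 0xf  /* NS execution from S&NSC memory */
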
301 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
347 * This should be used as the index into env->banked_r14[], and
348 * bank_number() used for the index into env->banked_r13[] and
349 * env->banked_spsr[].
405 * - we are NS and EL2 is implemented but doesn't support AArch32 in arm_scr_rw_eff()
406 * - we are S and EL2 is enabled (in which case it must be AArch64) in arm_scr_rw_eff()
410 if (env->cp15.scr_el3 & SCR_RW) { in arm_scr_rw_eff()
413 if (env->cp15.scr_el3 & SCR_NS) { in arm_scr_rw_eff()
417 return env->cp15.scr_el3 & SCR_EEL2; in arm_scr_rw_eff()
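
Pieced together, the arm_scr_rw_eff() fragments above compute the effective value of SCR_EL3.RW: the bit reads as 1 when set, and is also treated as 1 whenever the next lower EL cannot be AArch32. A consolidated sketch, with a stub boolean standing in for QEMU's feature checks and the SCR bit positions taken from the architecture:

    #include <stdbool.h>
    #include <stdint.h>

    #define SCR_NS   (1u << 0)
    #define SCR_RW   (1u << 10)
    #define SCR_EEL2 (1u << 18)

    /* Sketch: effective SCR_EL3.RW. 'el2_has_aa32' stands in for the
     * ID-register check that QEMU performs. */
    static bool scr_rw_eff(uint32_t scr_el3, bool el2_has_aa32)
    {
        if (scr_el3 & SCR_RW) {
            return true;
        }
        if (scr_el3 & SCR_NS) {
            /* NS: RW is effectively 1 if EL2 cannot run AArch32. */
            return !el2_has_aa32;
        }
        /* Secure: if EL2 is enabled it must be AArch64, forcing RW. */
        return scr_el3 & SCR_EEL2;
    }
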
449 aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW); in arm_el_is_aa64()
463 !(env->v7m.control[env->v7m.secure] & 1); in arm_current_el()
467 return extract32(env->pstate, 2, 2); in arm_current_el()
470 switch (env->uncached_cpsr & 0x1f) { in arm_current_el()
479 /* If EL3 is 32-bit then all secure privileged modes run in EL3 */ in arm_current_el()
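
The arm_current_el() fragments show three cases: M-profile privilege from handler mode and CONTROL.nPRIV, AArch64 EL taken straight from PSTATE bits [3:2], and AArch32 EL derived from the CPSR mode field. A standalone sketch of the AArch32 arm only, using the architectural mode encodings and ignoring the 32-bit-EL3 subtlety noted in the last fragment:

    #include <stdint.h>

    /* Sketch: map an AArch32 CPSR mode field (low 5 bits) to an EL. */
    static int aa32_mode_to_el(uint32_t cpsr)
    {
        switch (cpsr & 0x1f) {
        case 0x10:              /* USR */
            return 0;
        case 0x1a:              /* HYP */
            return 2;
        case 0x16:              /* MON */
            return 3;
        default:                /* FIQ/IRQ/SVC/ABT/UND/SYS: privileged */
            return 1;
        }
    }
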
493 * architecture (as word-invariant big-endianness), where loads in arm_cpu_data_is_big_endian_a32()
498 * In user mode, however, we model BE32 as byte-invariant in arm_cpu_data_is_big_endian_a32()
499 * big-endianness (because user-only code cannot tell the in arm_cpu_data_is_big_endian_a32()
508 return env->uncached_cpsr & CPSR_E; in arm_cpu_data_is_big_endian_a32()
516 /* Return true if the processor is in big-endian mode. */
537 if (env->pstate & PSTATE_SP) { in aarch64_save_sp()
538 env->sp_el[el] = env->xregs[31]; in aarch64_save_sp()
540 env->sp_el[0] = env->xregs[31]; in aarch64_save_sp()
546 if (env->pstate & PSTATE_SP) { in aarch64_restore_sp()
547 env->xregs[31] = env->sp_el[el]; in aarch64_restore_sp()
549 env->xregs[31] = env->sp_el[0]; in aarch64_restore_sp()
559 if (!((imm ^ env->pstate) & PSTATE_SP)) { in update_spsel()
563 env->pstate = deposit32(env->pstate, 0, 1, imm); in update_spsel()
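
aarch64_save_sp(), aarch64_restore_sp() and update_spsel() above implement SP banking: xregs[31] is only a cached view of SP_EL0 or SP_ELx, selected by PSTATE.SP, and flipping SPSEL has to write back the outgoing view before loading the incoming one. A toy model (struct and names invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_env {
        uint64_t xreg31;     /* stands in for env->xregs[31] */
        uint64_t sp_el[4];
        bool pstate_sp;      /* PSTATE.SP (SPSEL) */
        int el;
    };

    static void toy_save_sp(struct toy_env *env)
    {
        env->sp_el[env->pstate_sp ? env->el : 0] = env->xreg31;
    }

    static void toy_restore_sp(struct toy_env *env)
    {
        env->xreg31 = env->sp_el[env->pstate_sp ? env->el : 0];
    }

    static void toy_update_spsel(struct toy_env *env, bool new_sp)
    {
        if (new_sp == env->pstate_sp) {
            return;              /* no change: nothing to swap */
        }
        toy_save_sp(env);        /* stash the currently visible SP */
        env->pstate_sp = new_sp;
        toy_restore_sp(env);     /* load the newly selected bank */
    }
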
576 * Returns the implementation-defined bit-width of physical addresses.
606 uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1]; in extended_addresses_enabled()
620 * complete delete-and-reinstate of the QEMU watchpoint list and so is
629 * complete delete-and-reinstate of the QEMU breakpoint list and so is
667 env->exclusive_addr = -1; in arm_clear_exclusive()
715 * @domain: Domain of the fault address (for non-LPAE CPUs only)
720 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
721 * @s1ns: True if we faulted on a non-secure IPA while in secure state
740 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
742 * we set up a whole FSR-format code including domain field and
749 switch (fi->type) { in arm_fi_to_sfsc()
753 fsc = fi->level == 1 ? 0x3 : 0x6; in arm_fi_to_sfsc()
759 fsc = fi->level == 1 ? 0xd : 0xf; in arm_fi_to_sfsc()
762 fsc = fi->level == 1 ? 0x9 : 0xb; in arm_fi_to_sfsc()
765 fsc = fi->level == 1 ? 0x5 : 0x7; in arm_fi_to_sfsc()
768 fsc = 0x8 | (fi->ea << 12); in arm_fi_to_sfsc()
771 fsc = fi->level == 1 ? 0xc : 0xe; in arm_fi_to_sfsc()
772 fsc |= (fi->ea << 12); in arm_fi_to_sfsc()
778 fsc = fi->level == 1 ? 0x40c : 0x40e; in arm_fi_to_sfsc()
784 fsc = 0x406 | (fi->ea << 12); in arm_fi_to_sfsc()
812 * short-format status code. in arm_fi_to_sfsc()
817 fsc |= (fi->domain << 4); in arm_fi_to_sfsc()
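
The arm_fi_to_sfsc() fragments assemble a short-format FSR: a per-fault-type FSC (with section vs. page variants chosen by level), the ExT flag in bit 12 for external aborts, and the domain OR'ed into bits [7:4] at the end. A compact, self-contained sketch covering three representative fault kinds (toy names; the FSC encodings match the fragments):

    #include <stdbool.h>
    #include <stdint.h>

    enum toy_fault { TOY_TRANSLATION, TOY_PERMISSION, TOY_SYNC_EXTERNAL };

    static uint32_t toy_fi_to_sfsc(enum toy_fault type, int level,
                                   unsigned domain, bool ea)
    {
        uint32_t fsc;

        switch (type) {
        case TOY_TRANSLATION:
            fsc = (level == 1) ? 0x5 : 0x7;    /* section : page */
            break;
        case TOY_PERMISSION:
            fsc = (level == 1) ? 0xd : 0xf;    /* section : page */
            break;
        case TOY_SYNC_EXTERNAL:
            fsc = 0x8 | ((uint32_t)ea << 12);  /* ExT in bit 12 */
            break;
        default:                               /* other kinds omitted */
            fsc = 0;
            break;
        }
        return fsc | (domain << 4);            /* domain in bits [7:4] */
    }
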
822 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
830 switch (fi->type) { in arm_fi_to_lfsc()
834 assert(fi->level >= -1 && fi->level <= 3); in arm_fi_to_lfsc()
835 if (fi->level < 0) { in arm_fi_to_lfsc()
838 fsc = fi->level; in arm_fi_to_lfsc()
842 assert(fi->level >= 0 && fi->level <= 3); in arm_fi_to_lfsc()
843 fsc = 0b001000 | fi->level; in arm_fi_to_lfsc()
846 assert(fi->level >= 0 && fi->level <= 3); in arm_fi_to_lfsc()
847 fsc = 0b001100 | fi->level; in arm_fi_to_lfsc()
850 assert(fi->level >= -1 && fi->level <= 3); in arm_fi_to_lfsc()
851 if (fi->level < 0) { in arm_fi_to_lfsc()
854 fsc = 0b000100 | fi->level; in arm_fi_to_lfsc()
858 fsc = 0x10 | (fi->ea << 12); in arm_fi_to_lfsc()
861 assert(fi->level >= -1 && fi->level <= 3); in arm_fi_to_lfsc()
862 if (fi->level < 0) { in arm_fi_to_lfsc()
865 fsc = 0b010100 | fi->level; in arm_fi_to_lfsc()
867 fsc |= fi->ea << 12; in arm_fi_to_lfsc()
873 assert(fi->level >= -1 && fi->level <= 3); in arm_fi_to_lfsc()
874 if (fi->level < 0) { in arm_fi_to_lfsc()
877 fsc = 0b011100 | fi->level; in arm_fi_to_lfsc()
884 fsc = 0x11 | (fi->ea << 12); in arm_fi_to_lfsc()
905 assert(fi->level >= -1 && fi->level <= 3); in arm_fi_to_lfsc()
906 if (fi->level < 0) { in arm_fi_to_lfsc()
909 fsc = 0b100100 | fi->level; in arm_fi_to_lfsc()
917 * long-format status code. in arm_fi_to_lfsc()
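
The arm_fi_to_lfsc() fragments follow a different pattern: each fault kind has a 6-bit base status code OR'ed with the translation-table level, and level -1 (possible with FEAT_LPA2) gets a dedicated encoding. A sketch of the translation-fault case only, in the same binary-literal style as the source:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: long-format FSC for a translation fault at 'level'. */
    static uint32_t toy_translation_lfsc(int level)
    {
        assert(level >= -1 && level <= 3);
        if (level < 0) {
            return 0b101011;                  /* level -1 (FEAT_LPA2) */
        }
        return 0b000100 | (uint32_t)level;    /* levels 0..3 */
    }
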
964 /* AArch64 is always A-profile. */ in core_to_aa64_mmu_idx()
1000 QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) { in arm_call_pre_el_change_hook()
1001 hook->hook(cpu, hook->opaque); in arm_call_pre_el_change_hook()
1007 QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) { in arm_call_el_change_hook()
1008 hook->hook(cpu, hook->opaque); in arm_call_el_change_hook()
1113 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; in regime_sctlr()
1118 * and the Non-Secure stage 2 translation regimes (and hence which are
1130 return env->cp15.vtcr_el2; in regime_tcr()
1141 uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK; in regime_tcr()
1142 v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK; in regime_tcr()
1145 return env->cp15.tcr_el[regime_el(env, mmu_idx)]; in regime_tcr()
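
regime_tcr() above selects the controlling TCR for a translation regime; the interesting case in the fragments is Secure stage 2, which reads VSTCR_EL2 but takes the fields shared with the Non-secure stage 2 regime from VTCR_EL2. A sketch of that merge with a placeholder shared-field mask (QEMU's VTCR_SHARED_FIELD_MASK names specific fields; the value below is illustrative only):

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_SHARED_FIELD_MASK 0x7fu   /* placeholder, not QEMU's mask */

    static uint64_t toy_stage2_tcr(bool secure_stage2,
                                   uint64_t vtcr_el2, uint64_t vstcr_el2)
    {
        if (!secure_stage2) {
            return vtcr_el2;
        }
        /* Secure stage 2: VSTCR_EL2, with shared fields from VTCR_EL2. */
        return (vstcr_el2 & ~(uint64_t)TOY_SHARED_FIELD_MASK)
             | (vtcr_el2 & TOY_SHARED_FIELD_MASK);
    }
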
1168 * Note that the ID register BRPS field is "number of bps - 1",
1173 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in arm_num_brps()
1174 return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1; in arm_num_brps()
1176 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; in arm_num_brps()
1182 * Note that the ID register WRPS field is "number of wps - 1",
1187 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in arm_num_wrps()
1188 return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1; in arm_num_wrps()
1190 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; in arm_num_wrps()
1196 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
1201 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { in arm_num_ctx_cmps()
1202 return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1; in arm_num_ctx_cmps()
1204 return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; in arm_num_ctx_cmps()
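
All three helpers above decode ID-register fields that store "count minus one". The pattern, as a plain shift-and-mask sketch (QEMU uses its FIELD_EX macros instead; the BRPs bit position in the usage note is the architectural one):

    #include <stdint.h>

    /* Sketch: extract an ID field that encodes 'count - 1'. */
    static unsigned id_field_count(uint64_t idreg, unsigned shift,
                                   unsigned width)
    {
        uint64_t field = (idreg >> shift) & ((1ull << width) - 1);
        return (unsigned)field + 1;
    }

    /* e.g. ID_AA64DFR0_EL1.BRPs is bits [15:12]:
     *   unsigned nbps = id_field_count(id_aa64dfr0, 12, 4);
     */
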
1218 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. in v7m_using_psp()
1221 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; in v7m_using_psp()
1232 return env->v7m.psplim[env->v7m.secure]; in v7m_sp_limit()
1234 return env->v7m.msplim[env->v7m.secure]; in v7m_sp_limit()
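
v7m_using_psp() and v7m_sp_limit() above encode the M-profile stack rules: the process stack is live only in Thread mode with CONTROL.SPSEL set (v8M, unlike v7M, allows Handler mode with SPSEL set, hence the double check), and the applicable stack limit follows the same selection. A sketch with stub inputs:

    #include <stdbool.h>
    #include <stdint.h>

    static bool toy_using_psp(bool handler_mode, bool control_spsel)
    {
        return !handler_mode && control_spsel;
    }

    static uint32_t toy_sp_limit(bool handler_mode, bool control_spsel,
                                 uint32_t psplim, uint32_t msplim)
    {
        return toy_using_psp(handler_mode, control_spsel) ? psplim
                                                          : msplim;
    }
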
1246 switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) { in v7m_cpacr_pass()
1265 * the low bits of the specified PSR.
1278 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
1280 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
1287 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
1289 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
1296 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
1298 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
1305 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
1307 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
1316 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
1511 && !(env->cp15.scr_el3 & SCR_ATA)) { in allocation_tag_access_enabled()
1545 * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
1548 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
1632 FIELD(MTEDESC, SIZEM1, 12, 32 - 12) /* size - 1 */
1722 * for the tag to be present in the FAR_ELx register. But for user-only
1746 /* Values for M-profile PSR.ECI for MVE insns */
1795 return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT; in pmu_num_counters()
1801 return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1); in pmu_counter_mask()
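
pmu_counter_mask() above builds a bitmap of all implemented PMU counters: bit 31 is the fixed cycle counter, and bits [N-1:0] cover the N event counters reported by PMCR.N. A sketch plus a worked value:

    #include <stdint.h>

    static uint64_t toy_pmu_counter_mask(unsigned num_counters)
    {
        return (1ull << 31) | ((1ull << num_counters) - 1);
    }

    /* e.g. 4 event counters: (1 << 31) | 0xf == 0x8000000f */
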
1825 return arm_feature(&cpu->env, ARM_FEATURE_AARCH64); in arm_gdbstub_is_aarch64()
1857 int bot_pac_bit = 64 - param.tsz; in pauth_ptr_mask()
1858 int top_pac_bit = 64 - 8 * param.tbi; in pauth_ptr_mask()
1860 return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit); in pauth_ptr_mask()
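
pauth_ptr_mask() above computes which pointer bits hold the PAC: everything from the top of the virtual address range (bit 64 - TxSZ) up to, but not including, the top of the pointer, which drops from bit 64 to bit 56 when TBI reserves the top byte. A sketch without QEMU's MAKE_64BIT_MASK, assuming a valid TxSZ so the shift widths stay in range:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t toy_pauth_ptr_mask(unsigned tsz, bool tbi)
    {
        unsigned bot = 64 - tsz;            /* lowest PAC bit */
        unsigned top = 64 - (tbi ? 8 : 0);  /* one above highest PAC bit */
        /* assumes 0 < top - bot < 64, true for any valid TxSZ */
        return ((1ull << (top - bot)) - 1) << bot;
    }

    /* e.g. tsz = 16, tbi = true: PAC bits [55:48], mask 0xff000000000000 */
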
1876 return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; in arm_mdcr_el2_eff()
1881 ((1 << (1 - 1)) | (1 << (2 - 1)) | \
1882 (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
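
The #define body above (its name falls outside the matched lines) builds a bitmap with bit (n - 1) set for each power-of-two n in {1, 2, 4, 8, 16}; it reads as a power-of-two membership map in the style of QEMU's SVE vector-quanta maps, though that identification is an assumption. A quick check of the resulting value and a typical membership test:

    #include <assert.h>

    #define TOY_POW2_MAP \
        ((1 << (1 - 1)) | (1 << (2 - 1)) | \
         (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

    int main(void)
    {
        assert(TOY_POW2_MAP == 0x808b);        /* bits 0, 1, 3, 7, 15 */
        int n = 4;
        assert(TOY_POW2_MAP & (1 << (n - 1))); /* n is a power of two */
        return 0;
    }
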
1885 * Return true if it is possible to take a fine-grained-trap to EL2.
1901 (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN)); in arm_fgt_active()
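
arm_fgt_active() above gates fine-grained traps: per the matched fragment, when EL3 is implemented it must also have enabled them via SCR_EL3.FGTEN. A sketch of the whole predicate with stub booleans for the feature and EL3 checks (the EL < 2 condition comes from the surrounding function, not the matched line):

    #include <stdbool.h>

    static bool toy_fgt_active(bool have_fgt, int current_el,
                               bool have_el3, bool scr_fgten)
    {
        return have_fgt && current_el < 2 && (!have_el3 || scr_fgten);
    }
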
1906 * allows for different breakpoints per-core, the current GDB
1940 #define cur_hw_wps (hw_watchpoints->len)
1941 #define cur_hw_bps (hw_breakpoints->len)