Lines Matching +full:supervisor +full:- +full:mode +full:- +full:visible

5  *  Copyright (c) 2005-2007 CodeSourcery
24 #include "translate-a32.h"
29 #include "exec/helper-proto.h"
32 #include "exec/helper-info.c.inc"
87 /* no-op */ in asimd_imm_const()
155 if (!s->condjmp) { in arm_gen_condlabel()
156 s->condlabel = gen_disas_label(s); in arm_gen_condlabel()
157 s->condjmp = 1; in arm_gen_condlabel()
229 switch (s->mmu_idx) { in get_a32_user_mem_index()
259 return diff + (s->thumb ? 4 : 8); in jmp_diff()
264 assert(s->pc_save != -1); in gen_pc_plus_diff()
265 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_pc_plus_diff()
266 tcg_gen_addi_i32(var, cpu_R[15], (s->pc_curr - s->pc_save) + diff); in gen_pc_plus_diff()
268 tcg_gen_movi_i32(var, s->pc_curr + diff); in gen_pc_plus_diff()
296 gen_pc_plus_diff(s, tmp, jmp_diff(s, ofs - (s->pc_curr & 3))); in add_reg_for_lit()
308 /* In Thumb mode, we must ignore bit 0. in store_reg()
309 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0] in store_reg()
311 * We choose to ignore [1:0] in ARM mode for all architecture versions. in store_reg()
313 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3); in store_reg()
314 s->base.is_jmp = DISAS_JUMP; in store_reg()
315 s->pc_save = -1; in store_reg()
317 /* For M-profile SP bits [1:0] are always zero */ in store_reg()
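A minimal standalone sketch of the PC-write masking rule described above (hypothetical helper; the constants mirror the comment, not the full QEMU logic):

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: mask a value being written to R15 per the rule above. */
    static uint32_t mask_pc_write(uint32_t value, bool thumb)
    {
        /* Thumb ignores bit 0; in ARM mode we choose to ignore bits [1:0]. */
        return value & (thumb ? ~1u : ~3u);
    }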
324 * Variant of store_reg which applies v8M stack-limit checks before updating
333 if (s->v8m_stackcheck) { in store_sp_checked()
365 TCGv_i32 tcg_el = tcg_constant_i32(s->current_el); in gen_rebuild_hflags()
382 /* We just completed a step of an insn. Move from Active-not-pending in gen_singlestep_exception()
383 * to Active-pending, and then also take the swstep exception. in gen_singlestep_exception()
392 gen_swstep_exception(s, 1, s->is_ldex); in gen_singlestep_exception()
393 s->base.is_jmp = DISAS_NORETURN; in gen_singlestep_exception()
402 if (s->eci) { in clear_eci_state()
404 s->eci = 0; in clear_eci_state()
439 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
471 /* dest = T0 - T1 + CF - 1. */
517 /* dest = T0 - T1. Compute C, N, V and Z flags */
576 shifter_out_im(var, 32 - shift); in gen_arm_shift_im()
588 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
596 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
604 shifter_out_im(var, shift - 1); in gen_arm_shift_im()
679 case 9: /* ls: !C || Z -> !(C && !Z) */ in arm_test_cc()
682 /* CF is 1 for C, so -CF is an all-bits-set mask for C; in arm_test_cc()
683 ZF is non-zero for !Z; so AND the two subexpressions. */ in arm_test_cc()
688 case 10: /* ge: N == V -> N ^ V == 0 */ in arm_test_cc()
689 case 11: /* lt: N != V -> N ^ V != 0 */ in arm_test_cc()
725 cmp->cond = cond; in arm_test_cc()
726 cmp->value = value; in arm_test_cc()
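To make the "ls" trick concrete, a hedged sketch using plain variables with the conventions from the comment (CF holds 0 or 1; ZF is non-zero iff Z is clear):

    #include <stdbool.h>
    #include <stdint.h>
    /* ls = !C || Z = !(C && !Z): -CF is all-ones when C is set, so
     * (-CF & ZF) is non-zero exactly when C && !Z; ls tests for zero. */
    static bool test_ls(uint32_t CF, uint32_t ZF)
    {
        return ((-CF) & ZF) == 0;
    }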
731 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label); in arm_jump_cc()
743 if (s->condexec_mask) { in gen_set_condexec()
744 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); in gen_set_condexec()
753 s->pc_save = s->pc_curr + diff; in gen_update_pc()
759 s->base.is_jmp = DISAS_JUMP; in gen_bx()
763 s->pc_save = -1; in gen_bx()
768 * For M-profile CPUs, include logic to detect exception-return
770 * and BX reg, and no others, and happens only for code in Handler mode.
772 * which signals a function return from non-secure state; this can happen
773 * in both Handler and Thread mode.
776 * in Thread mode. For system emulation do_v7m_exception_exit() checks
778 * the same behaviour as for a branch to a non-magic address).
780 * In linux-user mode it is unclear what the right behaviour for an
791 * s->base.is_jmp that we need to do the rest of the work later. in gen_bx_excret()
796 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) { in gen_bx_excret()
797 s->base.is_jmp = DISAS_BX_EXCRET; in gen_bx_excret()
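As a rough illustration of the detection described above (the exact magic values live elsewhere in QEMU; the constants here are assumptions based on the architectural EXC_RETURN/FNC_RETURN prefixes):

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: treat any branch target at or above the smallest magic
     * value as a candidate exception return; the real code validates
     * the exact encoding at exception-exit time. */
    static bool looks_like_excret(uint32_t dest, bool v8m_secure)
    {
        /* assumed: FNC_RETURN >= 0xfefffffe, EXC_RETURN >= 0xff000000 */
        uint32_t min_magic = v8m_secure ? 0xfefffffeu : 0xff000000u;
        return dest >= min_magic;
    }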
819 if (s->ss_active) { in gen_bx_excret_final_code()
826 * At this point in runtime env->regs[15] and env->thumb will hold in gen_bx_excret_final_code()
827 * the exception-return magic number, which do_v7m_exception_exit() in gen_bx_excret_final_code()
829 * the cpu-exec main loop guarantees that we will always go straight in gen_bx_excret_final_code()
830 * from raising the exception to the exception-handling code. in gen_bx_excret_final_code()
846 * - we don't need to do gen_update_pc() because the bxns helper will in gen_bxns()
848 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE in gen_bxns()
851 * is correct in the non-UNPREDICTABLE cases, and we can choose in gen_bxns()
855 s->base.is_jmp = DISAS_EXIT; in gen_bxns()
868 s->base.is_jmp = DISAS_EXIT; in gen_blxns()
925 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_addr()
926 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) { in gen_aa32_addr()
927 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE))); in gen_aa32_addr()
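Worked example of the BE32 address XOR above: for sub-word accesses under SCTLR.B the low address bits are flipped within the containing word, so a byte at address 3 is fetched from byte lane 0 and vice versa:

    #include <stdint.h>
    /* Sketch: BE32 address munging, size_log2 = 0 (byte) or 1 (halfword). */
    static uint32_t be32_xor_addr(uint32_t addr, unsigned size_log2)
    {
        return addr ^ (4 - (1u << size_log2));
    }
    /* be32_xor_addr(0, 0) == 3, be32_xor_addr(3, 0) == 0,
     * be32_xor_addr(0, 1) == 2, be32_xor_addr(2, 1) == 0 */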
957 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_ld_internal_i64()
958 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { in gen_aa32_ld_internal_i64()
968 /* Not needed for user-mode BE32, where we use MO_BE instead. */ in gen_aa32_st_internal_i64()
969 if (!IS_USER_ONLY && s->sctlr_b && (opc & MO_SIZE) == MO_64) { in gen_aa32_st_internal_i64()
1029 s->svc_imm = imm16; in gen_hvc()
1031 s->base.is_jmp = DISAS_HVC; in gen_hvc()
1042 s->base.is_jmp = DISAS_SMC; in gen_smc()
1050 s->base.is_jmp = DISAS_NORETURN; in gen_exception_internal_insn()
1073 if (s->aarch64) { in gen_exception_insn_el_v()
1080 s->base.is_jmp = DISAS_NORETURN; in gen_exception_insn_el_v()
1093 if (s->aarch64) { in gen_exception_insn()
1100 s->base.is_jmp = DISAS_NORETURN; in gen_exception_insn()
1108 s->base.is_jmp = DISAS_NORETURN; in gen_exception_bkpt_insn()
1121 s->base.is_jmp = DISAS_EXIT; in gen_lookup_tb()
1134 * In system mode, we don't allow userspace access to in gen_hlt()
1136 * (and for consistency with our 32-bit semihosting). in gen_hlt()
1138 if (semihosting_enabled(s->current_el == 0) && in gen_hlt()
1139 (imm == (s->thumb ? 0x3c : 0xf000))) { in gen_hlt()
1165 * Calculate the offset assuming fully little-endian, in neon_element_offset()
1166 * then XOR to account for the order of the 8-byte units. in neon_element_offset()
1169 ofs ^= 8 - element_size; in neon_element_offset()
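The same XOR idea yields big-endian-host element offsets; a sketch assuming ofs is the little-endian byte offset of an element of the given size within the vector:

    #include <stdint.h>
    /* Sketch: reorder within each 8-byte unit for a big-endian host. */
    static int neon_be_offset(int ofs, int element_size)
    {
        return ofs ^ (8 - element_size);
    }
    /* e.g. a 16-bit element at LE offset 0 sits at offset 6 on a BE host */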
1443 tcg_gen_addi_i32(tmp, tmp, -offset); in gen_iwmmxt_address()
1454 tcg_gen_addi_i32(tmp, tmp, -offset); in gen_iwmmxt_address()
2511 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); in disas_dsp_insn()
2533 if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) { in gen_goto_tb()
2535 * For pcrel, the pc must always be up-to-date on entry to in gen_goto_tb()
2542 if (tb_cflags(s->base.tb) & CF_PCREL) { in gen_goto_tb()
2549 tcg_gen_exit_tb(s->base.tb, n); in gen_goto_tb()
2554 s->base.is_jmp = DISAS_NORETURN; in gen_goto_tb()
2560 if (unlikely(s->ss_active)) { in gen_jmp_tb()
2563 s->base.is_jmp = DISAS_JUMP; in gen_jmp_tb()
2566 switch (s->base.is_jmp) { in gen_jmp_tb()
2590 s->base.is_jmp = DISAS_NORETURN; in gen_jmp_tb()
2638 mask &= aarch32_cpsr_valid_mask(s->features, s->isar); in msr_mask()
2657 /* ??? This is also undefined in system mode. */ in gen_set_psr()
2686 * the target mode and register number, and identify the various in msr_banked_access_decode()
2689 * + executed in user mode in msr_banked_access_decode()
2697 * Accesses to Monitor mode registers from Secure EL1 (which implies in msr_banked_access_decode()
2794 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) { in msr_banked_access_decode()
2797 if (s->current_el == 1) { in msr_banked_access_decode()
2821 * r13_hyp can only be accessed from Monitor mode, and so we in msr_banked_access_decode()
2823 * elr_hyp can be accessed also from Hyp mode, so forbid in msr_banked_access_decode()
2827 * mode. However, there is some real-world code that will do in msr_banked_access_decode()
2829 * access. (Notably a standard Cortex-R52 startup code fragment in msr_banked_access_decode()
2830 * does this.) So we permit SPSR_hyp from Hyp mode also, to allow in msr_banked_access_decode()
2833 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 in msr_banked_access_decode()
2834 || (s->current_el < 3 && *regno != 16 && *regno != 17)) { in msr_banked_access_decode()
2866 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_msr_banked()
2886 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_mrs_banked()
2906 translator_io_start(&s->base); in gen_rfe()
2908 /* Must exit loop to check unmasked IRQs */ in gen_rfe()
2909 s->base.is_jmp = DISAS_EXIT; in gen_rfe()
2912 /* Generate an old-style exception return. Marks pc as dead. */
2921 0b0000000111100111, /* crn == 9, crm == {c0-c2, c5-c8} */ in aa32_cpreg_encoding_in_impdef_space()
2923 0b1000000111111111, /* crn == 11, crm == {c0-c8, c15} */ in aa32_cpreg_encoding_in_impdef_space()
2927 return (mask[crn - 9] >> crm) & 1; in aa32_cpreg_encoding_in_impdef_space()
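The three words above are per-crn bitmaps indexed by crm; decoding the first one as a worked example:

    /* 0b0000000111100111 (crn == 9): bits 0-2 and 5-8 are set,
     * i.e. crm in {c0-c2, c5-c8}, matching the comment; the lookup
     * tests bit crm of mask[crn - 9]. */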
2936 uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2); in do_coproc_insn()
2937 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); in do_coproc_insn()
2975 * guest visible. in do_coproc_insn()
2982 if (s->hstr_active && cpnum == 15 && s->current_el == 1) { in do_coproc_insn()
3006 * to assume continue-to-next-instruction. in do_coproc_insn()
3008 s->base.is_jmp = DISAS_NEXT; in do_coproc_insn()
3019 switch (s->current_el) { in do_coproc_insn()
3042 s->ns ? "non-secure" : "secure"); in do_coproc_insn()
3048 crm, opc2, s->ns ? "non-secure" : "secure"); in do_coproc_insn()
3055 if (!cp_access_ok(s->current_el, ri, isread)) { in do_coproc_insn()
3060 if ((s->hstr_active && s->current_el == 0) || ri->accessfn || in do_coproc_insn()
3061 (ri->fgt && s->fgt_active) || in do_coproc_insn()
3076 } else if (ri->type & ARM_CP_RAISES_EXC) { in do_coproc_insn()
3086 switch (ri->type & ARM_CP_SPECIAL_MASK) { in do_coproc_insn()
3096 s->base.is_jmp = DISAS_WFI; in do_coproc_insn()
3103 if (ri->type & ARM_CP_IO) { in do_coproc_insn()
3105 need_exit_tb = translator_io_start(&s->base); in do_coproc_insn()
3113 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3114 tmp64 = tcg_constant_i64(ri->resetvalue); in do_coproc_insn()
3115 } else if (ri->readfn) { in do_coproc_insn()
3123 tcg_gen_ld_i64(tmp64, tcg_env, ri->fieldoffset); in do_coproc_insn()
3133 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3134 tmp = tcg_constant_i32(ri->resetvalue); in do_coproc_insn()
3135 } else if (ri->readfn) { in do_coproc_insn()
3142 tmp = load_cpu_offset(ri->fieldoffset); in do_coproc_insn()
3155 if (ri->type & ARM_CP_CONST) { in do_coproc_insn()
3166 if (ri->writefn) { in do_coproc_insn()
3172 tcg_gen_st_i64(tmp64, tcg_env, ri->fieldoffset); in do_coproc_insn()
3176 if (ri->writefn) { in do_coproc_insn()
3182 store_cpu_offset(tmp, ri->fieldoffset, 4); in do_coproc_insn()
3187 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { in do_coproc_insn()
3192 gen_rebuild_hflags(s, ri->type & ARM_CP_NEWEL); in do_coproc_insn()
3210 if (extract32(s->c15_cpar, cpnum, 1) == 0) { in disas_xscale_insn()
3223 /* Store a 64-bit value to a register pair. Clobbers val. */
3235 /* load and add a 64-bit value from a register pair. */
3242 /* Load 64-bit value rd:rn. */ in gen_addq()
3267 MemOp opc = size | MO_ALIGN | s->be_data; in gen_load_exclusive()
3269 s->is_ldex = true; in gen_load_exclusive()
3276 * For AArch32, architecturally the 32-bit word at the lowest in gen_load_exclusive()
3278 * the CPU is big-endian. That means we don't want to do a in gen_load_exclusive()
3280 * architecturally 64-bit access, but instead do a 64-bit access in gen_load_exclusive()
3287 if (s->be_data == MO_BE) { in gen_load_exclusive()
3304 tcg_gen_movi_i64(cpu_exclusive_addr, -1); in gen_clrex()
3315 MemOp opc = size | MO_ALIGN | s->be_data; in gen_store_exclusive()
3317 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) { in gen_store_exclusive()
3339 * For AArch32, architecturally the 32-bit word at the lowest in gen_store_exclusive()
3341 * the CPU is big-endian. Since we're going to treat this as a in gen_store_exclusive()
3342 * single 64-bit BE store, we need to put the two halves in the in gen_store_exclusive()
3345 * SCTLR_B as if for an architectural 64-bit access. in gen_store_exclusive()
3347 if (s->be_data == MO_BE) { in gen_store_exclusive()
3370 tcg_gen_movi_i64(cpu_exclusive_addr, -1); in gen_store_exclusive()
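A sketch of the half-ordering rule the comment above describes: a big-endian 64-bit store writes its high half to the lower address, so Rt (which must land at the lower address) goes into the high half:

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: pack Rt/Rt2 for a single 64-bit store. */
    static uint64_t pack_strexd(uint32_t rt, uint32_t rt2, bool be)
    {
        return be ? ((uint64_t)rt << 32) | rt2
                  : ((uint64_t)rt2 << 32) | rt;
    }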
3376 * @mode: mode field from insn (which stack to store to)
3377 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
3383 uint32_t mode, uint32_t amode, bool writeback) in gen_srs() argument
3390 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1 in gen_srs()
3391 * and specified mode is monitor mode in gen_srs()
3392 * - UNDEFINED in Hyp mode in gen_srs()
3393 * - UNPREDICTABLE in User or System mode in gen_srs()
3394 * - UNPREDICTABLE if the specified mode is: in gen_srs()
3395 * -- not implemented in gen_srs()
3396 * -- not a valid mode number in gen_srs()
3397 * -- a mode that's at a higher exception level in gen_srs()
3398 * -- Monitor, if we are Non-secure in gen_srs()
3401 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) { in gen_srs()
3406 if (s->current_el == 0 || s->current_el == 2) { in gen_srs()
3410 switch (mode) { in gen_srs()
3420 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) { in gen_srs()
3425 /* No need to check specifically for "are we non-secure" because in gen_srs()
3426 * we've already made EL0 UNDEF and handled the trap for S-EL1; in gen_srs()
3427 * so if this isn't EL3 then we must be non-secure. in gen_srs()
3429 if (s->current_el != 3) { in gen_srs()
3443 /* get_r13_banked() will raise an exception if called from System mode */ in gen_srs()
3446 gen_helper_get_r13_banked(addr, tcg_env, tcg_constant_i32(mode)); in gen_srs()
3449 offset = -4; in gen_srs()
3455 offset = -8; in gen_srs()
3472 offset = -8; in gen_srs()
3478 offset = -4; in gen_srs()
3487 gen_helper_set_r13_banked(tcg_env, tcg_constant_i32(mode), addr); in gen_srs()
3489 s->base.is_jmp = DISAS_UPDATE_EXIT; in gen_srs()
3496 arm_gen_test_cc(cond ^ 1, s->condlabel.label); in arm_skip_unless()
3546 return s->condexec_mask == 0; in t16_setflags()
3551 return (x & 0xff) | (x & 0x100) << (14 - 8); in t16_push_list()
3556 return (x & 0xff) | (x & 0x100) << (15 - 8); in t16_pop_list()
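Worked example of the list expansion above: bit 8 of the 16-bit encoding stands for LR on a push and for PC on a pop, so it is relocated to bit 14 or bit 15 respectively:

    /* t16_push_list(0x1ff) == 0x40ff  (r0-r7 plus lr at bit 14)
     * t16_pop_list(0x1ff)  == 0x80ff  (r0-r7 plus pc at bit 15) */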
3563 #include "decode-a32.c.inc"
3564 #include "decode-a32-uncond.c.inc"
3565 #include "decode-t32.c.inc"
3566 #include "decode-t16.c.inc"
3580 * to be in the coprocessor-instruction space at all. v8M still in valid_cp()
3599 if (!valid_cp(s, a->cp)) { in trans_MCR()
3602 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2, in trans_MCR()
3603 false, a->rt, 0); in trans_MCR()
3609 if (!valid_cp(s, a->cp)) { in trans_MRC()
3612 do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2, in trans_MRC()
3613 true, a->rt, 0); in trans_MRC()
3619 if (!valid_cp(s, a->cp)) { in trans_MCRR()
3622 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0, in trans_MCRR()
3623 false, a->rt, a->rt2); in trans_MCRR()
3629 if (!valid_cp(s, a->cp)) { in trans_MRRC()
3632 do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0, in trans_MRRC()
3633 true, a->rt, a->rt2); in trans_MRRC()
3637 /* Helpers to swap operands for reverse-subtract. */
3682 /* See ALUWritePC: Interworking only from a32 mode. */ in store_reg_kind()
3683 if (s->thumb) { in store_reg_kind()
3711 tmp2 = load_reg(s, a->rm); in op_s_rrr_shi()
3712 gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc); in op_s_rrr_shi()
3713 tmp1 = load_reg(s, a->rn); in op_s_rrr_shi()
3720 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rrr_shi()
3729 tmp = load_reg(s, a->rm); in op_s_rxr_shi()
3730 gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc); in op_s_rxr_shi()
3736 return store_reg_kind(s, a->rd, tmp, kind); in op_s_rxr_shi()
3740 * Data-processing (register-shifted register)
3751 tmp1 = load_reg(s, a->rs); in op_s_rrr_shr()
3752 tmp2 = load_reg(s, a->rm); in op_s_rrr_shr()
3753 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc); in op_s_rrr_shr()
3754 tmp1 = load_reg(s, a->rn); in op_s_rrr_shr()
3761 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rrr_shr()
3770 tmp1 = load_reg(s, a->rs); in op_s_rxr_shr()
3771 tmp2 = load_reg(s, a->rm); in op_s_rxr_shr()
3772 gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc); in op_s_rxr_shr()
3778 return store_reg_kind(s, a->rd, tmp2, kind); in op_s_rxr_shr()
3782 * Data-processing (immediate)
3787 * Note that logic_cc && a->rot setting CF based on the msb of the
3798 imm = ror32(a->imm, a->rot); in op_s_rri_rot()
3799 if (logic_cc && a->rot) { in op_s_rri_rot()
3802 tmp1 = load_reg(s, a->rn); in op_s_rri_rot()
3809 return store_reg_kind(s, a->rd, tmp1, kind); in op_s_rri_rot()
3819 imm = ror32(a->imm, a->rot); in op_s_rxi_rot()
3820 if (logic_cc && a->rot) { in op_s_rxi_rot()
3830 return store_reg_kind(s, a->rd, tmp, kind); in op_s_rxi_rot()
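Worked example of the modified-immediate expansion used above, with a hedged standalone rotate (the real code uses QEMU's ror32):

    #include <stdint.h>
    /* Sketch: an ARM modified immediate is imm8 rotated right by rot. */
    static uint32_t ror32_sketch(uint32_t x, unsigned r)
    {
        r &= 31;
        return r ? (x >> r) | (x << (32 - r)) : x;
    }
    /* ror32_sketch(0xff, 8) == 0xff000000; with logic_cc && rot != 0,
     * CF is then set from bit 31 of that result. */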
3857 DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
3858 DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
3859 DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
3860 DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
3862 DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
3863 DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
3864 DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
3865 DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
3872 DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false, in DO_CMP2()
3873 a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL) in DO_CMP2()
3878 * we modify a->s via that parameter before it is used by OP. in DO_CMP2()
3880 DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false, in DO_CMP2()
3883 if (a->rd == 15 && a->s) { in DO_CMP2()
3886 * In User mode, UNPREDICTABLE; we choose UNDEF. in DO_CMP2()
3887 * In Hyp mode, UNDEFINED. in DO_CMP2()
3889 if (IS_USER(s) || s->current_el == 2) { in DO_CMP2()
3894 a->s = 0; in DO_CMP2()
3896 } else if (a->rd == 13 && a->rn == 13) { in DO_CMP2()
3902 DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
3905 if (a->rd == 15 && a->s) {
3908 * In User mode, UNPREDICTABLE; we choose UNDEF.
3909 * In Hyp mode, UNDEFINED.
3911 if (IS_USER(s) || s->current_el == 2) {
3916 a->s = 0;
3918 } else if (a->rd == 13) {
3924 DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
3927 * ORN is only available with T32, so there is no register-shifted-register
3932 return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
3937 return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL); in trans_ORN_rri()
3946 store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm)); in trans_ADR()
3956 store_reg(s, a->rd, tcg_constant_i32(a->imm)); in trans_MOVW()
3968 tmp = load_reg(s, a->rd); in trans_MOVT()
3970 tcg_gen_ori_i32(tmp, tmp, a->imm << 16); in trans_MOVT()
3971 store_reg(s, a->rd, tmp); in trans_MOVT()
3976 * v8.1M MVE wide-shifts
3988 if (a->rdahi == 15) { in do_mve_shl_ri()
3994 a->rdahi == 13) { in do_mve_shl_ri()
4000 if (a->shim == 0) { in do_mve_shl_ri()
4001 a->shim = 32; in do_mve_shl_ri()
4005 rdalo = load_reg(s, a->rdalo); in do_mve_shl_ri()
4006 rdahi = load_reg(s, a->rdahi); in do_mve_shl_ri()
4009 fn(rda, rda, a->shim); in do_mve_shl_ri()
4013 store_reg(s, a->rdalo, rdalo); in do_mve_shl_ri()
4014 store_reg(s, a->rdahi, rdahi); in do_mve_shl_ri()
4073 if (a->rdahi == 15) { in do_mve_shl_rr()
4079 a->rdahi == 13 || a->rm == 13 || a->rm == 15 || in do_mve_shl_rr()
4080 a->rm == a->rdahi || a->rm == a->rdalo) { in do_mve_shl_rr()
4087 rdalo = load_reg(s, a->rdalo); in do_mve_shl_rr()
4088 rdahi = load_reg(s, a->rdahi); in do_mve_shl_rr()
4091 /* The helper takes care of the sign-extension of the low 8 bits of Rm */ in do_mve_shl_rr()
4092 fn(rda, tcg_env, rda, cpu_R[a->rm]); in do_mve_shl_rr()
4096 store_reg(s, a->rdalo, rdalo); in do_mve_shl_rr()
4097 store_reg(s, a->rdahi, rdahi); in do_mve_shl_rr()
4140 a->rda == 13 || a->rda == 15) { in do_mve_sh_ri()
4146 if (a->shim == 0) { in do_mve_sh_ri()
4147 a->shim = 32; in do_mve_sh_ri()
4149 fn(cpu_R[a->rda], cpu_R[a->rda], a->shim); in do_mve_sh_ri()
4192 a->rda == 13 || a->rda == 15 || a->rm == 13 || a->rm == 15 || in do_mve_sh_rr()
4193 a->rm == a->rda) { in do_mve_sh_rr()
4199 /* The helper takes care of the sign-extension of the low 8 bits of Rm */ in do_mve_sh_rr()
4200 fn(cpu_R[a->rda], tcg_env, cpu_R[a->rda], cpu_R[a->rm]); in do_mve_sh_rr()
4222 t1 = load_reg(s, a->rn); in op_mla()
4223 t2 = load_reg(s, a->rm); in op_mla()
4226 t2 = load_reg(s, a->ra); in op_mla()
4229 if (a->s) { in op_mla()
4232 store_reg(s, a->rd, t1); in op_mla()
4253 t1 = load_reg(s, a->rn); in trans_MLS()
4254 t2 = load_reg(s, a->rm); in trans_MLS()
4256 t2 = load_reg(s, a->ra); in trans_MLS()
4258 store_reg(s, a->rd, t1); in trans_MLS()
4266 t0 = load_reg(s, a->rm); in op_mlal()
4267 t1 = load_reg(s, a->rn); in op_mlal()
4274 t2 = load_reg(s, a->ra); in op_mlal()
4275 t3 = load_reg(s, a->rd); in op_mlal()
4278 if (a->s) { in op_mlal()
4281 store_reg(s, a->ra, t0); in op_mlal()
4282 store_reg(s, a->rd, t1); in op_mlal()
4310 if (s->thumb in trans_UMAAL()
4316 t0 = load_reg(s, a->rm); in trans_UMAAL()
4317 t1 = load_reg(s, a->rn); in trans_UMAAL()
4320 t2 = load_reg(s, a->ra); in trans_UMAAL()
4322 t2 = load_reg(s, a->rd); in trans_UMAAL()
4324 store_reg(s, a->ra, t0); in trans_UMAAL()
4325 store_reg(s, a->rd, t1); in trans_UMAAL()
4337 if (s->thumb in op_qaddsub()
4343 t0 = load_reg(s, a->rm); in op_qaddsub()
4344 t1 = load_reg(s, a->rn); in op_qaddsub()
4353 store_reg(s, a->rd, t0); in op_qaddsub()
4379 if (s->thumb in DO_QADDSUB()
4385 t0 = load_reg(s, a->rn); in DO_QADDSUB()
4386 t1 = load_reg(s, a->rm); in DO_QADDSUB()
4391 store_reg(s, a->rd, t0); in DO_QADDSUB()
4394 t1 = load_reg(s, a->ra); in DO_QADDSUB()
4396 store_reg(s, a->rd, t0); in DO_QADDSUB()
4399 tl = load_reg(s, a->ra); in DO_QADDSUB()
4400 th = load_reg(s, a->rd); in DO_QADDSUB()
4401 /* Sign-extend the 32-bit product to 64 bits. */ in DO_QADDSUB()
4405 store_reg(s, a->ra, tl); in DO_QADDSUB()
4406 store_reg(s, a->rd, th); in DO_QADDSUB()
4445 t0 = load_reg(s, a->rn); in op_smlawx()
4446 t1 = load_reg(s, a->rm); in op_smlawx()
4448 * Since the nominal result is product<47:16>, shift the 16-bit in op_smlawx()
4458 t0 = load_reg(s, a->ra); in op_smlawx()
4461 store_reg(s, a->rd, t1); in op_smlawx()
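A sketch of the SMLAW*/SMULW* arithmetic the comment describes: the 32x16 product is 48 bits wide and the instruction keeps bits <47:16>:

    #include <stdint.h>
    /* Sketch: SMULW* result = (rn * rm16)<47:16>. */
    static int32_t smulw(int32_t rn, int16_t rm16)
    {
        return (int32_t)(((int64_t)rn * rm16) >> 16);
    }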
4485 * When running single-threaded TCG code, use the helper to ensure that in trans_YIELD()
4486 * the next round-robin scheduled vCPU gets a crack. When running in in trans_YIELD()
4490 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { in trans_YIELD()
4492 s->base.is_jmp = DISAS_YIELD; in trans_YIELD()
4500 * When running single-threaded TCG code, use the helper to ensure that in trans_WFE()
4501 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we in trans_WFE()
4506 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { in trans_WFE()
4508 s->base.is_jmp = DISAS_WFE; in trans_WFE()
4517 s->base.is_jmp = DISAS_WFI; in trans_WFI()
4524 * For M-profile, minimal-RAS ESB can be a NOP. in trans_ESB()
4537 if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) { in trans_ESB()
4551 uint32_t val = ror32(a->imm, a->rot * 2); in trans_MSR_imm()
4552 uint32_t mask = msr_mask(s, a->mask, a->r); in trans_MSR_imm()
4554 if (gen_set_psr_im(s, mask, a->r, val)) { in trans_MSR_imm()
4572 t1 = load_reg(s, a->rn); in op_crc32()
4573 t2 = load_reg(s, a->rm); in op_crc32()
4592 store_reg(s, a->rd, t1); in op_crc32()
4618 gen_mrs_banked(s, a->r, a->sysm, a->rd); in DO_CRC32()
4627 gen_msr_banked(s, a->r, a->sysm, a->rn); in trans_MSR_bank()
4638 if (a->r) { in trans_MRS_reg()
4648 store_reg(s, a->rd, tmp); in trans_MRS_reg()
4655 uint32_t mask = msr_mask(s, a->mask, a->r); in trans_MSR_reg()
4660 tmp = load_reg(s, a->rn); in trans_MSR_reg()
4661 if (gen_set_psr(s, mask, a->r, tmp)) { in trans_MSR_reg()
4675 gen_helper_v7m_mrs(tmp, tcg_env, tcg_constant_i32(a->sysm)); in trans_MRS_v7m()
4676 store_reg(s, a->rd, tmp); in trans_MRS_v7m()
4687 addr = tcg_constant_i32((a->mask << 10) | a->sysm); in trans_MSR_v7m()
4688 reg = load_reg(s, a->rn); in trans_MSR_v7m()
4701 gen_bx_excret(s, load_reg(s, a->rm)); in trans_BX()
4712 * TBFLAGS bit on a basically-never-happens case, so call a helper in trans_BXJ()
4719 s->current_el < 2 && s->ns) { in trans_BXJ()
4720 gen_helper_check_bxj_trap(tcg_env, tcg_constant_i32(a->rm)); in trans_BXJ()
4723 gen_bx(s, load_reg(s, a->rm)); in trans_BXJ()
4734 tmp = load_reg(s, a->rm); in trans_BLX_r()
4735 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BLX_r()
4743 * the user-only mode either (in theory you can use them from
4744 * Secure User mode but they are too tied in to system emulation).
4748 if (!s->v8m_secure || IS_USER_ONLY) { in trans_BXNS()
4751 gen_bxns(s, a->rm); in trans_BXNS()
4758 if (!s->v8m_secure || IS_USER_ONLY) { in trans_BLXNS()
4761 gen_blxns(s, a->rm); in trans_BLXNS()
4773 tmp = load_reg(s, a->rm); in trans_CLZ()
4775 store_reg(s, a->rd, tmp); in trans_CLZ()
4790 if (s->current_el == 2) { in trans_ERET()
4802 gen_hlt(s, a->imm); in trans_HLT()
4812 s->eci_handled = true; in trans_BKPT()
4814 semihosting_enabled(s->current_el == 0) && in trans_BKPT()
4815 (a->imm == 0xab)) { in trans_BKPT()
4818 gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false)); in trans_BKPT()
4831 gen_hvc(s, a->imm); in trans_HVC()
4859 * it is executed by a CPU in non-secure state from memory in trans_SG()
4860 * which is Secure & NonSecure-Callable. in trans_SG()
4869 if (s->v8m_secure) { in trans_SG()
4871 s->condexec_cond = 0; in trans_SG()
4872 s->condexec_mask = 0; in trans_SG()
4885 if (a->rd == 13 || a->rd == 15 || a->rn == 15) { in trans_TT()
4890 if (a->A && !s->v8m_secure) { in trans_TT()
4896 addr = load_reg(s, a->rn); in trans_TT()
4898 gen_helper_v7m_tt(tmp, tcg_env, addr, tcg_constant_i32((a->A << 1) | a->T)); in trans_TT()
4899 store_reg(s, a->rd, tmp); in trans_TT()
4925 TCGv_i32 addr = load_reg(s, a->rn); in op_addr_rr_pre()
4927 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_rr_pre()
4931 if (a->p) { in op_addr_rr_pre()
4932 TCGv_i32 ofs = load_reg(s, a->rm); in op_addr_rr_pre()
4933 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0); in op_addr_rr_pre()
4934 if (a->u) { in op_addr_rr_pre()
4946 if (!a->p) { in op_addr_rr_post()
4947 TCGv_i32 ofs = load_reg(s, a->rm); in op_addr_rr_post()
4948 gen_arm_shift_im(ofs, a->shtype, a->shimm, 0); in op_addr_rr_post()
4949 if (a->u) { in op_addr_rr_post()
4954 } else if (!a->w) { in op_addr_rr_post()
4958 store_reg(s, a->rn, addr); in op_addr_rr_post()
4964 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); in op_load_rr()
4978 store_reg_from_load(s, a->rt, tmp); in op_load_rr()
4985 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; in op_store_rr()
4992 if (s->thumb && a->rn == 15) { in op_store_rr()
4998 tmp = load_reg(s, a->rt); in op_store_rr()
5009 * LDRD is required to be an atomic 64-bit access if the in do_ldrd_load()
5010 * address is 8-aligned, two atomic 32-bit accesses if in do_ldrd_load()
5011 * it's only 4-aligned, and to give an alignment fault in do_ldrd_load()
5012 * if it's not 4-aligned. This is MO_ALIGN_4 | MO_ATOM_SUBALIGN. in do_ldrd_load()
5016 * so we don't get its SCTLR_B check, and instead do a 64-bit access in do_ldrd_load()
5019 * For M-profile, and for A-profile before LPAE, the 64-bit in do_ldrd_load()
5030 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; in do_ldrd_load()
5037 if (s->be_data == MO_BE) { in do_ldrd_load()
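The MemOp above encodes that requirement directly; as a plain decision table (a restatement, assuming the MO_ALIGN_4 and MO_ATOM_SUBALIGN semantics as described):

    /* addr 8-aligned -> one single-copy-atomic 64-bit access
     * addr 4-aligned -> two single-copy-atomic 32-bit accesses
     * otherwise      -> alignment fault
     * = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN */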
5053 if (a->rt & 1) { in trans_LDRD_rr()
5059 do_ldrd_load(s, addr, a->rt, a->rt + 1); in trans_LDRD_rr()
5069 * STRD is required to be an atomic 64-bit access if the in do_strd_store()
5070 * address is 8-aligned, two atomic 32-bit accesses if in do_strd_store()
5071 * it's only 4-aligned, and to give an alignment fault in do_strd_store()
5072 * if it's not 4-aligned. in do_strd_store()
5076 * so we don't get its SCTLR_B check, and instead do a 64-bit access in do_strd_store()
5080 * As with LDRD, the 64-bit atomicity is not required for in do_strd_store()
5081 * M-profile, or for A-profile before LPAE, and we provide in do_strd_store()
5085 MemOp opc = MO_64 | MO_ALIGN_4 | MO_ATOM_SUBALIGN | s->be_data; in do_strd_store()
5091 if (s->be_data == MO_BE) { in do_strd_store()
5106 if (a->rt & 1) { in trans_STRD_rr()
5112 do_strd_store(s, addr, a->rt, a->rt + 1); in trans_STRD_rr()
5124 int ofs = a->imm; in op_addr_ri_pre()
5126 if (!a->u) { in op_addr_ri_pre()
5127 ofs = -ofs; in op_addr_ri_pre()
5130 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_ri_pre()
5137 if (!a->u) { in op_addr_ri_pre()
5146 return add_reg_for_lit(s, a->rn, a->p ? ofs : 0); in op_addr_ri_pre()
5152 if (!a->p) { in op_addr_ri_post()
5153 if (a->u) { in op_addr_ri_post()
5154 address_offset += a->imm; in op_addr_ri_post()
5156 address_offset -= a->imm; in op_addr_ri_post()
5158 } else if (!a->w) { in op_addr_ri_post()
5162 store_reg(s, a->rn, addr); in op_addr_ri_post()
5168 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w); in op_load_ri()
5182 store_reg_from_load(s, a->rt, tmp); in op_load_ri()
5189 ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite; in op_store_ri()
5196 if (s->thumb && a->rn == 15) { in op_store_ri()
5202 tmp = load_reg(s, a->rt); in op_store_ri()
5216 do_ldrd_load(s, addr, a->rt, rt2); in op_ldrd_ri()
5225 if (!ENABLE_ARCH_5TE || (a->rt & 1)) { in trans_LDRD_ri_a32()
5228 return op_ldrd_ri(s, a, a->rt + 1); in trans_LDRD_ri_a32()
5234 .u = a->u, .w = a->w, .p = a->p, in trans_LDRD_ri_t32()
5235 .rn = a->rn, .rt = a->rt, .imm = a->imm in trans_LDRD_ri_t32()
5237 return op_ldrd_ri(s, &b, a->rt2); in trans_LDRD_ri_t32()
5246 do_strd_store(s, addr, a->rt, rt2); in op_strd_ri()
5254 if (!ENABLE_ARCH_5TE || (a->rt & 1)) { in trans_STRD_ri_a32()
5257 return op_strd_ri(s, a, a->rt + 1); in trans_STRD_ri_a32()
5263 .u = a->u, .w = a->w, .p = a->p, in trans_STRD_ri_t32()
5264 .rn = a->rn, .rt = a->rt, .imm = a->imm in trans_STRD_ri_t32()
5266 return op_strd_ri(s, &b, a->rt2); in trans_STRD_ri_t32()
5308 opc |= s->be_data; in DO_LDST()
5309 addr = load_reg(s, a->rn); in DO_LDST()
5312 tmp = load_reg(s, a->rt2); in DO_LDST()
5315 store_reg(s, a->rt, tmp); in DO_LDST()
5330 * Load/Store Exclusive and Load-Acquire/Store-Release
5340 if (a->rd == 15 || a->rn == 15 || a->rt == 15 in op_strex()
5341 || a->rd == a->rn || a->rd == a->rt in op_strex()
5342 || (!v8a && s->thumb && (a->rd == 13 || a->rt == 13)) in op_strex()
5344 && (a->rt2 == 15 in op_strex()
5345 || a->rd == a->rt2 in op_strex()
5346 || (!v8a && s->thumb && a->rt2 == 13)))) { in op_strex()
5356 load_reg_var(s, addr, a->rn); in op_strex()
5357 tcg_gen_addi_i32(addr, addr, a->imm); in op_strex()
5359 gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop); in op_strex()
5377 if (a->rt & 1) { in trans_STREXD_a32()
5381 a->rt2 = a->rt + 1; in trans_STREXD_a32()
5392 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_STREXB()
5400 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_STREXH()
5420 if (a->rt & 1) { in trans_STLEXD_a32()
5424 a->rt2 = a->rt + 1; in trans_STLEXD_a32()
5460 if (a->rn == 15 || a->rt == 15) { in op_stl()
5465 addr = load_reg(s, a->rn); in op_stl()
5466 tmp = load_reg(s, a->rt); in op_stl()
5469 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite); in op_stl()
5496 if (a->rn == 15 || a->rt == 15 in op_ldrex()
5497 || (!v8a && s->thumb && a->rt == 13) in op_ldrex()
5499 && (a->rt2 == 15 || a->rt == a->rt2 in op_ldrex()
5500 || (!v8a && s->thumb && a->rt2 == 13)))) { in op_ldrex()
5506 load_reg_var(s, addr, a->rn); in op_ldrex()
5507 tcg_gen_addi_i32(addr, addr, a->imm); in op_ldrex()
5509 gen_load_exclusive(s, a->rt, a->rt2, addr, mop); in op_ldrex()
5531 if (a->rt & 1) { in trans_LDREXD_a32()
5535 a->rt2 = a->rt + 1; in trans_LDREXD_a32()
5546 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_LDREXB()
5554 if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) { in trans_LDREXH()
5574 if (a->rt & 1) { in trans_LDAEXD_a32()
5578 a->rt2 = a->rt + 1; in trans_LDAEXD_a32()
5614 if (a->rn == 15 || a->rt == 15) { in op_lda()
5619 addr = load_reg(s, a->rn); in op_lda()
5622 disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel); in op_lda()
5624 store_reg(s, a->rt, tmp); in op_lda()
5656 t1 = load_reg(s, a->rn); in trans_USADA8()
5657 t2 = load_reg(s, a->rm); in trans_USADA8()
5659 if (a->ra != 15) { in trans_USADA8()
5660 t2 = load_reg(s, a->ra); in trans_USADA8()
5663 store_reg(s, a->rd, t1); in trans_USADA8()
5670 int width = a->widthm1 + 1; in op_bfx()
5671 int shift = a->lsb; in op_bfx()
5682 tmp = load_reg(s, a->rn); in op_bfx()
5688 store_reg(s, a->rd, tmp); in op_bfx()
5704 int msb = a->msb, lsb = a->lsb; in trans_BFCI()
5717 width = msb + 1 - lsb; in trans_BFCI()
5718 if (a->rn == 15) { in trans_BFCI()
5723 t_in = load_reg(s, a->rn); in trans_BFCI()
5725 t_rd = load_reg(s, a->rd); in trans_BFCI()
5727 store_reg(s, a->rd, t_rd); in trans_BFCI()
5746 if (s->thumb in op_par_addsub()
5752 t0 = load_reg(s, a->rn); in op_par_addsub()
5753 t1 = load_reg(s, a->rm); in op_par_addsub()
5757 store_reg(s, a->rd, t0); in op_par_addsub()
5768 if (s->thumb in op_par_addsub_ge()
5774 t0 = load_reg(s, a->rn); in op_par_addsub_ge()
5775 t1 = load_reg(s, a->rm); in op_par_addsub_ge()
5781 store_reg(s, a->rd, t0); in op_par_addsub_ge()
5849 int shift = a->imm; in DO_PAR_ADDSUB_GE()
5851 if (s->thumb in DO_PAR_ADDSUB_GE()
5857 tn = load_reg(s, a->rn); in DO_PAR_ADDSUB_GE()
5858 tm = load_reg(s, a->rm); in DO_PAR_ADDSUB_GE()
5859 if (a->tb) { in DO_PAR_ADDSUB_GE()
5871 store_reg(s, a->rd, tn); in DO_PAR_ADDSUB_GE()
5879 int shift = a->imm; in op_sat()
5885 tmp = load_reg(s, a->rn); in op_sat()
5886 if (a->sh) { in op_sat()
5892 gen(tmp, tcg_env, tmp, tcg_constant_i32(a->satimm)); in op_sat()
5894 store_reg(s, a->rd, tmp); in op_sat()
5910 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_SSAT16()
5918 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_USAT16()
5934 tmp = load_reg(s, a->rm); in op_xta()
5939 tcg_gen_rotri_i32(tmp, tmp, a->rot * 8); in op_xta()
5942 if (a->rn != 15) { in op_xta()
5943 TCGv_i32 tmp2 = load_reg(s, a->rn); in op_xta()
5946 store_reg(s, a->rd, tmp); in op_xta()
5962 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_SXTAB16()
5980 if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { in trans_UXTAB16()
5990 if (s->thumb in trans_SEL()
5996 t1 = load_reg(s, a->rn); in trans_SEL()
5997 t2 = load_reg(s, a->rm); in trans_SEL()
6001 store_reg(s, a->rd, t1); in trans_SEL()
6010 tmp = load_reg(s, a->rm); in op_rr()
6012 store_reg(s, a->rd, tmp); in op_rr()
6060 t1 = load_reg(s, a->rn); in op_smlad()
6061 t2 = load_reg(s, a->rm); in op_smlad()
6070 * 32-bit subtraction and then a possible 32-bit saturating in op_smlad()
6075 if (a->ra != 15) { in op_smlad()
6076 t2 = load_reg(s, a->ra); in op_smlad()
6079 } else if (a->ra == 15) { in op_smlad()
6080 /* Single saturation-checking addition */ in op_smlad()
6086 * this as two separate add-and-check-overflow steps incorrectly in op_smlad()
6087 * sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1. in op_smlad()
6088 * Do all the arithmetic at 64-bits and then check for overflow. in op_smlad()
6098 load_reg_var(s, t2, a->ra); in op_smlad()
6106 * is different from the sign-extension of t1. in op_smlad()
6115 store_reg(s, a->rd, t1); in op_smlad()
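A sketch of the overflow check described above: perform the whole sum at 64 bits and compare with its sign-extended 32-bit truncation; a mismatch means the 32-bit addition would have saturated and Q must be set:

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: does p1 + p2 + ra overflow 32-bit signed arithmetic? */
    static bool smlad_q(int32_t p1, int32_t p2, int32_t ra)
    {
        int64_t sum = (int64_t)p1 + p2 + ra;
        return sum != (int32_t)sum;
    }
    /* (-32768 * -32768) twice plus -1 is 0x7fffffff: it fits, so Q
     * stays clear, though a two-step 32-bit check would overflow. */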
6148 t1 = load_reg(s, a->rn); in op_smlald()
6149 t2 = load_reg(s, a->rm); in op_smlald()
6166 gen_addq(s, l1, a->ra, a->rd); in op_smlald()
6167 gen_storeq_reg(s, a->ra, a->rd, l1); in op_smlald()
6195 if (s->thumb in op_smmla()
6201 t1 = load_reg(s, a->rn); in op_smmla()
6202 t2 = load_reg(s, a->rm); in op_smmla()
6205 if (a->ra != 15) { in op_smmla()
6206 TCGv_i32 t3 = load_reg(s, a->ra); in op_smmla()
6209 * For SMMLS, we need a 64-bit subtract. Borrow caused by in op_smmla()
6210 * a non-zero multiplicand lowpart, and the correct result in op_smmla()
6220 * Adding 0x80000000 to the 64-bit quantity means that we have in op_smmla()
6226 store_reg(s, a->rd, t1); in op_smmla()
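A hedged sketch of the SMMLA/SMMLAR arithmetic behind the comments above: accumulate at 64 bits, add 0x80000000 when rounding, keep the high word (unsigned arithmetic so the wrap is well defined):

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: rd = (ra:0 + rn * rm (+ 0x80000000 if rounding))<63:32>. */
    static int32_t smmla(int32_t rn, int32_t rm, int32_t ra, bool round)
    {
        uint64_t acc = ((uint64_t)(uint32_t)ra << 32)
                     + (uint64_t)((int64_t)rn * rm);
        if (round) {
            acc += 0x80000000u;
        }
        return (int32_t)(acc >> 32);
    }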
6254 if (s->thumb in op_div()
6260 t1 = load_reg(s, a->rn); in op_div()
6261 t2 = load_reg(s, a->rm); in op_div()
6267 store_reg(s, a->rd, t1); in op_div()
6287 TCGv_i32 addr = load_reg(s, a->rn); in op_addr_block_pre()
6289 if (a->b) { in op_addr_block_pre()
6290 if (a->i) { in op_addr_block_pre()
6295 tcg_gen_addi_i32(addr, addr, -(n * 4)); in op_addr_block_pre()
6297 } else if (!a->i && n != 1) { in op_addr_block_pre()
6299 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); in op_addr_block_pre()
6302 if (s->v8m_stackcheck && a->rn == 13 && a->w) { in op_addr_block_pre()
6306 * stack limit but the final written-back SP would in op_addr_block_pre()
6323 if (a->w) { in op_addr_block_post()
6325 if (!a->b) { in op_addr_block_post()
6326 if (a->i) { in op_addr_block_post()
6331 tcg_gen_addi_i32(addr, addr, -(n * 4)); in op_addr_block_post()
6333 } else if (!a->i && n != 1) { in op_addr_block_post()
6335 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4)); in op_addr_block_post()
6337 store_reg(s, a->rn, addr); in op_addr_block_post()
6344 bool user = a->u; in op_stm()
6350 /* Only usable in supervisor mode. */ in op_stm()
6356 list = a->list; in op_stm()
6362 * single-register-store, and some in-the-wild (buggy) software in op_stm()
6365 if (n < 1 || a->rn == 15) { in op_stm()
6370 s->eci_handled = true; in op_stm()
6407 if (a->w && (a->list & (1 << a->rn))) { in trans_STM_t32()
6418 bool user = a->u; in do_ldm()
6425 /* Only usable in supervisor mode. */ in do_ldm()
6429 if (extract32(a->list, 15, 1)) { in do_ldm()
6434 if (a->w) { in do_ldm()
6441 list = a->list; in do_ldm()
6447 * single-register-load, and some in-the-wild (buggy) software in do_ldm()
6450 if (n < 1 || a->rn == 15) { in do_ldm()
6455 s->eci_handled = true; in do_ldm()
6471 } else if (i == a->rn) { in do_ldm()
6490 store_reg(s, a->rn, loaded_var); in do_ldm()
6496 translator_io_start(&s->base); in do_ldm()
6498 /* Must exit loop to check unmasked IRQs */ in do_ldm()
6499 s->base.is_jmp = DISAS_EXIT; in do_ldm()
6512 if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) { in trans_LDM_a32()
6522 if (a->w && (a->list & (1 << a->rn))) { in trans_LDM_t32()
6532 a->w = !(a->list & (1 << a->rn)); in trans_LDM_t16()
6545 if (extract32(a->list, 13, 1)) { in trans_CLRM()
6549 if (!a->list) { in trans_CLRM()
6554 s->eci_handled = true; in trans_CLRM()
6558 if (extract32(a->list, i, 1)) { in trans_CLRM()
6563 if (extract32(a->list, 15, 1)) { in trans_CLRM()
6580 gen_jmp(s, jmp_diff(s, a->imm)); in trans_B()
6587 if (a->cond >= 0xe) { in trans_B_cond_thumb()
6590 if (s->condexec_mask) { in trans_B_cond_thumb()
6594 arm_skip_unless(s, a->cond); in trans_B_cond_thumb()
6595 gen_jmp(s, jmp_diff(s, a->imm)); in trans_B_cond_thumb()
6601 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BL()
6602 gen_jmp(s, jmp_diff(s, a->imm)); in trans_BL()
6609 * BLX <imm> would be useless on M-profile; the encoding space in trans_BLX_i()
6617 if (s->thumb && (a->imm & 2)) { in trans_BLX_i()
6620 gen_pc_plus_diff(s, cpu_R[14], curr_insn_len(s) | s->thumb); in trans_BLX_i()
6621 store_cpu_field_constant(!s->thumb, thumb); in trans_BLX_i()
6623 gen_jmp(s, jmp_diff(s, a->imm - (s->pc_curr & 3))); in trans_BLX_i()
6630 gen_pc_plus_diff(s, cpu_R[14], jmp_diff(s, a->imm << 12)); in trans_BL_BLX_prefix()
6639 tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1); in trans_BL_suffix()
6654 tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1); in trans_BLX_suffix()
6664 * M-profile branch future insns. The architecture permits an in trans_BF()
6673 if (a->boff == 0) { in trans_BF()
6683 /* M-profile low-overhead loop start */ in trans_DLS()
6689 if (a->rn == 13 || a->rn == 15) { in trans_DLS()
6698 if (a->size != 4) { in trans_DLS()
6709 tmp = load_reg(s, a->rn); in trans_DLS()
6711 if (a->size != 4) { in trans_DLS()
6713 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); in trans_DLS()
6714 s->base.is_jmp = DISAS_UPDATE_NOCHAIN; in trans_DLS()
6721 /* M-profile low-overhead while-loop start */ in trans_WLS()
6728 if (a->rn == 13 || a->rn == 15) { in trans_WLS()
6736 if (s->condexec_mask) { in trans_WLS()
6741 * in the dc->condjmp condition-failed codepath in in trans_WLS()
6746 if (a->size != 4) { in trans_WLS()
6755 * Do the check-and-raise-exception by hand. in trans_WLS()
6757 if (s->fp_excp_el) { in trans_WLS()
6759 syn_uncategorized(), s->fp_excp_el); in trans_WLS()
6765 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel.label); in trans_WLS()
6766 tmp = load_reg(s, a->rn); in trans_WLS()
6768 if (a->size != 4) { in trans_WLS()
6778 store_cpu_field(tcg_constant_i32(a->size), v7m.ltpsize); in trans_WLS()
6787 gen_jmp(s, jmp_diff(s, a->imm)); in trans_WLS()
6794 * M-profile low-overhead loop end. The architecture permits an in trans_LE()
6808 if (a->f && a->tp) { in trans_LE()
6811 if (s->condexec_mask) { in trans_LE()
6816 * in the dc->condjmp condition-failed codepath in in trans_LE()
6821 if (a->tp) { in trans_LE()
6827 s->eci_handled = true; in trans_LE()
6833 s->eci_handled = true; in trans_LE()
6841 * can identify not-active purely from our TB state flags, as the in trans_LE()
6850 * the FPU not active. But LE is an unusual case of a non-FP insn in trans_LE()
6853 fpu_active = !s->fp_excp_el && !s->v7m_lspact && !s->v7m_new_fp_ctxt_needed; in trans_LE()
6855 if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) { in trans_LE()
6864 if (a->f) { in trans_LE()
6865 /* Loop-forever: just jump back to the loop start */ in trans_LE()
6866 gen_jmp(s, jmp_diff(s, -a->imm)); in trans_LE()
6871 * Not loop-forever. If LR <= loop-decrement-value this is the last loop. in trans_LE()
6877 if (!a->tp) { in trans_LE()
6879 tcg_gen_addi_i32(cpu_R[14], cpu_R[14], -1); in trans_LE()
6882 * Decrement by 1 << (4 - LTPSIZE). We need to use a TCG local in trans_LE()
6895 gen_jmp(s, jmp_diff(s, -a->imm)); in trans_LE()
6898 if (a->tp) { in trans_LE()
6899 /* Exits from tail-pred loops must reset LTPSIZE to 4 */ in trans_LE()
6910 * M-profile Loop Clear with Tail Predication. Since our implementation in trans_LCTP()
6931 * M-profile Create Vector Tail Predicate. This insn is itself in trans_VCTP()
6936 if (!dc_isar_feature(aa32_mve, s) || a->rn == 13 || a->rn == 15) { in trans_VCTP()
6945 * We pre-calculate the mask length here to avoid having in trans_VCTP()
6947 * We pass the helper "rn <= (1 << (4 - size)) ? (rn << size) : 16". in trans_VCTP()
6950 masklen = load_reg(s, a->rn); in trans_VCTP()
6951 tcg_gen_shli_i32(rn_shifted, masklen, a->size); in trans_VCTP()
6953 masklen, tcg_constant_i32(1 << (4 - a->size)), in trans_VCTP()
6957 s->base.is_jmp = DISAS_UPDATE_NOCHAIN; in trans_VCTP()
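Worked example of the mask-length formula quoted above, assuming size is the log2 element size in bytes (so a 16-byte vector holds 1 << (4 - size) elements):

    #include <stdint.h>
    /* Sketch: masklen = rn <= (1 << (4 - size)) ? rn << size : 16. */
    static int vctp_masklen(uint32_t rn, int size)
    {
        return rn <= (1u << (4 - size)) ? (int)(rn << size) : 16;
    }
    /* size == 1 (halfwords): rn == 5 gives 10 bytes; rn == 9 clamps to 16 */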
6966 tmp = load_reg(s, a->rm); in op_tbranch()
6970 addr = load_reg(s, a->rn); in op_tbranch()
6994 TCGv_i32 tmp = load_reg(s, a->rn); in trans_CBZ()
6997 tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE, in trans_CBZ()
6998 tmp, 0, s->condlabel.label); in trans_CBZ()
6999 gen_jmp(s, jmp_diff(s, a->imm)); in trans_CBZ()
7004 * Supervisor call - both T32 & A32 come here so we need to check
7005 * which mode we are in when checking for semihosting.
7010 const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456; in trans_SVC()
7013 semihosting_enabled(s->current_el == 0) && in trans_SVC()
7014 (a->imm == semihost_imm)) { in trans_SVC()
7017 if (s->fgt_svc) { in trans_SVC()
7018 uint32_t syndrome = syn_aa32_svc(a->imm, s->thumb); in trans_SVC()
7022 s->svc_imm = a->imm; in trans_SVC()
7023 s->base.is_jmp = DISAS_SWI; in trans_SVC()
7036 /* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4 in trans_RFE()
7039 /* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0 in trans_RFE()
7051 addr = load_reg(s, a->rn); in trans_RFE()
7052 tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]); in trans_RFE()
7061 if (a->w) { in trans_RFE()
7063 tcg_gen_addi_i32(addr, addr, post_offset[a->pu]); in trans_RFE()
7064 store_reg(s, a->rn, addr); in trans_RFE()
7075 gen_srs(s, a->mode, a->pu, a->w); in trans_SRS()
7087 /* Implemented as NOP in user mode. */ in trans_CPS()
7093 if (a->imod & 2) { in trans_CPS()
7094 if (a->A) { in trans_CPS()
7097 if (a->I) { in trans_CPS()
7100 if (a->F) { in trans_CPS()
7103 if (a->imod & 1) { in trans_CPS()
7107 if (a->M) { in trans_CPS()
7109 val |= a->mode; in trans_CPS()
7125 /* Implemented as NOP in user mode. */ in trans_CPS_v7m()
7129 tmp = tcg_constant_i32(a->im); in trans_CPS_v7m()
7131 if (a->F) { in trans_CPS_v7m()
7136 if (a->I) { in trans_CPS_v7m()
7146 * Clear-Exclusive, Barriers
7151 if (s->thumb in trans_CLREX()
7181 * self-modifying code correctly and also to take in trans_ISB()
7184 s->base.is_jmp = DISAS_TOO_MANY; in trans_ISB()
7198 s->base.is_jmp = DISAS_TOO_MANY; in trans_SB()
7207 if (a->E != (s->be_data == MO_BE)) { in trans_SETEND()
7209 s->base.is_jmp = DISAS_UPDATE_EXIT; in trans_SETEND()
7235 * If-then
7240 int cond_mask = a->cond_mask; in trans_IT()
7250 s->condexec_cond = (cond_mask >> 4) & 0xe; in trans_IT()
7251 s->condexec_mask = cond_mask & 0x1f; in trans_IT()
7265 if (a->rm == 13) { in trans_CSEL()
7270 if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) { in trans_CSEL()
7278 if (a->rn == 15) { in trans_CSEL()
7281 load_reg_var(s, rn, a->rn); in trans_CSEL()
7283 if (a->rm == 15) { in trans_CSEL()
7286 load_reg_var(s, rm, a->rm); in trans_CSEL()
7289 switch (a->op) { in trans_CSEL()
7305 arm_test_cc(&c, a->fcond); in trans_CSEL()
7308 store_reg(s, a->rd, rn); in trans_CSEL()
7320 /* M variants do not implement ARM mode; this must raise the INVSTATE in disas_arm_insn()
7328 if (s->pstate_il) { in disas_arm_insn()
7361 if (extract32(s->c15_cpar, 1, 1)) { in disas_arm_insn()
7403 /* Definitely a 16-bit instruction */ in thumb_insn_is_16bit()
7408 * first half of a 32-bit Thumb insn. Thumb-1 cores might in thumb_insn_is_16bit()
7409 * end up actually treating this as two 16-bit insns, though, in thumb_insn_is_16bit()
7415 * 32-bit insns as 32-bit. in thumb_insn_is_16bit()
7420 if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) { in thumb_insn_is_16bit()
7422 * is not on the next page; we merge this into a 32-bit in thumb_insn_is_16bit()
7430 * -- handle as a single 16-bit insn in thumb_insn_is_16bit()
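A sketch of the underlying width rule being refined above: a halfword starts a 32-bit Thumb insn exactly when its top five bits are 0b11101, 0b11110 or 0b11111; the code above then special-cases the 0x1e BL/BLX prefix near a page end:

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: Thumb-2 length decode from the first halfword. */
    static bool first_hw_is_32bit(uint16_t insn)
    {
        return (insn >> 11) >= 0x1d;
    }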
7435 /* Translate a 32-bit thumb instruction. */
7439 * ARMv6-M supports a limited subset of Thumb2 instructions. in disas_thumb2_insn()
7440 * Other Thumb1 architectures allow only 32-bit in disas_thumb2_insn()
7479 * entire wide range of coprocessor-space encodings, so check in disas_thumb2_insn()
7542 /* Return true if the insn at dc->base.pc_next might cross a page boundary. in insn_crosses_page()
7545 * only called if dc->base.pc_next is less than 4 bytes from the page in insn_crosses_page()
7549 uint16_t insn = arm_lduw_code(env, &s->base, s->base.pc_next, s->sctlr_b); in insn_crosses_page()
7551 return !thumb_insn_is_16bit(s, s->base.pc_next, insn); in insn_crosses_page()
7559 CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb); in arm_tr_init_disas_context()
7562 dc->isar = &cpu->isar; in arm_tr_init_disas_context()
7563 dc->condjmp = 0; in arm_tr_init_disas_context()
7564 dc->pc_save = dc->base.pc_first; in arm_tr_init_disas_context()
7565 dc->aarch64 = false; in arm_tr_init_disas_context()
7566 dc->thumb = EX_TBFLAG_AM32(tb_flags, THUMB); in arm_tr_init_disas_context()
7567 dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE; in arm_tr_init_disas_context()
7570 * the CONDEXEC TB flags are CPSR bits [15:10][26:25]. On A-profile this in arm_tr_init_disas_context()
7571 * is always the IT bits. On M-profile, some of the reserved encodings in arm_tr_init_disas_context()
7580 dc->eci = dc->condexec_mask = dc->condexec_cond = 0; in arm_tr_init_disas_context()
7581 dc->eci_handled = false; in arm_tr_init_disas_context()
7583 dc->condexec_mask = (condexec & 0xf) << 1; in arm_tr_init_disas_context()
7584 dc->condexec_cond = condexec >> 4; in arm_tr_init_disas_context()
7587 dc->eci = condexec >> 4; in arm_tr_init_disas_context()
7592 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx); in arm_tr_init_disas_context()
7593 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); in arm_tr_init_disas_context()
7595 dc->user = (dc->current_el == 0); in arm_tr_init_disas_context()
7597 dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL); in arm_tr_init_disas_context()
7598 dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM); in arm_tr_init_disas_context()
7599 dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); in arm_tr_init_disas_context()
7600 dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE); in arm_tr_init_disas_context()
7601 dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC); in arm_tr_init_disas_context()
7604 dc->vfp_enabled = 1; in arm_tr_init_disas_context()
7605 dc->be_data = MO_TE; in arm_tr_init_disas_context()
7606 dc->v7m_handler_mode = EX_TBFLAG_M32(tb_flags, HANDLER); in arm_tr_init_disas_context()
7607 dc->v8m_secure = EX_TBFLAG_M32(tb_flags, SECURE); in arm_tr_init_disas_context()
7608 dc->v8m_stackcheck = EX_TBFLAG_M32(tb_flags, STACKCHECK); in arm_tr_init_disas_context()
7609 dc->v8m_fpccr_s_wrong = EX_TBFLAG_M32(tb_flags, FPCCR_S_WRONG); in arm_tr_init_disas_context()
7610 dc->v7m_new_fp_ctxt_needed = in arm_tr_init_disas_context()
7612 dc->v7m_lspact = EX_TBFLAG_M32(tb_flags, LSPACT); in arm_tr_init_disas_context()
7613 dc->mve_no_pred = EX_TBFLAG_M32(tb_flags, MVE_NO_PRED); in arm_tr_init_disas_context()
7615 dc->sctlr_b = EX_TBFLAG_A32(tb_flags, SCTLR__B); in arm_tr_init_disas_context()
7616 dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE); in arm_tr_init_disas_context()
7617 dc->ns = EX_TBFLAG_A32(tb_flags, NS); in arm_tr_init_disas_context()
7618 dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN); in arm_tr_init_disas_context()
7620 dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR); in arm_tr_init_disas_context()
7622 dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); in arm_tr_init_disas_context()
7623 dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); in arm_tr_init_disas_context()
7625 dc->sme_trap_nonstreaming = in arm_tr_init_disas_context()
7628 dc->lse2 = false; /* applies only to aarch64 */ in arm_tr_init_disas_context()
7629 dc->cp_regs = cpu->cp_regs; in arm_tr_init_disas_context()
7630 dc->features = env->features; in arm_tr_init_disas_context()
7632 /* Single step state. The code-generation logic here is: in arm_tr_init_disas_context()
7634 * generate code with no special handling for single-stepping (except in arm_tr_init_disas_context()
7638 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) in arm_tr_init_disas_context()
7643 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) in arm_tr_init_disas_context()
7647 dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE); in arm_tr_init_disas_context()
7648 dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS); in arm_tr_init_disas_context()
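The same state machine, restated compactly (a summary of the comment above, not new behaviour):

    /* SS_ACTIVE  PSTATE.SS   effect
     *     0          x       translate normally, no step handling
     *     1          1       active-not-pending: emit one insn, then
     *                        take the software-step exception
     *     1          0       active-pending: take the step exception
     *                        before executing the insn */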
7649 dc->is_ldex = false; in arm_tr_init_disas_context()
7651 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; in arm_tr_init_disas_context()
7654 if (dc->ss_active) { in arm_tr_init_disas_context()
7655 dc->base.max_insns = 1; in arm_tr_init_disas_context()
7658 /* ARM is a fixed-length ISA. Bound the number of insns to execute in arm_tr_init_disas_context()
7660 if (!dc->thumb) { in arm_tr_init_disas_context()
7661 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; in arm_tr_init_disas_context()
7662 dc->base.max_insns = MIN(dc->base.max_insns, bound); in arm_tr_init_disas_context()
7699 * bits, and none which can write non-static values to them, so in arm_tr_tb_start()
7706 if (dc->condexec_mask || dc->condexec_cond) { in arm_tr_tb_start()
7716 * need to reconstitute the bits from the split-out DisasContext in arm_tr_insn_start()
7720 target_ulong pc_arg = dc->base.pc_next; in arm_tr_insn_start()
7722 if (tb_cflags(dcbase->tb) & CF_PCREL) { in arm_tr_insn_start()
7725 if (dc->eci) { in arm_tr_insn_start()
7726 condexec_bits = dc->eci << 4; in arm_tr_insn_start()
7728 condexec_bits = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); in arm_tr_insn_start()
7731 dc->insn_start_updated = false; in arm_tr_insn_start()
7738 if (dc->base.pc_next >= 0xffff0000) { in arm_check_kernelpage()
7742 dc->base.is_jmp = DISAS_NORETURN; in arm_check_kernelpage()
7751 if (dc->ss_active && !dc->pstate_ss) { in arm_check_ss_active()
7752 /* Singlestep state is Active-pending. in arm_check_ss_active()
7762 assert(dc->base.num_insns == 1); in arm_check_ss_active()
7764 dc->base.is_jmp = DISAS_NORETURN; in arm_check_ss_active()
7773 if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) { in arm_post_translate_insn()
7774 if (dc->pc_save != dc->condlabel.pc_save) { in arm_post_translate_insn()
7775 gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save); in arm_post_translate_insn()
7777 gen_set_label(dc->condlabel.label); in arm_post_translate_insn()
7778 dc->condjmp = 0; in arm_post_translate_insn()
7786 uint32_t pc = dc->base.pc_next; in arm_tr_translate_insn()
7791 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7802 assert(dc->base.num_insns == 1); in arm_tr_translate_insn()
7804 dc->base.is_jmp = DISAS_NORETURN; in arm_tr_translate_insn()
7805 dc->base.pc_next = QEMU_ALIGN_UP(pc, 4); in arm_tr_translate_insn()
7810 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7814 dc->pc_curr = pc; in arm_tr_translate_insn()
7815 insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b); in arm_tr_translate_insn()
7816 dc->insn = insn; in arm_tr_translate_insn()
7817 dc->base.pc_next = pc + 4; in arm_tr_translate_insn()
7822 /* ARM is a fixed-length ISA. We performed the cross-page check in arm_tr_translate_insn()
7838 * insn is either a 16-bit or a 32-bit instruction; the two are in thumb_insn_is_unconditional()
7839 * distinguishable because for the 16-bit case the top 16 bits in thumb_insn_is_unconditional()
7840 * are zeroes, and that isn't a valid 32-bit encoding. in thumb_insn_is_unconditional()
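A sketch of the packing convention described above, once the halfword(s) have been assembled into a 32-bit value:

    #include <stdbool.h>
    #include <stdint.h>
    /* Sketch: a packed 16-bit insn leaves the top halfword zero. */
    static bool packed_insn_is_16bit(uint32_t insn)
    {
        return (insn >> 16) == 0;
    }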
7876 uint32_t pc = dc->base.pc_next; in thumb_tr_translate_insn()
7881 target_ulong insn_eci_pc_save = -1; in thumb_tr_translate_insn()
7884 assert((dc->base.pc_next & 1) == 0); in thumb_tr_translate_insn()
7887 dc->base.pc_next = pc + 2; in thumb_tr_translate_insn()
7891 dc->pc_curr = pc; in thumb_tr_translate_insn()
7892 insn = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); in thumb_tr_translate_insn()
7893 is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); in thumb_tr_translate_insn()
7896 uint32_t insn2 = arm_lduw_code(env, &dc->base, pc, dc->sctlr_b); in thumb_tr_translate_insn()
7900 dc->base.pc_next = pc; in thumb_tr_translate_insn()
7901 dc->insn = insn; in thumb_tr_translate_insn()
7903 if (dc->pstate_il) { in thumb_tr_translate_insn()
7912 if (dc->eci) { in thumb_tr_translate_insn()
7914 * For M-profile continuable instructions, ECI/ICI handling in thumb_tr_translate_insn()
7916 * - interrupt-continuable instructions in thumb_tr_translate_insn()
7924 * - MVE instructions subject to beat-wise execution in thumb_tr_translate_insn()
7931 * - Special cases which don't advance ECI in thumb_tr_translate_insn()
7934 * - all other insns (the common case) in thumb_tr_translate_insn()
7935 * Non-zero ECI/ICI means an INVSTATE UsageFault. in thumb_tr_translate_insn()
7936 * We place a rewind-marker here. Insns in the previous in thumb_tr_translate_insn()
7944 insn_eci_pc_save = dc->pc_save; in thumb_tr_translate_insn()
7947 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) { in thumb_tr_translate_insn()
7948 uint32_t cond = dc->condexec_cond; in thumb_tr_translate_insn()
7966 if (dc->condexec_mask) { in thumb_tr_translate_insn()
7967 dc->condexec_cond = ((dc->condexec_cond & 0xe) | in thumb_tr_translate_insn()
7968 ((dc->condexec_mask >> 4) & 1)); in thumb_tr_translate_insn()
7969 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; in thumb_tr_translate_insn()
7970 if (dc->condexec_mask == 0) { in thumb_tr_translate_insn()
7971 dc->condexec_cond = 0; in thumb_tr_translate_insn()
7975 if (dc->eci && !dc->eci_handled) { in thumb_tr_translate_insn()
7981 dc->pc_save = insn_eci_pc_save; in thumb_tr_translate_insn()
7982 dc->condjmp = 0; in thumb_tr_translate_insn()
7988 /* Thumb is a variable-length ISA. Stop translation when the next insn in thumb_tr_translate_insn()
7995 * see if it's a 16-bit Thumb insn (which will fit in this TB) in thumb_tr_translate_insn()
7996 * or a 32-bit Thumb insn (which won't). in thumb_tr_translate_insn()
7997 * This is to avoid generating a silly TB with a single 16-bit insn in thumb_tr_translate_insn()
8001 if (dc->base.is_jmp == DISAS_NEXT in thumb_tr_translate_insn()
8002 && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE in thumb_tr_translate_insn()
8003 || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3 in thumb_tr_translate_insn()
8005 dc->base.is_jmp = DISAS_TOO_MANY; in thumb_tr_translate_insn()
8013 /* At this stage dc->condjmp will only be set when the skipped in arm_tr_tb_stop()
8017 if (dc->base.is_jmp == DISAS_BX_EXCRET) { in arm_tr_tb_stop()
8020 * handle the single-step vs not and the condition-failed in arm_tr_tb_stop()
8024 } else if (unlikely(dc->ss_active)) { in arm_tr_tb_stop()
8026 switch (dc->base.is_jmp) { in arm_tr_tb_stop()
8029 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); in arm_tr_tb_stop()
8033 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); in arm_tr_tb_stop()
8056 - Exception-generating instructions (bkpt, swi, undefined). in arm_tr_tb_stop()
8057 - Page boundaries. in arm_tr_tb_stop()
8058 - Hardware watchpoints. in arm_tr_tb_stop()
8061 switch (dc->base.is_jmp) { in arm_tr_tb_stop()
8097 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb)); in arm_tr_tb_stop()
8100 gen_exception_el(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); in arm_tr_tb_stop()
8108 if (dc->condjmp) { in arm_tr_tb_stop()
8110 set_disas_label(dc, dc->condlabel); in arm_tr_tb_stop()
8112 if (unlikely(dc->ss_active)) { in arm_tr_tb_stop()