#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/helper-gen.h"
#include "internals.h"
#include "cpu-features.h"

/* internal defines */

/*
 * Save pc_save across a branch, so that we may restore the value from
 * before the branch at the point the label is emitted.
 */
typedef struct DisasLabel {
    TCGLabel *label;
    target_ulong pc_save;
} DisasLabel;
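/*
 * Illustrative sketch (not part of the original header): a conditional
 * skip typically pairs gen_disas_label() and set_disas_label(), defined
 * near the end of this file, so that pc_save is restored at the point
 * where the label is emitted.  "some_flag" here is a hypothetical
 * TCGv_i32 condition value:
 *
 *     DisasLabel skip = gen_disas_label(s);
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, some_flag, 0, skip.label);
 *     ... emit the not-skipped path, which may advance s->pc_save ...
 *     set_disas_label(s, skip);
 */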
typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    /*
     * For CF_PCREL, the full value of cpu_pc is not known
     * (although the page offset is known).  For convenience, the
     * translation loop uses the full virtual address that triggered
     * the translation, from base.pc_start through pc_curr.
     * For efficiency, we do not update cpu_pc for every instruction.
     * Instead, pc_save has the value of pc_curr at the time of the
     * last update to cpu_pc, which allows us to compute the addend
     * needed to bring cpu_pc current: pc_curr - pc_save.
     * If cpu_pc now contains the destination of an indirect branch,
     * pc_save contains -1 to indicate that relative updates are no
     * longer possible.
     */
    target_ulong pc_save;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    DisasLabel condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (ie after any UNDEF checks)
     */
    bool eci_handled;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    uint8_t tbii;      /* TBI1|TBI0 for insns */
    uint8_t tbid;      /* TBI1|TBI0 for data */
    uint8_t tcma;      /* TCMA1|TCMA0 for MTE */
    bool ns;           /* Use non-secure CPREG bank on access */
    int fp_excp_el;    /* FP exception EL or 0 if enabled */
    int sve_excp_el;   /* SVE exception EL or 0 if enabled */
    int sme_excp_el;   /* SME exception EL or 0 if enabled */
    int vl;            /* current vector length in bytes */
    int svl;           /* current streaming vector length in bytes */
    bool vfp_enabled;  /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure;   /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact; /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int current_el;
    GHashTable *cp_regs;
    uint64_t features; /* CPU features bits */
    bool aarch64;
    bool thumb;
    bool lse2;
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */
    bool ata[2];
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /* True if PSTATE.IL is set */
    bool pstate_il;
    /* True if PSTATE.SM is set. */
    bool pstate_sm;
    /* True if PSTATE.ZA is set. */
    bool pstate_za;
    /* True if non-streaming insns should raise an SME Streaming exception. */
    bool sme_trap_nonstreaming;
    /* True if the current instruction is non-streaming. */
    bool is_nonstreaming;
    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
    bool mve_no_pred;
    /* True if fine-grained traps are active */
    bool fgt_active;
    /* True if fine-grained trap on SVC is enabled */
    bool fgt_svc;
    /* True if a trap on ERET is enabled (FGT or NV) */
    bool trap_eret;
    /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */
    bool naa;
    /* True if FEAT_NV HCR_EL2.NV is enabled */
    bool nv;
    /* True if NV enabled and HCR_EL2.NV1 is set */
    bool nv1;
    /* True if NV enabled and HCR_EL2.NV2 is set */
    bool nv2;
    /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */
    bool nv2_mem_e20;
    /* True if NV2 enabled and NV2 RAM accesses are big-endian */
    bool nv2_mem_be;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     * <  0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* A copy of cpu->gm_blocksize. */
    uint8_t gm_blocksize;
    /* True if this page is guarded. */
    bool guarded_page;
    /* True if the current insn_start has been updated. */
    bool insn_start_updated;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */
    uint32_t nv2_redirect_offset;
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
} DisasCompare;
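/*
 * Illustrative sketch (an assumed caller, not from this header):
 * arm_test_cc(), declared below, fills in a DisasCompare describing
 * condition code "cc"; the caller can then branch on it, e.g.:
 *
 *     DisasCompare c;
 *     arm_test_cc(&c, cc);
 *     tcg_gen_brcondi_i32(c.cond, c.value, 0, label);
 */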
/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int plus_12(DisasContext *s, int x)
{
    return x + 12;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_8(DisasContext *s, int x)
{
    return x * 8;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int shl_12(DisasContext *s, int x)
{
    return x << 12;
}

static inline int xor_2(DisasContext *s, int x)
{
    return x ^ 2;
}

static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
    return MO_32 - x;
}
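/*
 * Illustrative sketch: the expanders above are referenced from the
 * decodetree files via the !function= modifier.  A hypothetical field
 * definition doubling a 5-bit immediate would look like:
 *
 *     %imm  16:5 !function=times_2
 *
 * so the trans_ function receives the already-expanded value.
 */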
static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* Check for multiple updates. */
    assert(!s->insn_start_updated);
    s->insn_start_updated = true;
    tcg_set_insn_start_param(s->base.insn_start, 2, syn);
}

static inline int curr_insn_len(DisasContext *s)
{
    return s->base.pc_next - s->pc_curr;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop, so you need to
 * ensure that something (gen_a64_update_pc or a runtime helper) has done so
 * before we reach the return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_update_pc(DisasContext *s, target_long diff);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    /* Fill in the same_el field of the syndrome in the helper. */
    uint32_t syn = syn_swstep(false, isv, ex);
    gen_helper_exception_swstep(tcg_env, tcg_constant_i32(syn));
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);
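/*
 * For example (a sketch of the expected behaviour per VFPExpandImm,
 * not a quote from this header): vfp_expand_imm(MO_32, 0x70) yields
 * 0x3f800000 (1.0f), and vfp_expand_imm(MO_64, 0x70) yields
 * 0x3ff0000000000000 (1.0).
 */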
/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);
void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
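/*
 * Illustrative sketch (an assumed A64 caller; vec_full_reg_offset and
 * the register numbers are illustrative): the expanders take register
 * file offsets plus operation and maximum sizes, e.g.:
 *
 *     gen_gvec_mla(vece, vec_full_reg_offset(s, rd),
 *                  vec_full_reg_offset(s, rn),
 *                  vec_full_reg_offset(s, rm),
 *                  opr_sz, max_sz);
 */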
void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
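/*
 * For example, a trans_ function might gate a pattern on a feature
 * (sketch; the feature name is illustrative):
 *
 *     if (!dc_isar_feature(aa64_bti, s)) {
 *         unallocated_encoding(s);
 *         return true;
 *     }
 */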
/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details).  Return a TCGv_ptr which has been
 * set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, tcg_env, offset);
    return statusptr;
}

/**
 * finalize_memop_atom:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 * @atom: atomicity of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment,
 * endianness, and atomicity.
 *
 * If (opc & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop_atom(DisasContext *s, MemOp opc, MemOp atom)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | atom | s->be_data;
}

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with default atomicity.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16 : MO_ATOM_IFALIGN;
    return finalize_memop_atom(s, opc, atom);
}
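/*
 * Illustrative sketch of a typical caller (assumed, not from this
 * header): finalize the MemOp once, then pass it to the memory op
 * together with the MMU index:
 *
 *     MemOp mop = finalize_memop(s, MO_32);
 *     tcg_gen_qemu_ld_i32(dest, addr, get_mem_index(s), mop);
 */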
/**
 * finalize_memop_pair:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity for a pair.
 * C.f. Pseudocode for Mem[], operand ispair.
 */
static inline MemOp finalize_memop_pair(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16_PAIR : MO_ATOM_IFALIGN_PAIR;
    return finalize_memop_atom(s, opc, atom);
}

/**
 * finalize_memop_asimd:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity of AccessType_ASIMD.
 */
static inline MemOp finalize_memop_asimd(DisasContext *s, MemOp opc)
{
    /*
     * In the pseudocode for Mem[], with AccessType_ASIMD, size == 16,
     * if IsAligned(8), the first case provides separate atomicity for
     * the pair of 64-bit accesses.  If !IsAligned(8), the middle cases
     * do not apply, and we're left with the final case of no atomicity.
     * Thus MO_ATOM_IFALIGN_PAIR.
     *
     * For other sizes, normal LSE2 rules apply.
     */
    if ((opc & MO_SIZE) == MO_128) {
        return finalize_memop_atom(s, opc, MO_ATOM_IFALIGN_PAIR);
    }
    return finalize_memop(s, opc);
}

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value.  This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);

/*
 * gen_disas_label:
 * Create a label and cache a copy of pc_save.
 */
static inline DisasLabel gen_disas_label(DisasContext *s)
{
    return (DisasLabel){
        .label = gen_new_label(),
        .pc_save = s->pc_save,
    };
}

/*
 * set_disas_label:
 * Emit a label and restore the cached copy of pc_save.
 */
static inline void set_disas_label(DisasContext *s, DisasLabel l)
{
    gen_set_label(l.label);
    s->pc_save = l.pc_save;
}

static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    gen_helper_lookup_cp_reg(ret, tcg_env, tcg_constant_i32(key));
    return ret;
}

/*
 * Set and reset rounding mode around another operation.
 */
static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
{
    TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode));
    TCGv_i32 old = tcg_temp_new_i32();

    gen_helper_set_rmode(old, new, fpst);
    return old;
}

static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
{
    gen_helper_set_rmode(old, old, fpst);
}

/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return FUNC(s, __VA_ARGS__); }
#define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { \
        s->is_nonstreaming = true; \
        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
    }

#endif /* TARGET_ARM_TRANSLATE_H */