#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"


/* internal defines */

/*
 * Save pc_save across a branch, so that we may restore the value from
 * before the branch at the point the label is emitted.
 */
typedef struct DisasLabel {
    TCGLabel *label;
    target_ulong pc_save;
} DisasLabel;

typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    /*
     * For CF_PCREL, the full value of cpu_pc is not known
     * (although the page offset is known).  For convenience, the
     * translation loop uses the full virtual address that triggered
     * the translation, from base.pc_start through pc_curr.
     * For efficiency, we do not update cpu_pc for every instruction.
     * Instead, pc_save has the value of pc_curr at the time of the
     * last update to cpu_pc, which allows us to compute the addend
     * needed to bring cpu_pc current: pc_curr - pc_save.
     * If cpu_pc now contains the destination of an indirect branch,
     * pc_save contains -1 to indicate that relative updates are no
     * longer possible.
     */
    target_ulong pc_save;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    DisasLabel condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (i.e. after any UNDEF checks)
     */
    bool eci_handled;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx;  /* MMU index to use for normal loads/stores */
    uint8_t tbii;       /* TBI1|TBI0 for insns */
    uint8_t tbid;       /* TBI1|TBI0 for data */
    uint8_t tcma;       /* TCMA1|TCMA0 for MTE */
    bool ns;            /* Use non-secure CPREG bank on access */
    int fp_excp_el;     /* FP exception EL or 0 if enabled */
    int sve_excp_el;    /* SVE exception EL or 0 if enabled */
    int sme_excp_el;    /* SME exception EL or 0 if enabled */
    int vl;             /* current vector length in bytes */
    int svl;            /* current streaming vector length in bytes */
    bool vfp_enabled;   /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure;    /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact;    /* FPCCR.LSPACT set */
    /*
     * Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that the top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int current_el;
    GHashTable *cp_regs;
    uint64_t features;  /* CPU features bits */
    bool aarch64;
    bool thumb;
    /*
     * Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /*
     * ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /*
     * True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * i.e. A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled. */
    bool ata;
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /* True if PSTATE.IL is set */
    bool pstate_il;
    /* True if PSTATE.SM is set. */
    bool pstate_sm;
    /* True if PSTATE.ZA is set. */
    bool pstate_za;
    /* True if non-streaming insns should raise an SME Streaming exception. */
    bool sme_trap_nonstreaming;
    /* True if the current instruction is non-streaming. */
    bool is_nonstreaming;
    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
    bool mve_no_pred;
    /* True if fine-grained traps are active */
    bool fgt_active;
    /* True if fine-grained trap on ERET is enabled */
    bool fgt_eret;
    /* True if fine-grained trap on SVC is enabled */
    bool fgt_svc;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     * < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* True if this page is guarded. */
    bool guarded_page;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op of the current insn_start. */
    TCGOp *insn_start;
} DisasContext;
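
/*
 * Illustrative sketch (not part of this header) of how the pc_save
 * scheme above is used when CF_PCREL is in effect: cpu_pc is brought
 * current with a relative update, assuming it still holds the value it
 * had when pc_save was last recorded.  The function name below is
 * hypothetical; gen_a64_update_pc(), declared later in this file, is
 * the real A64 entry point.
 *
 *     static void update_pc_sketch(DisasContext *s, target_long diff)
 *     {
 *         tcg_gen_addi_i64(cpu_pc, cpu_pc, s->pc_curr - s->pc_save + diff);
 *         s->pc_save = s->pc_curr + diff;
 *     }
 */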

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int plus_12(DisasContext *s, int x)
{
    return x + 12;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
    return MO_32 - x;
}
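
/*
 * These expanders are referenced from the decodetree files via
 * !function=.  A minimal sketch of such a field definition (the field
 * name and bit positions here are made up for illustration):
 *
 *     # in a .decode file:
 *     %my_shift  16:5  !function=rsub_32
 *
 * which makes the generated decoder pass the extracted 5-bit field
 * through rsub_32() before storing it in the argument structure.
 */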

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /*
     * We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start to catch multiple updates. */
    assert(s->insn_start != NULL);
    tcg_set_insn_start_param(s->insn_start, 2, syn);
    s->insn_start = NULL;
}

static inline int curr_insn_len(DisasContext *s)
{
    return s->base.pc_next - s->pc_curr;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/*
 * These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/*
 * M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop, so you need to ensure
 * something (gen_a64_update_pc or a runtime helper) has done so before we
 * reach the return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_update_pc(DisasContext *s, target_long diff);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    /* Fill in the same_el field of the syndrome in the helper. */
    uint32_t syn = syn_swstep(false, isv, ex);
    gen_helper_exception_swstep(cpu_env, tcg_constant_i32(syn));
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);
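
/*
 * A worked example (one case, for illustration): the single-precision
 * immediate #1.0 is encoded as imm8 == 0x70, so
 *
 *     vfp_expand_imm(MO_32, 0x70)
 *
 * returns 0x3f800000, the IEEE single-precision bit pattern for 1.0.
 */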

/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
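
/*
 * Sketch of a typical expander call (illustrative only; the offset and
 * size helpers differ between the A64 and A32/Neon translators, and
 * vec_full_reg_offset()/vec_full_reg_size() here stand in for whichever
 * is appropriate to the caller):
 *
 *     gen_gvec_ssra(MO_8, vec_full_reg_offset(s, rd),
 *                   vec_full_reg_offset(s, rm), shift,
 *                   vec_full_reg_size(s), vec_full_reg_size(s));
 */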

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })

/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* Function prototypes for gen_ functions calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details).  Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
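
/*
 * Typical use (sketch; gen_helper_vfp_adds is one of the existing VFP
 * helpers, shown here only for illustration):
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
 *     gen_helper_vfp_adds(dest, a, b, fpst);
 */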

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment
 * and endianness.
 *
 * If (opc & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}
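
/*
 * Typical use in a load emitter (sketch, assuming `addr` has already
 * been computed; tcg_gen_qemu_ld_i32() and get_mem_index() are the
 * real primitives this pairs with):
 *
 *     MemOp mop = finalize_memop(s, MO_UL);
 *     tcg_gen_qemu_ld_i32(dest, addr, get_mem_index(s), mop);
 */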

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value.  This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);

/*
 * gen_disas_label:
 * Create a label and cache a copy of pc_save.
 */
static inline DisasLabel gen_disas_label(DisasContext *s)
{
    return (DisasLabel){
        .label = gen_new_label(),
        .pc_save = s->pc_save,
    };
}

/*
 * set_disas_label:
 * Emit a label and restore the cached copy of pc_save.
 */
static inline void set_disas_label(DisasContext *s, DisasLabel l)
{
    gen_set_label(l.label);
    s->pc_save = l.pc_save;
}
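
/*
 * The two functions above are used as a pair around conditionally
 * executed code, so that pc_save stays coherent on both paths (a
 * sketch; `cond` is an A32 condition code, for illustration):
 *
 *     DisasLabel skip = gen_disas_label(s);
 *     arm_gen_test_cc(cond ^ 1, skip.label); // branch past body if cond fails
 *     ... emit the conditionally executed body ...
 *     set_disas_label(s, skip);
 */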

static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    gen_helper_lookup_cp_reg(ret, cpu_env, tcg_constant_i32(key));
    return ret;
}

/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return FUNC(s, __VA_ARGS__); }
#define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { \
        s->is_nonstreaming = true; \
        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
    }

#endif /* TARGET_ARM_TRANSLATE_H */