/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "exec/exec-all.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "semihosting/semihost.h"
#include "cpregs.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/*
 * Helpers for extracting complex instruction fields
 */

/*
 * For load/store with an unsigned 12 bit immediate scaled by the element
 * size. The input has the immediate field in bits [14:3] and the element
 * size in [2:0].
 */
static int uimm_scaled(DisasContext *s, int x)
{
    unsigned imm = x >> 3;
    unsigned scale = extract32(x, 0, 3);
    return imm << scale;
}
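
/*
 * Worked example (editorial, not from the original source): a 64-bit
 * "LDR x0, [x1, #8]" has element size 3 and imm12 == 1, so x is
 * (1 << 3) | 3; uimm_scaled() then yields imm == 1, scale == 3, and
 * a byte offset of 1 << 3 == 8.
 */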

/* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
static int scale_by_log2_tag_granule(DisasContext *s, int x)
{
    return x << LOG2_TAG_GRANULE;
}

/*
 * Include the generated decoders.
 */

#include "decode-sme-fa64.c.inc"
#include "decode-a64.c.inc"

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(tcg_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(tcg_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 load/store insns which
 * have an "unprivileged load/store" variant. Those insns access
 * EL0 if executed from an EL which has control over EL0 (usually
 * EL1) but behave like normal loads and stores if executed from
 * elsewhere (eg EL3).
 *
 * @unpriv : true for the unprivileged encoding; false for the
 *           normal encoding (in which case we will return the same
 *           thing as get_mem_index()).
 */
static int get_a64_user_mem_index(DisasContext *s, bool unpriv)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (unpriv && s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}

static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), tcg_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
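
/*
 * Editorial note on the and/or trick above: after the sextract,
 * bits [63:56] of dst are copies of bit 55.  For tbi == 1 the
 * extension is only wanted when bit 55 == 0; in that case dst's top
 * byte is already zero and AND-ing with src keeps it zero, while for
 * bit 55 == 1 dst's top byte is all-ones, so AND-ing restores src's
 * original tag byte.  The OR for tbi == 2 is the mirror image.
 */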

static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = tcg_temp_new_i64();
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(tcg_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      MemOp memop, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, MemOp memop)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int total_size, MemOp single_mop)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);

        ret = tcg_temp_new_i64();
        gen_helper_mte_check(ret, tcg_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
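
/*
 * Illustrative example (editorial): a tag-checked LDP of two X
 * registers would go through gen_mte_checkN() with total_size == 16
 * and single_mop == MO_64, so SIZEM1 covers the whole 16-byte
 * transfer while ALIGN still reflects the individual 8-byte accesses.
 */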

/*
 * Generate the special alignment check that applies to AccType_ATOMIC
 * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
 * naturally aligned, but it must not cross a 16-byte boundary.
 * See AArch64.CheckAlignment().
 */
static void check_lse2_align(DisasContext *s, int rn, int imm,
                             bool is_write, MemOp mop)
{
    TCGv_i32 tmp;
    TCGv_i64 addr;
    TCGLabel *over_label;
    MMUAccessType type;
    int mmu_idx;

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
    tcg_gen_addi_i32(tmp, tmp, imm & 15);
    tcg_gen_andi_i32(tmp, tmp, 15);
    tcg_gen_addi_i32(tmp, tmp, memop_size(mop));

    over_label = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);

    type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
    mmu_idx = get_mem_index(s);
    gen_helper_unaligned_access(tcg_env, addr, tcg_constant_i32(type),
                                tcg_constant_i32(mmu_idx));

    gen_set_label(over_label);
}

/* Handle the alignment check for AccType_ATOMIC instructions. */
static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }

    /*
     * If size == MO_128, this is a LDXP, and the operation is single-copy
     * atomic for each doubleword, not the entire quadword; it still must
     * be quadword aligned.
     */
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (dc_isar_feature(aa64_lse2, s)) {
        check_lse2_align(s, rn, 0, true, mop);
    } else {
        mop |= MO_ALIGN;
    }
    return finalize_memop(s, mop);
}

/* Handle the alignment check for AccType_ORDERED instructions. */
static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
                                 bool is_write, MemOp mop)
{
    MemOp size = mop & MO_SIZE;

    if (size == MO_8) {
        return mop;
    }
    if (size == MO_128) {
        return finalize_memop_atom(s, MO_128 | MO_ALIGN,
                                   MO_ATOM_IFALIGN_PAIR);
    }
    if (!dc_isar_feature(aa64_lse2, s)) {
        mop |= MO_ALIGN;
    } else if (!s->naa) {
        check_lse2_align(s, rn, imm, is_write, mop);
    }
    return finalize_memop(s, mop);
}

typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}
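
/*
 * Typical use of DisasCompare64 (editorial sketch): compare c64.value
 * against zero under c64.cond, e.g.
 *
 *     DisasCompare64 c;
 *     a64_test_cc(&c, cond);
 *     tcg_gen_movcond_i64(c.cond, dest, c.value, tcg_constant_i64(0),
 *                         t_true, t_false);
 */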

static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(tcg_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_movi_i64(t, 0);
        return t;
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = tcg_temp_new_i64();
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
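
/*
 * Editorial example: register 31 reads as zero in "ORR x0, xzr, x5"
 * (cpu_reg()/read_cpu_reg()), but as the stack pointer in
 * "ADD x0, sp, #4" (cpu_reg_sp()/read_cpu_reg_sp()); the two encodings
 * differ only in how register number 31 is interpreted.
 */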

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that, as with the GP register accessors, the values returned
 * by the read functions are auto-freed temporaries and may be modified.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, tcg_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, tcg_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, tcg_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, tcg_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}
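
/*
 * Editorial note: in all of these expanders the operation size is
 * 8 or 16 bytes according to the Q bit, while the maximum size is the
 * full vector register length.  The tcg_gen_gvec_* routines clear the
 * bytes between the two, which is what gives the architectural
 * zeroing of the high part of the register for Q == 0 operations.
 */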

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
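
/*
 * Editorial note: gen_set_NZ64 works by splitting the 64-bit result
 * into halves; NF receives the high half, whose bit 31 is the sign of
 * the full result, and OR-ing both halves into ZF makes ZF zero iff
 * the whole 64-bit result is zero.
 */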

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 result, flag, tmp;
    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    tcg_gen_movi_i64(tmp, 0);
    tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    gen_set_NZ64(result);

    tcg_gen_xor_i64(flag, result, t0);
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_andc_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);

    tcg_gen_mov_i64(dest, result);
}

static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_add64_CC(dest, t0, t1);
    } else {
        gen_add32_CC(dest, t0, t1);
    }
}

/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 64 bit arithmetic */
    TCGv_i64 result, flag, tmp;

    result = tcg_temp_new_i64();
    flag = tcg_temp_new_i64();
    tcg_gen_sub_i64(result, t0, t1);

    gen_set_NZ64(result);

    tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
    tcg_gen_extrl_i64_i32(cpu_CF, flag);

    tcg_gen_xor_i64(flag, result, t0);
    tmp = tcg_temp_new_i64();
    tcg_gen_xor_i64(tmp, t0, t1);
    tcg_gen_and_i64(flag, flag, tmp);
    tcg_gen_extrh_i64_i32(cpu_VF, flag);
    tcg_gen_mov_i64(dest, result);
}

static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    /* 32 bit arithmetic */
    TCGv_i32 t0_32 = tcg_temp_new_i32();
    TCGv_i32 t1_32 = tcg_temp_new_i32();
    TCGv_i32 tmp;

    tcg_gen_extrl_i64_i32(t0_32, t0);
    tcg_gen_extrl_i64_i32(t1_32, t1);
    tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0_32, t1_32);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_gen_extu_i32_i64(dest, cpu_NF);
}

static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        gen_sub64_CC(dest, t0, t1);
    } else {
        gen_sub32_CC(dest, t0, t1);
    }
}
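
/*
 * Editorial note: the V flag computations above use the standard
 * two's-complement identity: for addition, overflow occurs iff the
 * operands have the same sign and the result's sign differs, i.e. the
 * top bit of (result ^ t0) & ~(t0 ^ t1); for subtraction the second
 * operand's sign is effectively inverted, giving
 * (result ^ t0) & (t0 ^ t1).
 */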

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
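
/*
 * Editorial note: the two add2 steps in gen_adc_CC compute t0 + CF
 * and then add t1, each with an explicit carry-out, so CF ends up
 * holding the carry of the full T0 + T1 + CF operation without
 * needing a wider intermediate type.
 */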
936 */ 937 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source, 938 TCGv_i64 tcg_addr, MemOp memop, int memidx, 939 bool iss_valid, 940 unsigned int iss_srt, 941 bool iss_sf, bool iss_ar) 942 { 943 tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop); 944 945 if (iss_valid) { 946 uint32_t syn; 947 948 syn = syn_data_abort_with_iss(0, 949 (memop & MO_SIZE), 950 false, 951 iss_srt, 952 iss_sf, 953 iss_ar, 954 0, 0, 0, 0, 0, false); 955 disas_set_insn_syndrome(s, syn); 956 } 957 } 958 959 static void do_gpr_st(DisasContext *s, TCGv_i64 source, 960 TCGv_i64 tcg_addr, MemOp memop, 961 bool iss_valid, 962 unsigned int iss_srt, 963 bool iss_sf, bool iss_ar) 964 { 965 do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s), 966 iss_valid, iss_srt, iss_sf, iss_ar); 967 } 968 969 /* 970 * Load from memory to GPR register 971 */ 972 static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, 973 MemOp memop, bool extend, int memidx, 974 bool iss_valid, unsigned int iss_srt, 975 bool iss_sf, bool iss_ar) 976 { 977 tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop); 978 979 if (extend && (memop & MO_SIGN)) { 980 g_assert((memop & MO_SIZE) <= MO_32); 981 tcg_gen_ext32u_i64(dest, dest); 982 } 983 984 if (iss_valid) { 985 uint32_t syn; 986 987 syn = syn_data_abort_with_iss(0, 988 (memop & MO_SIZE), 989 (memop & MO_SIGN) != 0, 990 iss_srt, 991 iss_sf, 992 iss_ar, 993 0, 0, 0, 0, 0, false); 994 disas_set_insn_syndrome(s, syn); 995 } 996 } 997 998 static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr, 999 MemOp memop, bool extend, 1000 bool iss_valid, unsigned int iss_srt, 1001 bool iss_sf, bool iss_ar) 1002 { 1003 do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s), 1004 iss_valid, iss_srt, iss_sf, iss_ar); 1005 } 1006 1007 /* 1008 * Store from FP register to memory 1009 */ 1010 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop) 1011 { 1012 /* This writes the bottom N bits of a 128 bit wide vector to memory */ 1013 TCGv_i64 tmplo = tcg_temp_new_i64(); 1014 1015 tcg_gen_ld_i64(tmplo, tcg_env, fp_reg_offset(s, srcidx, MO_64)); 1016 1017 if ((mop & MO_SIZE) < MO_128) { 1018 tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop); 1019 } else { 1020 TCGv_i64 tmphi = tcg_temp_new_i64(); 1021 TCGv_i128 t16 = tcg_temp_new_i128(); 1022 1023 tcg_gen_ld_i64(tmphi, tcg_env, fp_reg_hi_offset(s, srcidx)); 1024 tcg_gen_concat_i64_i128(t16, tmplo, tmphi); 1025 1026 tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop); 1027 } 1028 } 1029 1030 /* 1031 * Load from memory to FP register 1032 */ 1033 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop) 1034 { 1035 /* This always zero-extends and writes to a full 128 bit wide vector */ 1036 TCGv_i64 tmplo = tcg_temp_new_i64(); 1037 TCGv_i64 tmphi = NULL; 1038 1039 if ((mop & MO_SIZE) < MO_128) { 1040 tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop); 1041 } else { 1042 TCGv_i128 t16 = tcg_temp_new_i128(); 1043 1044 tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop); 1045 1046 tmphi = tcg_temp_new_i64(); 1047 tcg_gen_extr_i128_i64(tmplo, tmphi, t16); 1048 } 1049 1050 tcg_gen_st_i64(tmplo, tcg_env, fp_reg_offset(s, destidx, MO_64)); 1051 1052 if (tmphi) { 1053 tcg_gen_st_i64(tmphi, tcg_env, fp_reg_hi_offset(s, destidx)); 1054 } 1055 clear_vec_high(s, tmphi != NULL, destidx); 1056 } 1057 1058 /* 1059 * Vector load/store helpers. 

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32 | MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, tcg_env, vect_off);
        break;
    case MO_64:
    case MO_64 | MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_8 | MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_16 | MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, tcg_env, vect_off);
        break;
    case MO_32:
    case MO_32 | MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, tcg_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, tcg_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, tcg_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
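
/*
 * Editorial note: masking with MO_SIZE when computing vect_off means
 * the MO_SIGN bit only selects between the sign- and zero-extending
 * load variants above; it never changes which bytes of the vector
 * register are addressed.
 */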

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}
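
/*
 * Editorial sketch of the check hierarchy: fp_access_check_only()
 * performs the base FP/Neon enable check; fp_access_check() adds the
 * SME streaming-mode trap; sve_access_check() corresponds to
 * CheckSVEEnabled() and layers on top of the FP check; and
 * sme_enabled_check() (CheckSMEEnabled) chooses between the SME and
 * FP traps according to which exception level's trap has priority.
 */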

/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}

/*
 * Expanders for AdvSIMD translation functions.
 */

static bool do_gvec_op2_ool(DisasContext *s, arg_qrr_e *a, int data,
                            gen_helper_gvec_2 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op2_ool(s, a->q, a->rd, a->rn, data, fn);
    }
    return true;
}

static bool do_gvec_op3_ool(DisasContext *s, arg_qrrr_e *a, int data,
                            gen_helper_gvec_3 *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, data, fn);
    }
    return true;
}

static bool do_gvec_fn3(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn3(s, a->q, a->rd, a->rn, a->rm, fn, a->esz);
    }
    return true;
}

static bool do_gvec_fn3_no8_no64(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
{
    if (a->esz == MO_8) {
        return false;
    }
    return do_gvec_fn3_no64(s, a, fn);
}

static bool do_gvec_fn4(DisasContext *s, arg_qrrrr_e *a, GVecGen4Fn *fn)
{
    if (!a->q && a->esz == MO_64) {
        return false;
    }
    if (fp_access_check(s)) {
        gen_gvec_fn4(s, a->q, a->rd, a->rn, a->rm, a->ra, fn, a->esz);
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    tcg_gen_ext_i64(tcg_out, tcg_in, extsize | (is_signed ? MO_SIGN : 0));
    tcg_gen_shli_i64(tcg_out, tcg_out, shift);
}
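
/*
 * Worked example (editorial): for "ADD x0, x1, w2, SXTW #2",
 * option == 6 decodes as extsize == MO_32 with is_signed set, so
 * ext_and_shift_reg() sign-extends the low 32 bits of x2 and then
 * shifts the result left by 2.
 */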

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

static bool trans_B(DisasContext *s, arg_i *a)
{
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_BL(DisasContext *s, arg_i *a)
{
    gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    reset_btype(s);
    gen_goto_tb(s, 0, a->imm);
    return true;
}

static bool trans_CBZ(DisasContext *s, arg_cbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_TBZ(DisasContext *s, arg_tbz *a)
{
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, a->imm);
    return true;
}

static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
{
    /* BC.cond is only present with FEAT_HBC */
    if (a->c && !dc_isar_feature(aa64_hbc, s)) {
        return false;
    }
    reset_btype(s);
    if (a->cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(a->cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, a->imm);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, a->imm);
    }
    return true;
}
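
/*
 * Editorial note: the conditional branches above share one two-TB
 * pattern: fall through to gen_goto_tb(s, 0, 4) for the not-taken
 * path (the next sequential insn) and branch to gen_goto_tb(s, 1,
 * a->imm) for the taken path, so both edges can be chained
 * independently.
 */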

static void set_btype_for_br(DisasContext *s, int rn)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BR to {x16,x17} or !guard -> 1, else 3.  */
        set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
    }
}

static void set_btype_for_blr(DisasContext *s)
{
    if (dc_isar_feature(aa64_bti, s)) {
        /* BLR sets BTYPE to 2, regardless of source guarded page.  */
        set_btype(s, 2);
    }
}

static bool trans_BR(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLR(DisasContext *s, arg_r *a)
{
    TCGv_i64 dst = cpu_reg(s, a->rn);
    TCGv_i64 lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RET(DisasContext *s, arg_r *a)
{
    gen_a64_set_pc(s, cpu_reg(s, a->rn));
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
                                   TCGv_i64 modifier, bool use_key_a)
{
    TCGv_i64 truedst;
    /*
     * Return the branch target for a BRAA/RETA/etc, which is either
     * just the destination dst, or that value with the pauth check
     * done and the code removed from the high bits.
     */
    if (!s->pauth_active) {
        return dst;
    }

    truedst = tcg_temp_new_i64();
    if (use_key_a) {
        gen_helper_autia_combined(truedst, tcg_env, dst, modifier);
    } else {
        gen_helper_autib_combined(truedst, tcg_env, dst, modifier);
    }
    return truedst;
}

static bool trans_BRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }

    dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_RETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
    gen_a64_set_pc(s, dst);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_BRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    gen_a64_set_pc(s, dst);
    set_btype_for_br(s, a->rn);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}
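
/*
 * Editorial note: the dst == lr checks in the BLR-family translators
 * guard the rn == 30 case, where writing the return address into LR
 * before the jump would clobber the branch target; copying dst into
 * a temporary first keeps the two updates independent.
 */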

static bool trans_BLRA(DisasContext *s, arg_bra *a)
{
    TCGv_i64 dst, lr;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
    lr = cpu_reg(s, 30);
    if (dst == lr) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_mov_i64(tmp, dst);
        dst = tmp;
    }
    gen_pc_plus_diff(s, lr, curr_insn_len(s));
    gen_a64_set_pc(s, dst);
    set_btype_for_blr(s);
    s->base.is_jmp = DISAS_JUMP;
    return true;
}

static bool trans_ERET(DisasContext *s, arg_ERET *a)
{
    TCGv_i64 dst;

    if (s->current_el == 0) {
        return false;
    }
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_ERETA(DisasContext *s, arg_reta *a)
{
    TCGv_i64 dst;

    if (!dc_isar_feature(aa64_pauth, s)) {
        return false;
    }
    if (s->current_el == 0) {
        return false;
    }
    /* The FGT trap takes precedence over an auth trap. */
    if (s->trap_eret) {
        gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
        return true;
    }
    dst = tcg_temp_new_i64();
    tcg_gen_ld_i64(dst, tcg_env,
                   offsetof(CPUARMState, elr_el[s->current_el]));

    dst = auth_branch_target(s, dst, cpu_X[31], !a->m);

    translator_io_start(&s->base);

    gen_helper_exception_return(tcg_env, dst);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_NOP(DisasContext *s, arg_NOP *a)
{
    return true;
}

static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_YIELD;
    }
    return true;
}

static bool trans_WFI(DisasContext *s, arg_WFI *a)
{
    s->base.is_jmp = DISAS_WFI;
    return true;
}

static bool trans_WFE(DisasContext *s, arg_WFI *a)
{
    /*
     * When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}
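
/*
 * Editorial note: WFI is always routed through DISAS_WFI so the main
 * loop can halt the vCPU, whereas YIELD and WFE only end the TB when
 * not in MTTCG parallel mode, where giving up the rest of the time
 * slice can actually benefit another vCPU.
 */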

static bool trans_WFIT(DisasContext *s, arg_WFIT *a)
{
    if (!dc_isar_feature(aa64_wfxt, s)) {
        return false;
    }

    /*
     * Because we need to pass the register value to the helper,
     * it's easier to emit the code now, unlike trans_WFI which
     * defers it to aarch64_tr_tb_stop(). That means we need to
     * check ss_active so that single-stepping a WFIT doesn't halt.
     */
    if (s->ss_active) {
        /* Act like a NOP under architectural singlestep */
        return true;
    }

    gen_a64_update_pc(s, 4);
    gen_helper_wfit(tcg_env, cpu_reg(s, a->rd));
    /* Go back to the main loop to check for interrupts */
    s->base.is_jmp = DISAS_EXIT;
    return true;
}

static bool trans_WFET(DisasContext *s, arg_WFET *a)
{
    if (!dc_isar_feature(aa64_wfxt, s)) {
        return false;
    }

    /*
     * We rely here on our WFE implementation being a NOP, so we
     * don't need to do anything different to handle the WFET timeout
     * from what trans_WFE does.
     */
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        s->base.is_jmp = DISAS_WFE;
    }
    return true;
}

static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
{
    if (s->pauth_active) {
        gen_helper_xpaci(cpu_X[30], tcg_env, cpu_X[30]);
    }
    return true;
}

static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}

static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[17], tcg_env, cpu_X[17], cpu_X[16]);
    }
    return true;
}
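
/*
 * Editorial note: the *1716 hint-space forms above hard-code x17 as
 * the pointer and x16 as the modifier, per the architected register
 * convention for these hints; the Z forms below use a zero modifier
 * and the SP forms use register 31 as the stack pointer.
 */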

static bool trans_ESB(DisasContext *s, arg_ESB *a)
{
    /* Without RAS, we must implement this as NOP. */
    if (dc_isar_feature(aa64_ras, s)) {
        /*
         * QEMU does not have a source of physical SErrors,
         * so we are only concerned with virtual SErrors.
         * The pseudocode in the ARM for this case is
         *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
         *     AArch64.vESBOperation();
         * Most of the condition can be evaluated at translation time.
         * Test for EL2 present, and defer test for SEL2 to runtime.
         */
        if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
            gen_helper_vesb(tcg_env);
        }
    }
    return true;
}

static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
{
    if (s->pauth_active) {
        gen_helper_pacia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_pacib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
{
    if (s->pauth_active) {
        gen_helper_autia(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], tcg_constant_i64(0));
    }
    return true;
}

static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
{
    if (s->pauth_active) {
        gen_helper_autib(cpu_X[30], tcg_env, cpu_X[30], cpu_X[31]);
    }
    return true;
}

static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
    return true;
}

static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
{
    /* We handle DSB and DMB the same way */
    TCGBar bar;

    switch (a->types) {
    case 1: /* MBReqTypes_Reads */
        bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
        break;
    case 2: /* MBReqTypes_Writes */
        bar = TCG_BAR_SC | TCG_MO_ST_ST;
        break;
    default: /* MBReqTypes_All */
        bar = TCG_BAR_SC | TCG_MO_ALL;
        break;
    }
    tcg_gen_mb(bar);
    return true;
}

static bool trans_ISB(DisasContext *s, arg_ISB *a)
{
    /*
     * We need to break the TB after this insn to execute
     * self-modifying code correctly and also to take
     * any pending interrupts immediately.
     */
    reset_btype(s);
    gen_goto_tb(s, 0, 4);
    return true;
}
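
/*
 * Editorial note: DSB and DMB map to the same barrier above because
 * TCG has no notion of DSB's completion semantics; only the ordering
 * constraints are representable (load-load/load-store for Reads,
 * store-store for Writes, everything for All).
 */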
1959 */ 1960 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); 1961 gen_goto_tb(s, 0, 4); 1962 return true; 1963 } 1964 1965 static bool trans_CFINV(DisasContext *s, arg_CFINV *a) 1966 { 1967 if (!dc_isar_feature(aa64_condm_4, s)) { 1968 return false; 1969 } 1970 tcg_gen_xori_i32(cpu_CF, cpu_CF, 1); 1971 return true; 1972 } 1973 1974 static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a) 1975 { 1976 TCGv_i32 z; 1977 1978 if (!dc_isar_feature(aa64_condm_5, s)) { 1979 return false; 1980 } 1981 1982 z = tcg_temp_new_i32(); 1983 1984 tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0); 1985 1986 /* 1987 * (!C & !Z) << 31 1988 * (!(C | Z)) << 31 1989 * ~((C | Z) << 31) 1990 * ~-(C | Z) 1991 * (C | Z) - 1 1992 */ 1993 tcg_gen_or_i32(cpu_NF, cpu_CF, z); 1994 tcg_gen_subi_i32(cpu_NF, cpu_NF, 1); 1995 1996 /* !(Z & C) */ 1997 tcg_gen_and_i32(cpu_ZF, z, cpu_CF); 1998 tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1); 1999 2000 /* (!C & Z) << 31 -> -(Z & ~C) */ 2001 tcg_gen_andc_i32(cpu_VF, z, cpu_CF); 2002 tcg_gen_neg_i32(cpu_VF, cpu_VF); 2003 2004 /* C | Z */ 2005 tcg_gen_or_i32(cpu_CF, cpu_CF, z); 2006 2007 return true; 2008 } 2009 2010 static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a) 2011 { 2012 if (!dc_isar_feature(aa64_condm_5, s)) { 2013 return false; 2014 } 2015 2016 tcg_gen_sari_i32(cpu_VF, cpu_VF, 31); /* V ? -1 : 0 */ 2017 tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF); /* C & !V */ 2018 2019 /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */ 2020 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF); 2021 2022 tcg_gen_movi_i32(cpu_NF, 0); 2023 tcg_gen_movi_i32(cpu_VF, 0); 2024 2025 return true; 2026 } 2027 2028 static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a) 2029 { 2030 if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) { 2031 return false; 2032 } 2033 if (a->imm & 1) { 2034 set_pstate_bits(PSTATE_UAO); 2035 } else { 2036 clear_pstate_bits(PSTATE_UAO); 2037 } 2038 gen_rebuild_hflags(s); 2039 s->base.is_jmp = DISAS_TOO_MANY; 2040 return true; 2041 } 2042 2043 static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a) 2044 { 2045 if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) { 2046 return false; 2047 } 2048 if (a->imm & 1) { 2049 set_pstate_bits(PSTATE_PAN); 2050 } else { 2051 clear_pstate_bits(PSTATE_PAN); 2052 } 2053 gen_rebuild_hflags(s); 2054 s->base.is_jmp = DISAS_TOO_MANY; 2055 return true; 2056 } 2057 2058 static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a) 2059 { 2060 if (s->current_el == 0) { 2061 return false; 2062 } 2063 gen_helper_msr_i_spsel(tcg_env, tcg_constant_i32(a->imm & PSTATE_SP)); 2064 s->base.is_jmp = DISAS_TOO_MANY; 2065 return true; 2066 } 2067 2068 static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a) 2069 { 2070 if (!dc_isar_feature(aa64_ssbs, s)) { 2071 return false; 2072 } 2073 if (a->imm & 1) { 2074 set_pstate_bits(PSTATE_SSBS); 2075 } else { 2076 clear_pstate_bits(PSTATE_SSBS); 2077 } 2078 /* Don't need to rebuild hflags since SSBS is a nop */ 2079 s->base.is_jmp = DISAS_TOO_MANY; 2080 return true; 2081 } 2082 2083 static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a) 2084 { 2085 if (!dc_isar_feature(aa64_dit, s)) { 2086 return false; 2087 } 2088 if (a->imm & 1) { 2089 set_pstate_bits(PSTATE_DIT); 2090 } else { 2091 clear_pstate_bits(PSTATE_DIT); 2092 } 2093 /* There's no need to rebuild hflags because DIT is a nop */ 2094 s->base.is_jmp = DISAS_TOO_MANY; 2095 return true; 2096 } 2097 2098 static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a) 2099 { 2100 if (dc_isar_feature(aa64_mte, s)) { 2101 /* Full MTE is enabled -- set the TCO bit as directed. 
*/ 2102 if (a->imm & 1) { 2103 set_pstate_bits(PSTATE_TCO); 2104 } else { 2105 clear_pstate_bits(PSTATE_TCO); 2106 } 2107 gen_rebuild_hflags(s); 2108 /* Many factors, including TCO, go into MTE_ACTIVE. */ 2109 s->base.is_jmp = DISAS_UPDATE_NOCHAIN; 2110 return true; 2111 } else if (dc_isar_feature(aa64_mte_insn_reg, s)) { 2112 /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */ 2113 return true; 2114 } else { 2115 /* Insn not present */ 2116 return false; 2117 } 2118 } 2119 2120 static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a) 2121 { 2122 gen_helper_msr_i_daifset(tcg_env, tcg_constant_i32(a->imm)); 2123 s->base.is_jmp = DISAS_TOO_MANY; 2124 return true; 2125 } 2126 2127 static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a) 2128 { 2129 gen_helper_msr_i_daifclear(tcg_env, tcg_constant_i32(a->imm)); 2130 /* Exit the cpu loop to re-evaluate pending IRQs. */ 2131 s->base.is_jmp = DISAS_UPDATE_EXIT; 2132 return true; 2133 } 2134 2135 static bool trans_MSR_i_ALLINT(DisasContext *s, arg_i *a) 2136 { 2137 if (!dc_isar_feature(aa64_nmi, s) || s->current_el == 0) { 2138 return false; 2139 } 2140 2141 if (a->imm == 0) { 2142 clear_pstate_bits(PSTATE_ALLINT); 2143 } else if (s->current_el > 1) { 2144 set_pstate_bits(PSTATE_ALLINT); 2145 } else { 2146 gen_helper_msr_set_allint_el1(tcg_env); 2147 } 2148 2149 /* Exit the cpu loop to re-evaluate pending IRQs. */ 2150 s->base.is_jmp = DISAS_UPDATE_EXIT; 2151 return true; 2152 } 2153 2154 static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a) 2155 { 2156 if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) { 2157 return false; 2158 } 2159 if (sme_access_check(s)) { 2160 int old = s->pstate_sm | (s->pstate_za << 1); 2161 int new = a->imm * 3; 2162 2163 if ((old ^ new) & a->mask) { 2164 /* At least one bit changes. */ 2165 gen_helper_set_svcr(tcg_env, tcg_constant_i32(new), 2166 tcg_constant_i32(a->mask)); 2167 s->base.is_jmp = DISAS_TOO_MANY; 2168 } 2169 } 2170 return true; 2171 } 2172 2173 static void gen_get_nzcv(TCGv_i64 tcg_rt) 2174 { 2175 TCGv_i32 tmp = tcg_temp_new_i32(); 2176 TCGv_i32 nzcv = tcg_temp_new_i32(); 2177 2178 /* build bit 31, N */ 2179 tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31)); 2180 /* build bit 30, Z */ 2181 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0); 2182 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1); 2183 /* build bit 29, C */ 2184 tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1); 2185 /* build bit 28, V */ 2186 tcg_gen_shri_i32(tmp, cpu_VF, 31); 2187 tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1); 2188 /* generate result */ 2189 tcg_gen_extu_i32_i64(tcg_rt, nzcv); 2190 } 2191 2192 static void gen_set_nzcv(TCGv_i64 tcg_rt) 2193 { 2194 TCGv_i32 nzcv = tcg_temp_new_i32(); 2195 2196 /* take NZCV from R[t] */ 2197 tcg_gen_extrl_i64_i32(nzcv, tcg_rt); 2198 2199 /* bit 31, N */ 2200 tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31)); 2201 /* bit 30, Z */ 2202 tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30)); 2203 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0); 2204 /* bit 29, C */ 2205 tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29)); 2206 tcg_gen_shri_i32(cpu_CF, cpu_CF, 29); 2207 /* bit 28, V */ 2208 tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28)); 2209 tcg_gen_shli_i32(cpu_VF, cpu_VF, 3); 2210 } 2211 2212 static void gen_sysreg_undef(DisasContext *s, bool isread, 2213 uint8_t op0, uint8_t op1, uint8_t op2, 2214 uint8_t crn, uint8_t crm, uint8_t rt) 2215 { 2216 /* 2217 * Generate code to emit an UNDEF with correct syndrome 2218 * information for a failed system register access. 
2219 * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases, 2220 * but if FEAT_IDST is implemented then read accesses to registers 2221 * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP 2222 * syndrome. 2223 */ 2224 uint32_t syndrome; 2225 2226 if (isread && dc_isar_feature(aa64_ids, s) && 2227 arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) { 2228 syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); 2229 } else { 2230 syndrome = syn_uncategorized(); 2231 } 2232 gen_exception_insn(s, 0, EXCP_UDEF, syndrome); 2233 } 2234 2235 /* MRS - move from system register 2236 * MSR (register) - move to system register 2237 * SYS 2238 * SYSL 2239 * These are all essentially the same insn in 'read' and 'write' 2240 * versions, with varying op0 fields. 2241 */ 2242 static void handle_sys(DisasContext *s, bool isread, 2243 unsigned int op0, unsigned int op1, unsigned int op2, 2244 unsigned int crn, unsigned int crm, unsigned int rt) 2245 { 2246 uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 2247 crn, crm, op0, op1, op2); 2248 const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); 2249 bool need_exit_tb = false; 2250 bool nv_trap_to_el2 = false; 2251 bool nv_redirect_reg = false; 2252 bool skip_fp_access_checks = false; 2253 bool nv2_mem_redirect = false; 2254 TCGv_ptr tcg_ri = NULL; 2255 TCGv_i64 tcg_rt; 2256 uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); 2257 2258 if (crn == 11 || crn == 15) { 2259 /* 2260 * Check for TIDCP trap, which must take precedence over 2261 * the UNDEF for "no such register" etc. 2262 */ 2263 switch (s->current_el) { 2264 case 0: 2265 if (dc_isar_feature(aa64_tidcp1, s)) { 2266 gen_helper_tidcp_el0(tcg_env, tcg_constant_i32(syndrome)); 2267 } 2268 break; 2269 case 1: 2270 gen_helper_tidcp_el1(tcg_env, tcg_constant_i32(syndrome)); 2271 break; 2272 } 2273 } 2274 2275 if (!ri) { 2276 /* Unknown register; this might be a guest error or a QEMU 2277 * unimplemented feature. 2278 */ 2279 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 " 2280 "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n", 2281 isread ? "read" : "write", op0, op1, crn, crm, op2); 2282 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); 2283 return; 2284 } 2285 2286 if (s->nv2 && ri->nv2_redirect_offset) { 2287 /* 2288 * Some registers always redirect to memory; some only do so if 2289 * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in 2290 * pairs which share an offset; see the table in R_CSRPQ). 2291 */ 2292 if (ri->nv2_redirect_offset & NV2_REDIR_NV1) { 2293 nv2_mem_redirect = s->nv1; 2294 } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) { 2295 nv2_mem_redirect = !s->nv1; 2296 } else { 2297 nv2_mem_redirect = true; 2298 } 2299 } 2300 2301 /* Check access permissions */ 2302 if (!cp_access_ok(s->current_el, ri, isread)) { 2303 /* 2304 * FEAT_NV/NV2 handling does not do the usual FP access checks 2305 * for registers only accessible at EL2 (though it *does* do them 2306 * for registers accessible at EL1). 2307 */ 2308 skip_fp_access_checks = true; 2309 if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) { 2310 /* 2311 * This is one of the few EL2 registers which should redirect 2312 * to the equivalent EL1 register. We do that after running 2313 * the EL2 register's accessfn. 2314 */ 2315 nv_redirect_reg = true; 2316 assert(!nv2_mem_redirect); 2317 } else if (nv2_mem_redirect) { 2318 /* 2319 * NV2 redirect-to-memory takes precedence over trap to EL2 or 2320 * UNDEF to EL1. 
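 * Nothing needs to be generated here: fall through with
 * nv2_mem_redirect still set, so that neither the trap nor the
 * UNDEF is taken; the memory access itself is emitted further down.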
2321 */ 2322 } else if (s->nv && arm_cpreg_traps_in_nv(ri)) { 2323 /* 2324 * This register / instruction exists and is an EL2 register, so 2325 * we must trap to EL2 if accessed in nested virtualization EL1 2326 * instead of UNDEFing. We'll do that after the usual access checks. 2327 * (This makes a difference only for a couple of registers like 2328 * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority 2329 * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have 2330 * an accessfn which does nothing when called from EL1, because 2331 * the trap-to-EL3 controls which would apply to that register 2332 * at EL2 don't take priority over the FEAT_NV trap-to-EL2.) 2333 */ 2334 nv_trap_to_el2 = true; 2335 } else { 2336 gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); 2337 return; 2338 } 2339 } 2340 2341 if (ri->accessfn || (ri->fgt && s->fgt_active)) { 2342 /* Emit code to perform further access permissions checks at 2343 * runtime; this may result in an exception. 2344 */ 2345 gen_a64_update_pc(s, 0); 2346 tcg_ri = tcg_temp_new_ptr(); 2347 gen_helper_access_check_cp_reg(tcg_ri, tcg_env, 2348 tcg_constant_i32(key), 2349 tcg_constant_i32(syndrome), 2350 tcg_constant_i32(isread)); 2351 } else if (ri->type & ARM_CP_RAISES_EXC) { 2352 /* 2353 * The readfn or writefn might raise an exception; 2354 * synchronize the CPU state in case it does. 2355 */ 2356 gen_a64_update_pc(s, 0); 2357 } 2358 2359 if (!skip_fp_access_checks) { 2360 if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { 2361 return; 2362 } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { 2363 return; 2364 } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { 2365 return; 2366 } 2367 } 2368 2369 if (nv_trap_to_el2) { 2370 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); 2371 return; 2372 } 2373 2374 if (nv_redirect_reg) { 2375 /* 2376 * FEAT_NV2 redirection of an EL2 register to an EL1 register. 2377 * Conveniently in all cases the encoding of the EL1 register is 2378 * identical to the EL2 register except that opc1 is 0. 2379 * Get the reginfo for the EL1 register to use for the actual access. 2380 * We don't use the EL1 register's access function, and 2381 * fine-grained-traps on EL1 also do not apply here. 2382 */ 2383 key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 2384 crn, crm, op0, 0, op2); 2385 ri = get_arm_cp_reginfo(s->cp_regs, key); 2386 assert(ri); 2387 assert(cp_access_ok(s->current_el, ri, isread)); 2388 /* 2389 * We might not have done an update_pc earlier, so check we don't 2390 * need it. We could support this in future if necessary. 2391 */ 2392 assert(!(ri->type & ARM_CP_RAISES_EXC)); 2393 } 2394 2395 if (nv2_mem_redirect) { 2396 /* 2397 * This system register is being redirected into an EL2 memory access. 2398 * This means it is not an IO operation, doesn't change hflags, 2399 * and need not end the TB, because it has no side effects. 2400 * 2401 * The access is 64-bit single copy atomic, guaranteed aligned because 2402 * of the definition of VCNR_EL2. Its endianness depends on 2403 * SCTLR_EL2.EE, not on the data endianness of EL1. 2404 * It is done under either the EL2 translation regime or the EL2&0 2405 * translation regime, depending on HCR_EL2.E2H. It behaves as if 2406 * PSTATE.PAN is 0. 2407 */ 2408 TCGv_i64 ptr = tcg_temp_new_i64(); 2409 MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN; 2410 ARMMMUIdx armmemidx = s->nv2_mem_e20 ? 
ARMMMUIdx_E20_2 : ARMMMUIdx_E2; 2411 int memidx = arm_to_core_mmu_idx(armmemidx); 2412 uint32_t syn; 2413 2414 mop |= (s->nv2_mem_be ? MO_BE : MO_LE); 2415 2416 tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2)); 2417 tcg_gen_addi_i64(ptr, ptr, 2418 (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK)); 2419 tcg_rt = cpu_reg(s, rt); 2420 2421 syn = syn_data_abort_vncr(0, !isread, 0); 2422 disas_set_insn_syndrome(s, syn); 2423 if (isread) { 2424 tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop); 2425 } else { 2426 tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop); 2427 } 2428 return; 2429 } 2430 2431 /* Handle special cases first */ 2432 switch (ri->type & ARM_CP_SPECIAL_MASK) { 2433 case 0: 2434 break; 2435 case ARM_CP_NOP: 2436 return; 2437 case ARM_CP_NZCV: 2438 tcg_rt = cpu_reg(s, rt); 2439 if (isread) { 2440 gen_get_nzcv(tcg_rt); 2441 } else { 2442 gen_set_nzcv(tcg_rt); 2443 } 2444 return; 2445 case ARM_CP_CURRENTEL: 2446 { 2447 /* 2448 * Reads as current EL value from pstate, which is 2449 * guaranteed to be constant by the tb flags. 2450 * For nested virt we should report EL2. 2451 */ 2452 int el = s->nv ? 2 : s->current_el; 2453 tcg_rt = cpu_reg(s, rt); 2454 tcg_gen_movi_i64(tcg_rt, el << 2); 2455 return; 2456 } 2457 case ARM_CP_DC_ZVA: 2458 /* Writes clear the aligned block of memory which rt points into. */ 2459 if (s->mte_active[0]) { 2460 int desc = 0; 2461 2462 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); 2463 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 2464 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 2465 2466 tcg_rt = tcg_temp_new_i64(); 2467 gen_helper_mte_check_zva(tcg_rt, tcg_env, 2468 tcg_constant_i32(desc), cpu_reg(s, rt)); 2469 } else { 2470 tcg_rt = clean_data_tbi(s, cpu_reg(s, rt)); 2471 } 2472 gen_helper_dc_zva(tcg_env, tcg_rt); 2473 return; 2474 case ARM_CP_DC_GVA: 2475 { 2476 TCGv_i64 clean_addr, tag; 2477 2478 /* 2479 * DC_GVA, like DC_ZVA, requires that we supply the original 2480 * pointer for an invalid page. Probe that address first. 2481 */ 2482 tcg_rt = cpu_reg(s, rt); 2483 clean_addr = clean_data_tbi(s, tcg_rt); 2484 gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8); 2485 2486 if (s->ata[0]) { 2487 /* Extract the tag from the register to match STZGM. */ 2488 tag = tcg_temp_new_i64(); 2489 tcg_gen_shri_i64(tag, tcg_rt, 56); 2490 gen_helper_stzgm_tags(tcg_env, clean_addr, tag); 2491 } 2492 } 2493 return; 2494 case ARM_CP_DC_GZVA: 2495 { 2496 TCGv_i64 clean_addr, tag; 2497 2498 /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */ 2499 tcg_rt = cpu_reg(s, rt); 2500 clean_addr = clean_data_tbi(s, tcg_rt); 2501 gen_helper_dc_zva(tcg_env, clean_addr); 2502 2503 if (s->ata[0]) { 2504 /* Extract the tag from the register to match STZGM. 
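 * (The shift by 56 below moves the tag byte to the low bits of the
 * temporary; architecturally the allocation tag proper is only bits
 * [59:56] of the pointer, so the helper should only care about the
 * low four of them.)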
*/ 2505 tag = tcg_temp_new_i64(); 2506 tcg_gen_shri_i64(tag, tcg_rt, 56); 2507 gen_helper_stzgm_tags(tcg_env, clean_addr, tag); 2508 } 2509 } 2510 return; 2511 default: 2512 g_assert_not_reached(); 2513 } 2514 2515 if (ri->type & ARM_CP_IO) { 2516 /* I/O operations must end the TB here (whether read or write) */ 2517 need_exit_tb = translator_io_start(&s->base); 2518 } 2519 2520 tcg_rt = cpu_reg(s, rt); 2521 2522 if (isread) { 2523 if (ri->type & ARM_CP_CONST) { 2524 tcg_gen_movi_i64(tcg_rt, ri->resetvalue); 2525 } else if (ri->readfn) { 2526 if (!tcg_ri) { 2527 tcg_ri = gen_lookup_cp_reg(key); 2528 } 2529 gen_helper_get_cp_reg64(tcg_rt, tcg_env, tcg_ri); 2530 } else { 2531 tcg_gen_ld_i64(tcg_rt, tcg_env, ri->fieldoffset); 2532 } 2533 } else { 2534 if (ri->type & ARM_CP_CONST) { 2535 /* If not forbidden by access permissions, treat as WI */ 2536 return; 2537 } else if (ri->writefn) { 2538 if (!tcg_ri) { 2539 tcg_ri = gen_lookup_cp_reg(key); 2540 } 2541 gen_helper_set_cp_reg64(tcg_env, tcg_ri, tcg_rt); 2542 } else { 2543 tcg_gen_st_i64(tcg_rt, tcg_env, ri->fieldoffset); 2544 } 2545 } 2546 2547 if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) { 2548 /* 2549 * A write to any coprocessor register that ends a TB 2550 * must rebuild the hflags for the next TB. 2551 */ 2552 gen_rebuild_hflags(s); 2553 /* 2554 * We default to ending the TB on a coprocessor register write, 2555 * but allow this to be suppressed by the register definition 2556 * (usually only necessary to work around guest bugs). 2557 */ 2558 need_exit_tb = true; 2559 } 2560 if (need_exit_tb) { 2561 s->base.is_jmp = DISAS_UPDATE_EXIT; 2562 } 2563 } 2564 2565 static bool trans_SYS(DisasContext *s, arg_SYS *a) 2566 { 2567 handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt); 2568 return true; 2569 } 2570 2571 static bool trans_SVC(DisasContext *s, arg_i *a) 2572 { 2573 /* 2574 * For SVC, HVC and SMC we advance the single-step state 2575 * machine before taking the exception. This is architecturally 2576 * mandated, to ensure that single-stepping a system call 2577 * instruction works properly. 2578 */ 2579 uint32_t syndrome = syn_aa64_svc(a->imm); 2580 if (s->fgt_svc) { 2581 gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); 2582 return true; 2583 } 2584 gen_ss_advance(s); 2585 gen_exception_insn(s, 4, EXCP_SWI, syndrome); 2586 return true; 2587 } 2588 2589 static bool trans_HVC(DisasContext *s, arg_i *a) 2590 { 2591 int target_el = s->current_el == 3 ? 3 : 2; 2592 2593 if (s->current_el == 0) { 2594 unallocated_encoding(s); 2595 return true; 2596 } 2597 /* 2598 * The pre HVC helper handles cases when HVC gets trapped 2599 * as an undefined insn by runtime configuration. 
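 * (An illustrative example would be an HVC executed while
 * SCR_EL3.HCE forbids it; the precise set of conditions is the
 * business of helper_pre_hvc, not of the translator.)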
2600 */ 2601 gen_a64_update_pc(s, 0); 2602 gen_helper_pre_hvc(tcg_env); 2603 /* Architecture requires ss advance before we do the actual work */ 2604 gen_ss_advance(s); 2605 gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), target_el); 2606 return true; 2607 } 2608 2609 static bool trans_SMC(DisasContext *s, arg_i *a) 2610 { 2611 if (s->current_el == 0) { 2612 unallocated_encoding(s); 2613 return true; 2614 } 2615 gen_a64_update_pc(s, 0); 2616 gen_helper_pre_smc(tcg_env, tcg_constant_i32(syn_aa64_smc(a->imm))); 2617 /* Architecture requires ss advance before we do the actual work */ 2618 gen_ss_advance(s); 2619 gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3); 2620 return true; 2621 } 2622 2623 static bool trans_BRK(DisasContext *s, arg_i *a) 2624 { 2625 gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm)); 2626 return true; 2627 } 2628 2629 static bool trans_HLT(DisasContext *s, arg_i *a) 2630 { 2631 /* 2632 * HLT. This has two purposes. 2633 * Architecturally, it is an external halting debug instruction. 2634 * Since QEMU doesn't implement external debug, we treat this as 2635 * it is required for halting debug disabled: it will UNDEF. 2636 * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction. 2637 */ 2638 if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) { 2639 gen_exception_internal_insn(s, EXCP_SEMIHOST); 2640 } else { 2641 unallocated_encoding(s); 2642 } 2643 return true; 2644 } 2645 2646 /* 2647 * Load/Store exclusive instructions are implemented by remembering 2648 * the value/address loaded, and seeing if these are the same 2649 * when the store is performed. This is not actually the architecturally 2650 * mandated semantics, but it works for typical guest code sequences 2651 * and avoids having to monitor regular stores. 2652 * 2653 * The store exclusive uses the atomic cmpxchg primitives to avoid 2654 * races in multi-threaded linux-user and when MTTCG softmmu is 2655 * enabled. 
2656 */ 2657 static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn, 2658 int size, bool is_pair) 2659 { 2660 int idx = get_mem_index(s); 2661 TCGv_i64 dirty_addr, clean_addr; 2662 MemOp memop = check_atomic_align(s, rn, size + is_pair); 2663 2664 s->is_ldex = true; 2665 dirty_addr = cpu_reg_sp(s, rn); 2666 clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop); 2667 2668 g_assert(size <= 3); 2669 if (is_pair) { 2670 g_assert(size >= 2); 2671 if (size == 2) { 2672 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); 2673 if (s->be_data == MO_LE) { 2674 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32); 2675 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32); 2676 } else { 2677 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32); 2678 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32); 2679 } 2680 } else { 2681 TCGv_i128 t16 = tcg_temp_new_i128(); 2682 2683 tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop); 2684 2685 if (s->be_data == MO_LE) { 2686 tcg_gen_extr_i128_i64(cpu_exclusive_val, 2687 cpu_exclusive_high, t16); 2688 } else { 2689 tcg_gen_extr_i128_i64(cpu_exclusive_high, 2690 cpu_exclusive_val, t16); 2691 } 2692 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); 2693 tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high); 2694 } 2695 } else { 2696 tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop); 2697 tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val); 2698 } 2699 tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr); 2700 } 2701 2702 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, 2703 int rn, int size, int is_pair) 2704 { 2705 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr] 2706 * && (!is_pair || env->exclusive_high == [addr + datasize])) { 2707 * [addr] = {Rt}; 2708 * if (is_pair) { 2709 * [addr + datasize] = {Rt2}; 2710 * } 2711 * {Rd} = 0; 2712 * } else { 2713 * {Rd} = 1; 2714 * } 2715 * env->exclusive_addr = -1; 2716 */ 2717 TCGLabel *fail_label = gen_new_label(); 2718 TCGLabel *done_label = gen_new_label(); 2719 TCGv_i64 tmp, clean_addr; 2720 MemOp memop; 2721 2722 /* 2723 * FIXME: We are out of spec here. We have recorded only the address 2724 * from load_exclusive, not the entire range, and we assume that the 2725 * size of the access on both sides match. The architecture allows the 2726 * store to be smaller than the load, so long as the stored bytes are 2727 * within the range recorded by the load. 2728 */ 2729 2730 /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */ 2731 clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn)); 2732 tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label); 2733 2734 /* 2735 * The write, and any associated faults, only happen if the virtual 2736 * and physical addresses pass the exclusive monitor check. These 2737 * faults are exceedingly unlikely, because normally the guest uses 2738 * the exact same address register for the load_exclusive, and we 2739 * would have recognized these faults there. 2740 * 2741 * It is possible to trigger an alignment fault pre-LSE2, e.g. with an 2742 * unaligned 4-byte write within the range of an aligned 8-byte load. 2743 * With LSE2, the store would need to cross a 16-byte boundary when the 2744 * load did not, which would mean the store is outside the range 2745 * recorded for the monitor, which would have failed a corrected monitor 2746 * check above. 
For now, we assume no size change and retain the 2747 * MO_ALIGN to let tcg know what we checked in the load_exclusive. 2748 * 2749 * It is possible to trigger an MTE fault, by performing the load with 2750 * a virtual address with a valid tag and performing the store with the 2751 * same virtual address and a different invalid tag. 2752 */ 2753 memop = size + is_pair; 2754 if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) { 2755 memop |= MO_ALIGN; 2756 } 2757 memop = finalize_memop(s, memop); 2758 gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); 2759 2760 tmp = tcg_temp_new_i64(); 2761 if (is_pair) { 2762 if (size == 2) { 2763 if (s->be_data == MO_LE) { 2764 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2)); 2765 } else { 2766 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt)); 2767 } 2768 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, 2769 cpu_exclusive_val, tmp, 2770 get_mem_index(s), memop); 2771 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); 2772 } else { 2773 TCGv_i128 t16 = tcg_temp_new_i128(); 2774 TCGv_i128 c16 = tcg_temp_new_i128(); 2775 TCGv_i64 a, b; 2776 2777 if (s->be_data == MO_LE) { 2778 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2)); 2779 tcg_gen_concat_i64_i128(c16, cpu_exclusive_val, 2780 cpu_exclusive_high); 2781 } else { 2782 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt)); 2783 tcg_gen_concat_i64_i128(c16, cpu_exclusive_high, 2784 cpu_exclusive_val); 2785 } 2786 2787 tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16, 2788 get_mem_index(s), memop); 2789 2790 a = tcg_temp_new_i64(); 2791 b = tcg_temp_new_i64(); 2792 if (s->be_data == MO_LE) { 2793 tcg_gen_extr_i128_i64(a, b, t16); 2794 } else { 2795 tcg_gen_extr_i128_i64(b, a, t16); 2796 } 2797 2798 tcg_gen_xor_i64(a, a, cpu_exclusive_val); 2799 tcg_gen_xor_i64(b, b, cpu_exclusive_high); 2800 tcg_gen_or_i64(tmp, a, b); 2801 2802 tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0); 2803 } 2804 } else { 2805 tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val, 2806 cpu_reg(s, rt), get_mem_index(s), memop); 2807 tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); 2808 } 2809 tcg_gen_mov_i64(cpu_reg(s, rd), tmp); 2810 tcg_gen_br(done_label); 2811 2812 gen_set_label(fail_label); 2813 tcg_gen_movi_i64(cpu_reg(s, rd), 1); 2814 gen_set_label(done_label); 2815 tcg_gen_movi_i64(cpu_exclusive_addr, -1); 2816 } 2817 2818 static void gen_compare_and_swap(DisasContext *s, int rs, int rt, 2819 int rn, int size) 2820 { 2821 TCGv_i64 tcg_rs = cpu_reg(s, rs); 2822 TCGv_i64 tcg_rt = cpu_reg(s, rt); 2823 int memidx = get_mem_index(s); 2824 TCGv_i64 clean_addr; 2825 MemOp memop; 2826 2827 if (rn == 31) { 2828 gen_check_sp_alignment(s); 2829 } 2830 memop = check_atomic_align(s, rn, size); 2831 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); 2832 tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, 2833 memidx, memop); 2834 } 2835 2836 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, 2837 int rn, int size) 2838 { 2839 TCGv_i64 s1 = cpu_reg(s, rs); 2840 TCGv_i64 s2 = cpu_reg(s, rs + 1); 2841 TCGv_i64 t1 = cpu_reg(s, rt); 2842 TCGv_i64 t2 = cpu_reg(s, rt + 1); 2843 TCGv_i64 clean_addr; 2844 int memidx = get_mem_index(s); 2845 MemOp memop; 2846 2847 if (rn == 31) { 2848 gen_check_sp_alignment(s); 2849 } 2850 2851 /* This is a single atomic access, despite the "pair". 
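 * (check_atomic_align below is passed size + 1, the doubled element
 * size, so a CASP of two 64-bit registers becomes a single 128-bit
 * cmpxchg and a CASP of two 32-bit registers a single 64-bit one.)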
*/ 2852 memop = check_atomic_align(s, rn, size + 1); 2853 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop); 2854 2855 if (size == 2) { 2856 TCGv_i64 cmp = tcg_temp_new_i64(); 2857 TCGv_i64 val = tcg_temp_new_i64(); 2858 2859 if (s->be_data == MO_LE) { 2860 tcg_gen_concat32_i64(val, t1, t2); 2861 tcg_gen_concat32_i64(cmp, s1, s2); 2862 } else { 2863 tcg_gen_concat32_i64(val, t2, t1); 2864 tcg_gen_concat32_i64(cmp, s2, s1); 2865 } 2866 2867 tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop); 2868 2869 if (s->be_data == MO_LE) { 2870 tcg_gen_extr32_i64(s1, s2, cmp); 2871 } else { 2872 tcg_gen_extr32_i64(s2, s1, cmp); 2873 } 2874 } else { 2875 TCGv_i128 cmp = tcg_temp_new_i128(); 2876 TCGv_i128 val = tcg_temp_new_i128(); 2877 2878 if (s->be_data == MO_LE) { 2879 tcg_gen_concat_i64_i128(val, t1, t2); 2880 tcg_gen_concat_i64_i128(cmp, s1, s2); 2881 } else { 2882 tcg_gen_concat_i64_i128(val, t2, t1); 2883 tcg_gen_concat_i64_i128(cmp, s2, s1); 2884 } 2885 2886 tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop); 2887 2888 if (s->be_data == MO_LE) { 2889 tcg_gen_extr_i128_i64(s1, s2, cmp); 2890 } else { 2891 tcg_gen_extr_i128_i64(s2, s1, cmp); 2892 } 2893 } 2894 } 2895 2896 /* 2897 * Compute the ISS.SF bit for syndrome information if an exception 2898 * is taken on a load or store. This indicates whether the instruction 2899 * is accessing a 32-bit or 64-bit register. This logic is derived 2900 * from the ARMv8 specs for LDR (Shared decode for all encodings). 2901 */ 2902 static bool ldst_iss_sf(int size, bool sign, bool ext) 2903 { 2904 2905 if (sign) { 2906 /* 2907 * Signed loads are 64 bit results if we are not going to 2908 * do a zero-extend from 32 to 64 after the load. 2909 * (For a store, sign and ext are always false.) 2910 */ 2911 return !ext; 2912 } else { 2913 /* Unsigned loads/stores work at the specified size */ 2914 return size == MO_64; 2915 } 2916 } 2917 2918 static bool trans_STXR(DisasContext *s, arg_stxr *a) 2919 { 2920 if (a->rn == 31) { 2921 gen_check_sp_alignment(s); 2922 } 2923 if (a->lasr) { 2924 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); 2925 } 2926 gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false); 2927 return true; 2928 } 2929 2930 static bool trans_LDXR(DisasContext *s, arg_stxr *a) 2931 { 2932 if (a->rn == 31) { 2933 gen_check_sp_alignment(s); 2934 } 2935 gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false); 2936 if (a->lasr) { 2937 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); 2938 } 2939 return true; 2940 } 2941 2942 static bool trans_STLR(DisasContext *s, arg_stlr *a) 2943 { 2944 TCGv_i64 clean_addr; 2945 MemOp memop; 2946 bool iss_sf = ldst_iss_sf(a->sz, false, false); 2947 2948 /* 2949 * StoreLORelease is the same as Store-Release for QEMU, but 2950 * needs the feature-test. 2951 */ 2952 if (!a->lasr && !dc_isar_feature(aa64_lor, s)) { 2953 return false; 2954 } 2955 /* Generate ISS for non-exclusive accesses including LASR. 
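 * (The syndrome inputs -- iss_sf computed above, the register number
 * and the acquire/release flag -- are passed through to do_gpr_st
 * below.)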
*/ 2956 if (a->rn == 31) { 2957 gen_check_sp_alignment(s); 2958 } 2959 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); 2960 memop = check_ordered_align(s, a->rn, 0, true, a->sz); 2961 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), 2962 true, a->rn != 31, memop); 2963 do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt, 2964 iss_sf, a->lasr); 2965 return true; 2966 } 2967 2968 static bool trans_LDAR(DisasContext *s, arg_stlr *a) 2969 { 2970 TCGv_i64 clean_addr; 2971 MemOp memop; 2972 bool iss_sf = ldst_iss_sf(a->sz, false, false); 2973 2974 /* LoadLOAcquire is the same as Load-Acquire for QEMU. */ 2975 if (!a->lasr && !dc_isar_feature(aa64_lor, s)) { 2976 return false; 2977 } 2978 /* Generate ISS for non-exclusive accesses including LASR. */ 2979 if (a->rn == 31) { 2980 gen_check_sp_alignment(s); 2981 } 2982 memop = check_ordered_align(s, a->rn, 0, false, a->sz); 2983 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), 2984 false, a->rn != 31, memop); 2985 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true, 2986 a->rt, iss_sf, a->lasr); 2987 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); 2988 return true; 2989 } 2990 2991 static bool trans_STXP(DisasContext *s, arg_stxr *a) 2992 { 2993 if (a->rn == 31) { 2994 gen_check_sp_alignment(s); 2995 } 2996 if (a->lasr) { 2997 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); 2998 } 2999 gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true); 3000 return true; 3001 } 3002 3003 static bool trans_LDXP(DisasContext *s, arg_stxr *a) 3004 { 3005 if (a->rn == 31) { 3006 gen_check_sp_alignment(s); 3007 } 3008 gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true); 3009 if (a->lasr) { 3010 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); 3011 } 3012 return true; 3013 } 3014 3015 static bool trans_CASP(DisasContext *s, arg_CASP *a) 3016 { 3017 if (!dc_isar_feature(aa64_atomics, s)) { 3018 return false; 3019 } 3020 if (((a->rt | a->rs) & 1) != 0) { 3021 return false; 3022 } 3023 3024 gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz); 3025 return true; 3026 } 3027 3028 static bool trans_CAS(DisasContext *s, arg_CAS *a) 3029 { 3030 if (!dc_isar_feature(aa64_atomics, s)) { 3031 return false; 3032 } 3033 gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz); 3034 return true; 3035 } 3036 3037 static bool trans_LD_lit(DisasContext *s, arg_ldlit *a) 3038 { 3039 bool iss_sf = ldst_iss_sf(a->sz, a->sign, false); 3040 TCGv_i64 tcg_rt = cpu_reg(s, a->rt); 3041 TCGv_i64 clean_addr = tcg_temp_new_i64(); 3042 MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN); 3043 3044 gen_pc_plus_diff(s, clean_addr, a->imm); 3045 do_gpr_ld(s, tcg_rt, clean_addr, memop, 3046 false, true, a->rt, iss_sf, false); 3047 return true; 3048 } 3049 3050 static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a) 3051 { 3052 /* Load register (literal), vector version */ 3053 TCGv_i64 clean_addr; 3054 MemOp memop; 3055 3056 if (!fp_access_check(s)) { 3057 return true; 3058 } 3059 memop = finalize_memop_asimd(s, a->sz); 3060 clean_addr = tcg_temp_new_i64(); 3061 gen_pc_plus_diff(s, clean_addr, a->imm); 3062 do_fp_ld(s, a->rt, clean_addr, memop); 3063 return true; 3064 } 3065 3066 static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a, 3067 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr, 3068 uint64_t offset, bool is_store, MemOp mop) 3069 { 3070 if (a->rn == 31) { 3071 gen_check_sp_alignment(s); 3072 } 3073 3074 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3075 if (!a->p) { 3076 tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset); 3077 } 3078 3079 *clean_addr = gen_mte_checkN(s, 
*dirty_addr, is_store, 3080 (a->w || a->rn != 31), 2 << a->sz, mop); 3081 } 3082 3083 static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a, 3084 TCGv_i64 dirty_addr, uint64_t offset) 3085 { 3086 if (a->w) { 3087 if (a->p) { 3088 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); 3089 } 3090 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr); 3091 } 3092 } 3093 3094 static bool trans_STP(DisasContext *s, arg_ldstpair *a) 3095 { 3096 uint64_t offset = a->imm << a->sz; 3097 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2; 3098 MemOp mop = finalize_memop(s, a->sz); 3099 3100 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop); 3101 tcg_rt = cpu_reg(s, a->rt); 3102 tcg_rt2 = cpu_reg(s, a->rt2); 3103 /* 3104 * We built mop above for the single logical access -- rebuild it 3105 * now for the paired operation. 3106 * 3107 * With LSE2, non-sign-extending pairs are treated atomically if 3108 * aligned, and if unaligned one of the pair will be completely 3109 * within a 16-byte block and that element will be atomic. 3110 * Otherwise each element is separately atomic. 3111 * In all cases, issue one operation with the correct atomicity. 3112 */ 3113 mop = a->sz + 1; 3114 if (s->align_mem) { 3115 mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8); 3116 } 3117 mop = finalize_memop_pair(s, mop); 3118 if (a->sz == 2) { 3119 TCGv_i64 tmp = tcg_temp_new_i64(); 3120 3121 if (s->be_data == MO_LE) { 3122 tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2); 3123 } else { 3124 tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt); 3125 } 3126 tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop); 3127 } else { 3128 TCGv_i128 tmp = tcg_temp_new_i128(); 3129 3130 if (s->be_data == MO_LE) { 3131 tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2); 3132 } else { 3133 tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt); 3134 } 3135 tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop); 3136 } 3137 op_addr_ldstpair_post(s, a, dirty_addr, offset); 3138 return true; 3139 } 3140 3141 static bool trans_LDP(DisasContext *s, arg_ldstpair *a) 3142 { 3143 uint64_t offset = a->imm << a->sz; 3144 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2; 3145 MemOp mop = finalize_memop(s, a->sz); 3146 3147 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop); 3148 tcg_rt = cpu_reg(s, a->rt); 3149 tcg_rt2 = cpu_reg(s, a->rt2); 3150 3151 /* 3152 * We built mop above for the single logical access -- rebuild it 3153 * now for the paired operation. 3154 * 3155 * With LSE2, non-sign-extending pairs are treated atomically if 3156 * aligned, and if unaligned one of the pair will be completely 3157 * within a 16-byte block and that element will be atomic. 3158 * Otherwise each element is separately atomic. 3159 * In all cases, issue one operation with the correct atomicity. 3160 * 3161 * This treats sign-extending loads like zero-extending loads, 3162 * since that reuses the most code below. 3163 */ 3164 mop = a->sz + 1; 3165 if (s->align_mem) { 3166 mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8); 3167 } 3168 mop = finalize_memop_pair(s, mop); 3169 if (a->sz == 2) { 3170 int o2 = s->be_data == MO_LE ? 
32 : 0; 3171 int o1 = o2 ^ 32; 3172 3173 tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop); 3174 if (a->sign) { 3175 tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32); 3176 tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32); 3177 } else { 3178 tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32); 3179 tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32); 3180 } 3181 } else { 3182 TCGv_i128 tmp = tcg_temp_new_i128(); 3183 3184 tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop); 3185 if (s->be_data == MO_LE) { 3186 tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp); 3187 } else { 3188 tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp); 3189 } 3190 } 3191 op_addr_ldstpair_post(s, a, dirty_addr, offset); 3192 return true; 3193 } 3194 3195 static bool trans_STP_v(DisasContext *s, arg_ldstpair *a) 3196 { 3197 uint64_t offset = a->imm << a->sz; 3198 TCGv_i64 clean_addr, dirty_addr; 3199 MemOp mop; 3200 3201 if (!fp_access_check(s)) { 3202 return true; 3203 } 3204 3205 /* LSE2 does not merge FP pairs; leave these as separate operations. */ 3206 mop = finalize_memop_asimd(s, a->sz); 3207 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop); 3208 do_fp_st(s, a->rt, clean_addr, mop); 3209 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz); 3210 do_fp_st(s, a->rt2, clean_addr, mop); 3211 op_addr_ldstpair_post(s, a, dirty_addr, offset); 3212 return true; 3213 } 3214 3215 static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a) 3216 { 3217 uint64_t offset = a->imm << a->sz; 3218 TCGv_i64 clean_addr, dirty_addr; 3219 MemOp mop; 3220 3221 if (!fp_access_check(s)) { 3222 return true; 3223 } 3224 3225 /* LSE2 does not merge FP pairs; leave these as separate operations. */ 3226 mop = finalize_memop_asimd(s, a->sz); 3227 op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop); 3228 do_fp_ld(s, a->rt, clean_addr, mop); 3229 tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz); 3230 do_fp_ld(s, a->rt2, clean_addr, mop); 3231 op_addr_ldstpair_post(s, a, dirty_addr, offset); 3232 return true; 3233 } 3234 3235 static bool trans_STGP(DisasContext *s, arg_ldstpair *a) 3236 { 3237 TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2; 3238 uint64_t offset = a->imm << LOG2_TAG_GRANULE; 3239 MemOp mop; 3240 TCGv_i128 tmp; 3241 3242 /* STGP only comes in one size. */ 3243 tcg_debug_assert(a->sz == MO_64); 3244 3245 if (!dc_isar_feature(aa64_mte_insn_reg, s)) { 3246 return false; 3247 } 3248 3249 if (a->rn == 31) { 3250 gen_check_sp_alignment(s); 3251 } 3252 3253 dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3254 if (!a->p) { 3255 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); 3256 } 3257 3258 clean_addr = clean_data_tbi(s, dirty_addr); 3259 tcg_rt = cpu_reg(s, a->rt); 3260 tcg_rt2 = cpu_reg(s, a->rt2); 3261 3262 /* 3263 * STGP is defined as two 8-byte memory operations, aligned to TAG_GRANULE, 3264 * and one tag operation. We implement it as one single aligned 16-byte 3265 * memory operation for convenience. Note that the alignment ensures 3266 * MO_ATOM_IFALIGN_PAIR produces 8-byte atomicity for the memory store. 3267 */ 3268 mop = finalize_memop_atom(s, MO_128 | MO_ALIGN, MO_ATOM_IFALIGN_PAIR); 3269 3270 tmp = tcg_temp_new_i128(); 3271 if (s->be_data == MO_LE) { 3272 tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2); 3273 } else { 3274 tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt); 3275 } 3276 tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop); 3277 3278 /* Perform the tag store, if tag access enabled. 
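 * Note that dirty_addr is passed both as the location and as the
 * source of the allocation tag, since STGP takes the tag from the
 * address register itself; the parallel helper variant exists so
 * that tag memory is updated atomically under MTTCG.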
*/ 3279 if (s->ata[0]) { 3280 if (tb_cflags(s->base.tb) & CF_PARALLEL) { 3281 gen_helper_stg_parallel(tcg_env, dirty_addr, dirty_addr); 3282 } else { 3283 gen_helper_stg(tcg_env, dirty_addr, dirty_addr); 3284 } 3285 } 3286 3287 op_addr_ldstpair_post(s, a, dirty_addr, offset); 3288 return true; 3289 } 3290 3291 static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a, 3292 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr, 3293 uint64_t offset, bool is_store, MemOp mop) 3294 { 3295 int memidx; 3296 3297 if (a->rn == 31) { 3298 gen_check_sp_alignment(s); 3299 } 3300 3301 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3302 if (!a->p) { 3303 tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset); 3304 } 3305 memidx = get_a64_user_mem_index(s, a->unpriv); 3306 *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store, 3307 a->w || a->rn != 31, 3308 mop, a->unpriv, memidx); 3309 } 3310 3311 static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a, 3312 TCGv_i64 dirty_addr, uint64_t offset) 3313 { 3314 if (a->w) { 3315 if (a->p) { 3316 tcg_gen_addi_i64(dirty_addr, dirty_addr, offset); 3317 } 3318 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr); 3319 } 3320 } 3321 3322 static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a) 3323 { 3324 bool iss_sf, iss_valid = !a->w; 3325 TCGv_i64 clean_addr, dirty_addr, tcg_rt; 3326 int memidx = get_a64_user_mem_index(s, a->unpriv); 3327 MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); 3328 3329 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop); 3330 3331 tcg_rt = cpu_reg(s, a->rt); 3332 iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3333 3334 do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx, 3335 iss_valid, a->rt, iss_sf, false); 3336 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm); 3337 return true; 3338 } 3339 3340 static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a) 3341 { 3342 bool iss_sf, iss_valid = !a->w; 3343 TCGv_i64 clean_addr, dirty_addr, tcg_rt; 3344 int memidx = get_a64_user_mem_index(s, a->unpriv); 3345 MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); 3346 3347 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop); 3348 3349 tcg_rt = cpu_reg(s, a->rt); 3350 iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3351 3352 do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop, 3353 a->ext, memidx, iss_valid, a->rt, iss_sf, false); 3354 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm); 3355 return true; 3356 } 3357 3358 static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a) 3359 { 3360 TCGv_i64 clean_addr, dirty_addr; 3361 MemOp mop; 3362 3363 if (!fp_access_check(s)) { 3364 return true; 3365 } 3366 mop = finalize_memop_asimd(s, a->sz); 3367 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop); 3368 do_fp_st(s, a->rt, clean_addr, mop); 3369 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm); 3370 return true; 3371 } 3372 3373 static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a) 3374 { 3375 TCGv_i64 clean_addr, dirty_addr; 3376 MemOp mop; 3377 3378 if (!fp_access_check(s)) { 3379 return true; 3380 } 3381 mop = finalize_memop_asimd(s, a->sz); 3382 op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop); 3383 do_fp_ld(s, a->rt, clean_addr, mop); 3384 op_addr_ldst_imm_post(s, a, dirty_addr, a->imm); 3385 return true; 3386 } 3387 3388 static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a, 3389 TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr, 3390 bool is_store, MemOp memop) 3391 { 3392 TCGv_i64 tcg_rm; 3393 3394 if (a->rn == 31) { 3395 
gen_check_sp_alignment(s); 3396 } 3397 *dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3398 3399 tcg_rm = read_cpu_reg(s, a->rm, 1); 3400 ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0); 3401 3402 tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm); 3403 *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop); 3404 } 3405 3406 static bool trans_LDR(DisasContext *s, arg_ldst *a) 3407 { 3408 TCGv_i64 clean_addr, dirty_addr, tcg_rt; 3409 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3410 MemOp memop; 3411 3412 if (extract32(a->opt, 1, 1) == 0) { 3413 return false; 3414 } 3415 3416 memop = finalize_memop(s, a->sz + a->sign * MO_SIGN); 3417 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop); 3418 tcg_rt = cpu_reg(s, a->rt); 3419 do_gpr_ld(s, tcg_rt, clean_addr, memop, 3420 a->ext, true, a->rt, iss_sf, false); 3421 return true; 3422 } 3423 3424 static bool trans_STR(DisasContext *s, arg_ldst *a) 3425 { 3426 TCGv_i64 clean_addr, dirty_addr, tcg_rt; 3427 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3428 MemOp memop; 3429 3430 if (extract32(a->opt, 1, 1) == 0) { 3431 return false; 3432 } 3433 3434 memop = finalize_memop(s, a->sz); 3435 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop); 3436 tcg_rt = cpu_reg(s, a->rt); 3437 do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false); 3438 return true; 3439 } 3440 3441 static bool trans_LDR_v(DisasContext *s, arg_ldst *a) 3442 { 3443 TCGv_i64 clean_addr, dirty_addr; 3444 MemOp memop; 3445 3446 if (extract32(a->opt, 1, 1) == 0) { 3447 return false; 3448 } 3449 3450 if (!fp_access_check(s)) { 3451 return true; 3452 } 3453 3454 memop = finalize_memop_asimd(s, a->sz); 3455 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop); 3456 do_fp_ld(s, a->rt, clean_addr, memop); 3457 return true; 3458 } 3459 3460 static bool trans_STR_v(DisasContext *s, arg_ldst *a) 3461 { 3462 TCGv_i64 clean_addr, dirty_addr; 3463 MemOp memop; 3464 3465 if (extract32(a->opt, 1, 1) == 0) { 3466 return false; 3467 } 3468 3469 if (!fp_access_check(s)) { 3470 return true; 3471 } 3472 3473 memop = finalize_memop_asimd(s, a->sz); 3474 op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop); 3475 do_fp_st(s, a->rt, clean_addr, memop); 3476 return true; 3477 } 3478 3479 3480 static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn, 3481 int sign, bool invert) 3482 { 3483 MemOp mop = a->sz | sign; 3484 TCGv_i64 clean_addr, tcg_rs, tcg_rt; 3485 3486 if (a->rn == 31) { 3487 gen_check_sp_alignment(s); 3488 } 3489 mop = check_atomic_align(s, a->rn, mop); 3490 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false, 3491 a->rn != 31, mop); 3492 tcg_rs = read_cpu_reg(s, a->rs, true); 3493 tcg_rt = cpu_reg(s, a->rt); 3494 if (invert) { 3495 tcg_gen_not_i64(tcg_rs, tcg_rs); 3496 } 3497 /* 3498 * The tcg atomic primitives are all full barriers. Therefore we 3499 * can ignore the Acquire and Release bits of this instruction. 
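 * (A full barrier is strictly stronger than either acquire or
 * release ordering, so this is conservative rather than incorrect.)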
3500 */ 3501 fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop); 3502 3503 if (mop & MO_SIGN) { 3504 switch (a->sz) { 3505 case MO_8: 3506 tcg_gen_ext8u_i64(tcg_rt, tcg_rt); 3507 break; 3508 case MO_16: 3509 tcg_gen_ext16u_i64(tcg_rt, tcg_rt); 3510 break; 3511 case MO_32: 3512 tcg_gen_ext32u_i64(tcg_rt, tcg_rt); 3513 break; 3514 case MO_64: 3515 break; 3516 default: 3517 g_assert_not_reached(); 3518 } 3519 } 3520 return true; 3521 } 3522 3523 TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false) 3524 TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true) 3525 TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false) 3526 TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false) 3527 TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false) 3528 TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false) 3529 TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false) 3530 TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false) 3531 TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false) 3532 3533 static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a) 3534 { 3535 bool iss_sf = ldst_iss_sf(a->sz, false, false); 3536 TCGv_i64 clean_addr; 3537 MemOp mop; 3538 3539 if (!dc_isar_feature(aa64_atomics, s) || 3540 !dc_isar_feature(aa64_rcpc_8_3, s)) { 3541 return false; 3542 } 3543 if (a->rn == 31) { 3544 gen_check_sp_alignment(s); 3545 } 3546 mop = check_atomic_align(s, a->rn, a->sz); 3547 clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false, 3548 a->rn != 31, mop); 3549 /* 3550 * LDAPR* are a special case because they are a simple load, not a 3551 * fetch-and-do-something op. 3552 * The architectural consistency requirements here are weaker than 3553 * full load-acquire (we only need "load-acquire processor consistent"), 3554 * but we choose to implement them as full LDAQ. 3555 */ 3556 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false, 3557 true, a->rt, iss_sf, true); 3558 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); 3559 return true; 3560 } 3561 3562 static bool trans_LDRA(DisasContext *s, arg_LDRA *a) 3563 { 3564 TCGv_i64 clean_addr, dirty_addr, tcg_rt; 3565 MemOp memop; 3566 3567 /* Load with pointer authentication */ 3568 if (!dc_isar_feature(aa64_pauth, s)) { 3569 return false; 3570 } 3571 3572 if (a->rn == 31) { 3573 gen_check_sp_alignment(s); 3574 } 3575 dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3576 3577 if (s->pauth_active) { 3578 if (!a->m) { 3579 gen_helper_autda_combined(dirty_addr, tcg_env, dirty_addr, 3580 tcg_constant_i64(0)); 3581 } else { 3582 gen_helper_autdb_combined(dirty_addr, tcg_env, dirty_addr, 3583 tcg_constant_i64(0)); 3584 } 3585 } 3586 3587 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm); 3588 3589 memop = finalize_memop(s, MO_64); 3590 3591 /* Note that "clean" and "dirty" here refer to TBI not PAC. 
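 * When pauth is active, any PAC bits have already been dealt with
 * by the autda/autdb helper above; all that gen_mte_check1 cleans
 * here is the top-byte-ignore part of the address.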
*/ 3592 clean_addr = gen_mte_check1(s, dirty_addr, false, 3593 a->w || a->rn != 31, memop); 3594 3595 tcg_rt = cpu_reg(s, a->rt); 3596 do_gpr_ld(s, tcg_rt, clean_addr, memop, 3597 /* extend */ false, /* iss_valid */ !a->w, 3598 /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false); 3599 3600 if (a->w) { 3601 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr); 3602 } 3603 return true; 3604 } 3605 3606 static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a) 3607 { 3608 TCGv_i64 clean_addr, dirty_addr; 3609 MemOp mop = a->sz | (a->sign ? MO_SIGN : 0); 3610 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3611 3612 if (!dc_isar_feature(aa64_rcpc_8_4, s)) { 3613 return false; 3614 } 3615 3616 if (a->rn == 31) { 3617 gen_check_sp_alignment(s); 3618 } 3619 3620 mop = check_ordered_align(s, a->rn, a->imm, false, mop); 3621 dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3622 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm); 3623 clean_addr = clean_data_tbi(s, dirty_addr); 3624 3625 /* 3626 * Load-AcquirePC semantics; we implement as the slightly more 3627 * restrictive Load-Acquire. 3628 */ 3629 do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true, 3630 a->rt, iss_sf, true); 3631 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); 3632 return true; 3633 } 3634 3635 static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a) 3636 { 3637 TCGv_i64 clean_addr, dirty_addr; 3638 MemOp mop = a->sz; 3639 bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext); 3640 3641 if (!dc_isar_feature(aa64_rcpc_8_4, s)) { 3642 return false; 3643 } 3644 3645 /* TODO: ARMv8.4-LSE SCTLR.nAA */ 3646 3647 if (a->rn == 31) { 3648 gen_check_sp_alignment(s); 3649 } 3650 3651 mop = check_ordered_align(s, a->rn, a->imm, true, mop); 3652 dirty_addr = read_cpu_reg_sp(s, a->rn, 1); 3653 tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm); 3654 clean_addr = clean_data_tbi(s, dirty_addr); 3655 3656 /* Store-Release semantics */ 3657 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); 3658 do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true); 3659 return true; 3660 } 3661 3662 static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a) 3663 { 3664 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3665 MemOp endian, align, mop; 3666 3667 int total; /* total bytes */ 3668 int elements; /* elements per vector */ 3669 int r; 3670 int size = a->sz; 3671 3672 if (!a->p && a->rm != 0) { 3673 /* For non-postindexed accesses the Rm field must be 0 */ 3674 return false; 3675 } 3676 if (size == 3 && !a->q && a->selem != 1) { 3677 return false; 3678 } 3679 if (!fp_access_check(s)) { 3680 return true; 3681 } 3682 3683 if (a->rn == 31) { 3684 gen_check_sp_alignment(s); 3685 } 3686 3687 /* For our purposes, bytes are always little-endian. */ 3688 endian = s->be_data; 3689 if (size == 0) { 3690 endian = MO_LE; 3691 } 3692 3693 total = a->rpt * a->selem * (a->q ? 16 : 8); 3694 tcg_rn = cpu_reg_sp(s, a->rn); 3695 3696 /* 3697 * Issue the MTE check vs the logical repeat count, before we 3698 * promote consecutive little-endian elements below. 3699 */ 3700 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total, 3701 finalize_memop_asimd(s, size)); 3702 3703 /* 3704 * Consecutive little-endian elements from a single register 3705 * can be promoted to a larger little-endian operation. 3706 */ 3707 align = MO_ALIGN; 3708 if (a->selem == 1 && endian == MO_LE) { 3709 align = pow2_align(size); 3710 size = 3; 3711 } 3712 if (!s->align_mem) { 3713 align = 0; 3714 } 3715 mop = endian | size | align; 3716 3717 elements = (a->q ? 
16 : 8) >> size; 3718 tcg_ebytes = tcg_constant_i64(1 << size); 3719 for (r = 0; r < a->rpt; r++) { 3720 int e; 3721 for (e = 0; e < elements; e++) { 3722 int xs; 3723 for (xs = 0; xs < a->selem; xs++) { 3724 int tt = (a->rt + r + xs) % 32; 3725 do_vec_ld(s, tt, e, clean_addr, mop); 3726 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3727 } 3728 } 3729 } 3730 3731 /* 3732 * For non-quad operations, setting a slice of the low 64 bits of 3733 * the register clears the high 64 bits (in the ARM ARM pseudocode 3734 * this is implicit in the fact that 'rval' is a 64 bit wide 3735 * variable). For quad operations, we might still need to zero 3736 * the high bits of SVE. 3737 */ 3738 for (r = 0; r < a->rpt * a->selem; r++) { 3739 int tt = (a->rt + r) % 32; 3740 clear_vec_high(s, a->q, tt); 3741 } 3742 3743 if (a->p) { 3744 if (a->rm == 31) { 3745 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3746 } else { 3747 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm)); 3748 } 3749 } 3750 return true; 3751 } 3752 3753 static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a) 3754 { 3755 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3756 MemOp endian, align, mop; 3757 3758 int total; /* total bytes */ 3759 int elements; /* elements per vector */ 3760 int r; 3761 int size = a->sz; 3762 3763 if (!a->p && a->rm != 0) { 3764 /* For non-postindexed accesses the Rm field must be 0 */ 3765 return false; 3766 } 3767 if (size == 3 && !a->q && a->selem != 1) { 3768 return false; 3769 } 3770 if (!fp_access_check(s)) { 3771 return true; 3772 } 3773 3774 if (a->rn == 31) { 3775 gen_check_sp_alignment(s); 3776 } 3777 3778 /* For our purposes, bytes are always little-endian. */ 3779 endian = s->be_data; 3780 if (size == 0) { 3781 endian = MO_LE; 3782 } 3783 3784 total = a->rpt * a->selem * (a->q ? 16 : 8); 3785 tcg_rn = cpu_reg_sp(s, a->rn); 3786 3787 /* 3788 * Issue the MTE check vs the logical repeat count, before we 3789 * promote consecutive little-endian elements below. 3790 */ 3791 clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total, 3792 finalize_memop_asimd(s, size)); 3793 3794 /* 3795 * Consecutive little-endian elements from a single register 3796 * can be promoted to a larger little-endian operation. 3797 */ 3798 align = MO_ALIGN; 3799 if (a->selem == 1 && endian == MO_LE) { 3800 align = pow2_align(size); 3801 size = 3; 3802 } 3803 if (!s->align_mem) { 3804 align = 0; 3805 } 3806 mop = endian | size | align; 3807 3808 elements = (a->q ? 
16 : 8) >> size; 3809 tcg_ebytes = tcg_constant_i64(1 << size); 3810 for (r = 0; r < a->rpt; r++) { 3811 int e; 3812 for (e = 0; e < elements; e++) { 3813 int xs; 3814 for (xs = 0; xs < a->selem; xs++) { 3815 int tt = (a->rt + r + xs) % 32; 3816 do_vec_st(s, tt, e, clean_addr, mop); 3817 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3818 } 3819 } 3820 } 3821 3822 if (a->p) { 3823 if (a->rm == 31) { 3824 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3825 } else { 3826 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm)); 3827 } 3828 } 3829 return true; 3830 } 3831 3832 static bool trans_ST_single(DisasContext *s, arg_ldst_single *a) 3833 { 3834 int xs, total, rt; 3835 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3836 MemOp mop; 3837 3838 if (!a->p && a->rm != 0) { 3839 return false; 3840 } 3841 if (!fp_access_check(s)) { 3842 return true; 3843 } 3844 3845 if (a->rn == 31) { 3846 gen_check_sp_alignment(s); 3847 } 3848 3849 total = a->selem << a->scale; 3850 tcg_rn = cpu_reg_sp(s, a->rn); 3851 3852 mop = finalize_memop_asimd(s, a->scale); 3853 clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, 3854 total, mop); 3855 3856 tcg_ebytes = tcg_constant_i64(1 << a->scale); 3857 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) { 3858 do_vec_st(s, rt, a->index, clean_addr, mop); 3859 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3860 } 3861 3862 if (a->p) { 3863 if (a->rm == 31) { 3864 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3865 } else { 3866 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm)); 3867 } 3868 } 3869 return true; 3870 } 3871 3872 static bool trans_LD_single(DisasContext *s, arg_ldst_single *a) 3873 { 3874 int xs, total, rt; 3875 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3876 MemOp mop; 3877 3878 if (!a->p && a->rm != 0) { 3879 return false; 3880 } 3881 if (!fp_access_check(s)) { 3882 return true; 3883 } 3884 3885 if (a->rn == 31) { 3886 gen_check_sp_alignment(s); 3887 } 3888 3889 total = a->selem << a->scale; 3890 tcg_rn = cpu_reg_sp(s, a->rn); 3891 3892 mop = finalize_memop_asimd(s, a->scale); 3893 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, 3894 total, mop); 3895 3896 tcg_ebytes = tcg_constant_i64(1 << a->scale); 3897 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) { 3898 do_vec_ld(s, rt, a->index, clean_addr, mop); 3899 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3900 } 3901 3902 if (a->p) { 3903 if (a->rm == 31) { 3904 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3905 } else { 3906 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm)); 3907 } 3908 } 3909 return true; 3910 } 3911 3912 static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a) 3913 { 3914 int xs, total, rt; 3915 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3916 MemOp mop; 3917 3918 if (!a->p && a->rm != 0) { 3919 return false; 3920 } 3921 if (!fp_access_check(s)) { 3922 return true; 3923 } 3924 3925 if (a->rn == 31) { 3926 gen_check_sp_alignment(s); 3927 } 3928 3929 total = a->selem << a->scale; 3930 tcg_rn = cpu_reg_sp(s, a->rn); 3931 3932 mop = finalize_memop_asimd(s, a->scale); 3933 clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, 3934 total, mop); 3935 3936 tcg_ebytes = tcg_constant_i64(1 << a->scale); 3937 for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) { 3938 /* Load and replicate to all elements */ 3939 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 3940 3941 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop); 3942 tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt), 3943 (a->q + 
1) * 8, vec_full_reg_size(s), tcg_tmp); 3944 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3945 } 3946 3947 if (a->p) { 3948 if (a->rm == 31) { 3949 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3950 } else { 3951 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm)); 3952 } 3953 } 3954 return true; 3955 } 3956 3957 static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a) 3958 { 3959 TCGv_i64 addr, clean_addr, tcg_rt; 3960 int size = 4 << s->dcz_blocksize; 3961 3962 if (!dc_isar_feature(aa64_mte, s)) { 3963 return false; 3964 } 3965 if (s->current_el == 0) { 3966 return false; 3967 } 3968 3969 if (a->rn == 31) { 3970 gen_check_sp_alignment(s); 3971 } 3972 3973 addr = read_cpu_reg_sp(s, a->rn, true); 3974 tcg_gen_addi_i64(addr, addr, a->imm); 3975 tcg_rt = cpu_reg(s, a->rt); 3976 3977 if (s->ata[0]) { 3978 gen_helper_stzgm_tags(tcg_env, addr, tcg_rt); 3979 } 3980 /* 3981 * The non-tags portion of STZGM is mostly like DC_ZVA, 3982 * except the alignment happens before the access. 3983 */ 3984 clean_addr = clean_data_tbi(s, addr); 3985 tcg_gen_andi_i64(clean_addr, clean_addr, -size); 3986 gen_helper_dc_zva(tcg_env, clean_addr); 3987 return true; 3988 } 3989 3990 static bool trans_STGM(DisasContext *s, arg_ldst_tag *a) 3991 { 3992 TCGv_i64 addr, clean_addr, tcg_rt; 3993 3994 if (!dc_isar_feature(aa64_mte, s)) { 3995 return false; 3996 } 3997 if (s->current_el == 0) { 3998 return false; 3999 } 4000 4001 if (a->rn == 31) { 4002 gen_check_sp_alignment(s); 4003 } 4004 4005 addr = read_cpu_reg_sp(s, a->rn, true); 4006 tcg_gen_addi_i64(addr, addr, a->imm); 4007 tcg_rt = cpu_reg(s, a->rt); 4008 4009 if (s->ata[0]) { 4010 gen_helper_stgm(tcg_env, addr, tcg_rt); 4011 } else { 4012 MMUAccessType acc = MMU_DATA_STORE; 4013 int size = 4 << s->gm_blocksize; 4014 4015 clean_addr = clean_data_tbi(s, addr); 4016 tcg_gen_andi_i64(clean_addr, clean_addr, -size); 4017 gen_probe_access(s, clean_addr, acc, size); 4018 } 4019 return true; 4020 } 4021 4022 static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a) 4023 { 4024 TCGv_i64 addr, clean_addr, tcg_rt; 4025 4026 if (!dc_isar_feature(aa64_mte, s)) { 4027 return false; 4028 } 4029 if (s->current_el == 0) { 4030 return false; 4031 } 4032 4033 if (a->rn == 31) { 4034 gen_check_sp_alignment(s); 4035 } 4036 4037 addr = read_cpu_reg_sp(s, a->rn, true); 4038 tcg_gen_addi_i64(addr, addr, a->imm); 4039 tcg_rt = cpu_reg(s, a->rt); 4040 4041 if (s->ata[0]) { 4042 gen_helper_ldgm(tcg_rt, tcg_env, addr); 4043 } else { 4044 MMUAccessType acc = MMU_DATA_LOAD; 4045 int size = 4 << s->gm_blocksize; 4046 4047 clean_addr = clean_data_tbi(s, addr); 4048 tcg_gen_andi_i64(clean_addr, clean_addr, -size); 4049 gen_probe_access(s, clean_addr, acc, size); 4050 /* The result tags are zeros. */ 4051 tcg_gen_movi_i64(tcg_rt, 0); 4052 } 4053 return true; 4054 } 4055 4056 static bool trans_LDG(DisasContext *s, arg_ldst_tag *a) 4057 { 4058 TCGv_i64 addr, clean_addr, tcg_rt; 4059 4060 if (!dc_isar_feature(aa64_mte_insn_reg, s)) { 4061 return false; 4062 } 4063 4064 if (a->rn == 31) { 4065 gen_check_sp_alignment(s); 4066 } 4067 4068 addr = read_cpu_reg_sp(s, a->rn, true); 4069 if (!a->p) { 4070 /* pre-index or signed offset */ 4071 tcg_gen_addi_i64(addr, addr, a->imm); 4072 } 4073 4074 tcg_gen_andi_i64(addr, addr, -TAG_GRANULE); 4075 tcg_rt = cpu_reg(s, a->rt); 4076 if (s->ata[0]) { 4077 gen_helper_ldg(tcg_rt, tcg_env, addr, tcg_rt); 4078 } else { 4079 /* 4080 * Tag access disabled: we must check for aborts on the 4081 * load from [rn+offset], and then insert a 0 tag into rt.
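* (The allocation tag occupies bits [59:56] of the pointer; that is the field gen_address_with_allocation_tag0 clears.)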
4082 */ 4083 clean_addr = clean_data_tbi(s, addr); 4084 gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8); 4085 gen_address_with_allocation_tag0(tcg_rt, tcg_rt); 4086 } 4087 4088 if (a->w) { 4089 /* pre-index or post-index */ 4090 if (a->p) { 4091 /* post-index */ 4092 tcg_gen_addi_i64(addr, addr, a->imm); 4093 } 4094 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr); 4095 } 4096 return true; 4097 } 4098 4099 static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair) 4100 { 4101 TCGv_i64 addr, tcg_rt; 4102 4103 if (a->rn == 31) { 4104 gen_check_sp_alignment(s); 4105 } 4106 4107 addr = read_cpu_reg_sp(s, a->rn, true); 4108 if (!a->p) { 4109 /* pre-index or signed offset */ 4110 tcg_gen_addi_i64(addr, addr, a->imm); 4111 } 4112 tcg_rt = cpu_reg_sp(s, a->rt); 4113 if (!s->ata[0]) { 4114 /* 4115 * For STG and ST2G, we need to check alignment and probe memory. 4116 * TODO: For STZG and STZ2G, we could rely on the stores below, 4117 * at least for system mode; user-only won't enforce alignment. 4118 */ 4119 if (is_pair) { 4120 gen_helper_st2g_stub(tcg_env, addr); 4121 } else { 4122 gen_helper_stg_stub(tcg_env, addr); 4123 } 4124 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { 4125 if (is_pair) { 4126 gen_helper_st2g_parallel(tcg_env, addr, tcg_rt); 4127 } else { 4128 gen_helper_stg_parallel(tcg_env, addr, tcg_rt); 4129 } 4130 } else { 4131 if (is_pair) { 4132 gen_helper_st2g(tcg_env, addr, tcg_rt); 4133 } else { 4134 gen_helper_stg(tcg_env, addr, tcg_rt); 4135 } 4136 } 4137 4138 if (is_zero) { 4139 TCGv_i64 clean_addr = clean_data_tbi(s, addr); 4140 TCGv_i64 zero64 = tcg_constant_i64(0); 4141 TCGv_i128 zero128 = tcg_temp_new_i128(); 4142 int mem_index = get_mem_index(s); 4143 MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN); 4144 4145 tcg_gen_concat_i64_i128(zero128, zero64, zero64); 4146 4147 /* This is 1 or 2 atomic 16-byte operations. */ 4148 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop); 4149 if (is_pair) { 4150 tcg_gen_addi_i64(clean_addr, clean_addr, 16); 4151 tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop); 4152 } 4153 } 4154 4155 if (a->w) { 4156 /* pre-index or post-index */ 4157 if (a->p) { 4158 /* post-index */ 4159 tcg_gen_addi_i64(addr, addr, a->imm); 4160 } 4161 tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr); 4162 } 4163 return true; 4164 } 4165 4166 TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false) 4167 TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false) 4168 TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true) 4169 TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true) 4170 4171 typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32); 4172 4173 static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, 4174 bool is_setg, SetFn fn) 4175 { 4176 int memidx; 4177 uint32_t syndrome, desc = 0; 4178 4179 if (is_setg && !dc_isar_feature(aa64_mte, s)) { 4180 return false; 4181 } 4182 4183 /* 4184 * UNPREDICTABLE cases: we choose to UNDEF, which allows 4185 * us to pull this check before the CheckMOPSEnabled() test 4186 * (which we do in the helper function) 4187 */ 4188 if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || 4189 a->rd == 31 || a->rn == 31) { 4190 return false; 4191 } 4192 4193 memidx = get_a64_user_mem_index(s, a->unpriv); 4194 4195 /* 4196 * We pass option_a == true, matching our implementation; 4197 * we pass wrong_option == false: helper function may set that bit. 
4198 */ 4199 syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv, 4200 is_epilogue, false, true, a->rd, a->rs, a->rn); 4201 4202 if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) { 4203 /* We may need to do MTE tag checking, so assemble the descriptor */ 4204 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); 4205 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); 4206 desc = FIELD_DP32(desc, MTEDESC, WRITE, true); 4207 /* SIZEM1 and ALIGN we leave 0 (byte write) */ 4208 } 4209 /* The helper function always needs the memidx even with MTE disabled */ 4210 desc = FIELD_DP32(desc, MTEDESC, MIDX, memidx); 4211 4212 /* 4213 * The helper needs the register numbers, but since they're in 4214 * the syndrome anyway, we let it extract them from there rather 4215 * than passing in an extra three integer arguments. 4216 */ 4217 fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(desc)); 4218 return true; 4219 } 4220 4221 TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp) 4222 TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm) 4223 TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete) 4224 TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp) 4225 TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm) 4226 TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge) 4227 4228 typedef void CpyFn(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32); 4229 4230 static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn) 4231 { 4232 int rmemidx, wmemidx; 4233 uint32_t syndrome, rdesc = 0, wdesc = 0; 4234 bool wunpriv = extract32(a->options, 0, 1); 4235 bool runpriv = extract32(a->options, 1, 1); 4236 4237 /* 4238 * UNPREDICTABLE cases: we choose to UNDEF, which allows 4239 * us to pull this check before the CheckMOPSEnabled() test 4240 * (which we do in the helper function) 4241 */ 4242 if (a->rs == a->rn || a->rs == a->rd || a->rn == a->rd || 4243 a->rd == 31 || a->rs == 31 || a->rn == 31) { 4244 return false; 4245 } 4246 4247 rmemidx = get_a64_user_mem_index(s, runpriv); 4248 wmemidx = get_a64_user_mem_index(s, wunpriv); 4249 4250 /* 4251 * We pass option_a == true, matching our implementation; 4252 * we pass wrong_option == false: helper function may set that bit. 4253 */ 4254 syndrome = syn_mop(false, false, a->options, is_epilogue, 4255 false, true, a->rd, a->rs, a->rn); 4256 4257 /* If we need to do MTE tag checking, assemble the descriptors */ 4258 if (s->mte_active[runpriv]) { 4259 rdesc = FIELD_DP32(rdesc, MTEDESC, TBI, s->tbid); 4260 rdesc = FIELD_DP32(rdesc, MTEDESC, TCMA, s->tcma); 4261 } 4262 if (s->mte_active[wunpriv]) { 4263 wdesc = FIELD_DP32(wdesc, MTEDESC, TBI, s->tbid); 4264 wdesc = FIELD_DP32(wdesc, MTEDESC, TCMA, s->tcma); 4265 wdesc = FIELD_DP32(wdesc, MTEDESC, WRITE, true); 4266 } 4267 /* The helper function needs these parts of the descriptor regardless */ 4268 rdesc = FIELD_DP32(rdesc, MTEDESC, MIDX, rmemidx); 4269 wdesc = FIELD_DP32(wdesc, MTEDESC, MIDX, wmemidx); 4270 4271 /* 4272 * The helper needs the register numbers, but since they're in 4273 * the syndrome anyway, we let it extract them from there rather 4274 * than passing in an extra three integer arguments. 
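* (Unlike SET, CPY carries two MTE descriptors: the source and destination accesses may differ both in privilege, via the two options bits, and in whether tag checking is active.)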
4275 */ 4276 fn(tcg_env, tcg_constant_i32(syndrome), tcg_constant_i32(wdesc), 4277 tcg_constant_i32(rdesc)); 4278 return true; 4279 } 4280 4281 TRANS_FEAT(CPYP, aa64_mops, do_CPY, a, false, gen_helper_cpyp) 4282 TRANS_FEAT(CPYM, aa64_mops, do_CPY, a, false, gen_helper_cpym) 4283 TRANS_FEAT(CPYE, aa64_mops, do_CPY, a, true, gen_helper_cpye) 4284 TRANS_FEAT(CPYFP, aa64_mops, do_CPY, a, false, gen_helper_cpyfp) 4285 TRANS_FEAT(CPYFM, aa64_mops, do_CPY, a, false, gen_helper_cpyfm) 4286 TRANS_FEAT(CPYFE, aa64_mops, do_CPY, a, true, gen_helper_cpyfe) 4287 4288 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64); 4289 4290 static bool gen_rri(DisasContext *s, arg_rri_sf *a, 4291 bool rd_sp, bool rn_sp, ArithTwoOp *fn) 4292 { 4293 TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn); 4294 TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd); 4295 TCGv_i64 tcg_imm = tcg_constant_i64(a->imm); 4296 4297 fn(tcg_rd, tcg_rn, tcg_imm); 4298 if (!a->sf) { 4299 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4300 } 4301 return true; 4302 } 4303 4304 /* 4305 * PC-rel. addressing 4306 */ 4307 4308 static bool trans_ADR(DisasContext *s, arg_ri *a) 4309 { 4310 gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm); 4311 return true; 4312 } 4313 4314 static bool trans_ADRP(DisasContext *s, arg_ri *a) 4315 { 4316 int64_t offset = (int64_t)a->imm << 12; 4317 4318 /* The page offset is ok for CF_PCREL. */ 4319 offset -= s->pc_curr & 0xfff; 4320 gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset); 4321 return true; 4322 } 4323 4324 /* 4325 * Add/subtract (immediate) 4326 */ 4327 TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64) 4328 TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64) 4329 TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC) 4330 TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC) 4331 4332 /* 4333 * Add/subtract (immediate, with tags) 4334 */ 4335 4336 static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a, 4337 bool sub_op) 4338 { 4339 TCGv_i64 tcg_rn, tcg_rd; 4340 int imm; 4341 4342 imm = a->uimm6 << LOG2_TAG_GRANULE; 4343 if (sub_op) { 4344 imm = -imm; 4345 } 4346 4347 tcg_rn = cpu_reg_sp(s, a->rn); 4348 tcg_rd = cpu_reg_sp(s, a->rd); 4349 4350 if (s->ata[0]) { 4351 gen_helper_addsubg(tcg_rd, tcg_env, tcg_rn, 4352 tcg_constant_i32(imm), 4353 tcg_constant_i32(a->uimm4)); 4354 } else { 4355 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm); 4356 gen_address_with_allocation_tag0(tcg_rd, tcg_rd); 4357 } 4358 return true; 4359 } 4360 4361 TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false) 4362 TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true) 4363 4364 /* The input should be a value in the bottom e bits (with higher 4365 * bits zero); returns that value replicated into every element 4366 * of size e in a 64 bit integer. 4367 */ 4368 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e) 4369 { 4370 assert(e != 0); 4371 while (e < 64) { 4372 mask |= mask << e; 4373 e *= 2; 4374 } 4375 return mask; 4376 } 4377 4378 /* 4379 * Logical (immediate) 4380 */ 4381 4382 /* 4383 * Simplified variant of pseudocode DecodeBitMasks() for the case where we 4384 * only require the wmask. Returns false if the imms/immr/immn are a reserved 4385 * value (ie should cause a guest UNDEF exception), and true if they are 4386 * valid, in which case the decoded bit pattern is written to result. 
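* For example, immn = 1, imms = 000111, immr = 000100 is valid and decodes to 0xF00000000000000F (a run of 8 ones within a 64 bit element, rotated right by 4 bits), while immn = 0, imms = 111100, immr = 0 decodes to 0x5555555555555555 (2 bit elements, each with a single set bit).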
*/ 4388 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, 4389 unsigned int imms, unsigned int immr) 4390 { 4391 uint64_t mask; 4392 unsigned e, levels, s, r; 4393 int len; 4394 4395 assert(immn < 2 && imms < 64 && immr < 64); 4396 4397 /* The bit patterns we create here are 64 bit patterns which 4398 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or 4399 * 64 bits each. Each element contains the same value: a run 4400 * of between 1 and e-1 non-zero bits, rotated within the 4401 * element by between 0 and e-1 bits. 4402 * 4403 * The element size and run length are encoded into immn (1 bit) 4404 * and imms (6 bits) as follows: 4405 * 64 bit elements: immn = 1, imms = <length of run - 1> 4406 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1> 4407 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1> 4408 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1> 4409 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1> 4410 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1> 4411 * Notice that immn = 0, imms = 11111x is the only combination 4412 * not covered by one of the above options; this is reserved. 4413 * Further, <length of run - 1> all-ones is a reserved pattern. 4414 * 4415 * In all cases the rotation is by immr % e (and immr is 6 bits). 4416 */ 4417 4418 /* First determine the element size */ 4419 len = 31 - clz32((immn << 6) | (~imms & 0x3f)); 4420 if (len < 1) { 4421 /* This is the immn == 0, imms == 0b11111x case */ 4422 return false; 4423 } 4424 e = 1 << len; 4425 4426 levels = e - 1; 4427 s = imms & levels; 4428 r = immr & levels; 4429 4430 if (s == levels) { 4431 /* <length of run - 1> mustn't be all-ones. */ 4432 return false; 4433 } 4434 4435 /* Create the value of one element: s+1 set bits rotated 4436 * by r within the element (which is e bits wide)... 4437 */ 4438 mask = MAKE_64BIT_MASK(0, s + 1); 4439 if (r) { 4440 mask = (mask >> r) | (mask << (e - r)); 4441 mask &= MAKE_64BIT_MASK(0, e); 4442 } 4443 /* ...then replicate the element over the whole 64 bit value */ 4444 mask = bitfield_replicate(mask, e); 4445 *result = mask; 4446 return true; 4447 } 4448 4449 static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc, 4450 void (*fn)(TCGv_i64, TCGv_i64, int64_t)) 4451 { 4452 TCGv_i64 tcg_rd, tcg_rn; 4453 uint64_t imm; 4454 4455 /* Some immediate field values are reserved. */ 4456 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1), 4457 extract32(a->dbm, 0, 6), 4458 extract32(a->dbm, 6, 6))) { 4459 return false; 4460 } 4461 if (!a->sf) { 4462 imm &= 0xffffffffull; 4463 } 4464
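/* ANDS must not target SP: with set_cc, rd == 31 selects XZR, while for the non-flag-setting forms it selects SP. */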
4465 tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd); 4466 tcg_rn = cpu_reg(s, a->rn); 4467 4468 fn(tcg_rd, tcg_rn, imm); 4469 if (set_cc) { 4470 gen_logic_CC(a->sf, tcg_rd); 4471 } 4472 if (!a->sf) { 4473 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4474 } 4475 return true; 4476 } 4477 4478 TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64) 4479 TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64) 4480 TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64) 4481 TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64) 4482 4483 /* 4484 * Move wide (immediate) 4485 */ 4486 4487 static bool trans_MOVZ(DisasContext *s, arg_movw *a) 4488 { 4489 int pos = a->hw << 4; 4490 tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos); 4491 return true; 4492 } 4493 4494 static bool trans_MOVN(DisasContext *s, arg_movw *a) 4495 { 4496 int pos = a->hw << 4; 4497 uint64_t imm = a->imm; 4498 4499 imm = ~(imm << pos); 4500 if (!a->sf) { 4501 imm = (uint32_t)imm; 4502 } 4503 tcg_gen_movi_i64(cpu_reg(s, a->rd), imm); 4504 return true; 4505 } 4506 4507 static bool trans_MOVK(DisasContext *s, arg_movw *a) 4508 { 4509 int pos = a->hw << 4; 4510 TCGv_i64 tcg_rd, tcg_im; 4511 4512 tcg_rd = cpu_reg(s, a->rd); 4513 tcg_im = tcg_constant_i64(a->imm); 4514 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16); 4515 if (!a->sf) { 4516 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4517 } 4518 return true; 4519 } 4520 4521 /* 4522 * Bitfield 4523 */ 4524 4525 static bool trans_SBFM(DisasContext *s, arg_SBFM *a) 4526 { 4527 TCGv_i64 tcg_rd = cpu_reg(s, a->rd); 4528 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); 4529 unsigned int bitsize = a->sf ? 64 : 32; 4530 unsigned int ri = a->immr; 4531 unsigned int si = a->imms; 4532 unsigned int pos, len; 4533 4534 if (si >= ri) { 4535 /* Wd<s-r:0> = Wn<s:r> */ 4536 len = (si - ri) + 1; 4537 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len); 4538 if (!a->sf) { 4539 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4540 } 4541 } else { 4542 /* Wd<32+s-r,32-r> = Wn<s:0> */ 4543 len = si + 1; 4544 pos = (bitsize - ri) & (bitsize - 1); 4545 4546 if (len < ri) { 4547 /* 4548 * Sign extend the destination field from len to fill the 4549 * balance of the word. Let the deposit below insert all 4550 * of those sign bits. 4551 */ 4552 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len); 4553 len = ri; 4554 } 4555 4556 /* 4557 * We start with zero, and we haven't modified any bits outside 4558 * bitsize, therefore no final zero-extension is needed for !sf. 4559 */ 4560 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); 4561 } 4562 return true; 4563 } 4564 4565 static bool trans_UBFM(DisasContext *s, arg_UBFM *a) 4566 { 4567 TCGv_i64 tcg_rd = cpu_reg(s, a->rd); 4568 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); 4569 unsigned int bitsize = a->sf ? 64 : 32; 4570 unsigned int ri = a->immr; 4571 unsigned int si = a->imms; 4572 unsigned int pos, len; 4573 4577 if (si >= ri) { 4578 /* Wd<s-r:0> = Wn<s:r> */ 4579 len = (si - ri) + 1; 4580 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); 4581 } else { 4582 /* Wd<32+s-r,32-r> = Wn<s:0> */ 4583 len = si + 1; 4584 pos = (bitsize - ri) & (bitsize - 1); 4585 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); 4586 } 4587 return true; 4588 } 4589 4590 static bool trans_BFM(DisasContext *s, arg_BFM *a) 4591 { 4592 TCGv_i64 tcg_rd = cpu_reg(s, a->rd); 4593 TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1); 4594 unsigned int bitsize = a->sf ?
64 : 32; 4595 unsigned int ri = a->immr; 4596 unsigned int si = a->imms; 4597 unsigned int pos, len; 4598 4602 if (si >= ri) { 4603 /* Wd<s-r:0> = Wn<s:r> */ 4604 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri); 4605 len = (si - ri) + 1; 4606 pos = 0; 4607 } else { 4608 /* Wd<32+s-r,32-r> = Wn<s:0> */ 4609 len = si + 1; 4610 pos = (bitsize - ri) & (bitsize - 1); 4611 } 4612 4613 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); 4614 if (!a->sf) { 4615 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4616 } 4617 return true; 4618 } 4619 4620 static bool trans_EXTR(DisasContext *s, arg_extract *a) 4621 { 4622 TCGv_i64 tcg_rd, tcg_rm, tcg_rn; 4623 4624 tcg_rd = cpu_reg(s, a->rd); 4625 4626 if (unlikely(a->imm == 0)) { 4627 /* 4628 * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, 4629 * so an extract from bit 0 is a special case. 4630 */ 4631 if (a->sf) { 4632 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm)); 4633 } else { 4634 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm)); 4635 } 4636 } else { 4637 tcg_rm = cpu_reg(s, a->rm); 4638 tcg_rn = cpu_reg(s, a->rn); 4639 4640 if (a->sf) { 4641 /* Specialization to ROR happens in EXTRACT2. */ 4642 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm); 4643 } else { 4644 TCGv_i32 t0 = tcg_temp_new_i32(); 4645 4646 tcg_gen_extrl_i64_i32(t0, tcg_rm); 4647 if (a->rm == a->rn) { 4648 tcg_gen_rotri_i32(t0, t0, a->imm); 4649 } else { 4650 TCGv_i32 t1 = tcg_temp_new_i32(); 4651 tcg_gen_extrl_i64_i32(t1, tcg_rn); 4652 tcg_gen_extract2_i32(t0, t0, t1, a->imm); 4653 } 4654 tcg_gen_extu_i32_i64(tcg_rd, t0); 4655 } 4656 } 4657 return true; 4658 } 4659 4660 /* 4661 * Cryptographic AES, SHA, SHA512 4662 */ 4663 4664 TRANS_FEAT(AESE, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aese) 4665 TRANS_FEAT(AESD, aa64_aes, do_gvec_op3_ool, a, 0, gen_helper_crypto_aesd) 4666 TRANS_FEAT(AESMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesmc) 4667 TRANS_FEAT(AESIMC, aa64_aes, do_gvec_op2_ool, a, 0, gen_helper_crypto_aesimc) 4668 4669 TRANS_FEAT(SHA1C, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1c) 4670 TRANS_FEAT(SHA1P, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1p) 4671 TRANS_FEAT(SHA1M, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1m) 4672 TRANS_FEAT(SHA1SU0, aa64_sha1, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha1su0) 4673 4674 TRANS_FEAT(SHA256H, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h) 4675 TRANS_FEAT(SHA256H2, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256h2) 4676 TRANS_FEAT(SHA256SU1, aa64_sha256, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha256su1) 4677 4678 TRANS_FEAT(SHA1H, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1h) 4679 TRANS_FEAT(SHA1SU1, aa64_sha1, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha1su1) 4680 TRANS_FEAT(SHA256SU0, aa64_sha256, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha256su0) 4681 4682 TRANS_FEAT(SHA512H, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h) 4683 TRANS_FEAT(SHA512H2, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512h2) 4684 TRANS_FEAT(SHA512SU1, aa64_sha512, do_gvec_op3_ool, a, 0, gen_helper_crypto_sha512su1) 4685 TRANS_FEAT(RAX1, aa64_sha3, do_gvec_fn3, a, gen_gvec_rax1) 4686 TRANS_FEAT(SM3PARTW1, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw1) 4687 TRANS_FEAT(SM3PARTW2, aa64_sm3, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm3partw2) 4688 TRANS_FEAT(SM4EKEY, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4ekey) 4689 4690
TRANS_FEAT(SHA512SU0, aa64_sha512, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha512su0) 4691 TRANS_FEAT(SM4E, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4e) 4692 4693 TRANS_FEAT(EOR3, aa64_sha3, do_gvec_fn4, a, gen_gvec_eor3) 4694 TRANS_FEAT(BCAX, aa64_sha3, do_gvec_fn4, a, gen_gvec_bcax) 4695 4696 static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a) 4697 { 4698 if (!dc_isar_feature(aa64_sm3, s)) { 4699 return false; 4700 } 4701 if (fp_access_check(s)) { 4702 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 4703 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 4704 TCGv_i32 tcg_op3 = tcg_temp_new_i32(); 4705 TCGv_i32 tcg_res = tcg_temp_new_i32(); 4706 unsigned vsz, dofs; 4707 4708 read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32); 4709 read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32); 4710 read_vec_element_i32(s, tcg_op3, a->ra, 3, MO_32); 4711 4712 tcg_gen_rotri_i32(tcg_res, tcg_op1, 20); 4713 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2); 4714 tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3); 4715 tcg_gen_rotri_i32(tcg_res, tcg_res, 25); 4716 4717 /* Clear the whole register first, then store bits [127:96]. */ 4718 vsz = vec_full_reg_size(s); 4719 dofs = vec_full_reg_offset(s, a->rd); 4720 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0); 4721 write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32); 4722 } 4723 return true; 4724 } 4725 4726 static bool do_crypto3i(DisasContext *s, arg_crypto3i *a, gen_helper_gvec_3 *fn) 4727 { 4728 if (fp_access_check(s)) { 4729 gen_gvec_op3_ool(s, true, a->rd, a->rn, a->rm, a->imm, fn); 4730 } 4731 return true; 4732 } 4733 TRANS_FEAT(SM3TT1A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1a) 4734 TRANS_FEAT(SM3TT1B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt1b) 4735 TRANS_FEAT(SM3TT2A, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2a) 4736 TRANS_FEAT(SM3TT2B, aa64_sm3, do_crypto3i, a, gen_helper_crypto_sm3tt2b) 4737 4738 static bool trans_XAR(DisasContext *s, arg_XAR *a) 4739 { 4740 if (!dc_isar_feature(aa64_sha3, s)) { 4741 return false; 4742 } 4743 if (fp_access_check(s)) { 4744 gen_gvec_xar(MO_64, vec_full_reg_offset(s, a->rd), 4745 vec_full_reg_offset(s, a->rn), 4746 vec_full_reg_offset(s, a->rm), a->imm, 16, 4747 vec_full_reg_size(s)); 4748 } 4749 return true; 4750 } 4751 4752 /* 4753 * Advanced SIMD copy 4754 */ 4755 4756 static bool decode_esz_idx(int imm, MemOp *pesz, unsigned *pidx) 4757 { 4758 unsigned esz = ctz32(imm); 4759 if (esz <= MO_64) { 4760 *pesz = esz; 4761 *pidx = imm >> (esz + 1); 4762 return true; 4763 } 4764 return false; 4765 } 4766 4767 static bool trans_DUP_element_s(DisasContext *s, arg_DUP_element_s *a) 4768 { 4769 MemOp esz; 4770 unsigned idx; 4771 4772 if (!decode_esz_idx(a->imm, &esz, &idx)) { 4773 return false; 4774 } 4775 if (fp_access_check(s)) { 4776 /* 4777 * This instruction just extracts the specified element and 4778 * zero-extends it into the bottom of the destination register. 4779 */ 4780 TCGv_i64 tmp = tcg_temp_new_i64(); 4781 read_vec_element(s, tmp, a->rn, idx, esz); 4782 write_fp_dreg(s, a->rd, tmp); 4783 } 4784 return true; 4785 } 4786 4787 static bool trans_DUP_element_v(DisasContext *s, arg_DUP_element_v *a) 4788 { 4789 MemOp esz; 4790 unsigned idx; 4791 4792 if (!decode_esz_idx(a->imm, &esz, &idx)) { 4793 return false; 4794 } 4795 if (esz == MO_64 && !a->q) { 4796 return false; 4797 } 4798 if (fp_access_check(s)) { 4799 tcg_gen_gvec_dup_mem(esz, vec_full_reg_offset(s, a->rd), 4800 vec_reg_offset(s, a->rn, idx, esz), 4801 a->q ? 
16 : 8, vec_full_reg_size(s)); 4802 } 4803 return true; 4804 } 4805 4806 static bool trans_DUP_general(DisasContext *s, arg_DUP_general *a) 4807 { 4808 MemOp esz; 4809 unsigned idx; 4810 4811 if (!decode_esz_idx(a->imm, &esz, &idx)) { 4812 return false; 4813 } 4814 if (esz == MO_64 && !a->q) { 4815 return false; 4816 } 4817 if (fp_access_check(s)) { 4818 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), 4819 a->q ? 16 : 8, vec_full_reg_size(s), 4820 cpu_reg(s, a->rn)); 4821 } 4822 return true; 4823 } 4824 4825 static bool do_smov_umov(DisasContext *s, arg_SMOV *a, MemOp is_signed) 4826 { 4827 MemOp esz; 4828 unsigned idx; 4829 4830 if (!decode_esz_idx(a->imm, &esz, &idx)) { 4831 return false; 4832 } 4833 if (is_signed) { 4834 if (esz == MO_64 || (esz == MO_32 && !a->q)) { 4835 return false; 4836 } 4837 } else { 4838 if (esz == MO_64 ? !a->q : a->q) { 4839 return false; 4840 } 4841 } 4842 if (fp_access_check(s)) { 4843 TCGv_i64 tcg_rd = cpu_reg(s, a->rd); 4844 read_vec_element(s, tcg_rd, a->rn, idx, esz | is_signed); 4845 if (is_signed && !a->q) { 4846 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4847 } 4848 } 4849 return true; 4850 } 4851 4852 TRANS(SMOV, do_smov_umov, a, MO_SIGN) 4853 TRANS(UMOV, do_smov_umov, a, 0) 4854 4855 static bool trans_INS_general(DisasContext *s, arg_INS_general *a) 4856 { 4857 MemOp esz; 4858 unsigned idx; 4859 4860 if (!decode_esz_idx(a->imm, &esz, &idx)) { 4861 return false; 4862 } 4863 if (fp_access_check(s)) { 4864 write_vec_element(s, cpu_reg(s, a->rn), a->rd, idx, esz); 4865 clear_vec_high(s, true, a->rd); 4866 } 4867 return true; 4868 } 4869 4870 static bool trans_INS_element(DisasContext *s, arg_INS_element *a) 4871 { 4872 MemOp esz; 4873 unsigned didx, sidx; 4874 4875 if (!decode_esz_idx(a->di, &esz, &didx)) { 4876 return false; 4877 } 4878 sidx = a->si >> esz; 4879 if (fp_access_check(s)) { 4880 TCGv_i64 tmp = tcg_temp_new_i64(); 4881 4882 read_vec_element(s, tmp, a->rn, sidx, esz); 4883 write_vec_element(s, tmp, a->rd, didx, esz); 4884 4885 /* INS is considered a 128-bit write for SVE. 
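* (Bits above bit 127 of the destination are therefore zeroed, even though only a single element is written.)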
*/ 4886 clear_vec_high(s, true, a->rd); 4887 } 4888 return true; 4889 } 4890 4891 /* 4892 * Advanced SIMD three same 4893 */ 4894 4895 typedef struct FPScalar { 4896 void (*gen_h)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); 4897 void (*gen_s)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); 4898 void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr); 4899 } FPScalar; 4900 4901 static bool do_fp3_scalar(DisasContext *s, arg_rrr_e *a, const FPScalar *f) 4902 { 4903 switch (a->esz) { 4904 case MO_64: 4905 if (fp_access_check(s)) { 4906 TCGv_i64 t0 = read_fp_dreg(s, a->rn); 4907 TCGv_i64 t1 = read_fp_dreg(s, a->rm); 4908 f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); 4909 write_fp_dreg(s, a->rd, t0); 4910 } 4911 break; 4912 case MO_32: 4913 if (fp_access_check(s)) { 4914 TCGv_i32 t0 = read_fp_sreg(s, a->rn); 4915 TCGv_i32 t1 = read_fp_sreg(s, a->rm); 4916 f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); 4917 write_fp_sreg(s, a->rd, t0); 4918 } 4919 break; 4920 case MO_16: 4921 if (!dc_isar_feature(aa64_fp16, s)) { 4922 return false; 4923 } 4924 if (fp_access_check(s)) { 4925 TCGv_i32 t0 = read_fp_hreg(s, a->rn); 4926 TCGv_i32 t1 = read_fp_hreg(s, a->rm); 4927 f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16)); 4928 write_fp_sreg(s, a->rd, t0); 4929 } 4930 break; 4931 default: 4932 return false; 4933 } 4934 return true; 4935 } 4936 4937 static const FPScalar f_scalar_fadd = { 4938 gen_helper_vfp_addh, 4939 gen_helper_vfp_adds, 4940 gen_helper_vfp_addd, 4941 }; 4942 TRANS(FADD_s, do_fp3_scalar, a, &f_scalar_fadd) 4943 4944 static const FPScalar f_scalar_fsub = { 4945 gen_helper_vfp_subh, 4946 gen_helper_vfp_subs, 4947 gen_helper_vfp_subd, 4948 }; 4949 TRANS(FSUB_s, do_fp3_scalar, a, &f_scalar_fsub) 4950 4951 static const FPScalar f_scalar_fdiv = { 4952 gen_helper_vfp_divh, 4953 gen_helper_vfp_divs, 4954 gen_helper_vfp_divd, 4955 }; 4956 TRANS(FDIV_s, do_fp3_scalar, a, &f_scalar_fdiv) 4957 4958 static const FPScalar f_scalar_fmul = { 4959 gen_helper_vfp_mulh, 4960 gen_helper_vfp_muls, 4961 gen_helper_vfp_muld, 4962 }; 4963 TRANS(FMUL_s, do_fp3_scalar, a, &f_scalar_fmul) 4964 4965 static const FPScalar f_scalar_fmax = { 4966 gen_helper_advsimd_maxh, 4967 gen_helper_vfp_maxs, 4968 gen_helper_vfp_maxd, 4969 }; 4970 TRANS(FMAX_s, do_fp3_scalar, a, &f_scalar_fmax) 4971 4972 static const FPScalar f_scalar_fmin = { 4973 gen_helper_advsimd_minh, 4974 gen_helper_vfp_mins, 4975 gen_helper_vfp_mind, 4976 }; 4977 TRANS(FMIN_s, do_fp3_scalar, a, &f_scalar_fmin) 4978 4979 static const FPScalar f_scalar_fmaxnm = { 4980 gen_helper_advsimd_maxnumh, 4981 gen_helper_vfp_maxnums, 4982 gen_helper_vfp_maxnumd, 4983 }; 4984 TRANS(FMAXNM_s, do_fp3_scalar, a, &f_scalar_fmaxnm) 4985 4986 static const FPScalar f_scalar_fminnm = { 4987 gen_helper_advsimd_minnumh, 4988 gen_helper_vfp_minnums, 4989 gen_helper_vfp_minnumd, 4990 }; 4991 TRANS(FMINNM_s, do_fp3_scalar, a, &f_scalar_fminnm) 4992 4993 static const FPScalar f_scalar_fmulx = { 4994 gen_helper_advsimd_mulxh, 4995 gen_helper_vfp_mulxs, 4996 gen_helper_vfp_mulxd, 4997 }; 4998 TRANS(FMULX_s, do_fp3_scalar, a, &f_scalar_fmulx) 4999 5000 static void gen_fnmul_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) 5001 { 5002 gen_helper_vfp_mulh(d, n, m, s); 5003 gen_vfp_negh(d, d); 5004 } 5005 5006 static void gen_fnmul_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) 5007 { 5008 gen_helper_vfp_muls(d, n, m, s); 5009 gen_vfp_negs(d, d); 5010 } 5011 5012 static void gen_fnmul_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) 5013 { 5014 gen_helper_vfp_muld(d, n, m, s); 5015 gen_vfp_negd(d, 
d); 5016 } 5017 5018 static const FPScalar f_scalar_fnmul = { 5019 gen_fnmul_h, 5020 gen_fnmul_s, 5021 gen_fnmul_d, 5022 }; 5023 TRANS(FNMUL_s, do_fp3_scalar, a, &f_scalar_fnmul) 5024 5025 static const FPScalar f_scalar_fcmeq = { 5026 gen_helper_advsimd_ceq_f16, 5027 gen_helper_neon_ceq_f32, 5028 gen_helper_neon_ceq_f64, 5029 }; 5030 TRANS(FCMEQ_s, do_fp3_scalar, a, &f_scalar_fcmeq) 5031 5032 static const FPScalar f_scalar_fcmge = { 5033 gen_helper_advsimd_cge_f16, 5034 gen_helper_neon_cge_f32, 5035 gen_helper_neon_cge_f64, 5036 }; 5037 TRANS(FCMGE_s, do_fp3_scalar, a, &f_scalar_fcmge) 5038 5039 static const FPScalar f_scalar_fcmgt = { 5040 gen_helper_advsimd_cgt_f16, 5041 gen_helper_neon_cgt_f32, 5042 gen_helper_neon_cgt_f64, 5043 }; 5044 TRANS(FCMGT_s, do_fp3_scalar, a, &f_scalar_fcmgt) 5045 5046 static const FPScalar f_scalar_facge = { 5047 gen_helper_advsimd_acge_f16, 5048 gen_helper_neon_acge_f32, 5049 gen_helper_neon_acge_f64, 5050 }; 5051 TRANS(FACGE_s, do_fp3_scalar, a, &f_scalar_facge) 5052 5053 static const FPScalar f_scalar_facgt = { 5054 gen_helper_advsimd_acgt_f16, 5055 gen_helper_neon_acgt_f32, 5056 gen_helper_neon_acgt_f64, 5057 }; 5058 TRANS(FACGT_s, do_fp3_scalar, a, &f_scalar_facgt) 5059 5060 static void gen_fabd_h(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) 5061 { 5062 gen_helper_vfp_subh(d, n, m, s); 5063 gen_vfp_absh(d, d); 5064 } 5065 5066 static void gen_fabd_s(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_ptr s) 5067 { 5068 gen_helper_vfp_subs(d, n, m, s); 5069 gen_vfp_abss(d, d); 5070 } 5071 5072 static void gen_fabd_d(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_ptr s) 5073 { 5074 gen_helper_vfp_subd(d, n, m, s); 5075 gen_vfp_absd(d, d); 5076 } 5077 5078 static const FPScalar f_scalar_fabd = { 5079 gen_fabd_h, 5080 gen_fabd_s, 5081 gen_fabd_d, 5082 }; 5083 TRANS(FABD_s, do_fp3_scalar, a, &f_scalar_fabd) 5084 5085 static const FPScalar f_scalar_frecps = { 5086 gen_helper_recpsf_f16, 5087 gen_helper_recpsf_f32, 5088 gen_helper_recpsf_f64, 5089 }; 5090 TRANS(FRECPS_s, do_fp3_scalar, a, &f_scalar_frecps) 5091 5092 static const FPScalar f_scalar_frsqrts = { 5093 gen_helper_rsqrtsf_f16, 5094 gen_helper_rsqrtsf_f32, 5095 gen_helper_rsqrtsf_f64, 5096 }; 5097 TRANS(FRSQRTS_s, do_fp3_scalar, a, &f_scalar_frsqrts) 5098 5099 static bool do_satacc_s(DisasContext *s, arg_rrr_e *a, 5100 MemOp sgn_n, MemOp sgn_m, 5101 void (*gen_bhs)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, MemOp), 5102 void (*gen_d)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) 5103 { 5104 TCGv_i64 t0, t1, t2, qc; 5105 MemOp esz = a->esz; 5106 5107 if (!fp_access_check(s)) { 5108 return true; 5109 } 5110 5111 t0 = tcg_temp_new_i64(); 5112 t1 = tcg_temp_new_i64(); 5113 t2 = tcg_temp_new_i64(); 5114 qc = tcg_temp_new_i64(); 5115 read_vec_element(s, t1, a->rn, 0, esz | sgn_n); 5116 read_vec_element(s, t2, a->rm, 0, esz | sgn_m); 5117 tcg_gen_ld_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc)); 5118 5119 if (esz == MO_64) { 5120 gen_d(t0, qc, t1, t2); 5121 } else { 5122 gen_bhs(t0, qc, t1, t2, esz); 5123 tcg_gen_ext_i64(t0, t0, esz); 5124 } 5125 5126 write_fp_dreg(s, a->rd, t0); 5127 tcg_gen_st_i64(qc, tcg_env, offsetof(CPUARMState, vfp.qc)); 5128 return true; 5129 } 5130 5131 TRANS(SQADD_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqadd_bhs, gen_sqadd_d) 5132 TRANS(SQSUB_s, do_satacc_s, a, MO_SIGN, MO_SIGN, gen_sqsub_bhs, gen_sqsub_d) 5133 TRANS(UQADD_s, do_satacc_s, a, 0, 0, gen_uqadd_bhs, gen_uqadd_d) 5134 TRANS(UQSUB_s, do_satacc_s, a, 0, 0, gen_uqsub_bhs, gen_uqsub_d) 5135 TRANS(SUQADD_s, do_satacc_s, a, MO_SIGN, 0, 
gen_suqadd_bhs, gen_suqadd_d) 5136 TRANS(USQADD_s, do_satacc_s, a, 0, MO_SIGN, gen_usqadd_bhs, gen_usqadd_d) 5137 5138 static bool do_int3_scalar_d(DisasContext *s, arg_rrr_e *a, 5139 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64)) 5140 { 5141 if (fp_access_check(s)) { 5142 TCGv_i64 t0 = tcg_temp_new_i64(); 5143 TCGv_i64 t1 = tcg_temp_new_i64(); 5144 5145 read_vec_element(s, t0, a->rn, 0, MO_64); 5146 read_vec_element(s, t1, a->rm, 0, MO_64); 5147 fn(t0, t0, t1); 5148 write_fp_dreg(s, a->rd, t0); 5149 } 5150 return true; 5151 } 5152 5153 TRANS(SSHL_s, do_int3_scalar_d, a, gen_sshl_i64) 5154 TRANS(USHL_s, do_int3_scalar_d, a, gen_ushl_i64) 5155 TRANS(SRSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_s64) 5156 TRANS(URSHL_s, do_int3_scalar_d, a, gen_helper_neon_rshl_u64) 5157 TRANS(ADD_s, do_int3_scalar_d, a, tcg_gen_add_i64) 5158 TRANS(SUB_s, do_int3_scalar_d, a, tcg_gen_sub_i64) 5159 5160 typedef struct ENVScalar2 { 5161 NeonGenTwoOpEnvFn *gen_bhs[3]; 5162 NeonGenTwo64OpEnvFn *gen_d; 5163 } ENVScalar2; 5164 5165 static bool do_env_scalar2(DisasContext *s, arg_rrr_e *a, const ENVScalar2 *f) 5166 { 5167 if (!fp_access_check(s)) { 5168 return true; 5169 } 5170 if (a->esz == MO_64) { 5171 TCGv_i64 t0 = read_fp_dreg(s, a->rn); 5172 TCGv_i64 t1 = read_fp_dreg(s, a->rm); 5173 f->gen_d(t0, tcg_env, t0, t1); 5174 write_fp_dreg(s, a->rd, t0); 5175 } else { 5176 TCGv_i32 t0 = tcg_temp_new_i32(); 5177 TCGv_i32 t1 = tcg_temp_new_i32(); 5178 5179 read_vec_element_i32(s, t0, a->rn, 0, a->esz); 5180 read_vec_element_i32(s, t1, a->rm, 0, a->esz); 5181 f->gen_bhs[a->esz](t0, tcg_env, t0, t1); 5182 write_fp_sreg(s, a->rd, t0); 5183 } 5184 return true; 5185 } 5186 5187 static const ENVScalar2 f_scalar_sqshl = { 5188 { gen_helper_neon_qshl_s8, 5189 gen_helper_neon_qshl_s16, 5190 gen_helper_neon_qshl_s32 }, 5191 gen_helper_neon_qshl_s64, 5192 }; 5193 TRANS(SQSHL_s, do_env_scalar2, a, &f_scalar_sqshl) 5194 5195 static const ENVScalar2 f_scalar_uqshl = { 5196 { gen_helper_neon_qshl_u8, 5197 gen_helper_neon_qshl_u16, 5198 gen_helper_neon_qshl_u32 }, 5199 gen_helper_neon_qshl_u64, 5200 }; 5201 TRANS(UQSHL_s, do_env_scalar2, a, &f_scalar_uqshl) 5202 5203 static const ENVScalar2 f_scalar_sqrshl = { 5204 { gen_helper_neon_qrshl_s8, 5205 gen_helper_neon_qrshl_s16, 5206 gen_helper_neon_qrshl_s32 }, 5207 gen_helper_neon_qrshl_s64, 5208 }; 5209 TRANS(SQRSHL_s, do_env_scalar2, a, &f_scalar_sqrshl) 5210 5211 static const ENVScalar2 f_scalar_uqrshl = { 5212 { gen_helper_neon_qrshl_u8, 5213 gen_helper_neon_qrshl_u16, 5214 gen_helper_neon_qrshl_u32 }, 5215 gen_helper_neon_qrshl_u64, 5216 }; 5217 TRANS(UQRSHL_s, do_env_scalar2, a, &f_scalar_uqrshl) 5218 5219 static bool do_env_scalar2_hs(DisasContext *s, arg_rrr_e *a, 5220 const ENVScalar2 *f) 5221 { 5222 if (a->esz == MO_16 || a->esz == MO_32) { 5223 return do_env_scalar2(s, a, f); 5224 } 5225 return false; 5226 } 5227 5228 static const ENVScalar2 f_scalar_sqdmulh = { 5229 { NULL, gen_helper_neon_qdmulh_s16, gen_helper_neon_qdmulh_s32 } 5230 }; 5231 TRANS(SQDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqdmulh) 5232 5233 static const ENVScalar2 f_scalar_sqrdmulh = { 5234 { NULL, gen_helper_neon_qrdmulh_s16, gen_helper_neon_qrdmulh_s32 } 5235 }; 5236 TRANS(SQRDMULH_s, do_env_scalar2_hs, a, &f_scalar_sqrdmulh) 5237 5238 typedef struct ENVScalar3 { 5239 NeonGenThreeOpEnvFn *gen_hs[2]; 5240 } ENVScalar3; 5241 5242 static bool do_env_scalar3_hs(DisasContext *s, arg_rrr_e *a, 5243 const ENVScalar3 *f) 5244 { 5245 TCGv_i32 t0, t1, t2; 5246 5247 if (a->esz != MO_16 && a->esz != MO_32) { 5248 
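/* These operations only exist for 16- and 32-bit elements. */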
return false; 5249 } 5250 if (!fp_access_check(s)) { 5251 return true; 5252 } 5253 5254 t0 = tcg_temp_new_i32(); 5255 t1 = tcg_temp_new_i32(); 5256 t2 = tcg_temp_new_i32(); 5257 read_vec_element_i32(s, t0, a->rn, 0, a->esz); 5258 read_vec_element_i32(s, t1, a->rm, 0, a->esz); 5259 read_vec_element_i32(s, t2, a->rd, 0, a->esz); 5260 f->gen_hs[a->esz - 1](t0, tcg_env, t0, t1, t2); 5261 write_fp_sreg(s, a->rd, t0); 5262 return true; 5263 } 5264 5265 static const ENVScalar3 f_scalar_sqrdmlah = { 5266 { gen_helper_neon_qrdmlah_s16, gen_helper_neon_qrdmlah_s32 } 5267 }; 5268 TRANS_FEAT(SQRDMLAH_s, aa64_rdm, do_env_scalar3_hs, a, &f_scalar_sqrdmlah) 5269 5270 static const ENVScalar3 f_scalar_sqrdmlsh = { 5271 { gen_helper_neon_qrdmlsh_s16, gen_helper_neon_qrdmlsh_s32 } 5272 }; 5273 TRANS_FEAT(SQRDMLSH_s, aa64_rdm, do_env_scalar3_hs, a, &f_scalar_sqrdmlsh) 5274 5275 static bool do_cmop_d(DisasContext *s, arg_rrr_e *a, TCGCond cond) 5276 { 5277 if (fp_access_check(s)) { 5278 TCGv_i64 t0 = read_fp_dreg(s, a->rn); 5279 TCGv_i64 t1 = read_fp_dreg(s, a->rm); 5280 tcg_gen_negsetcond_i64(cond, t0, t0, t1); 5281 write_fp_dreg(s, a->rd, t0); 5282 } 5283 return true; 5284 } 5285 5286 TRANS(CMGT_s, do_cmop_d, a, TCG_COND_GT) 5287 TRANS(CMHI_s, do_cmop_d, a, TCG_COND_GTU) 5288 TRANS(CMGE_s, do_cmop_d, a, TCG_COND_GE) 5289 TRANS(CMHS_s, do_cmop_d, a, TCG_COND_GEU) 5290 TRANS(CMEQ_s, do_cmop_d, a, TCG_COND_EQ) 5291 TRANS(CMTST_s, do_cmop_d, a, TCG_COND_TSTNE) 5292 5293 static bool do_fp3_vector(DisasContext *s, arg_qrrr_e *a, 5294 gen_helper_gvec_3_ptr * const fns[3]) 5295 { 5296 MemOp esz = a->esz; 5297 5298 switch (esz) { 5299 case MO_64: 5300 if (!a->q) { 5301 return false; 5302 } 5303 break; 5304 case MO_32: 5305 break; 5306 case MO_16: 5307 if (!dc_isar_feature(aa64_fp16, s)) { 5308 return false; 5309 } 5310 break; 5311 default: 5312 return false; 5313 } 5314 if (fp_access_check(s)) { 5315 gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm, 5316 esz == MO_16, 0, fns[esz - 1]); 5317 } 5318 return true; 5319 } 5320 5321 static gen_helper_gvec_3_ptr * const f_vector_fadd[3] = { 5322 gen_helper_gvec_fadd_h, 5323 gen_helper_gvec_fadd_s, 5324 gen_helper_gvec_fadd_d, 5325 }; 5326 TRANS(FADD_v, do_fp3_vector, a, f_vector_fadd) 5327 5328 static gen_helper_gvec_3_ptr * const f_vector_fsub[3] = { 5329 gen_helper_gvec_fsub_h, 5330 gen_helper_gvec_fsub_s, 5331 gen_helper_gvec_fsub_d, 5332 }; 5333 TRANS(FSUB_v, do_fp3_vector, a, f_vector_fsub) 5334 5335 static gen_helper_gvec_3_ptr * const f_vector_fdiv[3] = { 5336 gen_helper_gvec_fdiv_h, 5337 gen_helper_gvec_fdiv_s, 5338 gen_helper_gvec_fdiv_d, 5339 }; 5340 TRANS(FDIV_v, do_fp3_vector, a, f_vector_fdiv) 5341 5342 static gen_helper_gvec_3_ptr * const f_vector_fmul[3] = { 5343 gen_helper_gvec_fmul_h, 5344 gen_helper_gvec_fmul_s, 5345 gen_helper_gvec_fmul_d, 5346 }; 5347 TRANS(FMUL_v, do_fp3_vector, a, f_vector_fmul) 5348 5349 static gen_helper_gvec_3_ptr * const f_vector_fmax[3] = { 5350 gen_helper_gvec_fmax_h, 5351 gen_helper_gvec_fmax_s, 5352 gen_helper_gvec_fmax_d, 5353 }; 5354 TRANS(FMAX_v, do_fp3_vector, a, f_vector_fmax) 5355 5356 static gen_helper_gvec_3_ptr * const f_vector_fmin[3] = { 5357 gen_helper_gvec_fmin_h, 5358 gen_helper_gvec_fmin_s, 5359 gen_helper_gvec_fmin_d, 5360 }; 5361 TRANS(FMIN_v, do_fp3_vector, a, f_vector_fmin) 5362 5363 static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[3] = { 5364 gen_helper_gvec_fmaxnum_h, 5365 gen_helper_gvec_fmaxnum_s, 5366 gen_helper_gvec_fmaxnum_d, 5367 }; 5368 TRANS(FMAXNM_v, do_fp3_vector, a, f_vector_fmaxnm) 5369 5370 
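/* As with the tables above: one helper per element size (fp16, fp32, fp64), indexed by esz - 1 in do_fp3_vector. */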
static gen_helper_gvec_3_ptr * const f_vector_fminnm[3] = { 5371 gen_helper_gvec_fminnum_h, 5372 gen_helper_gvec_fminnum_s, 5373 gen_helper_gvec_fminnum_d, 5374 }; 5375 TRANS(FMINNM_v, do_fp3_vector, a, f_vector_fminnm) 5376 5377 static gen_helper_gvec_3_ptr * const f_vector_fmulx[3] = { 5378 gen_helper_gvec_fmulx_h, 5379 gen_helper_gvec_fmulx_s, 5380 gen_helper_gvec_fmulx_d, 5381 }; 5382 TRANS(FMULX_v, do_fp3_vector, a, f_vector_fmulx) 5383 5384 static gen_helper_gvec_3_ptr * const f_vector_fmla[3] = { 5385 gen_helper_gvec_vfma_h, 5386 gen_helper_gvec_vfma_s, 5387 gen_helper_gvec_vfma_d, 5388 }; 5389 TRANS(FMLA_v, do_fp3_vector, a, f_vector_fmla) 5390 5391 static gen_helper_gvec_3_ptr * const f_vector_fmls[3] = { 5392 gen_helper_gvec_vfms_h, 5393 gen_helper_gvec_vfms_s, 5394 gen_helper_gvec_vfms_d, 5395 }; 5396 TRANS(FMLS_v, do_fp3_vector, a, f_vector_fmls) 5397 5398 static gen_helper_gvec_3_ptr * const f_vector_fcmeq[3] = { 5399 gen_helper_gvec_fceq_h, 5400 gen_helper_gvec_fceq_s, 5401 gen_helper_gvec_fceq_d, 5402 }; 5403 TRANS(FCMEQ_v, do_fp3_vector, a, f_vector_fcmeq) 5404 5405 static gen_helper_gvec_3_ptr * const f_vector_fcmge[3] = { 5406 gen_helper_gvec_fcge_h, 5407 gen_helper_gvec_fcge_s, 5408 gen_helper_gvec_fcge_d, 5409 }; 5410 TRANS(FCMGE_v, do_fp3_vector, a, f_vector_fcmge) 5411 5412 static gen_helper_gvec_3_ptr * const f_vector_fcmgt[3] = { 5413 gen_helper_gvec_fcgt_h, 5414 gen_helper_gvec_fcgt_s, 5415 gen_helper_gvec_fcgt_d, 5416 }; 5417 TRANS(FCMGT_v, do_fp3_vector, a, f_vector_fcmgt) 5418 5419 static gen_helper_gvec_3_ptr * const f_vector_facge[3] = { 5420 gen_helper_gvec_facge_h, 5421 gen_helper_gvec_facge_s, 5422 gen_helper_gvec_facge_d, 5423 }; 5424 TRANS(FACGE_v, do_fp3_vector, a, f_vector_facge) 5425 5426 static gen_helper_gvec_3_ptr * const f_vector_facgt[3] = { 5427 gen_helper_gvec_facgt_h, 5428 gen_helper_gvec_facgt_s, 5429 gen_helper_gvec_facgt_d, 5430 }; 5431 TRANS(FACGT_v, do_fp3_vector, a, f_vector_facgt) 5432 5433 static gen_helper_gvec_3_ptr * const f_vector_fabd[3] = { 5434 gen_helper_gvec_fabd_h, 5435 gen_helper_gvec_fabd_s, 5436 gen_helper_gvec_fabd_d, 5437 }; 5438 TRANS(FABD_v, do_fp3_vector, a, f_vector_fabd) 5439 5440 static gen_helper_gvec_3_ptr * const f_vector_frecps[3] = { 5441 gen_helper_gvec_recps_h, 5442 gen_helper_gvec_recps_s, 5443 gen_helper_gvec_recps_d, 5444 }; 5445 TRANS(FRECPS_v, do_fp3_vector, a, f_vector_frecps) 5446 5447 static gen_helper_gvec_3_ptr * const f_vector_frsqrts[3] = { 5448 gen_helper_gvec_rsqrts_h, 5449 gen_helper_gvec_rsqrts_s, 5450 gen_helper_gvec_rsqrts_d, 5451 }; 5452 TRANS(FRSQRTS_v, do_fp3_vector, a, f_vector_frsqrts) 5453 5454 static gen_helper_gvec_3_ptr * const f_vector_faddp[3] = { 5455 gen_helper_gvec_faddp_h, 5456 gen_helper_gvec_faddp_s, 5457 gen_helper_gvec_faddp_d, 5458 }; 5459 TRANS(FADDP_v, do_fp3_vector, a, f_vector_faddp) 5460 5461 static gen_helper_gvec_3_ptr * const f_vector_fmaxp[3] = { 5462 gen_helper_gvec_fmaxp_h, 5463 gen_helper_gvec_fmaxp_s, 5464 gen_helper_gvec_fmaxp_d, 5465 }; 5466 TRANS(FMAXP_v, do_fp3_vector, a, f_vector_fmaxp) 5467 5468 static gen_helper_gvec_3_ptr * const f_vector_fminp[3] = { 5469 gen_helper_gvec_fminp_h, 5470 gen_helper_gvec_fminp_s, 5471 gen_helper_gvec_fminp_d, 5472 }; 5473 TRANS(FMINP_v, do_fp3_vector, a, f_vector_fminp) 5474 5475 static gen_helper_gvec_3_ptr * const f_vector_fmaxnmp[3] = { 5476 gen_helper_gvec_fmaxnump_h, 5477 gen_helper_gvec_fmaxnump_s, 5478 gen_helper_gvec_fmaxnump_d, 5479 }; 5480 TRANS(FMAXNMP_v, do_fp3_vector, a, f_vector_fmaxnmp) 5481 5482 static 
gen_helper_gvec_3_ptr * const f_vector_fminnmp[3] = { 5483 gen_helper_gvec_fminnump_h, 5484 gen_helper_gvec_fminnump_s, 5485 gen_helper_gvec_fminnump_d, 5486 }; 5487 TRANS(FMINNMP_v, do_fp3_vector, a, f_vector_fminnmp) 5488 5489 static bool do_fmlal(DisasContext *s, arg_qrrr_e *a, bool is_s, bool is_2) 5490 { 5491 if (fp_access_check(s)) { 5492 int data = (is_2 << 1) | is_s; 5493 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd), 5494 vec_full_reg_offset(s, a->rn), 5495 vec_full_reg_offset(s, a->rm), tcg_env, 5496 a->q ? 16 : 8, vec_full_reg_size(s), 5497 data, gen_helper_gvec_fmlal_a64); 5498 } 5499 return true; 5500 } 5501 5502 TRANS_FEAT(FMLAL_v, aa64_fhm, do_fmlal, a, false, false) 5503 TRANS_FEAT(FMLSL_v, aa64_fhm, do_fmlal, a, true, false) 5504 TRANS_FEAT(FMLAL2_v, aa64_fhm, do_fmlal, a, false, true) 5505 TRANS_FEAT(FMLSL2_v, aa64_fhm, do_fmlal, a, true, true) 5506 5507 TRANS(ADDP_v, do_gvec_fn3, a, gen_gvec_addp) 5508 TRANS(SMAXP_v, do_gvec_fn3_no64, a, gen_gvec_smaxp) 5509 TRANS(SMINP_v, do_gvec_fn3_no64, a, gen_gvec_sminp) 5510 TRANS(UMAXP_v, do_gvec_fn3_no64, a, gen_gvec_umaxp) 5511 TRANS(UMINP_v, do_gvec_fn3_no64, a, gen_gvec_uminp) 5512 5513 TRANS(AND_v, do_gvec_fn3, a, tcg_gen_gvec_and) 5514 TRANS(BIC_v, do_gvec_fn3, a, tcg_gen_gvec_andc) 5515 TRANS(ORR_v, do_gvec_fn3, a, tcg_gen_gvec_or) 5516 TRANS(ORN_v, do_gvec_fn3, a, tcg_gen_gvec_orc) 5517 TRANS(EOR_v, do_gvec_fn3, a, tcg_gen_gvec_xor) 5518 5519 static bool do_bitsel(DisasContext *s, bool is_q, int d, int a, int b, int c) 5520 { 5521 if (fp_access_check(s)) { 5522 gen_gvec_fn4(s, is_q, d, a, b, c, tcg_gen_gvec_bitsel, 0); 5523 } 5524 return true; 5525 } 5526 5527 TRANS(BSL_v, do_bitsel, a->q, a->rd, a->rd, a->rn, a->rm) 5528 TRANS(BIT_v, do_bitsel, a->q, a->rd, a->rm, a->rn, a->rd) 5529 TRANS(BIF_v, do_bitsel, a->q, a->rd, a->rm, a->rd, a->rn) 5530 5531 TRANS(SQADD_v, do_gvec_fn3, a, gen_gvec_sqadd_qc) 5532 TRANS(UQADD_v, do_gvec_fn3, a, gen_gvec_uqadd_qc) 5533 TRANS(SQSUB_v, do_gvec_fn3, a, gen_gvec_sqsub_qc) 5534 TRANS(UQSUB_v, do_gvec_fn3, a, gen_gvec_uqsub_qc) 5535 TRANS(SUQADD_v, do_gvec_fn3, a, gen_gvec_suqadd_qc) 5536 TRANS(USQADD_v, do_gvec_fn3, a, gen_gvec_usqadd_qc) 5537 5538 TRANS(SSHL_v, do_gvec_fn3, a, gen_gvec_sshl) 5539 TRANS(USHL_v, do_gvec_fn3, a, gen_gvec_ushl) 5540 TRANS(SRSHL_v, do_gvec_fn3, a, gen_gvec_srshl) 5541 TRANS(URSHL_v, do_gvec_fn3, a, gen_gvec_urshl) 5542 TRANS(SQSHL_v, do_gvec_fn3, a, gen_neon_sqshl) 5543 TRANS(UQSHL_v, do_gvec_fn3, a, gen_neon_uqshl) 5544 TRANS(SQRSHL_v, do_gvec_fn3, a, gen_neon_sqrshl) 5545 TRANS(UQRSHL_v, do_gvec_fn3, a, gen_neon_uqrshl) 5546 5547 TRANS(ADD_v, do_gvec_fn3, a, tcg_gen_gvec_add) 5548 TRANS(SUB_v, do_gvec_fn3, a, tcg_gen_gvec_sub) 5549 TRANS(SHADD_v, do_gvec_fn3_no64, a, gen_gvec_shadd) 5550 TRANS(UHADD_v, do_gvec_fn3_no64, a, gen_gvec_uhadd) 5551 TRANS(SHSUB_v, do_gvec_fn3_no64, a, gen_gvec_shsub) 5552 TRANS(UHSUB_v, do_gvec_fn3_no64, a, gen_gvec_uhsub) 5553 TRANS(SRHADD_v, do_gvec_fn3_no64, a, gen_gvec_srhadd) 5554 TRANS(URHADD_v, do_gvec_fn3_no64, a, gen_gvec_urhadd) 5555 TRANS(SMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smax) 5556 TRANS(UMAX_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umax) 5557 TRANS(SMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_smin) 5558 TRANS(UMIN_v, do_gvec_fn3_no64, a, tcg_gen_gvec_umin) 5559 TRANS(SABA_v, do_gvec_fn3_no64, a, gen_gvec_saba) 5560 TRANS(UABA_v, do_gvec_fn3_no64, a, gen_gvec_uaba) 5561 TRANS(SABD_v, do_gvec_fn3_no64, a, gen_gvec_sabd) 5562 TRANS(UABD_v, do_gvec_fn3_no64, a, gen_gvec_uabd) 5563 TRANS(MUL_v, do_gvec_fn3_no64, a, 
tcg_gen_gvec_mul) 5564 TRANS(PMUL_v, do_gvec_op3_ool, a, 0, gen_helper_gvec_pmul_b) 5565 TRANS(MLA_v, do_gvec_fn3_no64, a, gen_gvec_mla) 5566 TRANS(MLS_v, do_gvec_fn3_no64, a, gen_gvec_mls) 5567 5568 static bool do_cmop_v(DisasContext *s, arg_qrrr_e *a, TCGCond cond) 5569 { 5570 if (a->esz == MO_64 && !a->q) { 5571 return false; 5572 } 5573 if (fp_access_check(s)) { 5574 tcg_gen_gvec_cmp(cond, a->esz, 5575 vec_full_reg_offset(s, a->rd), 5576 vec_full_reg_offset(s, a->rn), 5577 vec_full_reg_offset(s, a->rm), 5578 a->q ? 16 : 8, vec_full_reg_size(s)); 5579 } 5580 return true; 5581 } 5582 5583 TRANS(CMGT_v, do_cmop_v, a, TCG_COND_GT) 5584 TRANS(CMHI_v, do_cmop_v, a, TCG_COND_GTU) 5585 TRANS(CMGE_v, do_cmop_v, a, TCG_COND_GE) 5586 TRANS(CMHS_v, do_cmop_v, a, TCG_COND_GEU) 5587 TRANS(CMEQ_v, do_cmop_v, a, TCG_COND_EQ) 5588 TRANS(CMTST_v, do_gvec_fn3, a, gen_gvec_cmtst) 5589 5590 TRANS(SQDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqdmulh_qc) 5591 TRANS(SQRDMULH_v, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmulh_qc) 5592 TRANS_FEAT(SQRDMLAH_v, aa64_rdm, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmlah_qc) 5593 TRANS_FEAT(SQRDMLSH_v, aa64_rdm, do_gvec_fn3_no8_no64, a, gen_gvec_sqrdmlsh_qc) 5594 5595 static bool do_dot_vector(DisasContext *s, arg_qrrr_e *a, 5596 gen_helper_gvec_4 *fn) 5597 { 5598 if (fp_access_check(s)) { 5599 gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd, 0, fn); 5600 } 5601 return true; 5602 } 5603 5604 TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_b) 5605 TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_b) 5606 TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_b) 5607 5608 /* 5609 * Advanced SIMD scalar/vector x indexed element 5610 */ 5611 5612 static bool do_fp3_scalar_idx(DisasContext *s, arg_rrx_e *a, const FPScalar *f) 5613 { 5614 switch (a->esz) { 5615 case MO_64: 5616 if (fp_access_check(s)) { 5617 TCGv_i64 t0 = read_fp_dreg(s, a->rn); 5618 TCGv_i64 t1 = tcg_temp_new_i64(); 5619 5620 read_vec_element(s, t1, a->rm, a->idx, MO_64); 5621 f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); 5622 write_fp_dreg(s, a->rd, t0); 5623 } 5624 break; 5625 case MO_32: 5626 if (fp_access_check(s)) { 5627 TCGv_i32 t0 = read_fp_sreg(s, a->rn); 5628 TCGv_i32 t1 = tcg_temp_new_i32(); 5629 5630 read_vec_element_i32(s, t1, a->rm, a->idx, MO_32); 5631 f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR)); 5632 write_fp_sreg(s, a->rd, t0); 5633 } 5634 break; 5635 case MO_16: 5636 if (!dc_isar_feature(aa64_fp16, s)) { 5637 return false; 5638 } 5639 if (fp_access_check(s)) { 5640 TCGv_i32 t0 = read_fp_hreg(s, a->rn); 5641 TCGv_i32 t1 = tcg_temp_new_i32(); 5642 5643 read_vec_element_i32(s, t1, a->rm, a->idx, MO_16); 5644 f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16)); 5645 write_fp_sreg(s, a->rd, t0); 5646 } 5647 break; 5648 default: 5649 g_assert_not_reached(); 5650 } 5651 return true; 5652 } 5653 5654 TRANS(FMUL_si, do_fp3_scalar_idx, a, &f_scalar_fmul) 5655 TRANS(FMULX_si, do_fp3_scalar_idx, a, &f_scalar_fmulx) 5656 5657 static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg) 5658 { 5659 switch (a->esz) { 5660 case MO_64: 5661 if (fp_access_check(s)) { 5662 TCGv_i64 t0 = read_fp_dreg(s, a->rd); 5663 TCGv_i64 t1 = read_fp_dreg(s, a->rn); 5664 TCGv_i64 t2 = tcg_temp_new_i64(); 5665 5666 read_vec_element(s, t2, a->rm, a->idx, MO_64); 5667 if (neg) { 5668 gen_vfp_negd(t1, t1); 5669 } 5670 gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR)); 5671 write_fp_dreg(s, a->rd, t0); 5672 } 5673 break; 5674 case MO_32: 5675 if 

static bool do_fmla_scalar_idx(DisasContext *s, arg_rrx_e *a, bool neg)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = read_fp_dreg(s, a->rd);
            TCGv_i64 t1 = read_fp_dreg(s, a->rn);
            TCGv_i64 t2 = tcg_temp_new_i64();

            read_vec_element(s, t2, a->rm, a->idx, MO_64);
            if (neg) {
                gen_vfp_negd(t1, t1);
            }
            gen_helper_vfp_muladdd(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_sreg(s, a->rd);
            TCGv_i32 t1 = read_fp_sreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_32);
            if (neg) {
                gen_vfp_negs(t1, t1);
            }
            gen_helper_vfp_muladds(t0, t1, t2, t0, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = read_fp_hreg(s, a->rd);
            TCGv_i32 t1 = read_fp_hreg(s, a->rn);
            TCGv_i32 t2 = tcg_temp_new_i32();

            read_vec_element_i32(s, t2, a->rm, a->idx, MO_16);
            if (neg) {
                gen_vfp_negh(t1, t1);
            }
            gen_helper_advsimd_muladdh(t0, t1, t2, t0,
                                       fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FMLA_si, do_fmla_scalar_idx, a, false)
TRANS(FMLS_si, do_fmla_scalar_idx, a, true)

static bool do_env_scalar2_idx_hs(DisasContext *s, arg_rrx_e *a,
                                  const ENVScalar2 *f)
{
    if (a->esz < MO_16 || a->esz > MO_32) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
        f->gen_bhs[a->esz](t0, tcg_env, t0, t1);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

TRANS(SQDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqdmulh)
TRANS(SQRDMULH_si, do_env_scalar2_idx_hs, a, &f_scalar_sqrdmulh)

static bool do_env_scalar3_idx_hs(DisasContext *s, arg_rrx_e *a,
                                  const ENVScalar3 *f)
{
    if (a->esz < MO_16 || a->esz > MO_32) {
        return false;
    }
    if (fp_access_check(s)) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        TCGv_i32 t2 = tcg_temp_new_i32();

        read_vec_element_i32(s, t0, a->rn, 0, a->esz);
        read_vec_element_i32(s, t1, a->rm, a->idx, a->esz);
        read_vec_element_i32(s, t2, a->rd, 0, a->esz);
        f->gen_hs[a->esz - 1](t0, tcg_env, t0, t1, t2);
        write_fp_sreg(s, a->rd, t0);
    }
    return true;
}

TRANS_FEAT(SQRDMLAH_si, aa64_rdm, do_env_scalar3_idx_hs, a, &f_scalar_sqrdmlah)
TRANS_FEAT(SQRDMLSH_si, aa64_rdm, do_env_scalar3_idx_hs, a, &f_scalar_sqrdmlsh)

static bool do_fp3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                              gen_helper_gvec_3_ptr * const fns[3])
{
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op3_fpst(s, a->q, a->rd, a->rn, a->rm,
                          esz == MO_16, a->idx, fns[esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3_ptr * const f_vector_idx_fmul[3] = {
    gen_helper_gvec_fmul_idx_h,
    gen_helper_gvec_fmul_idx_s,
    gen_helper_gvec_fmul_idx_d,
};
TRANS(FMUL_vi, do_fp3_vector_idx, a, f_vector_idx_fmul)

static gen_helper_gvec_3_ptr * const f_vector_idx_fmulx[3] = {
    gen_helper_gvec_fmulx_idx_h,
    gen_helper_gvec_fmulx_idx_s,
    gen_helper_gvec_fmulx_idx_d,
};
TRANS(FMULX_vi, do_fp3_vector_idx, a, f_vector_idx_fmulx)
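
/*
 * FMLA/FMLS (vector, by element): the element index and the negate
 * flag are packed into the helper's simd_data argument as
 * (idx << 1) | neg.
 */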

static bool do_fmla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool neg)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_gvec_fmla_idx_h,
        gen_helper_gvec_fmla_idx_s,
        gen_helper_gvec_fmla_idx_d,
    };
    MemOp esz = a->esz;

    switch (esz) {
    case MO_64:
        if (!a->q) {
            return false;
        }
        break;
    case MO_32:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }
    if (fp_access_check(s)) {
        gen_gvec_op4_fpst(s, a->q, a->rd, a->rn, a->rm, a->rd,
                          esz == MO_16, (a->idx << 1) | neg,
                          fns[esz - 1]);
    }
    return true;
}

TRANS(FMLA_vi, do_fmla_vector_idx, a, false)
TRANS(FMLS_vi, do_fmla_vector_idx, a, true)

static bool do_fmlal_idx(DisasContext *s, arg_qrrx_e *a, bool is_s, bool is_2)
{
    if (fp_access_check(s)) {
        int data = (a->idx << 2) | (is_2 << 1) | is_s;
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm), tcg_env,
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           data, gen_helper_gvec_fmlal_idx_a64);
    }
    return true;
}

TRANS_FEAT(FMLAL_vi, aa64_fhm, do_fmlal_idx, a, false, false)
TRANS_FEAT(FMLSL_vi, aa64_fhm, do_fmlal_idx, a, true, false)
TRANS_FEAT(FMLAL2_vi, aa64_fhm, do_fmlal_idx, a, false, true)
TRANS_FEAT(FMLSL2_vi, aa64_fhm, do_fmlal_idx, a, true, true)

static bool do_int3_vector_idx(DisasContext *s, arg_qrrx_e *a,
                               gen_helper_gvec_3 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op3_ool(s, a->q, a->rd, a->rn, a->rm, a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_3 * const f_vector_idx_mul[2] = {
    gen_helper_gvec_mul_idx_h,
    gen_helper_gvec_mul_idx_s,
};
TRANS(MUL_vi, do_int3_vector_idx, a, f_vector_idx_mul)

static bool do_mla_vector_idx(DisasContext *s, arg_qrrx_e *a, bool sub)
{
    static gen_helper_gvec_4 * const fns[2][2] = {
        { gen_helper_gvec_mla_idx_h, gen_helper_gvec_mls_idx_h },
        { gen_helper_gvec_mla_idx_s, gen_helper_gvec_mls_idx_s },
    };

    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd,
                         a->idx, fns[a->esz - 1][sub]);
    }
    return true;
}

TRANS(MLA_vi, do_mla_vector_idx, a, false)
TRANS(MLS_vi, do_mla_vector_idx, a, true)
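
/*
 * The saturating by-element multiplies need the cumulative saturation
 * flag, so vfp.qc is passed as the extra (4th) vector operand for the
 * helpers to update.
 */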

static bool do_int3_qc_vector_idx(DisasContext *s, arg_qrrx_e *a,
                                  gen_helper_gvec_4 * const fns[2])
{
    assert(a->esz == MO_16 || a->esz == MO_32);
    if (fp_access_check(s)) {
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           offsetof(CPUARMState, vfp.qc),
                           a->q ? 16 : 8, vec_full_reg_size(s),
                           a->idx, fns[a->esz - 1]);
    }
    return true;
}

static gen_helper_gvec_4 * const f_vector_idx_sqdmulh[2] = {
    gen_helper_neon_sqdmulh_idx_h,
    gen_helper_neon_sqdmulh_idx_s,
};
TRANS(SQDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqdmulh)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmulh[2] = {
    gen_helper_neon_sqrdmulh_idx_h,
    gen_helper_neon_sqrdmulh_idx_s,
};
TRANS(SQRDMULH_vi, do_int3_qc_vector_idx, a, f_vector_idx_sqrdmulh)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmlah[2] = {
    gen_helper_neon_sqrdmlah_idx_h,
    gen_helper_neon_sqrdmlah_idx_s,
};
TRANS_FEAT(SQRDMLAH_vi, aa64_rdm, do_int3_qc_vector_idx, a,
           f_vector_idx_sqrdmlah)

static gen_helper_gvec_4 * const f_vector_idx_sqrdmlsh[2] = {
    gen_helper_neon_sqrdmlsh_idx_h,
    gen_helper_neon_sqrdmlsh_idx_s,
};
TRANS_FEAT(SQRDMLSH_vi, aa64_rdm, do_int3_qc_vector_idx, a,
           f_vector_idx_sqrdmlsh)

static bool do_dot_vector_idx(DisasContext *s, arg_qrrx_e *a,
                              gen_helper_gvec_4 *fn)
{
    if (fp_access_check(s)) {
        gen_gvec_op4_ool(s, a->q, a->rd, a->rn, a->rm, a->rd, a->idx, fn);
    }
    return true;
}

TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_b)
TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_b)
TRANS_FEAT(SUDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
           gen_helper_gvec_sudot_idx_b)
TRANS_FEAT(USDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
           gen_helper_gvec_usdot_idx_b)

/*
 * Advanced SIMD scalar pairwise
 */

static bool do_fp3_scalar_pair(DisasContext *s, arg_rr_e *a, const FPScalar *f)
{
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 t0 = tcg_temp_new_i64();
            TCGv_i64 t1 = tcg_temp_new_i64();

            read_vec_element(s, t0, a->rn, 0, MO_64);
            read_vec_element(s, t1, a->rn, 1, MO_64);
            f->gen_d(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_dreg(s, a->rd, t0);
        }
        break;
    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_32);
            read_vec_element_i32(s, t1, a->rn, 1, MO_32);
            f->gen_s(t0, t0, t1, fpstatus_ptr(FPST_FPCR));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i32 t1 = tcg_temp_new_i32();

            read_vec_element_i32(s, t0, a->rn, 0, MO_16);
            read_vec_element_i32(s, t1, a->rn, 1, MO_16);
            f->gen_h(t0, t0, t1, fpstatus_ptr(FPST_FPCR_F16));
            write_fp_sreg(s, a->rd, t0);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

TRANS(FADDP_s, do_fp3_scalar_pair, a, &f_scalar_fadd)
TRANS(FMAXP_s, do_fp3_scalar_pair, a, &f_scalar_fmax)
TRANS(FMINP_s, do_fp3_scalar_pair, a, &f_scalar_fmin)
TRANS(FMAXNMP_s, do_fp3_scalar_pair, a, &f_scalar_fmaxnm)
TRANS(FMINNMP_s, do_fp3_scalar_pair, a, &f_scalar_fminnm)
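
/* ADDP (scalar): add the two 64-bit elements of Vn and write Dd. */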

static bool trans_ADDP_s(DisasContext *s, arg_rr_e *a)
{
    if (fp_access_check(s)) {
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        read_vec_element(s, t0, a->rn, 0, MO_64);
        read_vec_element(s, t1, a->rn, 1, MO_64);
        tcg_gen_add_i64(t0, t0, t1);
        write_fp_dreg(s, a->rd, t0);
    }
    return true;
}

/*
 * Floating-point conditional select
 */

static bool trans_FCSEL(DisasContext *s, arg_FCSEL *a)
{
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;

    switch (a->esz) {
    case MO_32:
    case MO_64:
        break;
    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!fp_access_check(s)) {
        return true;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, a->rn, 0, a->esz);
    read_vec_element(s, t_false, a->rm, 0, a->esz);

    a64_test_cc(&c, a->cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);

    /*
     * Note that sregs & hregs write back zeros to the high bits,
     * and we've already done the zero-extension.
     */
    write_fp_dreg(s, a->rd, t_true);
    return true;
}

/*
 * Floating-point data-processing (3 source)
 */

static bool do_fmadd(DisasContext *s, arg_rrrr_e *a, bool neg_a, bool neg_n)
{
    TCGv_ptr fpst;

    /*
     * These are fused multiply-add. Note that doing the negations here
     * as separate steps is correct: an input NaN should come out with
     * its sign bit flipped if it is a negated input.
     */
    switch (a->esz) {
    case MO_64:
        if (fp_access_check(s)) {
            TCGv_i64 tn = read_fp_dreg(s, a->rn);
            TCGv_i64 tm = read_fp_dreg(s, a->rm);
            TCGv_i64 ta = read_fp_dreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negd(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negd(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladdd(ta, tn, tm, ta, fpst);
            write_fp_dreg(s, a->rd, ta);
        }
        break;

    case MO_32:
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_sreg(s, a->rn);
            TCGv_i32 tm = read_fp_sreg(s, a->rm);
            TCGv_i32 ta = read_fp_sreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negs(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negs(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR);
            gen_helper_vfp_muladds(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;

    case MO_16:
        if (!dc_isar_feature(aa64_fp16, s)) {
            return false;
        }
        if (fp_access_check(s)) {
            TCGv_i32 tn = read_fp_hreg(s, a->rn);
            TCGv_i32 tm = read_fp_hreg(s, a->rm);
            TCGv_i32 ta = read_fp_hreg(s, a->ra);

            if (neg_a) {
                gen_vfp_negh(ta, ta);
            }
            if (neg_n) {
                gen_vfp_negh(tn, tn);
            }
            fpst = fpstatus_ptr(FPST_FPCR_F16);
            gen_helper_advsimd_muladdh(ta, tn, tm, ta, fpst);
            write_fp_sreg(s, a->rd, ta);
        }
        break;

    default:
        return false;
    }
    return true;
}

TRANS(FMADD, do_fmadd, a, false, false)
TRANS(FNMADD, do_fmadd, a, true, true)
TRANS(FMSUB, do_fmadd, a, false, true)
TRANS(FNMSUB, do_fmadd, a, true, false)
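
/*
 * The (neg_a, neg_n) arguments above give the architected operations:
 *   FMADD:  Rd =  Ra + Rn * Rm     FMSUB:  Rd =  Ra - Rn * Rm
 *   FNMADD: Rd = -Ra - Rn * Rm     FNMSUB: Rd = -Ra + Rn * Rm
 */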

/* Shift a TCGv src by TCGv shift_amount, put result in dst.
 * Note that it is the caller's responsibility to ensure that the
 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
 * mandated semantics for out of range shifts.
 */
static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
                      enum a64_shift_type shift_type, TCGv_i64 shift_amount)
{
    switch (shift_type) {
    case A64_SHIFT_TYPE_LSL:
        tcg_gen_shl_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_LSR:
        tcg_gen_shr_i64(dst, src, shift_amount);
        break;
    case A64_SHIFT_TYPE_ASR:
        if (!sf) {
            tcg_gen_ext32s_i64(dst, src);
        }
        tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
        break;
    case A64_SHIFT_TYPE_ROR:
        if (sf) {
            tcg_gen_rotr_i64(dst, src, shift_amount);
        } else {
            TCGv_i32 t0, t1;
            t0 = tcg_temp_new_i32();
            t1 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t0, src);
            tcg_gen_extrl_i64_i32(t1, shift_amount);
            tcg_gen_rotr_i32(t0, t0, t1);
            tcg_gen_extu_i32_i64(dst, t0);
        }
        break;
    default:
        g_assert_not_reached(); /* all shift types should be handled */
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(dst, dst);
    }
}

/* Shift a TCGv src by immediate, put result in dst.
 * The shift amount must be in range (this should always be true as the
 * relevant instructions will UNDEF on bad shift immediates).
 */
static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
                          enum a64_shift_type shift_type, unsigned int shift_i)
{
    assert(shift_i < (sf ? 64 : 32));

    if (shift_i == 0) {
        tcg_gen_mov_i64(dst, src);
    } else {
        shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
    }
}

/* Logical (shifted register)
 *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
 * +----+-----+-----------+-------+---+------+--------+------+------+
 * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
 * +----+-----+-----------+-------+---+------+--------+------+------+
 */
static void disas_logic_reg(DisasContext *s, uint32_t insn)
{
    TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
    unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;

    sf = extract32(insn, 31, 1);
    opc = extract32(insn, 29, 2);
    shift_type = extract32(insn, 22, 2);
    invert = extract32(insn, 21, 1);
    rm = extract32(insn, 16, 5);
    shift_amount = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (!sf && (shift_amount & (1 << 5))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rd = cpu_reg(s, rd);

    if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
        /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
         * register-register MOV and MVN, so it is worth special casing.
         */
        tcg_rm = cpu_reg(s, rm);
        if (invert) {
            tcg_gen_not_i64(tcg_rd, tcg_rm);
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
            }
        } else {
            if (sf) {
                tcg_gen_mov_i64(tcg_rd, tcg_rm);
            } else {
                tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
            }
        }
        return;
    }

    tcg_rm = read_cpu_reg(s, rm, sf);

    if (shift_amount) {
        shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
    }

    tcg_rn = cpu_reg(s, rn);

    switch (opc | (invert << 2)) {
    case 0: /* AND */
    case 3: /* ANDS */
        tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 1: /* ORR */
        tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 2: /* EOR */
        tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 4: /* BIC */
    case 7: /* BICS */
        tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 5: /* ORN */
        tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 6: /* EON */
        tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    default:
        g_assert_not_reached();
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }

    if (opc == 3) {
        gen_logic_CC(sf, tcg_rd);
    }
}

/*
 * Add/subtract (extended register)
 *
 *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
 *
 *  sf: 0 -> 32bit, 1 -> 64bit
 *  op: 0 -> add  , 1 -> sub
 *   S: 1 -> set flags
 * opt: 00
 * option: extension type (see DecodeRegExtend)
 * imm3: optional shift to Rm
 *
 * Rd = Rn + LSL(extend(Rm), amount)
 */
static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm3 = extract32(insn, 10, 3);
    int option = extract32(insn, 13, 3);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rm, tcg_rn; /* temps */
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_result;

    if (imm3 > 4 || opt != 0) {
        unallocated_encoding(s);
        return;
    }

    /* non-flag setting ops may use SP */
    if (!setflags) {
        tcg_rd = cpu_reg_sp(s, rd);
    } else {
        tcg_rd = cpu_reg(s, rd);
    }
    tcg_rn = read_cpu_reg_sp(s, rn, sf);

    tcg_rm = read_cpu_reg(s, rm, sf);
    ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}

/*
 * Add/subtract (shifted register)
 *
 *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
 * +--+--+--+-----------+-----+--+-------+---------+------+------+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add  , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
 *  imm6: Shift amount to apply to Rm before the add/sub
 */
static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 10, 6);
    int rm = extract32(insn, 16, 5);
    int shift_type = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool sf = extract32(insn, 31, 1);

    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn, tcg_rm;
    TCGv_i64 tcg_result;

    if ((shift_type == 3) || (!sf && (imm6 > 31))) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, sf);
    tcg_rm = read_cpu_reg(s, rm, sf);

    shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);

    tcg_result = tcg_temp_new_i64();

    if (!setflags) {
        if (sub_op) {
            tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
        }
    } else {
        if (sub_op) {
            gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
        } else {
            gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
        }
    }

    if (sf) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }
}
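
/*
 * As with the logical forms, ADDS/SUBS with Rd == 31 give the CMN
 * and CMP aliases, e.g. CMP x0, x1 is SUBS xzr, x0, x1.
 */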

/* Data-processing (3 source)
 *
 *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
 *  +--+------+-----------+------+------+----+------+------+------+
 *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 *  +--+------+-----------+------+------+----+------+------+------+
 */
static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    int op_id = (extract32(insn, 29, 3) << 4) |
        (extract32(insn, 21, 3) << 1) |
        extract32(insn, 15, 1);
    bool sf = extract32(insn, 31, 1);
    bool is_sub = extract32(op_id, 0, 1);
    bool is_high = extract32(op_id, 2, 1);
    bool is_signed = false;
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_tmp;

    /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
    switch (op_id) {
    case 0x42: /* SMADDL */
    case 0x43: /* SMSUBL */
    case 0x44: /* SMULH */
        is_signed = true;
        break;
    case 0x0: /* MADD (32bit) */
    case 0x1: /* MSUB (32bit) */
    case 0x40: /* MADD (64bit) */
    case 0x41: /* MSUB (64bit) */
    case 0x4a: /* UMADDL */
    case 0x4b: /* UMSUBL */
    case 0x4c: /* UMULH */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_high) {
        TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
        TCGv_i64 tcg_rd = cpu_reg(s, rd);
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tcg_rm = cpu_reg(s, rm);

        if (is_signed) {
            tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
        }
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_tmp = tcg_temp_new_i64();

    if (op_id < 0x42) {
        tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
        tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
    } else {
        if (is_signed) {
            tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
        } else {
            tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
            tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
        }
    }

    if (ra == 31 && !is_sub) {
        /* Special-case MADD with rA == XZR; it is the standard MUL alias */
        tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
    } else {
        tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
        if (is_sub) {
            tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        } else {
            tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
        }
    }

    if (!sf) {
        tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
    }
}

/* Add/subtract (with carry)
 *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
 * +--+--+--+------------------------+------+-------------+------+-----+
 * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
 * +--+--+--+------------------------+------+-------------+------+-----+
 */

static void disas_adc_sbc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, setflags, rm, rn, rd;
    TCGv_i64 tcg_y, tcg_rn, tcg_rd;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    setflags = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (op) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
    } else {
        tcg_y = cpu_reg(s, rm);
    }

    if (setflags) {
        gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
    } else {
        gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
    }
}

/*
 * Rotate right into flags
 *  31 30 29                21       15          10      5  4      0
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
 * +--+--+--+-----------------+--------+-----------+------+--+------+
 */
static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
{
    int mask = extract32(insn, 0, 4);
    int o2 = extract32(insn, 4, 1);
    int rn = extract32(insn, 5, 5);
    int imm6 = extract32(insn, 15, 6);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i64 tcg_rn;
    TCGv_i32 nzcv;

    if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }

    tcg_rn = read_cpu_reg(s, rn, 1);
    tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);

    nzcv = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(nzcv, tcg_rn);

    if (mask & 8) { /* N */
        tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
    }
    if (mask & 4) { /* Z */
        tcg_gen_not_i32(cpu_ZF, nzcv);
        tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
    }
    if (mask & 2) { /* C */
        tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
    }
    if (mask & 1) { /* V */
        tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
    }
}
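
/*
 * RMIF above and SETF8/SETF16 below are the FEAT_FlagM flag
 * manipulation instructions, hence the aa64_condm_4 checks.
 */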

/*
 * Evaluate into flags
 *  31 30 29                21        15   14        10      5  4      0
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
 * +--+--+--+-----------------+---------+----+---------+------+--+------+
 */
static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
{
    int o3_mask = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int o2 = extract32(insn, 15, 6);
    int sz = extract32(insn, 14, 1);
    int sf_op_s = extract32(insn, 29, 3);
    TCGv_i32 tmp;
    int shift;

    if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
        !dc_isar_feature(aa64_condm_4, s)) {
        unallocated_encoding(s);
        return;
    }
    shift = sz ? 16 : 24; /* SETF16 or SETF8 */

    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
    tcg_gen_shli_i32(cpu_NF, tmp, shift);
    tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
}

/* Conditional compare (immediate / register)
 *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
 *        [1]                            y                [0]       [0]
 */
static void disas_cc(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, y, cond, rn, nzcv, is_imm;
    TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
    TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
    DisasCompare c;

    if (!extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }
    if (insn & (1 << 10 | 1 << 4)) {
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    op = extract32(insn, 30, 1);
    is_imm = extract32(insn, 11, 1);
    y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    nzcv = extract32(insn, 0, 4);

    /* Set T0 = !COND. */
    tcg_t0 = tcg_temp_new_i32();
    arm_test_cc(&c, cond);
    tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);

    /* Load the arguments for the new comparison. */
    if (is_imm) {
        tcg_y = tcg_temp_new_i64();
        tcg_gen_movi_i64(tcg_y, y);
    } else {
        tcg_y = cpu_reg(s, y);
    }
    tcg_rn = cpu_reg(s, rn);

    /* Set the flags for the new comparison. */
    tcg_tmp = tcg_temp_new_i64();
    if (op) {
        gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    } else {
        gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
    }

    /* If COND was false, force the flags to #nzcv. Compute two masks
     * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
     * For tcg hosts that support ANDC, we can make do with just T1.
     * In either case, allow the tcg optimizer to delete any unused mask.
     */
    tcg_t1 = tcg_temp_new_i32();
    tcg_t2 = tcg_temp_new_i32();
    tcg_gen_neg_i32(tcg_t1, tcg_t0);
    tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);

    if (nzcv & 8) { /* N */
        tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
        }
    }
    if (nzcv & 4) { /* Z */
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
        }
    } else {
        tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
    }
    if (nzcv & 2) { /* C */
        tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
        }
    }
    if (nzcv & 1) { /* V */
        tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
    } else {
        if (TCG_TARGET_HAS_andc_i32) {
            tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
        } else {
            tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
        }
    }
}

/* Conditional select
 *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
 * +----+----+---+-----------------+------+------+-----+------+------+
 * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
 * +----+----+---+-----------------+------+------+-----+------+------+
 */
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
    unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
    TCGv_i64 tcg_rd, zero;
    DisasCompare64 c;

    if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
        /* S == 1 or op2<1> == 1 */
        unallocated_encoding(s);
        return;
    }
    sf = extract32(insn, 31, 1);
    else_inv = extract32(insn, 30, 1);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    else_inc = extract32(insn, 10, 1);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    tcg_rd = cpu_reg(s, rd);

    a64_test_cc(&c, cond);
    zero = tcg_constant_i64(0);

    if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
        /* CSET & CSETM. */
        if (else_inv) {
            tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
                                   tcg_rd, c.value, zero);
        } else {
            tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
                                tcg_rd, c.value, zero);
        }
    } else {
        TCGv_i64 t_true = cpu_reg(s, rn);
        TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
        if (else_inv && else_inc) {
            tcg_gen_neg_i64(t_false, t_false);
        } else if (else_inv) {
            tcg_gen_not_i64(t_false, t_false);
        } else if (else_inc) {
            tcg_gen_addi_i64(t_false, t_false, 1);
        }
        tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
    }

    if (!sf) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

static void handle_clz(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_cls(DisasContext *s, unsigned int sf,
                       unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

static void handle_rbit(DisasContext *s, unsigned int sf,
                        unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd, tcg_rn;
    tcg_rd = cpu_reg(s, rd);
    tcg_rn = cpu_reg(s, rn);

    if (sf) {
        gen_helper_rbit64(tcg_rd, tcg_rn);
    } else {
        TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
        gen_helper_rbit(tcg_tmp32, tcg_tmp32);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
    }
}

/* REV with sf==1, opcode==3 ("REV64") */
static void handle_rev64(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    if (!sf) {
        unallocated_encoding(s);
        return;
    }
    tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
}

/* REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
static void handle_rev32(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = cpu_reg(s, rn);

    if (sf) {
        tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
        tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
    } else {
        tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
    }
}
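
/*
 * REV16 below swaps the bytes within each 16-bit halfword:
 *   rd = ((rn & mask) << 8) | ((rn >> 8) & mask)
 * e.g. 0x1122334455667788 becomes 0x2211443366558877 in the 64-bit form.
 */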

/* REV16 (opcode==1) */
static void handle_rev16(DisasContext *s, unsigned int sf,
                         unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
    TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
    tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
}

/* Data-processing (1 source)
 *   31  30  29  28             21 20     16 15    10 9    5 4    0
 * +----+---+---+-----------------+---------+--------+------+------+
 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+---------+--------+------+------+
 */
static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, opcode, opcode2, rn, rd;
    TCGv_i64 tcg_rd;

    if (extract32(insn, 29, 1)) {
        unallocated_encoding(s);
        return;
    }

    sf = extract32(insn, 31, 1);
    opcode = extract32(insn, 10, 6);
    opcode2 = extract32(insn, 16, 5);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

#define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))

    switch (MAP(sf, opcode2, opcode)) {
    case MAP(0, 0x00, 0x00): /* RBIT */
    case MAP(1, 0x00, 0x00):
        handle_rbit(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x01): /* REV16 */
    case MAP(1, 0x00, 0x01):
        handle_rev16(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x02): /* REV/REV32 */
    case MAP(1, 0x00, 0x02):
        handle_rev32(s, sf, rn, rd);
        break;
    case MAP(1, 0x00, 0x03): /* REV64 */
        handle_rev64(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x04): /* CLZ */
    case MAP(1, 0x00, 0x04):
        handle_clz(s, sf, rn, rd);
        break;
    case MAP(0, 0x00, 0x05): /* CLS */
    case MAP(1, 0x00, 0x05):
        handle_cls(s, sf, rn, rd);
        break;
    case MAP(1, 0x01, 0x00): /* PACIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x01): /* PACIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x02): /* PACDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x03): /* PACDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x04): /* AUTIA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x05): /* AUTIB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x06): /* AUTDA */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x07): /* AUTDB */
        if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, cpu_reg_sp(s, rn));
        } else if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        break;
    case MAP(1, 0x01, 0x08): /* PACIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x09): /* PACIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0a): /* PACDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0b): /* PACDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_pacdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0c): /* AUTIZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autia(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0d): /* AUTIZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autib(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0e): /* AUTDZA */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autda(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x0f): /* AUTDZB */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_autdb(tcg_rd, tcg_env, tcg_rd, tcg_constant_i64(0));
        }
        break;
    case MAP(1, 0x01, 0x10): /* XPACI */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpaci(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    case MAP(1, 0x01, 0x11): /* XPACD */
        if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
            goto do_unallocated;
        } else if (s->pauth_active) {
            tcg_rd = cpu_reg(s, rd);
            gen_helper_xpacd(tcg_rd, tcg_env, tcg_rd);
        }
        break;
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }

#undef MAP
}

static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
                       unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_n, tcg_m, tcg_rd;
    tcg_rd = cpu_reg(s, rd);

    if (!sf && is_signed) {
        tcg_n = tcg_temp_new_i64();
        tcg_m = tcg_temp_new_i64();
        tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
        tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
    } else {
        tcg_n = read_cpu_reg(s, rn, sf);
        tcg_m = read_cpu_reg(s, rm, sf);
    }

    if (is_signed) {
        gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
    } else {
        gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
    }

    if (!sf) { /* zero extend final result */
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* LSLV, LSRV, ASRV, RORV */
static void handle_shift_reg(DisasContext *s,
                             enum a64_shift_type shift_type, unsigned int sf,
                             unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_shift = tcg_temp_new_i64();
    TCGv_i64 tcg_rd = cpu_reg(s, rd);
    TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);

    tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
    shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
}

/* CRC32[BHWX], CRC32C[BHWX] */
static void handle_crc32(DisasContext *s,
                         unsigned int sf, unsigned int sz, bool crc32c,
                         unsigned int rm, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_acc, tcg_val;
    TCGv_i32 tcg_bytes;

    if (!dc_isar_feature(aa64_crc32, s)
        || (sf == 1 && sz != 3)
        || (sf == 0 && sz == 3)) {
        unallocated_encoding(s);
        return;
    }

    if (sz == 3) {
        tcg_val = cpu_reg(s, rm);
    } else {
        uint64_t mask;
        switch (sz) {
        case 0:
            mask = 0xFF;
            break;
        case 1:
            mask = 0xFFFF;
            break;
        case 2:
            mask = 0xFFFFFFFF;
            break;
        default:
            g_assert_not_reached();
        }
        tcg_val = tcg_temp_new_i64();
        tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
    }

    tcg_acc = cpu_reg(s, rn);
    tcg_bytes = tcg_constant_i32(1 << sz);

    if (crc32c) {
        gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    } else {
        gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
    }
}
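
/*
 * For example, CRC32X Wd, Wn, Xm feeds all eight bytes of Xm into the
 * 32-bit accumulator in Wn (sz == 3, so tcg_bytes == 8), while CRC32B
 * consumes only the low byte of Wm.
 */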

/* Data-processing (2 source)
 *   31   30  29 28             21 20  16 15    10 9    5 4    0
 * +----+---+---+-----------------+------+--------+------+------+
 * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
 * +----+---+---+-----------------+------+--------+------+------+
 */
static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
{
    unsigned int sf, rm, opcode, rn, rd, setflag;
    sf = extract32(insn, 31, 1);
    setflag = extract32(insn, 29, 1);
    rm = extract32(insn, 16, 5);
    opcode = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (setflag && opcode != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SUBP(S) */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 tcg_n, tcg_m, tcg_d;

            tcg_n = read_cpu_reg_sp(s, rn, true);
            tcg_m = read_cpu_reg_sp(s, rm, true);
            tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
            tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
            tcg_d = cpu_reg(s, rd);

            if (setflag) {
                gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
            } else {
                tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
            }
        }
        break;
    case 2: /* UDIV */
        handle_div(s, false, sf, rm, rn, rd);
        break;
    case 3: /* SDIV */
        handle_div(s, true, sf, rm, rn, rd);
        break;
    case 4: /* IRG */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        }
        if (s->ata[0]) {
            gen_helper_irg(cpu_reg_sp(s, rd), tcg_env,
                           cpu_reg_sp(s, rn), cpu_reg(s, rm));
        } else {
            gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
                                             cpu_reg_sp(s, rn));
        }
        break;
    case 5: /* GMI */
        if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
            goto do_unallocated;
        } else {
            TCGv_i64 t = tcg_temp_new_i64();

            tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
            tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
            tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
        }
        break;
    case 8: /* LSLV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
        break;
    case 9: /* LSRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
        break;
    case 10: /* ASRV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
        break;
    case 11: /* RORV */
        handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
        break;
    case 12: /* PACGA */
        if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        gen_helper_pacga(cpu_reg(s, rd), tcg_env,
                         cpu_reg(s, rn), cpu_reg_sp(s, rm));
        break;
    case 16:
    case 17:
    case 18:
    case 19:
    case 20:
    case 21:
    case 22:
    case 23: /* CRC32 */
    {
        int sz = extract32(opcode, 0, 2);
        bool crc32c = extract32(opcode, 2, 1);
        handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
        break;
    }
    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Data processing - register
 *  31  30 29  28      25    21  20  16      10         0
 * +--+---+--+---+-------+-----+-------+-------+---------+
 * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
 * +--+---+--+---+-------+-----+-------+-------+---------+
 */
static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 30, 1);
    int op1 = extract32(insn, 28, 1);
    int op2 = extract32(insn, 21, 4);
    int op3 = extract32(insn, 10, 6);

    if (!op1) {
        if (op2 & 8) {
            if (op2 & 1) {
                /* Add/sub (extended register) */
                disas_add_sub_ext_reg(s, insn);
            } else {
                /* Add/sub (shifted register) */
                disas_add_sub_reg(s, insn);
            }
        } else {
            /* Logical (shifted register) */
            disas_logic_reg(s, insn);
        }
        return;
    }

    switch (op2) {
    case 0x0:
        switch (op3) {
        case 0x00: /* Add/subtract (with carry) */
            disas_adc_sbc(s, insn);
            break;

        case 0x01: /* Rotate right into flags */
        case 0x21:
            disas_rotate_right_into_flags(s, insn);
            break;

        case 0x02: /* Evaluate into flags */
        case 0x12:
        case 0x22:
        case 0x32:
            disas_evaluate_into_flags(s, insn);
            break;

        default:
            goto do_unallocated;
        }
        break;

    case 0x2: /* Conditional compare */
        disas_cc(s, insn); /* both imm and reg forms */
        break;

    case 0x4: /* Conditional select */
        disas_cond_select(s, insn);
        break;

    case 0x6: /* Data-processing */
        if (op0) {    /* (1 source) */
            disas_data_proc_1src(s, insn);
        } else {      /* (2 source) */
            disas_data_proc_2src(s, insn);
        }
        break;
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}
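
/*
 * handle_fp_compare sets NZCV from a floating-point comparison;
 * cmp_with_zero implements the FCMP{E} Rn, #0.0 forms, and
 * signal_all_nans selects FCMPE, which raises Invalid Operation
 * for quiet NaN operands as well as signaling ones.
 */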

static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    gen_set_nzcv(tcg_flags);
}

/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}
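
/*
 * FCCMP/FCCMPE below follow the same pattern as the integer CCMP:
 * do the comparison when cond holds, otherwise load the literal #nzcv.
 */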

/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}

/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        gen_vfp_absh(tcg_res, tcg_op);
        break;
    case 0x2: /* FNEG */
        gen_vfp_negh(tcg_res, tcg_op);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode;

        fpst = fpstatus_ptr(FPST_FPCR_F16);
        tcg_rmode = gen_set_rmode(opcode & 7, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);
}
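
/*
 * For the FRINT* group, (opcode & 7) is the FPRounding value for
 * gen_set_rmode: FRINTN ties-to-even, FRINTP towards +Inf, FRINTM
 * towards -Inf, FRINTZ towards zero, FRINTA ties-away-from-zero.
 * FRINTX rounds per FPCR and signals Inexact; FRINTI rounds per
 * FPCR without the Inexact signalling.
 */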

/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_sreg(s, rd, tcg_res);
}

/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, tcg_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = opcode & 7;
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = FPROUNDING_ZERO;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_restore_rmode(tcg_rmode, fpst);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }

 done:
    write_fp_dreg(s, rd, tcg_res);
}
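
/*
 * FCVT between precisions: ntype/dtype use the same encoding as the
 * "type" field (0 = single, 1 = double, 3 = half). Conversions to
 * and from half precision honour FPCR.AHP via get_ahp_flag(), which
 * selects between IEEE and the ARM alternative half-precision format.
 */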
0) { 7748 /* Double to single */ 7749 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, tcg_env); 7750 } else { 7751 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 7752 TCGv_i32 ahp = get_ahp_flag(); 7753 /* Double to half */ 7754 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp); 7755 /* write_fp_sreg is OK here because top half of tcg_rd is zero */ 7756 } 7757 write_fp_sreg(s, rd, tcg_rd); 7758 break; 7759 } 7760 case 0x3: 7761 { 7762 TCGv_i32 tcg_rn = read_fp_sreg(s, rn); 7763 TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR); 7764 TCGv_i32 tcg_ahp = get_ahp_flag(); 7765 tcg_gen_ext16u_i32(tcg_rn, tcg_rn); 7766 if (dtype == 0) { 7767 /* Half to single */ 7768 TCGv_i32 tcg_rd = tcg_temp_new_i32(); 7769 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); 7770 write_fp_sreg(s, rd, tcg_rd); 7771 } else { 7772 /* Half to double */ 7773 TCGv_i64 tcg_rd = tcg_temp_new_i64(); 7774 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp); 7775 write_fp_dreg(s, rd, tcg_rd); 7776 } 7777 break; 7778 } 7779 default: 7780 g_assert_not_reached(); 7781 } 7782 } 7783 7784 /* Floating point data-processing (1 source) 7785 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0 7786 * +---+---+---+-----------+------+---+--------+-----------+------+------+ 7787 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd | 7788 * +---+---+---+-----------+------+---+--------+-----------+------+------+ 7789 */ 7790 static void disas_fp_1src(DisasContext *s, uint32_t insn) 7791 { 7792 int mos = extract32(insn, 29, 3); 7793 int type = extract32(insn, 22, 2); 7794 int opcode = extract32(insn, 15, 6); 7795 int rn = extract32(insn, 5, 5); 7796 int rd = extract32(insn, 0, 5); 7797 7798 if (mos) { 7799 goto do_unallocated; 7800 } 7801 7802 switch (opcode) { 7803 case 0x4: case 0x5: case 0x7: 7804 { 7805 /* FCVT between half, single and double precision */ 7806 int dtype = extract32(opcode, 0, 2); 7807 if (type == 2 || dtype == type) { 7808 goto do_unallocated; 7809 } 7810 if (!fp_access_check(s)) { 7811 return; 7812 } 7813 7814 handle_fp_fcvt(s, opcode, rd, rn, dtype, type); 7815 break; 7816 } 7817 7818 case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */ 7819 if (type > 1 || !dc_isar_feature(aa64_frint, s)) { 7820 goto do_unallocated; 7821 } 7822 /* fall through */ 7823 case 0x0 ... 0x3: 7824 case 0x8 ... 0xc: 7825 case 0xe ... 
0xf: 7826 /* 32-to-32 and 64-to-64 ops */ 7827 switch (type) { 7828 case 0: 7829 if (!fp_access_check(s)) { 7830 return; 7831 } 7832 handle_fp_1src_single(s, opcode, rd, rn); 7833 break; 7834 case 1: 7835 if (!fp_access_check(s)) { 7836 return; 7837 } 7838 handle_fp_1src_double(s, opcode, rd, rn); 7839 break; 7840 case 3: 7841 if (!dc_isar_feature(aa64_fp16, s)) { 7842 goto do_unallocated; 7843 } 7844 7845 if (!fp_access_check(s)) { 7846 return; 7847 } 7848 handle_fp_1src_half(s, opcode, rd, rn); 7849 break; 7850 default: 7851 goto do_unallocated; 7852 } 7853 break; 7854 7855 case 0x6: 7856 switch (type) { 7857 case 1: /* BFCVT */ 7858 if (!dc_isar_feature(aa64_bf16, s)) { 7859 goto do_unallocated; 7860 } 7861 if (!fp_access_check(s)) { 7862 return; 7863 } 7864 handle_fp_1src_single(s, opcode, rd, rn); 7865 break; 7866 default: 7867 goto do_unallocated; 7868 } 7869 break; 7870 7871 default: 7872 do_unallocated: 7873 unallocated_encoding(s); 7874 break; 7875 } 7876 } 7877 7878 /* Floating point immediate 7879 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0 7880 * +---+---+---+-----------+------+---+------------+-------+------+------+ 7881 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd | 7882 * +---+---+---+-----------+------+---+------------+-------+------+------+ 7883 */ 7884 static void disas_fp_imm(DisasContext *s, uint32_t insn) 7885 { 7886 int rd = extract32(insn, 0, 5); 7887 int imm5 = extract32(insn, 5, 5); 7888 int imm8 = extract32(insn, 13, 8); 7889 int type = extract32(insn, 22, 2); 7890 int mos = extract32(insn, 29, 3); 7891 uint64_t imm; 7892 MemOp sz; 7893 7894 if (mos || imm5) { 7895 unallocated_encoding(s); 7896 return; 7897 } 7898 7899 switch (type) { 7900 case 0: 7901 sz = MO_32; 7902 break; 7903 case 1: 7904 sz = MO_64; 7905 break; 7906 case 3: 7907 sz = MO_16; 7908 if (dc_isar_feature(aa64_fp16, s)) { 7909 break; 7910 } 7911 /* fallthru */ 7912 default: 7913 unallocated_encoding(s); 7914 return; 7915 } 7916 7917 if (!fp_access_check(s)) { 7918 return; 7919 } 7920 7921 imm = vfp_expand_imm(sz, imm8); 7922 write_fp_dreg(s, rd, tcg_constant_i64(imm)); 7923 } 7924 7925 /* Handle floating point <=> fixed point conversions. Note that we can 7926 * also deal with fp <=> integer conversions as a special case (scale == 64) 7927 * OPTME: consider handling that special case specially or at least skipping 7928 * the call to scalbn in the helpers for zero shifts. 7929 */ 7930 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode, 7931 bool itof, int rmode, int scale, int sf, int type) 7932 { 7933 bool is_signed = !(opcode & 1); 7934 TCGv_ptr tcg_fpstatus; 7935 TCGv_i32 tcg_shift, tcg_single; 7936 TCGv_i64 tcg_double; 7937 7938 tcg_fpstatus = fpstatus_ptr(type == 3 ? 
FPST_FPCR_F16 : FPST_FPCR); 7939 7940 tcg_shift = tcg_constant_i32(64 - scale); 7941 7942 if (itof) { 7943 TCGv_i64 tcg_int = cpu_reg(s, rn); 7944 if (!sf) { 7945 TCGv_i64 tcg_extend = tcg_temp_new_i64(); 7946 7947 if (is_signed) { 7948 tcg_gen_ext32s_i64(tcg_extend, tcg_int); 7949 } else { 7950 tcg_gen_ext32u_i64(tcg_extend, tcg_int); 7951 } 7952 7953 tcg_int = tcg_extend; 7954 } 7955 7956 switch (type) { 7957 case 1: /* float64 */ 7958 tcg_double = tcg_temp_new_i64(); 7959 if (is_signed) { 7960 gen_helper_vfp_sqtod(tcg_double, tcg_int, 7961 tcg_shift, tcg_fpstatus); 7962 } else { 7963 gen_helper_vfp_uqtod(tcg_double, tcg_int, 7964 tcg_shift, tcg_fpstatus); 7965 } 7966 write_fp_dreg(s, rd, tcg_double); 7967 break; 7968 7969 case 0: /* float32 */ 7970 tcg_single = tcg_temp_new_i32(); 7971 if (is_signed) { 7972 gen_helper_vfp_sqtos(tcg_single, tcg_int, 7973 tcg_shift, tcg_fpstatus); 7974 } else { 7975 gen_helper_vfp_uqtos(tcg_single, tcg_int, 7976 tcg_shift, tcg_fpstatus); 7977 } 7978 write_fp_sreg(s, rd, tcg_single); 7979 break; 7980 7981 case 3: /* float16 */ 7982 tcg_single = tcg_temp_new_i32(); 7983 if (is_signed) { 7984 gen_helper_vfp_sqtoh(tcg_single, tcg_int, 7985 tcg_shift, tcg_fpstatus); 7986 } else { 7987 gen_helper_vfp_uqtoh(tcg_single, tcg_int, 7988 tcg_shift, tcg_fpstatus); 7989 } 7990 write_fp_sreg(s, rd, tcg_single); 7991 break; 7992 7993 default: 7994 g_assert_not_reached(); 7995 } 7996 } else { 7997 TCGv_i64 tcg_int = cpu_reg(s, rd); 7998 TCGv_i32 tcg_rmode; 7999 8000 if (extract32(opcode, 2, 1)) { 8001 /* There are too many rounding modes to all fit into rmode, 8002 * so FCVTA[US] is a special case. 8003 */ 8004 rmode = FPROUNDING_TIEAWAY; 8005 } 8006 8007 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); 8008 8009 switch (type) { 8010 case 1: /* float64 */ 8011 tcg_double = read_fp_dreg(s, rn); 8012 if (is_signed) { 8013 if (!sf) { 8014 gen_helper_vfp_tosld(tcg_int, tcg_double, 8015 tcg_shift, tcg_fpstatus); 8016 } else { 8017 gen_helper_vfp_tosqd(tcg_int, tcg_double, 8018 tcg_shift, tcg_fpstatus); 8019 } 8020 } else { 8021 if (!sf) { 8022 gen_helper_vfp_tould(tcg_int, tcg_double, 8023 tcg_shift, tcg_fpstatus); 8024 } else { 8025 gen_helper_vfp_touqd(tcg_int, tcg_double, 8026 tcg_shift, tcg_fpstatus); 8027 } 8028 } 8029 if (!sf) { 8030 tcg_gen_ext32u_i64(tcg_int, tcg_int); 8031 } 8032 break; 8033 8034 case 0: /* float32 */ 8035 tcg_single = read_fp_sreg(s, rn); 8036 if (sf) { 8037 if (is_signed) { 8038 gen_helper_vfp_tosqs(tcg_int, tcg_single, 8039 tcg_shift, tcg_fpstatus); 8040 } else { 8041 gen_helper_vfp_touqs(tcg_int, tcg_single, 8042 tcg_shift, tcg_fpstatus); 8043 } 8044 } else { 8045 TCGv_i32 tcg_dest = tcg_temp_new_i32(); 8046 if (is_signed) { 8047 gen_helper_vfp_tosls(tcg_dest, tcg_single, 8048 tcg_shift, tcg_fpstatus); 8049 } else { 8050 gen_helper_vfp_touls(tcg_dest, tcg_single, 8051 tcg_shift, tcg_fpstatus); 8052 } 8053 tcg_gen_extu_i32_i64(tcg_int, tcg_dest); 8054 } 8055 break; 8056 8057 case 3: /* float16 */ 8058 tcg_single = read_fp_sreg(s, rn); 8059 if (sf) { 8060 if (is_signed) { 8061 gen_helper_vfp_tosqh(tcg_int, tcg_single, 8062 tcg_shift, tcg_fpstatus); 8063 } else { 8064 gen_helper_vfp_touqh(tcg_int, tcg_single, 8065 tcg_shift, tcg_fpstatus); 8066 } 8067 } else { 8068 TCGv_i32 tcg_dest = tcg_temp_new_i32(); 8069 if (is_signed) { 8070 gen_helper_vfp_toslh(tcg_dest, tcg_single, 8071 tcg_shift, tcg_fpstatus); 8072 } else { 8073 gen_helper_vfp_toulh(tcg_dest, tcg_single, 8074 tcg_shift, tcg_fpstatus); 8075 } 8076 tcg_gen_extu_i32_i64(tcg_int, tcg_dest); 8077 
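/* A 32-bit conversion result is zero-extended into the 64-bit Xd here, matching the ext32u step in the float64 path above. */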
} 8078 break; 8079 8080 default: 8081 g_assert_not_reached(); 8082 } 8083 8084 gen_restore_rmode(tcg_rmode, tcg_fpstatus); 8085 } 8086 } 8087 8088 /* Floating point <-> fixed point conversions 8089 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 8090 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ 8091 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd | 8092 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+ 8093 */ 8094 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) 8095 { 8096 int rd = extract32(insn, 0, 5); 8097 int rn = extract32(insn, 5, 5); 8098 int scale = extract32(insn, 10, 6); 8099 int opcode = extract32(insn, 16, 3); 8100 int rmode = extract32(insn, 19, 2); 8101 int type = extract32(insn, 22, 2); 8102 bool sbit = extract32(insn, 29, 1); 8103 bool sf = extract32(insn, 31, 1); 8104 bool itof; 8105 8106 if (sbit || (!sf && scale < 32)) { 8107 unallocated_encoding(s); 8108 return; 8109 } 8110 8111 switch (type) { 8112 case 0: /* float32 */ 8113 case 1: /* float64 */ 8114 break; 8115 case 3: /* float16 */ 8116 if (dc_isar_feature(aa64_fp16, s)) { 8117 break; 8118 } 8119 /* fallthru */ 8120 default: 8121 unallocated_encoding(s); 8122 return; 8123 } 8124 8125 switch ((rmode << 3) | opcode) { 8126 case 0x2: /* SCVTF */ 8127 case 0x3: /* UCVTF */ 8128 itof = true; 8129 break; 8130 case 0x18: /* FCVTZS */ 8131 case 0x19: /* FCVTZU */ 8132 itof = false; 8133 break; 8134 default: 8135 unallocated_encoding(s); 8136 return; 8137 } 8138 8139 if (!fp_access_check(s)) { 8140 return; 8141 } 8142 8143 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type); 8144 } 8145 8146 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof) 8147 { 8148 /* FMOV: gpr to or from float, double, or top half of quad fp reg, 8149 * without conversion. 8150 */ 8151 8152 if (itof) { 8153 TCGv_i64 tcg_rn = cpu_reg(s, rn); 8154 TCGv_i64 tmp; 8155 8156 switch (type) { 8157 case 0: 8158 /* 32 bit */ 8159 tmp = tcg_temp_new_i64(); 8160 tcg_gen_ext32u_i64(tmp, tcg_rn); 8161 write_fp_dreg(s, rd, tmp); 8162 break; 8163 case 1: 8164 /* 64 bit */ 8165 write_fp_dreg(s, rd, tcg_rn); 8166 break; 8167 case 2: 8168 /* 64 bit to top half. 
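(i.e. FMOV Vd.D[1], Xn.)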
*/ 8169 tcg_gen_st_i64(tcg_rn, tcg_env, fp_reg_hi_offset(s, rd)); 8170 clear_vec_high(s, true, rd); 8171 break; 8172 case 3: 8173 /* 16 bit */ 8174 tmp = tcg_temp_new_i64(); 8175 tcg_gen_ext16u_i64(tmp, tcg_rn); 8176 write_fp_dreg(s, rd, tmp); 8177 break; 8178 default: 8179 g_assert_not_reached(); 8180 } 8181 } else { 8182 TCGv_i64 tcg_rd = cpu_reg(s, rd); 8183 8184 switch (type) { 8185 case 0: 8186 /* 32 bit */ 8187 tcg_gen_ld32u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_32)); 8188 break; 8189 case 1: 8190 /* 64 bit */ 8191 tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_64)); 8192 break; 8193 case 2: 8194 /* 64 bits from top half */ 8195 tcg_gen_ld_i64(tcg_rd, tcg_env, fp_reg_hi_offset(s, rn)); 8196 break; 8197 case 3: 8198 /* 16 bit */ 8199 tcg_gen_ld16u_i64(tcg_rd, tcg_env, fp_reg_offset(s, rn, MO_16)); 8200 break; 8201 default: 8202 g_assert_not_reached(); 8203 } 8204 } 8205 } 8206 8207 static void handle_fjcvtzs(DisasContext *s, int rd, int rn) 8208 { 8209 TCGv_i64 t = read_fp_dreg(s, rn); 8210 TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR); 8211 8212 gen_helper_fjcvtzs(t, t, fpstatus); 8213 8214 tcg_gen_ext32u_i64(cpu_reg(s, rd), t); 8215 tcg_gen_extrh_i64_i32(cpu_ZF, t); 8216 tcg_gen_movi_i32(cpu_CF, 0); 8217 tcg_gen_movi_i32(cpu_NF, 0); 8218 tcg_gen_movi_i32(cpu_VF, 0); 8219 } 8220 8221 /* Floating point <-> integer conversions 8222 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0 8223 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ 8224 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd | 8225 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+ 8226 */ 8227 static void disas_fp_int_conv(DisasContext *s, uint32_t insn) 8228 { 8229 int rd = extract32(insn, 0, 5); 8230 int rn = extract32(insn, 5, 5); 8231 int opcode = extract32(insn, 16, 3); 8232 int rmode = extract32(insn, 19, 2); 8233 int type = extract32(insn, 22, 2); 8234 bool sbit = extract32(insn, 29, 1); 8235 bool sf = extract32(insn, 31, 1); 8236 bool itof = false; 8237 8238 if (sbit) { 8239 goto do_unallocated; 8240 } 8241 8242 switch (opcode) { 8243 case 2: /* SCVTF */ 8244 case 3: /* UCVTF */ 8245 itof = true; 8246 /* fallthru */ 8247 case 4: /* FCVTAS */ 8248 case 5: /* FCVTAU */ 8249 if (rmode != 0) { 8250 goto do_unallocated; 8251 } 8252 /* fallthru */ 8253 case 0: /* FCVT[NPMZ]S */ 8254 case 1: /* FCVT[NPMZ]U */ 8255 switch (type) { 8256 case 0: /* float32 */ 8257 case 1: /* float64 */ 8258 break; 8259 case 3: /* float16 */ 8260 if (!dc_isar_feature(aa64_fp16, s)) { 8261 goto do_unallocated; 8262 } 8263 break; 8264 default: 8265 goto do_unallocated; 8266 } 8267 if (!fp_access_check(s)) { 8268 return; 8269 } 8270 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type); 8271 break; 8272 8273 default: 8274 switch (sf << 7 | type << 5 | rmode << 3 | opcode) { 8275 case 0b01100110: /* FMOV half <-> 32-bit int */ 8276 case 0b01100111: 8277 case 0b11100110: /* FMOV half <-> 64-bit int */ 8278 case 0b11100111: 8279 if (!dc_isar_feature(aa64_fp16, s)) { 8280 goto do_unallocated; 8281 } 8282 /* fallthru */ 8283 case 0b00000110: /* FMOV 32-bit */ 8284 case 0b00000111: 8285 case 0b10100110: /* FMOV 64-bit */ 8286 case 0b10100111: 8287 case 0b11001110: /* FMOV top half of 128-bit */ 8288 case 0b11001111: 8289 if (!fp_access_check(s)) { 8290 return; 8291 } 8292 itof = opcode & 1; 8293 handle_fmov(s, rd, rn, type, itof); 8294 break; 8295 8296 case 0b00111110: /* FJCVTZS */ 8297 if (!dc_isar_feature(aa64_jscvt, s)) { 8298 goto 
do_unallocated; 8299 } else if (fp_access_check(s)) { 8300 handle_fjcvtzs(s, rd, rn); 8301 } 8302 break; 8303 8304 default: 8305 do_unallocated: 8306 unallocated_encoding(s); 8307 return; 8308 } 8309 break; 8310 } 8311 } 8312 8313 /* FP-specific subcases of table C3-6 (SIMD and FP data processing) 8314 * 31 30 29 28 25 24 0 8315 * +---+---+---+---------+-----------------------------+ 8316 * | | 0 | | 1 1 1 1 | | 8317 * +---+---+---+---------+-----------------------------+ 8318 */ 8319 static void disas_data_proc_fp(DisasContext *s, uint32_t insn) 8320 { 8321 if (extract32(insn, 24, 1)) { 8322 unallocated_encoding(s); /* in decodetree */ 8323 } else if (extract32(insn, 21, 1) == 0) { 8324 /* Floating point to fixed point conversions */ 8325 disas_fp_fixed_conv(s, insn); 8326 } else { 8327 switch (extract32(insn, 10, 2)) { 8328 case 1: 8329 /* Floating point conditional compare */ 8330 disas_fp_ccomp(s, insn); 8331 break; 8332 case 2: 8333 /* Floating point data-processing (2 source) */ 8334 unallocated_encoding(s); /* in decodetree */ 8335 break; 8336 case 3: 8337 /* Floating point conditional select */ 8338 unallocated_encoding(s); /* in decodetree */ 8339 break; 8340 case 0: 8341 switch (ctz32(extract32(insn, 12, 4))) { 8342 case 0: /* [15:12] == xxx1 */ 8343 /* Floating point immediate */ 8344 disas_fp_imm(s, insn); 8345 break; 8346 case 1: /* [15:12] == xx10 */ 8347 /* Floating point compare */ 8348 disas_fp_compare(s, insn); 8349 break; 8350 case 2: /* [15:12] == x100 */ 8351 /* Floating point data-processing (1 source) */ 8352 disas_fp_1src(s, insn); 8353 break; 8354 case 3: /* [15:12] == 1000 */ 8355 unallocated_encoding(s); 8356 break; 8357 default: /* [15:12] == 0000 */ 8358 /* Floating point <-> integer conversions */ 8359 disas_fp_int_conv(s, insn); 8360 break; 8361 } 8362 break; 8363 } 8364 } 8365 } 8366 8367 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right, 8368 int pos) 8369 { 8370 /* Extract 64 bits from the middle of two concatenated 64 bit 8371 * vector register slices left:right. The extracted bits start 8372 * at 'pos' bits into the right (least significant) side. 8373 * We return the result in tcg_right, and guarantee not to 8374 * trash tcg_left. 8375 */ 8376 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 8377 assert(pos > 0 && pos < 64); 8378 8379 tcg_gen_shri_i64(tcg_right, tcg_right, pos); 8380 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos); 8381 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp); 8382 } 8383 8384 /* EXT 8385 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0 8386 * +---+---+-------------+-----+---+------+---+------+---+------+------+ 8387 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd | 8388 * +---+---+-------------+-----+---+------+---+------+---+------+------+ 8389 */ 8390 static void disas_simd_ext(DisasContext *s, uint32_t insn) 8391 { 8392 int is_q = extract32(insn, 30, 1); 8393 int op2 = extract32(insn, 22, 2); 8394 int imm4 = extract32(insn, 11, 4); 8395 int rm = extract32(insn, 16, 5); 8396 int rn = extract32(insn, 5, 5); 8397 int rd = extract32(insn, 0, 5); 8398 int pos = imm4 << 3; 8399 TCGv_i64 tcg_resl, tcg_resh; 8400 8401 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) { 8402 unallocated_encoding(s); 8403 return; 8404 } 8405 8406 if (!fp_access_check(s)) { 8407 return; 8408 } 8409 8410 tcg_resh = tcg_temp_new_i64(); 8411 tcg_resl = tcg_temp_new_i64(); 8412 8413 /* Vd gets bits starting at pos bits into Vm:Vn. 
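(Here pos == imm4 * 8, so imm4 selects the starting byte.)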
This is 8414 * either extracting 128 bits from a 128:128 concatenation, or 8415 * extracting 64 bits from a 64:64 concatenation. 8416 */ 8417 if (!is_q) { 8418 read_vec_element(s, tcg_resl, rn, 0, MO_64); 8419 if (pos != 0) { 8420 read_vec_element(s, tcg_resh, rm, 0, MO_64); 8421 do_ext64(s, tcg_resh, tcg_resl, pos); 8422 } 8423 } else { 8424 TCGv_i64 tcg_hh; 8425 typedef struct { 8426 int reg; 8427 int elt; 8428 } EltPosns; 8429 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} }; 8430 EltPosns *elt = eltposns; 8431 8432 if (pos >= 64) { 8433 elt++; 8434 pos -= 64; 8435 } 8436 8437 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64); 8438 elt++; 8439 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64); 8440 elt++; 8441 if (pos != 0) { 8442 do_ext64(s, tcg_resh, tcg_resl, pos); 8443 tcg_hh = tcg_temp_new_i64(); 8444 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64); 8445 do_ext64(s, tcg_hh, tcg_resh, pos); 8446 } 8447 } 8448 8449 write_vec_element(s, tcg_resl, rd, 0, MO_64); 8450 if (is_q) { 8451 write_vec_element(s, tcg_resh, rd, 1, MO_64); 8452 } 8453 clear_vec_high(s, is_q, rd); 8454 } 8455 8456 /* TBL/TBX 8457 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0 8458 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ 8459 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd | 8460 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+ 8461 */ 8462 static void disas_simd_tb(DisasContext *s, uint32_t insn) 8463 { 8464 int op2 = extract32(insn, 22, 2); 8465 int is_q = extract32(insn, 30, 1); 8466 int rm = extract32(insn, 16, 5); 8467 int rn = extract32(insn, 5, 5); 8468 int rd = extract32(insn, 0, 5); 8469 int is_tbx = extract32(insn, 12, 1); 8470 int len = (extract32(insn, 13, 2) + 1) * 16; 8471 8472 if (op2 != 0) { 8473 unallocated_encoding(s); 8474 return; 8475 } 8476 8477 if (!fp_access_check(s)) { 8478 return; 8479 } 8480 8481 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd), 8482 vec_full_reg_offset(s, rm), tcg_env, 8483 is_q ? 16 : 8, vec_full_reg_size(s), 8484 (len << 6) | (is_tbx << 5) | rn, 8485 gen_helper_simd_tblx); 8486 } 8487 8488 /* ZIP/UZP/TRN 8489 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0 8490 * +---+---+-------------+------+---+------+---+------------------+------+ 8491 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd | 8492 * +---+---+-------------+------+---+------+---+------------------+------+ 8493 */ 8494 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) 8495 { 8496 int rd = extract32(insn, 0, 5); 8497 int rn = extract32(insn, 5, 5); 8498 int rm = extract32(insn, 16, 5); 8499 int size = extract32(insn, 22, 2); 8500 /* opc field bits [1:0] indicate ZIP/UZP/TRN; 8501 * bit 2 indicates 1 vs 2 variant of the insn. 8502 */ 8503 int opcode = extract32(insn, 12, 2); 8504 bool part = extract32(insn, 14, 1); 8505 bool is_q = extract32(insn, 30, 1); 8506 int esize = 8 << size; 8507 int i; 8508 int datasize = is_q ? 128 : 64; 8509 int elements = datasize / esize; 8510 TCGv_i64 tcg_res[2], tcg_ele; 8511 8512 if (opcode == 0 || (size == 3 && !is_q)) { 8513 unallocated_encoding(s); 8514 return; 8515 } 8516 8517 if (!fp_access_check(s)) { 8518 return; 8519 } 8520 8521 tcg_res[0] = tcg_temp_new_i64(); 8522 tcg_res[1] = is_q ? 
tcg_temp_new_i64() : NULL; 8523 tcg_ele = tcg_temp_new_i64(); 8524 8525 for (i = 0; i < elements; i++) { 8526 int o, w; 8527 8528 switch (opcode) { 8529 case 1: /* UZP1/2 */ 8530 { 8531 int midpoint = elements / 2; 8532 if (i < midpoint) { 8533 read_vec_element(s, tcg_ele, rn, 2 * i + part, size); 8534 } else { 8535 read_vec_element(s, tcg_ele, rm, 8536 2 * (i - midpoint) + part, size); 8537 } 8538 break; 8539 } 8540 case 2: /* TRN1/2 */ 8541 if (i & 1) { 8542 read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size); 8543 } else { 8544 read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size); 8545 } 8546 break; 8547 case 3: /* ZIP1/2 */ 8548 { 8549 int base = part * elements / 2; 8550 if (i & 1) { 8551 read_vec_element(s, tcg_ele, rm, base + (i >> 1), size); 8552 } else { 8553 read_vec_element(s, tcg_ele, rn, base + (i >> 1), size); 8554 } 8555 break; 8556 } 8557 default: 8558 g_assert_not_reached(); 8559 } 8560 8561 w = (i * esize) / 64; 8562 o = (i * esize) % 64; 8563 if (o == 0) { 8564 tcg_gen_mov_i64(tcg_res[w], tcg_ele); 8565 } else { 8566 tcg_gen_shli_i64(tcg_ele, tcg_ele, o); 8567 tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele); 8568 } 8569 } 8570 8571 for (i = 0; i <= is_q; ++i) { 8572 write_vec_element(s, tcg_res[i], rd, i, MO_64); 8573 } 8574 clear_vec_high(s, is_q, rd); 8575 } 8576 8577 /* 8578 * do_reduction_op helper 8579 * 8580 * This mirrors the Reduce() pseudocode in the ARM ARM. It is 8581 * important for correct NaN propagation that we do these 8582 * operations in exactly the order specified by the pseudocode. 8583 * 8584 * This is a recursive function; TCG temps should be freed by the 8585 * calling function once it is done with the values. 8586 */ 8587 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn, 8588 int esize, int size, int vmap, TCGv_ptr fpst) 8589 { 8590 if (esize == size) { 8591 int element; 8592 MemOp msize = esize == 16 ?
MO_16 : MO_32; 8593 TCGv_i32 tcg_elem; 8594 8595 /* We should have one register left here */ 8596 assert(ctpop8(vmap) == 1); 8597 element = ctz32(vmap); 8598 assert(element < 8); 8599 8600 tcg_elem = tcg_temp_new_i32(); 8601 read_vec_element_i32(s, tcg_elem, rn, element, msize); 8602 return tcg_elem; 8603 } else { 8604 int bits = size / 2; 8605 int shift = ctpop8(vmap) / 2; 8606 int vmap_lo = (vmap >> shift) & vmap; 8607 int vmap_hi = (vmap & ~vmap_lo); 8608 TCGv_i32 tcg_hi, tcg_lo, tcg_res; 8609 8610 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst); 8611 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst); 8612 tcg_res = tcg_temp_new_i32(); 8613 8614 switch (fpopcode) { 8615 case 0x0c: /* fmaxnmv half-precision */ 8616 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst); 8617 break; 8618 case 0x0f: /* fmaxv half-precision */ 8619 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst); 8620 break; 8621 case 0x1c: /* fminnmv half-precision */ 8622 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst); 8623 break; 8624 case 0x1f: /* fminv half-precision */ 8625 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst); 8626 break; 8627 case 0x2c: /* fmaxnmv */ 8628 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst); 8629 break; 8630 case 0x2f: /* fmaxv */ 8631 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst); 8632 break; 8633 case 0x3c: /* fminnmv */ 8634 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst); 8635 break; 8636 case 0x3f: /* fminv */ 8637 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst); 8638 break; 8639 default: 8640 g_assert_not_reached(); 8641 } 8642 return tcg_res; 8643 } 8644 } 8645 8646 /* AdvSIMD across lanes 8647 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 8648 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 8649 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd | 8650 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 8651 */ 8652 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) 8653 { 8654 int rd = extract32(insn, 0, 5); 8655 int rn = extract32(insn, 5, 5); 8656 int size = extract32(insn, 22, 2); 8657 int opcode = extract32(insn, 12, 5); 8658 bool is_q = extract32(insn, 30, 1); 8659 bool is_u = extract32(insn, 29, 1); 8660 bool is_fp = false; 8661 bool is_min = false; 8662 int esize; 8663 int elements; 8664 int i; 8665 TCGv_i64 tcg_res, tcg_elt; 8666 8667 switch (opcode) { 8668 case 0x1b: /* ADDV */ 8669 if (is_u) { 8670 unallocated_encoding(s); 8671 return; 8672 } 8673 /* fall through */ 8674 case 0x3: /* SADDLV, UADDLV */ 8675 case 0xa: /* SMAXV, UMAXV */ 8676 case 0x1a: /* SMINV, UMINV */ 8677 if (size == 3 || (size == 2 && !is_q)) { 8678 unallocated_encoding(s); 8679 return; 8680 } 8681 break; 8682 case 0xc: /* FMAXNMV, FMINNMV */ 8683 case 0xf: /* FMAXV, FMINV */ 8684 /* Bit 1 of size field encodes min vs max and the actual size 8685 * depends on the encoding of the U bit. If not set (and FP16 8686 * enabled) then we do half-precision float instead of single 8687 * precision. 
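* For example, FMAXV with U == 0 is the half-precision form (unallocated unless FP16 is implemented), while U == 1 with Q == 1 and size bit 0 clear is the single-precision form; there is no double-precision variant.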
8688 */ 8689 is_min = extract32(size, 1, 1); 8690 is_fp = true; 8691 if (!is_u && dc_isar_feature(aa64_fp16, s)) { 8692 size = 1; 8693 } else if (!is_u || !is_q || extract32(size, 0, 1)) { 8694 unallocated_encoding(s); 8695 return; 8696 } else { 8697 size = 2; 8698 } 8699 break; 8700 default: 8701 unallocated_encoding(s); 8702 return; 8703 } 8704 8705 if (!fp_access_check(s)) { 8706 return; 8707 } 8708 8709 esize = 8 << size; 8710 elements = (is_q ? 128 : 64) / esize; 8711 8712 tcg_res = tcg_temp_new_i64(); 8713 tcg_elt = tcg_temp_new_i64(); 8714 8715 /* These instructions operate across all lanes of a vector 8716 * to produce a single result. We can guarantee that a 64 8717 * bit intermediate is sufficient: 8718 * + for [US]ADDLV the maximum element size is 32 bits, and 8719 * the result type is 64 bits 8720 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the 8721 * same as the element size, which is 32 bits at most 8722 * For the integer operations we can choose to work at 64 8723 * or 32 bits and truncate at the end; for simplicity 8724 * we use 64 bits always. The floating point 8725 * ops do require 32 bit intermediates, though. 8726 */ 8727 if (!is_fp) { 8728 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN)); 8729 8730 for (i = 1; i < elements; i++) { 8731 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN)); 8732 8733 switch (opcode) { 8734 case 0x03: /* SADDLV / UADDLV */ 8735 case 0x1b: /* ADDV */ 8736 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt); 8737 break; 8738 case 0x0a: /* SMAXV / UMAXV */ 8739 if (is_u) { 8740 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt); 8741 } else { 8742 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt); 8743 } 8744 break; 8745 case 0x1a: /* SMINV / UMINV */ 8746 if (is_u) { 8747 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt); 8748 } else { 8749 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt); 8750 } 8751 break; 8752 default: 8753 g_assert_not_reached(); 8754 } 8755 8756 } 8757 } else { 8758 /* Floating point vector reduction ops which work across 32 8759 * bit (single) or 16 bit (half-precision) intermediates. 8760 * Note that correct NaN propagation requires that we do these 8761 * operations in exactly the order specified by the pseudocode. 8762 */ 8763 TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 8764 int fpopcode = opcode | is_min << 4 | is_u << 5; 8765 int vmap = (1 << elements) - 1; 8766 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize, 8767 (is_q ? 
128 : 64), vmap, fpst); 8768 tcg_gen_extu_i32_i64(tcg_res, tcg_res32); 8769 } 8770 8771 /* Now truncate the result to the width required for the final output */ 8772 if (opcode == 0x03) { 8773 /* SADDLV, UADDLV: result is 2*esize */ 8774 size++; 8775 } 8776 8777 switch (size) { 8778 case 0: 8779 tcg_gen_ext8u_i64(tcg_res, tcg_res); 8780 break; 8781 case 1: 8782 tcg_gen_ext16u_i64(tcg_res, tcg_res); 8783 break; 8784 case 2: 8785 tcg_gen_ext32u_i64(tcg_res, tcg_res); 8786 break; 8787 case 3: 8788 break; 8789 default: 8790 g_assert_not_reached(); 8791 } 8792 8793 write_fp_dreg(s, rd, tcg_res); 8794 } 8795 8796 /* AdvSIMD modified immediate 8797 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0 8798 * +---+---+----+---------------------+-----+-------+----+---+-------+------+ 8799 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd | 8800 * +---+---+----+---------------------+-----+-------+----+---+-------+------+ 8801 * 8802 * There are a number of operations that can be carried out here: 8803 * MOVI - move (shifted) imm into register 8804 * MVNI - move inverted (shifted) imm into register 8805 * ORR - bitwise OR of (shifted) imm with register 8806 * BIC - bitwise clear of (shifted) imm with register 8807 * With ARMv8.2 we also have: 8808 * FMOV half-precision 8809 */ 8810 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) 8811 { 8812 int rd = extract32(insn, 0, 5); 8813 int cmode = extract32(insn, 12, 4); 8814 int o2 = extract32(insn, 11, 1); 8815 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5); 8816 bool is_neg = extract32(insn, 29, 1); 8817 bool is_q = extract32(insn, 30, 1); 8818 uint64_t imm = 0; 8819 8820 if (o2) { 8821 if (cmode != 0xf || is_neg) { 8822 unallocated_encoding(s); 8823 return; 8824 } 8825 /* FMOV (vector, immediate) - half-precision */ 8826 if (!dc_isar_feature(aa64_fp16, s)) { 8827 unallocated_encoding(s); 8828 return; 8829 } 8830 imm = vfp_expand_imm(MO_16, abcdefgh); 8831 /* now duplicate across the lanes */ 8832 imm = dup_const(MO_16, imm); 8833 } else { 8834 if (cmode == 0xf && is_neg && !is_q) { 8835 unallocated_encoding(s); 8836 return; 8837 } 8838 imm = asimd_imm_const(abcdefgh, cmode, is_neg); 8839 } 8840 8841 if (!fp_access_check(s)) { 8842 return; 8843 } 8844 8845 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) { 8846 /* MOVI or MVNI, with MVNI negation handled above. */ 8847 tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8, 8848 vec_full_reg_size(s), imm); 8849 } else { 8850 /* ORR or BIC, with BIC negation to AND handled above. */ 8851 if (is_neg) { 8852 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64); 8853 } else { 8854 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64); 8855 } 8856 } 8857 } 8858 8859 /* 8860 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate) 8861 * 8862 * This handles the common shifting code and is used by both 8863 * the vector and scalar code.
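* * As an illustrative example: when rounding is requested the callers pass tcg_rnd == 1 << (shift - 1), so a rounding right shift by 3 of the value 21 computes (21 + 4) >> 3 = 3, where the truncating shift would give 2.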
8864 */ 8865 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src, 8866 TCGv_i64 tcg_rnd, bool accumulate, 8867 bool is_u, int size, int shift) 8868 { 8869 bool extended_result = false; 8870 bool round = tcg_rnd != NULL; 8871 int ext_lshift = 0; 8872 TCGv_i64 tcg_src_hi; 8873 8874 if (round && size == 3) { 8875 extended_result = true; 8876 ext_lshift = 64 - shift; 8877 tcg_src_hi = tcg_temp_new_i64(); 8878 } else if (shift == 64) { 8879 if (!accumulate && is_u) { 8880 /* result is zero */ 8881 tcg_gen_movi_i64(tcg_res, 0); 8882 return; 8883 } 8884 } 8885 8886 /* Deal with the rounding step */ 8887 if (round) { 8888 if (extended_result) { 8889 TCGv_i64 tcg_zero = tcg_constant_i64(0); 8890 if (!is_u) { 8891 /* take care of sign extending tcg_res */ 8892 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63); 8893 tcg_gen_add2_i64(tcg_src, tcg_src_hi, 8894 tcg_src, tcg_src_hi, 8895 tcg_rnd, tcg_zero); 8896 } else { 8897 tcg_gen_add2_i64(tcg_src, tcg_src_hi, 8898 tcg_src, tcg_zero, 8899 tcg_rnd, tcg_zero); 8900 } 8901 } else { 8902 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd); 8903 } 8904 } 8905 8906 /* Now do the shift right */ 8907 if (round && extended_result) { 8908 /* extended case, >64 bit precision required */ 8909 if (ext_lshift == 0) { 8910 /* special case, only high bits matter */ 8911 tcg_gen_mov_i64(tcg_src, tcg_src_hi); 8912 } else { 8913 tcg_gen_shri_i64(tcg_src, tcg_src, shift); 8914 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift); 8915 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi); 8916 } 8917 } else { 8918 if (is_u) { 8919 if (shift == 64) { 8920 /* essentially shifting in 64 zeros */ 8921 tcg_gen_movi_i64(tcg_src, 0); 8922 } else { 8923 tcg_gen_shri_i64(tcg_src, tcg_src, shift); 8924 } 8925 } else { 8926 if (shift == 64) { 8927 /* effectively extending the sign-bit */ 8928 tcg_gen_sari_i64(tcg_src, tcg_src, 63); 8929 } else { 8930 tcg_gen_sari_i64(tcg_src, tcg_src, shift); 8931 } 8932 } 8933 } 8934 8935 if (accumulate) { 8936 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src); 8937 } else { 8938 tcg_gen_mov_i64(tcg_res, tcg_src); 8939 } 8940 } 8941 8942 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */ 8943 static void handle_scalar_simd_shri(DisasContext *s, 8944 bool is_u, int immh, int immb, 8945 int opcode, int rn, int rd) 8946 { 8947 const int size = 3; 8948 int immhb = immh << 3 | immb; 8949 int shift = 2 * (8 << size) - immhb; 8950 bool accumulate = false; 8951 bool round = false; 8952 bool insert = false; 8953 TCGv_i64 tcg_rn; 8954 TCGv_i64 tcg_rd; 8955 TCGv_i64 tcg_round; 8956 8957 if (!extract32(immh, 3, 1)) { 8958 unallocated_encoding(s); 8959 return; 8960 } 8961 8962 if (!fp_access_check(s)) { 8963 return; 8964 } 8965 8966 switch (opcode) { 8967 case 0x02: /* SSRA / USRA (accumulate) */ 8968 accumulate = true; 8969 break; 8970 case 0x04: /* SRSHR / URSHR (rounding) */ 8971 round = true; 8972 break; 8973 case 0x06: /* SRSRA / URSRA (accum + rounding) */ 8974 accumulate = round = true; 8975 break; 8976 case 0x08: /* SRI */ 8977 insert = true; 8978 break; 8979 } 8980 8981 if (round) { 8982 tcg_round = tcg_constant_i64(1ULL << (shift - 1)); 8983 } else { 8984 tcg_round = NULL; 8985 } 8986 8987 tcg_rn = read_fp_dreg(s, rn); 8988 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64(); 8989 8990 if (insert) { 8991 /* shift count same as element size is valid but does nothing; 8992 * special case to avoid potential shift by 64. 
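* (A shift equal to the element size would ask for a 64-bit right shift and a zero-width deposit, neither of which TCG permits; the architectural result is simply Rd unchanged.)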
8993 */ 8994 int esize = 8 << size; 8995 if (shift != esize) { 8996 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift); 8997 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift); 8998 } 8999 } else { 9000 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, 9001 accumulate, is_u, size, shift); 9002 } 9003 9004 write_fp_dreg(s, rd, tcg_rd); 9005 } 9006 9007 /* SHL/SLI - Scalar shift left */ 9008 static void handle_scalar_simd_shli(DisasContext *s, bool insert, 9009 int immh, int immb, int opcode, 9010 int rn, int rd) 9011 { 9012 int size = 32 - clz32(immh) - 1; 9013 int immhb = immh << 3 | immb; 9014 int shift = immhb - (8 << size); 9015 TCGv_i64 tcg_rn; 9016 TCGv_i64 tcg_rd; 9017 9018 if (!extract32(immh, 3, 1)) { 9019 unallocated_encoding(s); 9020 return; 9021 } 9022 9023 if (!fp_access_check(s)) { 9024 return; 9025 } 9026 9027 tcg_rn = read_fp_dreg(s, rn); 9028 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64(); 9029 9030 if (insert) { 9031 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift); 9032 } else { 9033 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift); 9034 } 9035 9036 write_fp_dreg(s, rd, tcg_rd); 9037 } 9038 9039 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with 9040 * (signed/unsigned) narrowing */ 9041 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q, 9042 bool is_u_shift, bool is_u_narrow, 9043 int immh, int immb, int opcode, 9044 int rn, int rd) 9045 { 9046 int immhb = immh << 3 | immb; 9047 int size = 32 - clz32(immh) - 1; 9048 int esize = 8 << size; 9049 int shift = (2 * esize) - immhb; 9050 int elements = is_scalar ? 1 : (64 / esize); 9051 bool round = extract32(opcode, 0, 1); 9052 MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN); 9053 TCGv_i64 tcg_rn, tcg_rd, tcg_round; 9054 TCGv_i32 tcg_rd_narrowed; 9055 TCGv_i64 tcg_final; 9056 9057 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = { 9058 { gen_helper_neon_narrow_sat_s8, 9059 gen_helper_neon_unarrow_sat8 }, 9060 { gen_helper_neon_narrow_sat_s16, 9061 gen_helper_neon_unarrow_sat16 }, 9062 { gen_helper_neon_narrow_sat_s32, 9063 gen_helper_neon_unarrow_sat32 }, 9064 { NULL, NULL }, 9065 }; 9066 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = { 9067 gen_helper_neon_narrow_sat_u8, 9068 gen_helper_neon_narrow_sat_u16, 9069 gen_helper_neon_narrow_sat_u32, 9070 NULL 9071 }; 9072 NeonGenNarrowEnvFn *narrowfn; 9073 9074 int i; 9075 9076 assert(size < 4); 9077 9078 if (extract32(immh, 3, 1)) { 9079 unallocated_encoding(s); 9080 return; 9081 } 9082 9083 if (!fp_access_check(s)) { 9084 return; 9085 } 9086 9087 if (is_u_shift) { 9088 narrowfn = unsigned_narrow_fns[size]; 9089 } else { 9090 narrowfn = signed_narrow_fns[size][is_u_narrow ? 
1 : 0]; 9091 } 9092 9093 tcg_rn = tcg_temp_new_i64(); 9094 tcg_rd = tcg_temp_new_i64(); 9095 tcg_rd_narrowed = tcg_temp_new_i32(); 9096 tcg_final = tcg_temp_new_i64(); 9097 9098 if (round) { 9099 tcg_round = tcg_constant_i64(1ULL << (shift - 1)); 9100 } else { 9101 tcg_round = NULL; 9102 } 9103 9104 for (i = 0; i < elements; i++) { 9105 read_vec_element(s, tcg_rn, rn, i, ldop); 9106 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, 9107 false, is_u_shift, size+1, shift); 9108 narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd); 9109 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed); 9110 if (i == 0) { 9111 tcg_gen_extract_i64(tcg_final, tcg_rd, 0, esize); 9112 } else { 9113 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); 9114 } 9115 } 9116 9117 if (!is_q) { 9118 write_vec_element(s, tcg_final, rd, 0, MO_64); 9119 } else { 9120 write_vec_element(s, tcg_final, rd, 1, MO_64); 9121 } 9122 clear_vec_high(s, is_q, rd); 9123 } 9124 9125 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */ 9126 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q, 9127 bool src_unsigned, bool dst_unsigned, 9128 int immh, int immb, int rn, int rd) 9129 { 9130 int immhb = immh << 3 | immb; 9131 int size = 32 - clz32(immh) - 1; 9132 int shift = immhb - (8 << size); 9133 int pass; 9134 9135 assert(immh != 0); 9136 assert(!(scalar && is_q)); 9137 9138 if (!scalar) { 9139 if (!is_q && extract32(immh, 3, 1)) { 9140 unallocated_encoding(s); 9141 return; 9142 } 9143 9144 /* Since we use the variable-shift helpers we must 9145 * replicate the shift count into each element of 9146 * the tcg_shift value. 9147 */ 9148 switch (size) { 9149 case 0: 9150 shift |= shift << 8; 9151 /* fall through */ 9152 case 1: 9153 shift |= shift << 16; 9154 break; 9155 case 2: 9156 case 3: 9157 break; 9158 default: 9159 g_assert_not_reached(); 9160 } 9161 } 9162 9163 if (!fp_access_check(s)) { 9164 return; 9165 } 9166 9167 if (size == 3) { 9168 TCGv_i64 tcg_shift = tcg_constant_i64(shift); 9169 static NeonGenTwo64OpEnvFn * const fns[2][2] = { 9170 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 }, 9171 { NULL, gen_helper_neon_qshl_u64 }, 9172 }; 9173 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned]; 9174 int maxpass = is_q ? 2 : 1; 9175 9176 for (pass = 0; pass < maxpass; pass++) { 9177 TCGv_i64 tcg_op = tcg_temp_new_i64(); 9178 9179 read_vec_element(s, tcg_op, rn, pass, MO_64); 9180 genfn(tcg_op, tcg_env, tcg_op, tcg_shift); 9181 write_vec_element(s, tcg_op, rd, pass, MO_64); 9182 } 9183 clear_vec_high(s, is_q, rd); 9184 } else { 9185 TCGv_i32 tcg_shift = tcg_constant_i32(shift); 9186 static NeonGenTwoOpEnvFn * const fns[2][2][3] = { 9187 { 9188 { gen_helper_neon_qshl_s8, 9189 gen_helper_neon_qshl_s16, 9190 gen_helper_neon_qshl_s32 }, 9191 { gen_helper_neon_qshlu_s8, 9192 gen_helper_neon_qshlu_s16, 9193 gen_helper_neon_qshlu_s32 } 9194 }, { 9195 { NULL, NULL, NULL }, 9196 { gen_helper_neon_qshl_u8, 9197 gen_helper_neon_qshl_u16, 9198 gen_helper_neon_qshl_u32 } 9199 } 9200 }; 9201 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size]; 9202 MemOp memop = scalar ? size : MO_32; 9203 int maxpass = scalar ? 1 : is_q ? 
4 : 2; 9204 9205 for (pass = 0; pass < maxpass; pass++) { 9206 TCGv_i32 tcg_op = tcg_temp_new_i32(); 9207 9208 read_vec_element_i32(s, tcg_op, rn, pass, memop); 9209 genfn(tcg_op, tcg_env, tcg_op, tcg_shift); 9210 if (scalar) { 9211 switch (size) { 9212 case 0: 9213 tcg_gen_ext8u_i32(tcg_op, tcg_op); 9214 break; 9215 case 1: 9216 tcg_gen_ext16u_i32(tcg_op, tcg_op); 9217 break; 9218 case 2: 9219 break; 9220 default: 9221 g_assert_not_reached(); 9222 } 9223 write_fp_sreg(s, rd, tcg_op); 9224 } else { 9225 write_vec_element_i32(s, tcg_op, rd, pass, MO_32); 9226 } 9227 } 9228 9229 if (!scalar) { 9230 clear_vec_high(s, is_q, rd); 9231 } 9232 } 9233 } 9234 9235 /* Common vector code for handling integer to FP conversion */ 9236 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn, 9237 int elements, int is_signed, 9238 int fracbits, int size) 9239 { 9240 TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 9241 TCGv_i32 tcg_shift = NULL; 9242 9243 MemOp mop = size | (is_signed ? MO_SIGN : 0); 9244 int pass; 9245 9246 if (fracbits || size == MO_64) { 9247 tcg_shift = tcg_constant_i32(fracbits); 9248 } 9249 9250 if (size == MO_64) { 9251 TCGv_i64 tcg_int64 = tcg_temp_new_i64(); 9252 TCGv_i64 tcg_double = tcg_temp_new_i64(); 9253 9254 for (pass = 0; pass < elements; pass++) { 9255 read_vec_element(s, tcg_int64, rn, pass, mop); 9256 9257 if (is_signed) { 9258 gen_helper_vfp_sqtod(tcg_double, tcg_int64, 9259 tcg_shift, tcg_fpst); 9260 } else { 9261 gen_helper_vfp_uqtod(tcg_double, tcg_int64, 9262 tcg_shift, tcg_fpst); 9263 } 9264 if (elements == 1) { 9265 write_fp_dreg(s, rd, tcg_double); 9266 } else { 9267 write_vec_element(s, tcg_double, rd, pass, MO_64); 9268 } 9269 } 9270 } else { 9271 TCGv_i32 tcg_int32 = tcg_temp_new_i32(); 9272 TCGv_i32 tcg_float = tcg_temp_new_i32(); 9273 9274 for (pass = 0; pass < elements; pass++) { 9275 read_vec_element_i32(s, tcg_int32, rn, pass, mop); 9276 9277 switch (size) { 9278 case MO_32: 9279 if (fracbits) { 9280 if (is_signed) { 9281 gen_helper_vfp_sltos(tcg_float, tcg_int32, 9282 tcg_shift, tcg_fpst); 9283 } else { 9284 gen_helper_vfp_ultos(tcg_float, tcg_int32, 9285 tcg_shift, tcg_fpst); 9286 } 9287 } else { 9288 if (is_signed) { 9289 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst); 9290 } else { 9291 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst); 9292 } 9293 } 9294 break; 9295 case MO_16: 9296 if (fracbits) { 9297 if (is_signed) { 9298 gen_helper_vfp_sltoh(tcg_float, tcg_int32, 9299 tcg_shift, tcg_fpst); 9300 } else { 9301 gen_helper_vfp_ultoh(tcg_float, tcg_int32, 9302 tcg_shift, tcg_fpst); 9303 } 9304 } else { 9305 if (is_signed) { 9306 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst); 9307 } else { 9308 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst); 9309 } 9310 } 9311 break; 9312 default: 9313 g_assert_not_reached(); 9314 } 9315 9316 if (elements == 1) { 9317 write_fp_sreg(s, rd, tcg_float); 9318 } else { 9319 write_vec_element_i32(s, tcg_float, rd, pass, size); 9320 } 9321 } 9322 } 9323 9324 clear_vec_high(s, elements << size == 16, rd); 9325 } 9326 9327 /* UCVTF/SCVTF - Integer to FP conversion */ 9328 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, 9329 bool is_q, bool is_u, 9330 int immh, int immb, int opcode, 9331 int rn, int rd) 9332 { 9333 int size, elements, fracbits; 9334 int immhb = immh << 3 | immb; 9335 9336 if (immh & 8) { 9337 size = MO_64; 9338 if (!is_scalar && !is_q) { 9339 unallocated_encoding(s); 9340 return; 9341 } 9342 } else if (immh & 4) { 9343 size 
= MO_32; 9344 } else if (immh & 2) { 9345 size = MO_16; 9346 if (!dc_isar_feature(aa64_fp16, s)) { 9347 unallocated_encoding(s); 9348 return; 9349 } 9350 } else { 9351 /* immh == 0 would be a failure of the decode logic */ 9352 g_assert(immh == 1); 9353 unallocated_encoding(s); 9354 return; 9355 } 9356 9357 if (is_scalar) { 9358 elements = 1; 9359 } else { 9360 elements = (8 << is_q) >> size; 9361 } 9362 fracbits = (16 << size) - immhb; 9363 9364 if (!fp_access_check(s)) { 9365 return; 9366 } 9367 9368 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size); 9369 } 9370 9371 /* FCVTZS, FCVTZU - FP to fixed-point conversion */ 9372 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, 9373 bool is_q, bool is_u, 9374 int immh, int immb, int rn, int rd) 9375 { 9376 int immhb = immh << 3 | immb; 9377 int pass, size, fracbits; 9378 TCGv_ptr tcg_fpstatus; 9379 TCGv_i32 tcg_rmode, tcg_shift; 9380 9381 if (immh & 0x8) { 9382 size = MO_64; 9383 if (!is_scalar && !is_q) { 9384 unallocated_encoding(s); 9385 return; 9386 } 9387 } else if (immh & 0x4) { 9388 size = MO_32; 9389 } else if (immh & 0x2) { 9390 size = MO_16; 9391 if (!dc_isar_feature(aa64_fp16, s)) { 9392 unallocated_encoding(s); 9393 return; 9394 } 9395 } else { 9396 /* Should have split out AdvSIMD modified immediate earlier. */ 9397 assert(immh == 1); 9398 unallocated_encoding(s); 9399 return; 9400 } 9401 9402 if (!fp_access_check(s)) { 9403 return; 9404 } 9405 9406 assert(!(is_scalar && is_q)); 9407 9408 tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 9409 tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus); 9410 fracbits = (16 << size) - immhb; 9411 tcg_shift = tcg_constant_i32(fracbits); 9412 9413 if (size == MO_64) { 9414 int maxpass = is_scalar ? 1 : 2; 9415 9416 for (pass = 0; pass < maxpass; pass++) { 9417 TCGv_i64 tcg_op = tcg_temp_new_i64(); 9418 9419 read_vec_element(s, tcg_op, rn, pass, MO_64); 9420 if (is_u) { 9421 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); 9422 } else { 9423 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); 9424 } 9425 write_vec_element(s, tcg_op, rd, pass, MO_64); 9426 } 9427 clear_vec_high(s, is_q, rd); 9428 } else { 9429 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr); 9430 int maxpass = is_scalar ?
1 : ((8 << is_q) >> size); 9431 9432 switch (size) { 9433 case MO_16: 9434 if (is_u) { 9435 fn = gen_helper_vfp_touhh; 9436 } else { 9437 fn = gen_helper_vfp_toshh; 9438 } 9439 break; 9440 case MO_32: 9441 if (is_u) { 9442 fn = gen_helper_vfp_touls; 9443 } else { 9444 fn = gen_helper_vfp_tosls; 9445 } 9446 break; 9447 default: 9448 g_assert_not_reached(); 9449 } 9450 9451 for (pass = 0; pass < maxpass; pass++) { 9452 TCGv_i32 tcg_op = tcg_temp_new_i32(); 9453 9454 read_vec_element_i32(s, tcg_op, rn, pass, size); 9455 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus); 9456 if (is_scalar) { 9457 if (size == MO_16 && !is_u) { 9458 tcg_gen_ext16u_i32(tcg_op, tcg_op); 9459 } 9460 write_fp_sreg(s, rd, tcg_op); 9461 } else { 9462 write_vec_element_i32(s, tcg_op, rd, pass, size); 9463 } 9464 } 9465 if (!is_scalar) { 9466 clear_vec_high(s, is_q, rd); 9467 } 9468 } 9469 9470 gen_restore_rmode(tcg_rmode, tcg_fpstatus); 9471 } 9472 9473 /* AdvSIMD scalar shift by immediate 9474 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 9475 * +-----+---+-------------+------+------+--------+---+------+------+ 9476 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | 9477 * +-----+---+-------------+------+------+--------+---+------+------+ 9478 * 9479 * This is the scalar version, so it works on fixed-size registers 9480 */ 9481 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn) 9482 { 9483 int rd = extract32(insn, 0, 5); 9484 int rn = extract32(insn, 5, 5); 9485 int opcode = extract32(insn, 11, 5); 9486 int immb = extract32(insn, 16, 3); 9487 int immh = extract32(insn, 19, 4); 9488 bool is_u = extract32(insn, 29, 1); 9489 9490 if (immh == 0) { 9491 unallocated_encoding(s); 9492 return; 9493 } 9494 9495 switch (opcode) { 9496 case 0x08: /* SRI */ 9497 if (!is_u) { 9498 unallocated_encoding(s); 9499 return; 9500 } 9501 /* fall through */ 9502 case 0x00: /* SSHR / USHR */ 9503 case 0x02: /* SSRA / USRA */ 9504 case 0x04: /* SRSHR / URSHR */ 9505 case 0x06: /* SRSRA / URSRA */ 9506 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd); 9507 break; 9508 case 0x0a: /* SHL / SLI */ 9509 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd); 9510 break; 9511 case 0x1c: /* SCVTF, UCVTF */ 9512 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb, 9513 opcode, rn, rd); 9514 break; 9515 case 0x10: /* SQSHRUN, SQSHRUN2 */ 9516 case 0x11: /* SQRSHRUN, SQRSHRUN2 */ 9517 if (!is_u) { 9518 unallocated_encoding(s); 9519 return; 9520 } 9521 handle_vec_simd_sqshrn(s, true, false, false, true, 9522 immh, immb, opcode, rn, rd); 9523 break; 9524 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */ 9525 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */ 9526 handle_vec_simd_sqshrn(s, true, false, is_u, is_u, 9527 immh, immb, opcode, rn, rd); 9528 break; 9529 case 0xc: /* SQSHLU */ 9530 if (!is_u) { 9531 unallocated_encoding(s); 9532 return; 9533 } 9534 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd); 9535 break; 9536 case 0xe: /* SQSHL, UQSHL */ 9537 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd); 9538 break; 9539 case 0x1f: /* FCVTZS, FCVTZU */ 9540 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd); 9541 break; 9542 default: 9543 unallocated_encoding(s); 9544 break; 9545 } 9546 } 9547 9548 /* AdvSIMD scalar three different 9549 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 9550 * +-----+---+-----------+------+---+------+--------+-----+------+------+ 9551 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | 9552 *
+-----+---+-----------+------+---+------+--------+-----+------+------+ 9553 */ 9554 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) 9555 { 9556 bool is_u = extract32(insn, 29, 1); 9557 int size = extract32(insn, 22, 2); 9558 int opcode = extract32(insn, 12, 4); 9559 int rm = extract32(insn, 16, 5); 9560 int rn = extract32(insn, 5, 5); 9561 int rd = extract32(insn, 0, 5); 9562 9563 if (is_u) { 9564 unallocated_encoding(s); 9565 return; 9566 } 9567 9568 switch (opcode) { 9569 case 0x9: /* SQDMLAL, SQDMLAL2 */ 9570 case 0xb: /* SQDMLSL, SQDMLSL2 */ 9571 case 0xd: /* SQDMULL, SQDMULL2 */ 9572 if (size == 0 || size == 3) { 9573 unallocated_encoding(s); 9574 return; 9575 } 9576 break; 9577 default: 9578 unallocated_encoding(s); 9579 return; 9580 } 9581 9582 if (!fp_access_check(s)) { 9583 return; 9584 } 9585 9586 if (size == 2) { 9587 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 9588 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 9589 TCGv_i64 tcg_res = tcg_temp_new_i64(); 9590 9591 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN); 9592 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN); 9593 9594 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2); 9595 gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, tcg_res, tcg_res); 9596 9597 switch (opcode) { 9598 case 0xd: /* SQDMULL, SQDMULL2 */ 9599 break; 9600 case 0xb: /* SQDMLSL, SQDMLSL2 */ 9601 tcg_gen_neg_i64(tcg_res, tcg_res); 9602 /* fall through */ 9603 case 0x9: /* SQDMLAL, SQDMLAL2 */ 9604 read_vec_element(s, tcg_op1, rd, 0, MO_64); 9605 gen_helper_neon_addl_saturate_s64(tcg_res, tcg_env, 9606 tcg_res, tcg_op1); 9607 break; 9608 default: 9609 g_assert_not_reached(); 9610 } 9611 9612 write_fp_dreg(s, rd, tcg_res); 9613 } else { 9614 TCGv_i32 tcg_op1 = read_fp_hreg(s, rn); 9615 TCGv_i32 tcg_op2 = read_fp_hreg(s, rm); 9616 TCGv_i64 tcg_res = tcg_temp_new_i64(); 9617 9618 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2); 9619 gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, tcg_res, tcg_res); 9620 9621 switch (opcode) { 9622 case 0xd: /* SQDMULL, SQDMULL2 */ 9623 break; 9624 case 0xb: /* SQDMLSL, SQDMLSL2 */ 9625 gen_helper_neon_negl_u32(tcg_res, tcg_res); 9626 /* fall through */ 9627 case 0x9: /* SQDMLAL, SQDMLAL2 */ 9628 { 9629 TCGv_i64 tcg_op3 = tcg_temp_new_i64(); 9630 read_vec_element(s, tcg_op3, rd, 0, MO_32); 9631 gen_helper_neon_addl_saturate_s32(tcg_res, tcg_env, 9632 tcg_res, tcg_op3); 9633 break; 9634 } 9635 default: 9636 g_assert_not_reached(); 9637 } 9638 9639 tcg_gen_ext32u_i64(tcg_res, tcg_res); 9640 write_fp_dreg(s, rd, tcg_res); 9641 } 9642 } 9643 9644 static void handle_2misc_64(DisasContext *s, int opcode, bool u, 9645 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, 9646 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus) 9647 { 9648 /* Handle 64->64 opcodes which are shared between the scalar and 9649 * vector 2-reg-misc groups. We cover every integer opcode where size == 3 9650 * is valid in either group and also the double-precision fp ops. 9651 * The caller need only provide tcg_rmode and tcg_fpstatus if the op 9652 * requires them. 9653 */ 9654 TCGCond cond; 9655 9656 switch (opcode) { 9657 case 0x4: /* CLS, CLZ */ 9658 if (u) { 9659 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); 9660 } else { 9661 tcg_gen_clrsb_i64(tcg_rd, tcg_rn); 9662 } 9663 break; 9664 case 0x5: /* NOT */ 9665 /* This opcode is shared with CNT and RBIT but we have earlier 9666 * enforced that size == 3 if and only if this is the NOT insn.
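* (CNT and RBIT operate only on byte elements, so they can never arrive here with size == 3.)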
9667 */ 9668 tcg_gen_not_i64(tcg_rd, tcg_rn); 9669 break; 9670 case 0x7: /* SQABS, SQNEG */ 9671 if (u) { 9672 gen_helper_neon_qneg_s64(tcg_rd, tcg_env, tcg_rn); 9673 } else { 9674 gen_helper_neon_qabs_s64(tcg_rd, tcg_env, tcg_rn); 9675 } 9676 break; 9677 case 0xa: /* CMLT */ 9678 cond = TCG_COND_LT; 9679 do_cmop: 9680 /* 64 bit integer comparison against zero, result is test ? -1 : 0. */ 9681 tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0)); 9682 break; 9683 case 0x8: /* CMGT, CMGE */ 9684 cond = u ? TCG_COND_GE : TCG_COND_GT; 9685 goto do_cmop; 9686 case 0x9: /* CMEQ, CMLE */ 9687 cond = u ? TCG_COND_LE : TCG_COND_EQ; 9688 goto do_cmop; 9689 case 0xb: /* ABS, NEG */ 9690 if (u) { 9691 tcg_gen_neg_i64(tcg_rd, tcg_rn); 9692 } else { 9693 tcg_gen_abs_i64(tcg_rd, tcg_rn); 9694 } 9695 break; 9696 case 0x2f: /* FABS */ 9697 gen_vfp_absd(tcg_rd, tcg_rn); 9698 break; 9699 case 0x6f: /* FNEG */ 9700 gen_vfp_negd(tcg_rd, tcg_rn); 9701 break; 9702 case 0x7f: /* FSQRT */ 9703 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, tcg_env); 9704 break; 9705 case 0x1a: /* FCVTNS */ 9706 case 0x1b: /* FCVTMS */ 9707 case 0x1c: /* FCVTAS */ 9708 case 0x3a: /* FCVTPS */ 9709 case 0x3b: /* FCVTZS */ 9710 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); 9711 break; 9712 case 0x5a: /* FCVTNU */ 9713 case 0x5b: /* FCVTMU */ 9714 case 0x5c: /* FCVTAU */ 9715 case 0x7a: /* FCVTPU */ 9716 case 0x7b: /* FCVTZU */ 9717 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus); 9718 break; 9719 case 0x18: /* FRINTN */ 9720 case 0x19: /* FRINTM */ 9721 case 0x38: /* FRINTP */ 9722 case 0x39: /* FRINTZ */ 9723 case 0x58: /* FRINTA */ 9724 case 0x79: /* FRINTI */ 9725 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus); 9726 break; 9727 case 0x59: /* FRINTX */ 9728 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus); 9729 break; 9730 case 0x1e: /* FRINT32Z */ 9731 case 0x5e: /* FRINT32X */ 9732 gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus); 9733 break; 9734 case 0x1f: /* FRINT64Z */ 9735 case 0x5f: /* FRINT64X */ 9736 gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus); 9737 break; 9738 default: 9739 g_assert_not_reached(); 9740 } 9741 } 9742 9743 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode, 9744 bool is_scalar, bool is_u, bool is_q, 9745 int size, int rn, int rd) 9746 { 9747 bool is_double = (size == MO_64); 9748 TCGv_ptr fpst; 9749 9750 if (!fp_access_check(s)) { 9751 return; 9752 } 9753 9754 fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR); 9755 9756 if (is_double) { 9757 TCGv_i64 tcg_op = tcg_temp_new_i64(); 9758 TCGv_i64 tcg_zero = tcg_constant_i64(0); 9759 TCGv_i64 tcg_res = tcg_temp_new_i64(); 9760 NeonGenTwoDoubleOpFn *genfn; 9761 bool swap = false; 9762 int pass; 9763 9764 switch (opcode) { 9765 case 0x2e: /* FCMLT (zero) */ 9766 swap = true; 9767 /* fallthrough */ 9768 case 0x2c: /* FCMGT (zero) */ 9769 genfn = gen_helper_neon_cgt_f64; 9770 break; 9771 case 0x2d: /* FCMEQ (zero) */ 9772 genfn = gen_helper_neon_ceq_f64; 9773 break; 9774 case 0x6d: /* FCMLE (zero) */ 9775 swap = true; 9776 /* fall through */ 9777 case 0x6c: /* FCMGE (zero) */ 9778 genfn = gen_helper_neon_cge_f64; 9779 break; 9780 default: 9781 g_assert_not_reached(); 9782 } 9783 9784 for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { 9785 read_vec_element(s, tcg_op, rn, pass, MO_64); 9786 if (swap) { 9787 genfn(tcg_res, tcg_zero, tcg_op, fpst); 9788 } else { 9789 genfn(tcg_res, tcg_op, tcg_zero, fpst); 9790 } 9791 write_vec_element(s, tcg_res, rd, pass, MO_64); 9792 } 9793 9794 clear_vec_high(s, !is_scalar, rd); 9795 } else { 9796 TCGv_i32 tcg_op = tcg_temp_new_i32(); 9797 TCGv_i32 tcg_zero = tcg_constant_i32(0); 9798 TCGv_i32 tcg_res = tcg_temp_new_i32(); 9799 NeonGenTwoSingleOpFn *genfn; 9800 bool swap = false; 9801 int pass, maxpasses; 9802 9803 if (size == MO_16) { 9804 switch (opcode) { 9805 case 0x2e: /* FCMLT (zero) */ 9806 swap = true; 9807 /* fall through */ 9808 case 0x2c: /* FCMGT (zero) */ 9809 genfn = gen_helper_advsimd_cgt_f16; 9810 break; 9811 case 0x2d: /* FCMEQ (zero) */ 9812 genfn = gen_helper_advsimd_ceq_f16; 9813 break; 9814 case 0x6d: /* FCMLE (zero) */ 9815 swap = true; 9816 /* fall through */ 9817 case 0x6c: /* FCMGE (zero) */ 9818 genfn = gen_helper_advsimd_cge_f16; 9819 break; 9820 default: 9821 g_assert_not_reached(); 9822 } 9823 } else { 9824 switch (opcode) { 9825 case 0x2e: /* FCMLT (zero) */ 9826 swap = true; 9827 /* fall through */ 9828 case 0x2c: /* FCMGT (zero) */ 9829 genfn = gen_helper_neon_cgt_f32; 9830 break; 9831 case 0x2d: /* FCMEQ (zero) */ 9832 genfn = gen_helper_neon_ceq_f32; 9833 break; 9834 case 0x6d: /* FCMLE (zero) */ 9835 swap = true; 9836 /* fall through */ 9837 case 0x6c: /* FCMGE (zero) */ 9838 genfn = gen_helper_neon_cge_f32; 9839 break; 9840 default: 9841 g_assert_not_reached(); 9842 } 9843 } 9844 9845 if (is_scalar) { 9846 maxpasses = 1; 9847 } else { 9848 int vector_size = 8 << is_q; 9849 maxpasses = vector_size >> size; 9850 } 9851 9852 for (pass = 0; pass < maxpasses; pass++) { 9853 read_vec_element_i32(s, tcg_op, rn, pass, size); 9854 if (swap) { 9855 genfn(tcg_res, tcg_zero, tcg_op, fpst); 9856 } else { 9857 genfn(tcg_res, tcg_op, tcg_zero, fpst); 9858 } 9859 if (is_scalar) { 9860 write_fp_sreg(s, rd, tcg_res); 9861 } else { 9862 write_vec_element_i32(s, tcg_res, rd, pass, size); 9863 } 9864 } 9865 9866 if (!is_scalar) { 9867 clear_vec_high(s, is_q, rd); 9868 } 9869 } 9870 } 9871 9872 static void handle_2misc_reciprocal(DisasContext *s, int opcode, 9873 bool is_scalar, bool is_u, bool is_q, 9874 int size, int rn, int rd) 9875 { 9876 bool is_double = (size == 3); 9877 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 9878 9879 if (is_double) { 9880 TCGv_i64 tcg_op = tcg_temp_new_i64(); 9881 TCGv_i64 tcg_res = tcg_temp_new_i64(); 9882 int pass; 9883 9884 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { 9885 read_vec_element(s, tcg_op, rn, pass, MO_64); 9886 switch (opcode) { 9887 case 0x3d: /* FRECPE */ 9888 gen_helper_recpe_f64(tcg_res, tcg_op, fpst); 9889 break; 9890 case 0x3f: /* FRECPX */ 9891 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst); 9892 break; 9893 case 0x7d: /* FRSQRTE */ 9894 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst); 9895 break; 9896 default: 9897 g_assert_not_reached(); 9898 } 9899 write_vec_element(s, tcg_res, rd, pass, MO_64); 9900 } 9901 clear_vec_high(s, !is_scalar, rd); 9902 } else { 9903 TCGv_i32 tcg_op = tcg_temp_new_i32(); 9904 TCGv_i32 tcg_res = tcg_temp_new_i32(); 9905 int pass, maxpasses; 9906 9907 if (is_scalar) { 9908 maxpasses = 1; 9909 } else { 9910 maxpasses = is_q ? 
4 : 2; 9911 } 9912 9913 for (pass = 0; pass < maxpasses; pass++) { 9914 read_vec_element_i32(s, tcg_op, rn, pass, MO_32); 9915 9916 switch (opcode) { 9917 case 0x3c: /* URECPE */ 9918 gen_helper_recpe_u32(tcg_res, tcg_op); 9919 break; 9920 case 0x3d: /* FRECPE */ 9921 gen_helper_recpe_f32(tcg_res, tcg_op, fpst); 9922 break; 9923 case 0x3f: /* FRECPX */ 9924 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst); 9925 break; 9926 case 0x7d: /* FRSQRTE */ 9927 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst); 9928 break; 9929 default: 9930 g_assert_not_reached(); 9931 } 9932 9933 if (is_scalar) { 9934 write_fp_sreg(s, rd, tcg_res); 9935 } else { 9936 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 9937 } 9938 } 9939 if (!is_scalar) { 9940 clear_vec_high(s, is_q, rd); 9941 } 9942 } 9943 } 9944 9945 static void handle_2misc_narrow(DisasContext *s, bool scalar, 9946 int opcode, bool u, bool is_q, 9947 int size, int rn, int rd) 9948 { 9949 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element 9950 * in the source becomes a size element in the destination). 9951 */ 9952 int pass; 9953 TCGv_i32 tcg_res[2]; 9954 int destelt = is_q ? 2 : 0; 9955 int passes = scalar ? 1 : 2; 9956 9957 if (scalar) { 9958 tcg_res[1] = tcg_constant_i32(0); 9959 } 9960 9961 for (pass = 0; pass < passes; pass++) { 9962 TCGv_i64 tcg_op = tcg_temp_new_i64(); 9963 NeonGenNarrowFn *genfn = NULL; 9964 NeonGenNarrowEnvFn *genenvfn = NULL; 9965 9966 if (scalar) { 9967 read_vec_element(s, tcg_op, rn, pass, size + 1); 9968 } else { 9969 read_vec_element(s, tcg_op, rn, pass, MO_64); 9970 } 9971 tcg_res[pass] = tcg_temp_new_i32(); 9972 9973 switch (opcode) { 9974 case 0x12: /* XTN, SQXTUN */ 9975 { 9976 static NeonGenNarrowFn * const xtnfns[3] = { 9977 gen_helper_neon_narrow_u8, 9978 gen_helper_neon_narrow_u16, 9979 tcg_gen_extrl_i64_i32, 9980 }; 9981 static NeonGenNarrowEnvFn * const sqxtunfns[3] = { 9982 gen_helper_neon_unarrow_sat8, 9983 gen_helper_neon_unarrow_sat16, 9984 gen_helper_neon_unarrow_sat32, 9985 }; 9986 if (u) { 9987 genenvfn = sqxtunfns[size]; 9988 } else { 9989 genfn = xtnfns[size]; 9990 } 9991 break; 9992 } 9993 case 0x14: /* SQXTN, UQXTN */ 9994 { 9995 static NeonGenNarrowEnvFn * const fns[3][2] = { 9996 { gen_helper_neon_narrow_sat_s8, 9997 gen_helper_neon_narrow_sat_u8 }, 9998 { gen_helper_neon_narrow_sat_s16, 9999 gen_helper_neon_narrow_sat_u16 }, 10000 { gen_helper_neon_narrow_sat_s32, 10001 gen_helper_neon_narrow_sat_u32 }, 10002 }; 10003 genenvfn = fns[size][u]; 10004 break; 10005 } 10006 case 0x16: /* FCVTN, FCVTN2 */ 10007 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */ 10008 if (size == 2) { 10009 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, tcg_env); 10010 } else { 10011 TCGv_i32 tcg_lo = tcg_temp_new_i32(); 10012 TCGv_i32 tcg_hi = tcg_temp_new_i32(); 10013 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 10014 TCGv_i32 ahp = get_ahp_flag(); 10015 10016 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op); 10017 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); 10018 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); 10019 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16); 10020 } 10021 break; 10022 case 0x36: /* BFCVTN, BFCVTN2 */ 10023 { 10024 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 10025 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst); 10026 } 10027 break; 10028 case 0x56: /* FCVTXN, FCVTXN2 */ 10029 /* 64 bit to 32 bit float conversion 10030 * with von Neumann rounding (round to odd) 10031 */ 10032 assert(size == 2); 10033 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], 
tcg_op, tcg_env); 10034 break; 10035 default: 10036 g_assert_not_reached(); 10037 } 10038 10039 if (genfn) { 10040 genfn(tcg_res[pass], tcg_op); 10041 } else if (genenvfn) { 10042 genenvfn(tcg_res[pass], tcg_env, tcg_op); 10043 } 10044 } 10045 10046 for (pass = 0; pass < 2; pass++) { 10047 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); 10048 } 10049 clear_vec_high(s, is_q, rd); 10050 } 10051 10052 /* AdvSIMD scalar two reg misc 10053 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 10054 * +-----+---+-----------+------+-----------+--------+-----+------+------+ 10055 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | 10056 * +-----+---+-----------+------+-----------+--------+-----+------+------+ 10057 */ 10058 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) 10059 { 10060 int rd = extract32(insn, 0, 5); 10061 int rn = extract32(insn, 5, 5); 10062 int opcode = extract32(insn, 12, 5); 10063 int size = extract32(insn, 22, 2); 10064 bool u = extract32(insn, 29, 1); 10065 bool is_fcvt = false; 10066 int rmode; 10067 TCGv_i32 tcg_rmode; 10068 TCGv_ptr tcg_fpstatus; 10069 10070 switch (opcode) { 10071 case 0x7: /* SQABS / SQNEG */ 10072 break; 10073 case 0xa: /* CMLT */ 10074 if (u) { 10075 unallocated_encoding(s); 10076 return; 10077 } 10078 /* fall through */ 10079 case 0x8: /* CMGT, CMGE */ 10080 case 0x9: /* CMEQ, CMLE */ 10081 case 0xb: /* ABS, NEG */ 10082 if (size != 3) { 10083 unallocated_encoding(s); 10084 return; 10085 } 10086 break; 10087 case 0x12: /* SQXTUN */ 10088 if (!u) { 10089 unallocated_encoding(s); 10090 return; 10091 } 10092 /* fall through */ 10093 case 0x14: /* SQXTN, UQXTN */ 10094 if (size == 3) { 10095 unallocated_encoding(s); 10096 return; 10097 } 10098 if (!fp_access_check(s)) { 10099 return; 10100 } 10101 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); 10102 return; 10103 case 0xc ... 0xf: 10104 case 0x16 ... 0x1d: 10105 case 0x1f: 10106 /* Floating point: U, size[1] and opcode indicate operation; 10107 * size[0] indicates single or double precision. 10108 */ 10109 opcode |= (extract32(size, 1, 1) << 5) | (u << 6); 10110 size = extract32(size, 0, 1) ? 
3 : 2; 10111 switch (opcode) { 10112 case 0x2c: /* FCMGT (zero) */ 10113 case 0x2d: /* FCMEQ (zero) */ 10114 case 0x2e: /* FCMLT (zero) */ 10115 case 0x6c: /* FCMGE (zero) */ 10116 case 0x6d: /* FCMLE (zero) */ 10117 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd); 10118 return; 10119 case 0x1d: /* SCVTF */ 10120 case 0x5d: /* UCVTF */ 10121 { 10122 bool is_signed = (opcode == 0x1d); 10123 if (!fp_access_check(s)) { 10124 return; 10125 } 10126 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size); 10127 return; 10128 } 10129 case 0x3d: /* FRECPE */ 10130 case 0x3f: /* FRECPX */ 10131 case 0x7d: /* FRSQRTE */ 10132 if (!fp_access_check(s)) { 10133 return; 10134 } 10135 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd); 10136 return; 10137 case 0x1a: /* FCVTNS */ 10138 case 0x1b: /* FCVTMS */ 10139 case 0x3a: /* FCVTPS */ 10140 case 0x3b: /* FCVTZS */ 10141 case 0x5a: /* FCVTNU */ 10142 case 0x5b: /* FCVTMU */ 10143 case 0x7a: /* FCVTPU */ 10144 case 0x7b: /* FCVTZU */ 10145 is_fcvt = true; 10146 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); 10147 break; 10148 case 0x1c: /* FCVTAS */ 10149 case 0x5c: /* FCVTAU */ 10150 /* TIEAWAY doesn't fit in the usual rounding mode encoding */ 10151 is_fcvt = true; 10152 rmode = FPROUNDING_TIEAWAY; 10153 break; 10154 case 0x56: /* FCVTXN, FCVTXN2 */ 10155 if (size == 2) { 10156 unallocated_encoding(s); 10157 return; 10158 } 10159 if (!fp_access_check(s)) { 10160 return; 10161 } 10162 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd); 10163 return; 10164 default: 10165 unallocated_encoding(s); 10166 return; 10167 } 10168 break; 10169 default: 10170 case 0x3: /* USQADD / SUQADD */ 10171 unallocated_encoding(s); 10172 return; 10173 } 10174 10175 if (!fp_access_check(s)) { 10176 return; 10177 } 10178 10179 if (is_fcvt) { 10180 tcg_fpstatus = fpstatus_ptr(FPST_FPCR); 10181 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); 10182 } else { 10183 tcg_fpstatus = NULL; 10184 tcg_rmode = NULL; 10185 } 10186 10187 if (size == 3) { 10188 TCGv_i64 tcg_rn = read_fp_dreg(s, rn); 10189 TCGv_i64 tcg_rd = tcg_temp_new_i64(); 10190 10191 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); 10192 write_fp_dreg(s, rd, tcg_rd); 10193 } else { 10194 TCGv_i32 tcg_rn = tcg_temp_new_i32(); 10195 TCGv_i32 tcg_rd = tcg_temp_new_i32(); 10196 10197 read_vec_element_i32(s, tcg_rn, rn, 0, size); 10198 10199 switch (opcode) { 10200 case 0x7: /* SQABS, SQNEG */ 10201 { 10202 NeonGenOneOpEnvFn *genfn; 10203 static NeonGenOneOpEnvFn * const fns[3][2] = { 10204 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, 10205 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, 10206 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 }, 10207 }; 10208 genfn = fns[size][u]; 10209 genfn(tcg_rd, tcg_env, tcg_rn); 10210 break; 10211 } 10212 case 0x1a: /* FCVTNS */ 10213 case 0x1b: /* FCVTMS */ 10214 case 0x1c: /* FCVTAS */ 10215 case 0x3a: /* FCVTPS */ 10216 case 0x3b: /* FCVTZS */ 10217 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0), 10218 tcg_fpstatus); 10219 break; 10220 case 0x5a: /* FCVTNU */ 10221 case 0x5b: /* FCVTMU */ 10222 case 0x5c: /* FCVTAU */ 10223 case 0x7a: /* FCVTPU */ 10224 case 0x7b: /* FCVTZU */ 10225 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0), 10226 tcg_fpstatus); 10227 break; 10228 default: 10229 g_assert_not_reached(); 10230 } 10231 10232 write_fp_sreg(s, rd, tcg_rd); 10233 } 10234 10235 if (is_fcvt) { 10236 gen_restore_rmode(tcg_rmode, tcg_fpstatus); 10237 } 10238 } 
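/*
 * A worked example of the immh:immb right-shift decode used by the
 * handlers below (illustrative values only):
 *     immh = 0b0001, immb = 0b101
 *     size  = 32 - clz32(0b0001) - 1   == 0    (byte elements)
 *     immhb = (immh << 3) | immb       == 13
 *     shift = 2 * (8 << size) - immhb  == 3
 * i.e. a right shift by 3 within each byte lane.
 */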
10239 10240 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ 10241 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, 10242 int immh, int immb, int opcode, int rn, int rd) 10243 { 10244 int size = 32 - clz32(immh) - 1; 10245 int immhb = immh << 3 | immb; 10246 int shift = 2 * (8 << size) - immhb; 10247 GVecGen2iFn *gvec_fn; 10248 10249 if (extract32(immh, 3, 1) && !is_q) { 10250 unallocated_encoding(s); 10251 return; 10252 } 10253 tcg_debug_assert(size <= 3); 10254 10255 if (!fp_access_check(s)) { 10256 return; 10257 } 10258 10259 switch (opcode) { 10260 case 0x02: /* SSRA / USRA (accumulate) */ 10261 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra; 10262 break; 10263 10264 case 0x08: /* SRI */ 10265 gvec_fn = gen_gvec_sri; 10266 break; 10267 10268 case 0x00: /* SSHR / USHR */ 10269 if (is_u) { 10270 if (shift == 8 << size) { 10271 /* Shift count the same size as element size produces zero. */ 10272 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd), 10273 is_q ? 16 : 8, vec_full_reg_size(s), 0); 10274 return; 10275 } 10276 gvec_fn = tcg_gen_gvec_shri; 10277 } else { 10278 /* Shift count the same size as element size produces all sign. */ 10279 if (shift == 8 << size) { 10280 shift -= 1; 10281 } 10282 gvec_fn = tcg_gen_gvec_sari; 10283 } 10284 break; 10285 10286 case 0x04: /* SRSHR / URSHR (rounding) */ 10287 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr; 10288 break; 10289 10290 case 0x06: /* SRSRA / URSRA (accum + rounding) */ 10291 gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra; 10292 break; 10293 10294 default: 10295 g_assert_not_reached(); 10296 } 10297 10298 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size); 10299 } 10300 10301 /* SHL/SLI - Vector shift left */ 10302 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, 10303 int immh, int immb, int opcode, int rn, int rd) 10304 { 10305 int size = 32 - clz32(immh) - 1; 10306 int immhb = immh << 3 | immb; 10307 int shift = immhb - (8 << size); 10308 10309 /* Range of size is limited by decode: immh is a non-zero 4 bit field */ 10310 assert(size >= 0 && size <= 3); 10311 10312 if (extract32(immh, 3, 1) && !is_q) { 10313 unallocated_encoding(s); 10314 return; 10315 } 10316 10317 if (!fp_access_check(s)) { 10318 return; 10319 } 10320 10321 if (insert) { 10322 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size); 10323 } else { 10324 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); 10325 } 10326 } 10327 10328 /* USHLL/SHLL - Vector shift left with widening */ 10329 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, 10330 int immh, int immb, int opcode, int rn, int rd) 10331 { 10332 int size = 32 - clz32(immh) - 1; 10333 int immhb = immh << 3 | immb; 10334 int shift = immhb - (8 << size); 10335 int dsize = 64; 10336 int esize = 8 << size; 10337 int elements = dsize/esize; 10338 TCGv_i64 tcg_rn = tcg_temp_new_i64(); 10339 TCGv_i64 tcg_rd = tcg_temp_new_i64(); 10340 int i; 10341 10342 if (size >= 3) { 10343 unallocated_encoding(s); 10344 return; 10345 } 10346 10347 if (!fp_access_check(s)) { 10348 return; 10349 } 10350 10351 /* For the LL variants the store is larger than the load, 10352 * so if rd == rn we would overwrite parts of our input. 10353 * So load everything right now and use shifts in the main loop. 10354 */ 10355 read_vec_element(s, tcg_rn, rn, is_q ? 
1 : 0, MO_64); 10356 10357 for (i = 0; i < elements; i++) { 10358 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize); 10359 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0); 10360 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift); 10361 write_vec_element(s, tcg_rd, rd, i, size + 1); 10362 } 10363 } 10364 10365 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */ 10366 static void handle_vec_simd_shrn(DisasContext *s, bool is_q, 10367 int immh, int immb, int opcode, int rn, int rd) 10368 { 10369 int immhb = immh << 3 | immb; 10370 int size = 32 - clz32(immh) - 1; 10371 int dsize = 64; 10372 int esize = 8 << size; 10373 int elements = dsize/esize; 10374 int shift = (2 * esize) - immhb; 10375 bool round = extract32(opcode, 0, 1); 10376 TCGv_i64 tcg_rn, tcg_rd, tcg_final; 10377 TCGv_i64 tcg_round; 10378 int i; 10379 10380 if (extract32(immh, 3, 1)) { 10381 unallocated_encoding(s); 10382 return; 10383 } 10384 10385 if (!fp_access_check(s)) { 10386 return; 10387 } 10388 10389 tcg_rn = tcg_temp_new_i64(); 10390 tcg_rd = tcg_temp_new_i64(); 10391 tcg_final = tcg_temp_new_i64(); 10392 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64); 10393 10394 if (round) { 10395 tcg_round = tcg_constant_i64(1ULL << (shift - 1)); 10396 } else { 10397 tcg_round = NULL; 10398 } 10399 10400 for (i = 0; i < elements; i++) { 10401 read_vec_element(s, tcg_rn, rn, i, size+1); 10402 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, 10403 false, true, size+1, shift); 10404 10405 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); 10406 } 10407 10408 if (!is_q) { 10409 write_vec_element(s, tcg_final, rd, 0, MO_64); 10410 } else { 10411 write_vec_element(s, tcg_final, rd, 1, MO_64); 10412 } 10413 10414 clear_vec_high(s, is_q, rd); 10415 } 10416 10417 10418 /* AdvSIMD shift by immediate 10419 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 10420 * +---+---+---+-------------+------+------+--------+---+------+------+ 10421 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | 10422 * +---+---+---+-------------+------+------+--------+---+------+------+ 10423 */ 10424 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) 10425 { 10426 int rd = extract32(insn, 0, 5); 10427 int rn = extract32(insn, 5, 5); 10428 int opcode = extract32(insn, 11, 5); 10429 int immb = extract32(insn, 16, 3); 10430 int immh = extract32(insn, 19, 4); 10431 bool is_u = extract32(insn, 29, 1); 10432 bool is_q = extract32(insn, 30, 1); 10433 10434 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. 
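 * With immh != 0 guaranteed, the handlers' computation of
 * size = 32 - clz32(immh) - 1 always yields a value in 0..3.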
*/ 10435 assert(immh != 0); 10436 10437 switch (opcode) { 10438 case 0x08: /* SRI */ 10439 if (!is_u) { 10440 unallocated_encoding(s); 10441 return; 10442 } 10443 /* fall through */ 10444 case 0x00: /* SSHR / USHR */ 10445 case 0x02: /* SSRA / USRA (accumulate) */ 10446 case 0x04: /* SRSHR / URSHR (rounding) */ 10447 case 0x06: /* SRSRA / URSRA (accum + rounding) */ 10448 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd); 10449 break; 10450 case 0x0a: /* SHL / SLI */ 10451 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd); 10452 break; 10453 case 0x10: /* SHRN */ 10454 case 0x11: /* RSHRN / SQRSHRUN */ 10455 if (is_u) { 10456 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb, 10457 opcode, rn, rd); 10458 } else { 10459 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd); 10460 } 10461 break; 10462 case 0x12: /* SQSHRN / UQSHRN */ 10463 case 0x13: /* SQRSHRN / UQRSHRN */ 10464 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb, 10465 opcode, rn, rd); 10466 break; 10467 case 0x14: /* SSHLL / USHLL */ 10468 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd); 10469 break; 10470 case 0x1c: /* SCVTF / UCVTF */ 10471 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb, 10472 opcode, rn, rd); 10473 break; 10474 case 0xc: /* SQSHLU */ 10475 if (!is_u) { 10476 unallocated_encoding(s); 10477 return; 10478 } 10479 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd); 10480 break; 10481 case 0xe: /* SQSHL, UQSHL */ 10482 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd); 10483 break; 10484 case 0x1f: /* FCVTZS/ FCVTZU */ 10485 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd); 10486 return; 10487 default: 10488 unallocated_encoding(s); 10489 return; 10490 } 10491 } 10492 10493 /* Generate code to do a "long" addition or subtraction, ie one done in 10494 * TCGv_i64 on vector lanes twice the width specified by size. 10495 */ 10496 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res, 10497 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2) 10498 { 10499 static NeonGenTwo64OpFn * const fns[3][2] = { 10500 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 }, 10501 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 }, 10502 { tcg_gen_add_i64, tcg_gen_sub_i64 }, 10503 }; 10504 NeonGenTwo64OpFn *genfn; 10505 assert(size < 3); 10506 10507 genfn = fns[size][is_sub]; 10508 genfn(tcg_res, tcg_op1, tcg_op2); 10509 } 10510 10511 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, 10512 int opcode, int rd, int rn, int rm) 10513 { 10514 /* 3-reg-different widening insns: 64 x 64 -> 128 */ 10515 TCGv_i64 tcg_res[2]; 10516 int pass, accop; 10517 10518 tcg_res[0] = tcg_temp_new_i64(); 10519 tcg_res[1] = tcg_temp_new_i64(); 10520 10521 /* Does this op do an adding accumulate, a subtracting accumulate, 10522 * or no accumulate at all? 10523 */ 10524 switch (opcode) { 10525 case 5: 10526 case 8: 10527 case 9: 10528 accop = 1; 10529 break; 10530 case 10: 10531 case 11: 10532 accop = -1; 10533 break; 10534 default: 10535 accop = 0; 10536 break; 10537 } 10538 10539 if (accop != 0) { 10540 read_vec_element(s, tcg_res[0], rd, 0, MO_64); 10541 read_vec_element(s, tcg_res[1], rd, 1, MO_64); 10542 } 10543 10544 /* size == 2 means two 32x32->64 operations; this is worth special 10545 * casing because we can generally handle it inline. 
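     * (e.g. SMULL Vd.2D, Vn.2S, Vm.2S becomes two sign-extended
     * element reads and one tcg_gen_mul_i64 per output lane, with
     * no helper call needed.)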
10546 */ 10547 if (size == 2) { 10548 for (pass = 0; pass < 2; pass++) { 10549 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 10550 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 10551 TCGv_i64 tcg_passres; 10552 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); 10553 10554 int elt = pass + is_q * 2; 10555 10556 read_vec_element(s, tcg_op1, rn, elt, memop); 10557 read_vec_element(s, tcg_op2, rm, elt, memop); 10558 10559 if (accop == 0) { 10560 tcg_passres = tcg_res[pass]; 10561 } else { 10562 tcg_passres = tcg_temp_new_i64(); 10563 } 10564 10565 switch (opcode) { 10566 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 10567 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2); 10568 break; 10569 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 10570 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2); 10571 break; 10572 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 10573 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 10574 { 10575 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64(); 10576 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64(); 10577 10578 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2); 10579 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1); 10580 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE, 10581 tcg_passres, 10582 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); 10583 break; 10584 } 10585 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 10586 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 10587 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ 10588 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2); 10589 break; 10590 case 9: /* SQDMLAL, SQDMLAL2 */ 10591 case 11: /* SQDMLSL, SQDMLSL2 */ 10592 case 13: /* SQDMULL, SQDMULL2 */ 10593 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2); 10594 gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env, 10595 tcg_passres, tcg_passres); 10596 break; 10597 default: 10598 g_assert_not_reached(); 10599 } 10600 10601 if (opcode == 9 || opcode == 11) { 10602 /* saturating accumulate ops */ 10603 if (accop < 0) { 10604 tcg_gen_neg_i64(tcg_passres, tcg_passres); 10605 } 10606 gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env, 10607 tcg_res[pass], tcg_passres); 10608 } else if (accop > 0) { 10609 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 10610 } else if (accop < 0) { 10611 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 10612 } 10613 } 10614 } else { 10615 /* size 0 or 1, generally helper functions */ 10616 for (pass = 0; pass < 2; pass++) { 10617 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 10618 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 10619 TCGv_i64 tcg_passres; 10620 int elt = pass + is_q * 2; 10621 10622 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32); 10623 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32); 10624 10625 if (accop == 0) { 10626 tcg_passres = tcg_res[pass]; 10627 } else { 10628 tcg_passres = tcg_temp_new_i64(); 10629 } 10630 10631 switch (opcode) { 10632 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 10633 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 10634 { 10635 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64(); 10636 static NeonGenWidenFn * const widenfns[2][2] = { 10637 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, 10638 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, 10639 }; 10640 NeonGenWidenFn *widenfn = widenfns[size][is_u]; 10641 10642 widenfn(tcg_op2_64, tcg_op2); 10643 widenfn(tcg_passres, tcg_op1); 10644 gen_neon_addl(size, (opcode == 2), tcg_passres, 10645 tcg_passres, tcg_op2_64); 10646 break; 10647 } 10648 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 10649 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 10650 if (size == 0) { 10651 if (is_u) { 10652 gen_helper_neon_abdl_u16(tcg_passres, 
tcg_op1, tcg_op2); 10653 } else { 10654 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2); 10655 } 10656 } else { 10657 if (is_u) { 10658 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2); 10659 } else { 10660 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2); 10661 } 10662 } 10663 break; 10664 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 10665 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 10666 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ 10667 if (size == 0) { 10668 if (is_u) { 10669 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2); 10670 } else { 10671 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2); 10672 } 10673 } else { 10674 if (is_u) { 10675 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2); 10676 } else { 10677 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2); 10678 } 10679 } 10680 break; 10681 case 9: /* SQDMLAL, SQDMLAL2 */ 10682 case 11: /* SQDMLSL, SQDMLSL2 */ 10683 case 13: /* SQDMULL, SQDMULL2 */ 10684 assert(size == 1); 10685 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2); 10686 gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env, 10687 tcg_passres, tcg_passres); 10688 break; 10689 default: 10690 g_assert_not_reached(); 10691 } 10692 10693 if (accop != 0) { 10694 if (opcode == 9 || opcode == 11) { 10695 /* saturating accumulate ops */ 10696 if (accop < 0) { 10697 gen_helper_neon_negl_u32(tcg_passres, tcg_passres); 10698 } 10699 gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env, 10700 tcg_res[pass], 10701 tcg_passres); 10702 } else { 10703 gen_neon_addl(size, (accop < 0), tcg_res[pass], 10704 tcg_res[pass], tcg_passres); 10705 } 10706 } 10707 } 10708 } 10709 10710 write_vec_element(s, tcg_res[0], rd, 0, MO_64); 10711 write_vec_element(s, tcg_res[1], rd, 1, MO_64); 10712 } 10713 10714 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, 10715 int opcode, int rd, int rn, int rm) 10716 { 10717 TCGv_i64 tcg_res[2]; 10718 int part = is_q ? 2 : 0; 10719 int pass; 10720 10721 for (pass = 0; pass < 2; pass++) { 10722 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 10723 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 10724 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64(); 10725 static NeonGenWidenFn * const widenfns[3][2] = { 10726 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, 10727 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, 10728 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 }, 10729 }; 10730 NeonGenWidenFn *widenfn = widenfns[size][is_u]; 10731 10732 read_vec_element(s, tcg_op1, rn, pass, MO_64); 10733 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32); 10734 widenfn(tcg_op2_wide, tcg_op2); 10735 tcg_res[pass] = tcg_temp_new_i64(); 10736 gen_neon_addl(size, (opcode == 3), 10737 tcg_res[pass], tcg_op1, tcg_op2_wide); 10738 } 10739 10740 for (pass = 0; pass < 2; pass++) { 10741 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 10742 } 10743 } 10744 10745 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in) 10746 { 10747 tcg_gen_addi_i64(in, in, 1U << 31); 10748 tcg_gen_extrh_i64_i32(res, in); 10749 } 10750 10751 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, 10752 int opcode, int rd, int rn, int rm) 10753 { 10754 TCGv_i32 tcg_res[2]; 10755 int part = is_q ? 
2 : 0; 10756 int pass; 10757 10758 for (pass = 0; pass < 2; pass++) { 10759 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 10760 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 10761 TCGv_i64 tcg_wideres = tcg_temp_new_i64(); 10762 static NeonGenNarrowFn * const narrowfns[3][2] = { 10763 { gen_helper_neon_narrow_high_u8, 10764 gen_helper_neon_narrow_round_high_u8 }, 10765 { gen_helper_neon_narrow_high_u16, 10766 gen_helper_neon_narrow_round_high_u16 }, 10767 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 }, 10768 }; 10769 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u]; 10770 10771 read_vec_element(s, tcg_op1, rn, pass, MO_64); 10772 read_vec_element(s, tcg_op2, rm, pass, MO_64); 10773 10774 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); 10775 10776 tcg_res[pass] = tcg_temp_new_i32(); 10777 gennarrow(tcg_res[pass], tcg_wideres); 10778 } 10779 10780 for (pass = 0; pass < 2; pass++) { 10781 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); 10782 } 10783 clear_vec_high(s, is_q, rd); 10784 } 10785 10786 /* AdvSIMD three different 10787 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 10788 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ 10789 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | 10790 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ 10791 */ 10792 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) 10793 { 10794 /* Instructions in this group fall into three basic classes 10795 * (in each case with the operation working on each element in 10796 * the input vectors): 10797 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra 10798 * 128 bit input) 10799 * (2) wide 64 x 128 -> 128 10800 * (3) narrowing 128 x 128 -> 64 10801 * Here we do initial decode, catch unallocated cases and 10802 * dispatch to separate functions for each class. 10803 */ 10804 int is_q = extract32(insn, 30, 1); 10805 int is_u = extract32(insn, 29, 1); 10806 int size = extract32(insn, 22, 2); 10807 int opcode = extract32(insn, 12, 4); 10808 int rm = extract32(insn, 16, 5); 10809 int rn = extract32(insn, 5, 5); 10810 int rd = extract32(insn, 0, 5); 10811 10812 switch (opcode) { 10813 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */ 10814 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */ 10815 /* 64 x 128 -> 128 */ 10816 if (size == 3) { 10817 unallocated_encoding(s); 10818 return; 10819 } 10820 if (!fp_access_check(s)) { 10821 return; 10822 } 10823 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm); 10824 break; 10825 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */ 10826 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */ 10827 /* 128 x 128 -> 64 */ 10828 if (size == 3) { 10829 unallocated_encoding(s); 10830 return; 10831 } 10832 if (!fp_access_check(s)) { 10833 return; 10834 } 10835 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); 10836 break; 10837 case 14: /* PMULL, PMULL2 */ 10838 if (is_u) { 10839 unallocated_encoding(s); 10840 return; 10841 } 10842 switch (size) { 10843 case 0: /* PMULL.P8 */ 10844 if (!fp_access_check(s)) { 10845 return; 10846 } 10847 /* The Q field specifies lo/hi half input for this insn. */ 10848 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, 10849 gen_helper_neon_pmull_h); 10850 break; 10851 10852 case 3: /* PMULL.P64 */ 10853 if (!dc_isar_feature(aa64_pmull, s)) { 10854 unallocated_encoding(s); 10855 return; 10856 } 10857 if (!fp_access_check(s)) { 10858 return; 10859 } 10860 /* The Q field specifies lo/hi half input for this insn. 
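             * (Q == 0 is PMULL, multiplying the low 64 bits of Vn and
             * Vm; Q == 1 is PMULL2, multiplying the high 64 bits.)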
*/ 10861 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, 10862 gen_helper_gvec_pmull_q); 10863 break; 10864 10865 default: 10866 unallocated_encoding(s); 10867 break; 10868 } 10869 return; 10870 case 9: /* SQDMLAL, SQDMLAL2 */ 10871 case 11: /* SQDMLSL, SQDMLSL2 */ 10872 case 13: /* SQDMULL, SQDMULL2 */ 10873 if (is_u || size == 0) { 10874 unallocated_encoding(s); 10875 return; 10876 } 10877 /* fall through */ 10878 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 10879 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 10880 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 10881 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 10882 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 10883 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 10884 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */ 10885 /* 64 x 64 -> 128 */ 10886 if (size == 3) { 10887 unallocated_encoding(s); 10888 return; 10889 } 10890 if (!fp_access_check(s)) { 10891 return; 10892 } 10893 10894 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm); 10895 break; 10896 default: 10897 /* opcode 15 not allocated */ 10898 unallocated_encoding(s); 10899 break; 10900 } 10901 } 10902 10903 /* AdvSIMD three same extra 10904 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 10905 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ 10906 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | 10907 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ 10908 */ 10909 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) 10910 { 10911 int rd = extract32(insn, 0, 5); 10912 int rn = extract32(insn, 5, 5); 10913 int opcode = extract32(insn, 11, 4); 10914 int rm = extract32(insn, 16, 5); 10915 int size = extract32(insn, 22, 2); 10916 bool u = extract32(insn, 29, 1); 10917 bool is_q = extract32(insn, 30, 1); 10918 bool feature; 10919 int rot; 10920 10921 switch (u * 16 + opcode) { 10922 case 0x04: /* SMMLA */ 10923 case 0x14: /* UMMLA */ 10924 case 0x05: /* USMMLA */ 10925 if (!is_q || size != MO_32) { 10926 unallocated_encoding(s); 10927 return; 10928 } 10929 feature = dc_isar_feature(aa64_i8mm, s); 10930 break; 10931 case 0x18: /* FCMLA, #0 */ 10932 case 0x19: /* FCMLA, #90 */ 10933 case 0x1a: /* FCMLA, #180 */ 10934 case 0x1b: /* FCMLA, #270 */ 10935 case 0x1c: /* FCADD, #90 */ 10936 case 0x1e: /* FCADD, #270 */ 10937 if (size == 0 10938 || (size == 1 && !dc_isar_feature(aa64_fp16, s)) 10939 || (size == 3 && !is_q)) { 10940 unallocated_encoding(s); 10941 return; 10942 } 10943 feature = dc_isar_feature(aa64_fcma, s); 10944 break; 10945 case 0x1d: /* BFMMLA */ 10946 if (size != MO_16 || !is_q) { 10947 unallocated_encoding(s); 10948 return; 10949 } 10950 feature = dc_isar_feature(aa64_bf16, s); 10951 break; 10952 case 0x1f: 10953 switch (size) { 10954 case 1: /* BFDOT */ 10955 case 3: /* BFMLAL{B,T} */ 10956 feature = dc_isar_feature(aa64_bf16, s); 10957 break; 10958 default: 10959 unallocated_encoding(s); 10960 return; 10961 } 10962 break; 10963 default: 10964 case 0x02: /* SDOT (vector) */ 10965 case 0x03: /* USDOT */ 10966 case 0x10: /* SQRDMLAH (vector) */ 10967 case 0x11: /* SQRDMLSH (vector) */ 10968 case 0x12: /* UDOT (vector) */ 10969 unallocated_encoding(s); 10970 return; 10971 } 10972 if (!feature) { 10973 unallocated_encoding(s); 10974 return; 10975 } 10976 if (!fp_access_check(s)) { 10977 return; 10978 } 10979 10980 switch (opcode) { 10981 case 0x04: /* SMMLA, UMMLA */ 10982 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, 10983 u ? 
gen_helper_gvec_ummla_b 10984 : gen_helper_gvec_smmla_b); 10985 return; 10986 case 0x05: /* USMMLA */ 10987 gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b); 10988 return; 10989 10990 case 0x8: /* FCMLA, #0 */ 10991 case 0x9: /* FCMLA, #90 */ 10992 case 0xa: /* FCMLA, #180 */ 10993 case 0xb: /* FCMLA, #270 */ 10994 rot = extract32(opcode, 0, 2); 10995 switch (size) { 10996 case 1: 10997 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot, 10998 gen_helper_gvec_fcmlah); 10999 break; 11000 case 2: 11001 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot, 11002 gen_helper_gvec_fcmlas); 11003 break; 11004 case 3: 11005 gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot, 11006 gen_helper_gvec_fcmlad); 11007 break; 11008 default: 11009 g_assert_not_reached(); 11010 } 11011 return; 11012 11013 case 0xc: /* FCADD, #90 */ 11014 case 0xe: /* FCADD, #270 */ 11015 rot = extract32(opcode, 1, 1); 11016 switch (size) { 11017 case 1: 11018 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, 11019 gen_helper_gvec_fcaddh); 11020 break; 11021 case 2: 11022 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, 11023 gen_helper_gvec_fcadds); 11024 break; 11025 case 3: 11026 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot, 11027 gen_helper_gvec_fcaddd); 11028 break; 11029 default: 11030 g_assert_not_reached(); 11031 } 11032 return; 11033 11034 case 0xd: /* BFMMLA */ 11035 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla); 11036 return; 11037 case 0xf: 11038 switch (size) { 11039 case 1: /* BFDOT */ 11040 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot); 11041 break; 11042 case 3: /* BFMLAL{B,T} */ 11043 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q, 11044 gen_helper_gvec_bfmlal); 11045 break; 11046 default: 11047 g_assert_not_reached(); 11048 } 11049 return; 11050 11051 default: 11052 g_assert_not_reached(); 11053 } 11054 } 11055 11056 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q, 11057 int size, int rn, int rd) 11058 { 11059 /* Handle 2-reg-misc ops which are widening (so each size element 11060 * in the source becomes a 2*size element in the destination). 11061 * The only instruction like this is FCVTL. 11062 */ 11063 int pass; 11064 11065 if (size == 3) { 11066 /* 32 -> 64 bit fp conversion */ 11067 TCGv_i64 tcg_res[2]; 11068 int srcelt = is_q ? 2 : 0; 11069 11070 for (pass = 0; pass < 2; pass++) { 11071 TCGv_i32 tcg_op = tcg_temp_new_i32(); 11072 tcg_res[pass] = tcg_temp_new_i64(); 11073 11074 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32); 11075 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, tcg_env); 11076 } 11077 for (pass = 0; pass < 2; pass++) { 11078 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 11079 } 11080 } else { 11081 /* 16 -> 32 bit fp conversion */ 11082 int srcelt = is_q ?
4 : 0; 11083 TCGv_i32 tcg_res[4]; 11084 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 11085 TCGv_i32 ahp = get_ahp_flag(); 11086 11087 for (pass = 0; pass < 4; pass++) { 11088 tcg_res[pass] = tcg_temp_new_i32(); 11089 11090 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); 11091 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass], 11092 fpst, ahp); 11093 } 11094 for (pass = 0; pass < 4; pass++) { 11095 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); 11096 } 11097 } 11098 } 11099 11100 static void handle_rev(DisasContext *s, int opcode, bool u, 11101 bool is_q, int size, int rn, int rd) 11102 { 11103 int op = (opcode << 1) | u; 11104 int opsz = op + size; 11105 int grp_size = 3 - opsz; 11106 int dsize = is_q ? 128 : 64; 11107 int i; 11108 11109 if (opsz >= 3) { 11110 unallocated_encoding(s); 11111 return; 11112 } 11113 11114 if (!fp_access_check(s)) { 11115 return; 11116 } 11117 11118 if (size == 0) { 11119 /* Special case bytes, use bswap op on each group of elements */ 11120 int groups = dsize / (8 << grp_size); 11121 11122 for (i = 0; i < groups; i++) { 11123 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 11124 11125 read_vec_element(s, tcg_tmp, rn, i, grp_size); 11126 switch (grp_size) { 11127 case MO_16: 11128 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); 11129 break; 11130 case MO_32: 11131 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); 11132 break; 11133 case MO_64: 11134 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp); 11135 break; 11136 default: 11137 g_assert_not_reached(); 11138 } 11139 write_vec_element(s, tcg_tmp, rd, i, grp_size); 11140 } 11141 clear_vec_high(s, is_q, rd); 11142 } else { 11143 int revmask = (1 << grp_size) - 1; 11144 int esize = 8 << size; 11145 int elements = dsize / esize; 11146 TCGv_i64 tcg_rn = tcg_temp_new_i64(); 11147 TCGv_i64 tcg_rd[2]; 11148 11149 for (i = 0; i < 2; i++) { 11150 tcg_rd[i] = tcg_temp_new_i64(); 11151 tcg_gen_movi_i64(tcg_rd[i], 0); 11152 } 11153 11154 for (i = 0; i < elements; i++) { 11155 int e_rev = (i & 0xf) ^ revmask; 11156 int w = (e_rev * esize) / 64; 11157 int o = (e_rev * esize) % 64; 11158 11159 read_vec_element(s, tcg_rn, rn, i, size); 11160 tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize); 11161 } 11162 11163 for (i = 0; i < 2; i++) { 11164 write_vec_element(s, tcg_rd[i], rd, i, MO_64); 11165 } 11166 clear_vec_high(s, true, rd); 11167 } 11168 } 11169 11170 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, 11171 bool is_q, int size, int rn, int rd) 11172 { 11173 /* Implement the pairwise operations from 2-misc: 11174 * SADDLP, UADDLP, SADALP, UADALP. 11175 * These all add pairs of elements in the input to produce a 11176 * double-width result element in the output (possibly accumulating). 11177 */ 11178 bool accum = (opcode == 0x6); 11179 int maxpass = is_q ? 2 : 1; 11180 int pass; 11181 TCGv_i64 tcg_res[2]; 11182 11183 if (size == 2) { 11184 /* 32 + 32 -> 64 op */ 11185 MemOp memop = size + (u ? 
0 : MO_SIGN); 11186 11187 for (pass = 0; pass < maxpass; pass++) { 11188 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 11189 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 11190 11191 tcg_res[pass] = tcg_temp_new_i64(); 11192 11193 read_vec_element(s, tcg_op1, rn, pass * 2, memop); 11194 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop); 11195 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2); 11196 if (accum) { 11197 read_vec_element(s, tcg_op1, rd, pass, MO_64); 11198 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1); 11199 } 11200 } 11201 } else { 11202 for (pass = 0; pass < maxpass; pass++) { 11203 TCGv_i64 tcg_op = tcg_temp_new_i64(); 11204 NeonGenOne64OpFn *genfn; 11205 static NeonGenOne64OpFn * const fns[2][2] = { 11206 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 }, 11207 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 }, 11208 }; 11209 11210 genfn = fns[size][u]; 11211 11212 tcg_res[pass] = tcg_temp_new_i64(); 11213 11214 read_vec_element(s, tcg_op, rn, pass, MO_64); 11215 genfn(tcg_res[pass], tcg_op); 11216 11217 if (accum) { 11218 read_vec_element(s, tcg_op, rd, pass, MO_64); 11219 if (size == 0) { 11220 gen_helper_neon_addl_u16(tcg_res[pass], 11221 tcg_res[pass], tcg_op); 11222 } else { 11223 gen_helper_neon_addl_u32(tcg_res[pass], 11224 tcg_res[pass], tcg_op); 11225 } 11226 } 11227 } 11228 } 11229 if (!is_q) { 11230 tcg_res[1] = tcg_constant_i64(0); 11231 } 11232 for (pass = 0; pass < 2; pass++) { 11233 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 11234 } 11235 } 11236 11237 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) 11238 { 11239 /* Implement SHLL and SHLL2 */ 11240 int pass; 11241 int part = is_q ? 2 : 0; 11242 TCGv_i64 tcg_res[2]; 11243 11244 for (pass = 0; pass < 2; pass++) { 11245 static NeonGenWidenFn * const widenfns[3] = { 11246 gen_helper_neon_widen_u8, 11247 gen_helper_neon_widen_u16, 11248 tcg_gen_extu_i32_i64, 11249 }; 11250 NeonGenWidenFn *widenfn = widenfns[size]; 11251 TCGv_i32 tcg_op = tcg_temp_new_i32(); 11252 11253 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32); 11254 tcg_res[pass] = tcg_temp_new_i64(); 11255 widenfn(tcg_res[pass], tcg_op); 11256 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size); 11257 } 11258 11259 for (pass = 0; pass < 2; pass++) { 11260 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 11261 } 11262 } 11263 11264 /* AdvSIMD two reg misc 11265 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 11266 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 11267 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | 11268 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 11269 */ 11270 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) 11271 { 11272 int size = extract32(insn, 22, 2); 11273 int opcode = extract32(insn, 12, 5); 11274 bool u = extract32(insn, 29, 1); 11275 bool is_q = extract32(insn, 30, 1); 11276 int rn = extract32(insn, 5, 5); 11277 int rd = extract32(insn, 0, 5); 11278 bool need_fpstatus = false; 11279 int rmode = -1; 11280 TCGv_i32 tcg_rmode; 11281 TCGv_ptr tcg_fpstatus; 11282 11283 switch (opcode) { 11284 case 0x0: /* REV64, REV32 */ 11285 case 0x1: /* REV16 */ 11286 handle_rev(s, opcode, u, is_q, size, rn, rd); 11287 return; 11288 case 0x5: /* CNT, NOT, RBIT */ 11289 if (u && size == 0) { 11290 /* NOT */ 11291 break; 11292 } else if (u && size == 1) { 11293 /* RBIT */ 11294 break; 11295 } else if (!u && size == 0) { 11296 /* CNT */ 11297 break; 11298 } 11299 
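        /*
         * Decode summary for opcode 0x5 (illustrative):
         *   u == 0, size == 0  ->  CNT
         *   u == 1, size == 0  ->  NOT
         *   u == 1, size == 1  ->  RBIT
         * Any other u/size combination is unallocated:
         */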
unallocated_encoding(s); 11300 return; 11301 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */ 11302 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */ 11303 if (size == 3) { 11304 unallocated_encoding(s); 11305 return; 11306 } 11307 if (!fp_access_check(s)) { 11308 return; 11309 } 11310 11311 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd); 11312 return; 11313 case 0x4: /* CLS, CLZ */ 11314 if (size == 3) { 11315 unallocated_encoding(s); 11316 return; 11317 } 11318 break; 11319 case 0x2: /* SADDLP, UADDLP */ 11320 case 0x6: /* SADALP, UADALP */ 11321 if (size == 3) { 11322 unallocated_encoding(s); 11323 return; 11324 } 11325 if (!fp_access_check(s)) { 11326 return; 11327 } 11328 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd); 11329 return; 11330 case 0x13: /* SHLL, SHLL2 */ 11331 if (u == 0 || size == 3) { 11332 unallocated_encoding(s); 11333 return; 11334 } 11335 if (!fp_access_check(s)) { 11336 return; 11337 } 11338 handle_shll(s, is_q, size, rn, rd); 11339 return; 11340 case 0xa: /* CMLT */ 11341 if (u == 1) { 11342 unallocated_encoding(s); 11343 return; 11344 } 11345 /* fall through */ 11346 case 0x8: /* CMGT, CMGE */ 11347 case 0x9: /* CMEQ, CMLE */ 11348 case 0xb: /* ABS, NEG */ 11349 if (size == 3 && !is_q) { 11350 unallocated_encoding(s); 11351 return; 11352 } 11353 break; 11354 case 0x7: /* SQABS, SQNEG */ 11355 if (size == 3 && !is_q) { 11356 unallocated_encoding(s); 11357 return; 11358 } 11359 break; 11360 case 0xc ... 0xf: 11361 case 0x16 ... 0x1f: 11362 { 11363 /* Floating point: U, size[1] and opcode indicate operation; 11364 * size[0] indicates single or double precision. 11365 */ 11366 int is_double = extract32(size, 0, 1); 11367 opcode |= (extract32(size, 1, 1) << 5) | (u << 6); 11368 size = is_double ? 3 : 2; 11369 switch (opcode) { 11370 case 0x2f: /* FABS */ 11371 case 0x6f: /* FNEG */ 11372 if (size == 3 && !is_q) { 11373 unallocated_encoding(s); 11374 return; 11375 } 11376 break; 11377 case 0x1d: /* SCVTF */ 11378 case 0x5d: /* UCVTF */ 11379 { 11380 bool is_signed = (opcode == 0x1d) ? true : false; 11381 int elements = is_double ? 2 : is_q ? 
4 : 2; 11382 if (is_double && !is_q) { 11383 unallocated_encoding(s); 11384 return; 11385 } 11386 if (!fp_access_check(s)) { 11387 return; 11388 } 11389 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size); 11390 return; 11391 } 11392 case 0x2c: /* FCMGT (zero) */ 11393 case 0x2d: /* FCMEQ (zero) */ 11394 case 0x2e: /* FCMLT (zero) */ 11395 case 0x6c: /* FCMGE (zero) */ 11396 case 0x6d: /* FCMLE (zero) */ 11397 if (size == 3 && !is_q) { 11398 unallocated_encoding(s); 11399 return; 11400 } 11401 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd); 11402 return; 11403 case 0x7f: /* FSQRT */ 11404 if (size == 3 && !is_q) { 11405 unallocated_encoding(s); 11406 return; 11407 } 11408 break; 11409 case 0x1a: /* FCVTNS */ 11410 case 0x1b: /* FCVTMS */ 11411 case 0x3a: /* FCVTPS */ 11412 case 0x3b: /* FCVTZS */ 11413 case 0x5a: /* FCVTNU */ 11414 case 0x5b: /* FCVTMU */ 11415 case 0x7a: /* FCVTPU */ 11416 case 0x7b: /* FCVTZU */ 11417 need_fpstatus = true; 11418 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); 11419 if (size == 3 && !is_q) { 11420 unallocated_encoding(s); 11421 return; 11422 } 11423 break; 11424 case 0x5c: /* FCVTAU */ 11425 case 0x1c: /* FCVTAS */ 11426 need_fpstatus = true; 11427 rmode = FPROUNDING_TIEAWAY; 11428 if (size == 3 && !is_q) { 11429 unallocated_encoding(s); 11430 return; 11431 } 11432 break; 11433 case 0x3c: /* URECPE */ 11434 if (size == 3) { 11435 unallocated_encoding(s); 11436 return; 11437 } 11438 /* fall through */ 11439 case 0x3d: /* FRECPE */ 11440 case 0x7d: /* FRSQRTE */ 11441 if (size == 3 && !is_q) { 11442 unallocated_encoding(s); 11443 return; 11444 } 11445 if (!fp_access_check(s)) { 11446 return; 11447 } 11448 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd); 11449 return; 11450 case 0x56: /* FCVTXN, FCVTXN2 */ 11451 if (size == 2) { 11452 unallocated_encoding(s); 11453 return; 11454 } 11455 /* fall through */ 11456 case 0x16: /* FCVTN, FCVTN2 */ 11457 /* handle_2misc_narrow does a 2*size -> size operation, but these 11458 * instructions encode the source size rather than dest size. 
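             * e.g. the double-to-single FCVTN arrives here with the
             * remapped size == 3 (the 64-bit source size), so
             * size - 1 == 2, the destination element size, is what
             * gets passed to handle_2misc_narrow.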
11459 */ 11460 if (!fp_access_check(s)) { 11461 return; 11462 } 11463 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); 11464 return; 11465 case 0x36: /* BFCVTN, BFCVTN2 */ 11466 if (!dc_isar_feature(aa64_bf16, s) || size != 2) { 11467 unallocated_encoding(s); 11468 return; 11469 } 11470 if (!fp_access_check(s)) { 11471 return; 11472 } 11473 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); 11474 return; 11475 case 0x17: /* FCVTL, FCVTL2 */ 11476 if (!fp_access_check(s)) { 11477 return; 11478 } 11479 handle_2misc_widening(s, opcode, is_q, size, rn, rd); 11480 return; 11481 case 0x18: /* FRINTN */ 11482 case 0x19: /* FRINTM */ 11483 case 0x38: /* FRINTP */ 11484 case 0x39: /* FRINTZ */ 11485 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); 11486 /* fall through */ 11487 case 0x59: /* FRINTX */ 11488 case 0x79: /* FRINTI */ 11489 need_fpstatus = true; 11490 if (size == 3 && !is_q) { 11491 unallocated_encoding(s); 11492 return; 11493 } 11494 break; 11495 case 0x58: /* FRINTA */ 11496 rmode = FPROUNDING_TIEAWAY; 11497 need_fpstatus = true; 11498 if (size == 3 && !is_q) { 11499 unallocated_encoding(s); 11500 return; 11501 } 11502 break; 11503 case 0x7c: /* URSQRTE */ 11504 if (size == 3) { 11505 unallocated_encoding(s); 11506 return; 11507 } 11508 break; 11509 case 0x1e: /* FRINT32Z */ 11510 case 0x1f: /* FRINT64Z */ 11511 rmode = FPROUNDING_ZERO; 11512 /* fall through */ 11513 case 0x5e: /* FRINT32X */ 11514 case 0x5f: /* FRINT64X */ 11515 need_fpstatus = true; 11516 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) { 11517 unallocated_encoding(s); 11518 return; 11519 } 11520 break; 11521 default: 11522 unallocated_encoding(s); 11523 return; 11524 } 11525 break; 11526 } 11527 default: 11528 case 0x3: /* SUQADD, USQADD */ 11529 unallocated_encoding(s); 11530 return; 11531 } 11532 11533 if (!fp_access_check(s)) { 11534 return; 11535 } 11536 11537 if (need_fpstatus || rmode >= 0) { 11538 tcg_fpstatus = fpstatus_ptr(FPST_FPCR); 11539 } else { 11540 tcg_fpstatus = NULL; 11541 } 11542 if (rmode >= 0) { 11543 tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus); 11544 } else { 11545 tcg_rmode = NULL; 11546 } 11547 11548 switch (opcode) { 11549 case 0x5: 11550 if (u && size == 0) { /* NOT */ 11551 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0); 11552 return; 11553 } 11554 break; 11555 case 0x8: /* CMGT, CMGE */ 11556 if (u) { 11557 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size); 11558 } else { 11559 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size); 11560 } 11561 return; 11562 case 0x9: /* CMEQ, CMLE */ 11563 if (u) { 11564 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size); 11565 } else { 11566 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size); 11567 } 11568 return; 11569 case 0xa: /* CMLT */ 11570 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size); 11571 return; 11572 case 0xb: 11573 if (u) { /* ABS, NEG */ 11574 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); 11575 } else { 11576 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); 11577 } 11578 return; 11579 } 11580 11581 if (size == 3) { 11582 /* All 64-bit element operations can be shared with scalar 2misc */ 11583 int pass; 11584 11585 /* Coverity claims (size == 3 && !is_q) has been eliminated 11586 * from all paths leading to here. 
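         * The tcg_debug_assert below documents (and, in debug
         * builds, checks) that assumption.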
11587 */ 11588 tcg_debug_assert(is_q); 11589 for (pass = 0; pass < 2; pass++) { 11590 TCGv_i64 tcg_op = tcg_temp_new_i64(); 11591 TCGv_i64 tcg_res = tcg_temp_new_i64(); 11592 11593 read_vec_element(s, tcg_op, rn, pass, MO_64); 11594 11595 handle_2misc_64(s, opcode, u, tcg_res, tcg_op, 11596 tcg_rmode, tcg_fpstatus); 11597 11598 write_vec_element(s, tcg_res, rd, pass, MO_64); 11599 } 11600 } else { 11601 int pass; 11602 11603 for (pass = 0; pass < (is_q ? 4 : 2); pass++) { 11604 TCGv_i32 tcg_op = tcg_temp_new_i32(); 11605 TCGv_i32 tcg_res = tcg_temp_new_i32(); 11606 11607 read_vec_element_i32(s, tcg_op, rn, pass, MO_32); 11608 11609 if (size == 2) { 11610 /* Special cases for 32 bit elements */ 11611 switch (opcode) { 11612 case 0x4: /* CLS */ 11613 if (u) { 11614 tcg_gen_clzi_i32(tcg_res, tcg_op, 32); 11615 } else { 11616 tcg_gen_clrsb_i32(tcg_res, tcg_op); 11617 } 11618 break; 11619 case 0x7: /* SQABS, SQNEG */ 11620 if (u) { 11621 gen_helper_neon_qneg_s32(tcg_res, tcg_env, tcg_op); 11622 } else { 11623 gen_helper_neon_qabs_s32(tcg_res, tcg_env, tcg_op); 11624 } 11625 break; 11626 case 0x2f: /* FABS */ 11627 gen_vfp_abss(tcg_res, tcg_op); 11628 break; 11629 case 0x6f: /* FNEG */ 11630 gen_vfp_negs(tcg_res, tcg_op); 11631 break; 11632 case 0x7f: /* FSQRT */ 11633 gen_helper_vfp_sqrts(tcg_res, tcg_op, tcg_env); 11634 break; 11635 case 0x1a: /* FCVTNS */ 11636 case 0x1b: /* FCVTMS */ 11637 case 0x1c: /* FCVTAS */ 11638 case 0x3a: /* FCVTPS */ 11639 case 0x3b: /* FCVTZS */ 11640 gen_helper_vfp_tosls(tcg_res, tcg_op, 11641 tcg_constant_i32(0), tcg_fpstatus); 11642 break; 11643 case 0x5a: /* FCVTNU */ 11644 case 0x5b: /* FCVTMU */ 11645 case 0x5c: /* FCVTAU */ 11646 case 0x7a: /* FCVTPU */ 11647 case 0x7b: /* FCVTZU */ 11648 gen_helper_vfp_touls(tcg_res, tcg_op, 11649 tcg_constant_i32(0), tcg_fpstatus); 11650 break; 11651 case 0x18: /* FRINTN */ 11652 case 0x19: /* FRINTM */ 11653 case 0x38: /* FRINTP */ 11654 case 0x39: /* FRINTZ */ 11655 case 0x58: /* FRINTA */ 11656 case 0x79: /* FRINTI */ 11657 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus); 11658 break; 11659 case 0x59: /* FRINTX */ 11660 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus); 11661 break; 11662 case 0x7c: /* URSQRTE */ 11663 gen_helper_rsqrte_u32(tcg_res, tcg_op); 11664 break; 11665 case 0x1e: /* FRINT32Z */ 11666 case 0x5e: /* FRINT32X */ 11667 gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus); 11668 break; 11669 case 0x1f: /* FRINT64Z */ 11670 case 0x5f: /* FRINT64X */ 11671 gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus); 11672 break; 11673 default: 11674 g_assert_not_reached(); 11675 } 11676 } else { 11677 /* Use helpers for 8 and 16 bit elements */ 11678 switch (opcode) { 11679 case 0x5: /* CNT, RBIT */ 11680 /* For these two insns size is part of the opcode specifier 11681 * (handled earlier); they always operate on byte elements. 
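                 * (CNT counts the set bits in each byte; RBIT
                 * reverses the bit order within each byte.)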
11682 */ 11683 if (u) { 11684 gen_helper_neon_rbit_u8(tcg_res, tcg_op); 11685 } else { 11686 gen_helper_neon_cnt_u8(tcg_res, tcg_op); 11687 } 11688 break; 11689 case 0x7: /* SQABS, SQNEG */ 11690 { 11691 NeonGenOneOpEnvFn *genfn; 11692 static NeonGenOneOpEnvFn * const fns[2][2] = { 11693 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, 11694 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, 11695 }; 11696 genfn = fns[size][u]; 11697 genfn(tcg_res, tcg_env, tcg_op); 11698 break; 11699 } 11700 case 0x4: /* CLS, CLZ */ 11701 if (u) { 11702 if (size == 0) { 11703 gen_helper_neon_clz_u8(tcg_res, tcg_op); 11704 } else { 11705 gen_helper_neon_clz_u16(tcg_res, tcg_op); 11706 } 11707 } else { 11708 if (size == 0) { 11709 gen_helper_neon_cls_s8(tcg_res, tcg_op); 11710 } else { 11711 gen_helper_neon_cls_s16(tcg_res, tcg_op); 11712 } 11713 } 11714 break; 11715 default: 11716 g_assert_not_reached(); 11717 } 11718 } 11719 11720 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 11721 } 11722 } 11723 clear_vec_high(s, is_q, rd); 11724 11725 if (tcg_rmode) { 11726 gen_restore_rmode(tcg_rmode, tcg_fpstatus); 11727 } 11728 } 11729 11730 /* AdvSIMD [scalar] two register miscellaneous (FP16) 11731 * 11732 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0 11733 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ 11734 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd | 11735 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ 11736 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00 11737 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800 11738 * 11739 * This actually covers two groups where scalar access is governed by 11740 * bit 28. A bunch of the instructions (float to integral) only exist 11741 * in the vector form and are un-allocated for the scalar decode. Also 11742 * in the scalar decode Q is always 1. 11743 */ 11744 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) 11745 { 11746 int fpop, opcode, a, u; 11747 int rn, rd; 11748 bool is_q; 11749 bool is_scalar; 11750 bool only_in_vector = false; 11751 11752 int pass; 11753 TCGv_i32 tcg_rmode = NULL; 11754 TCGv_ptr tcg_fpstatus = NULL; 11755 bool need_fpst = true; 11756 int rmode = -1; 11757 11758 if (!dc_isar_feature(aa64_fp16, s)) { 11759 unallocated_encoding(s); 11760 return; 11761 } 11762 11763 rd = extract32(insn, 0, 5); 11764 rn = extract32(insn, 5, 5); 11765 11766 a = extract32(insn, 23, 1); 11767 u = extract32(insn, 29, 1); 11768 is_scalar = extract32(insn, 28, 1); 11769 is_q = extract32(insn, 30, 1); 11770 11771 opcode = extract32(insn, 12, 5); 11772 fpop = deposit32(opcode, 5, 1, a); 11773 fpop = deposit32(fpop, 6, 1, u); 11774 11775 switch (fpop) { 11776 case 0x1d: /* SCVTF */ 11777 case 0x5d: /* UCVTF */ 11778 { 11779 int elements; 11780 11781 if (is_scalar) { 11782 elements = 1; 11783 } else { 11784 elements = (is_q ? 
    if (is_scalar) {
        TCGv_i32 tcg_op = read_fp_hreg(s, rn);
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
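        /*
         * FNEG on a half-precision value is a pure sign-bit flip, so
         * it needs no fpstatus (need_fpst was left false for it above).
         */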
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_restore_rmode(tcg_rmode, tcg_fpstatus);
    }
}

/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31    30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
     */
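    /*
     * Note that much of this group has by now been converted to
     * decodetree: the cases below that simply UNDEF are encodings
     * which are handled there before this legacy decoder is reached.
     */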
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0f:
        switch (size) {
        case 1: /* BFDOT */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            size = MO_32;
            break;
        case 3: /* BFMLAL{B,T} */
            if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
                unallocated_encoding(s);
                return;
            }
            /* setting is_fp here would enable the wrong size checks below */
            size = MO_16;
            break;
        default:
        case 0: /* SUDOT */
        case 2: /* USDOT */
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
    case 0x00: /* FMLAL */
    case 0x01: /* FMLA */
    case 0x04: /* FMLSL */
    case 0x05: /* FMLS */
    case 0x08: /* MUL */
    case 0x09: /* FMUL */
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
    case 0x0e: /* SDOT */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
    case 0x18: /* FMLAL2 */
    case 0x19: /* FMULX */
    case 0x1c: /* FMLSL2 */
    case 0x1d: /* SQRDMLAH */
    case 0x1e: /* UDOT */
    case 0x1f: /* SQRDMLSH */
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        unallocated_encoding(s); /* in decodetree */
        return;

    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;

    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    /* Given MemOp size, adjust register and indexing. */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }
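
    /*
     * For example, with MO_16 the H:L:M concatenation above selects one
     * of the eight halfword elements of Vm, leaving only 4 bits for the
     * register number (V0-V15); for MO_32 and MO_64, M instead becomes
     * bit 4 of the register number, making all of V0-V31 addressable.
     */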

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x0f:
        switch (extract32(insn, 22, 2)) {
        case 1: /* BFDOT */
            gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
                             gen_helper_gvec_bfdot_idx);
            return;
        case 3: /* BFMLAL{B,T} */
            gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
                              gen_helper_gvec_bfmlal_idx);
            return;
        }
        g_assert_not_reached();
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        {
            int rot = extract32(insn, 13, 2);
            int data = (index << 2) | rot;
            tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                               vec_full_reg_offset(s, rn),
                               vec_full_reg_offset(s, rm),
                               vec_full_reg_offset(s, rd), fpst,
                               is_q ? 16 : 8, vec_full_reg_size(s), data,
                               size == MO_64
                               ? gen_helper_gvec_fcmlas_idx
                               : gen_helper_gvec_fcmlah_idx);
        }
        return;
    }

    if (size == 3) {
        g_assert_not_reached();
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
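            /* e.g. an index element of 0x1234 becomes 0x12341234 */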
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, tcg_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, tcg_env,
                                                tcg_op, tcg_idx);
                }
                break;
            default:
            case 0x01: /* FMLA */
            case 0x05: /* FMLS */
            case 0x09: /* FMUL */
            case 0x19: /* FMULX */
            case 0x1d: /* SQRDMLAH */
            case 0x1f: /* SQRDMLSH */
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }

        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        MemOp memop = MO_32;
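
        /*
         * opcode bit 0 is set for the saturating-doubling ops
         * (SQDMLAL/SQDMLSL/SQDMULL); these, like the signed
         * non-saturating ops, need sign-extended source elements.
         */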
        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, tcg_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], tcg_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_constant_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
        }
    }
}

/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn */
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
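
/*
 * A table entry matches when (insn & mask) == pattern; entries are tried
 * in order down to the all-zeroes terminator, so more specific patterns
 * must be listed before more general ones that overlap them.
 */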

static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
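
/*
 * These are the trans_* callbacks for the SME FA64 decoder included
 * above: trans_OK accepts an insn as streaming-compatible, while
 * trans_FAIL flags it as non-streaming so that a trap can be raised
 * if it is executed in streaming SVE mode.
 */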
static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}

/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->extra.arm.guarded;
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, known to be non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *  - branch target identifiers,
 *  - paciasp, pacibsp,
 *  - BRK insn
 *  - HLT insn
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype. */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception. */
            return true;
        }
    }
    return false;
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_legacy(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 4)) {
    case 0x5:
    case 0xd:     /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf:     /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata[0] = EX_TBFLAG_A64(tb_flags, ATA);
    dc->ata[1] = EX_TBFLAG_A64(tb_flags, ATA0);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
    dc->nv = EX_TBFLAG_A64(tb_flags, NV);
    dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1);
    dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2);
    dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20);
    dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;
    dc->gm_blocksize = arm_cpu->gm_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    dc->lse2 = dc_isar_feature(aa64_lse2, dc);

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
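    /*
     * E.g. with 4KiB pages and pc_first ending in 0xff0, the OR yields
     * -16, so bound = 16 / 4 = 4 instructions to the page boundary.
     */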

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start_updated = false;
}

static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu_env(cpu);
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }
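
    /*
     * The decodetree decoders are tried first; disas_a64_legacy() only
     * handles the instruction groups that have not yet been converted
     * and UNDEFs everything else.
     */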
    if (!disas_a64(s, insn) &&
        !disas_sme(s, insn) &&
        !disas_sve(s, insn)) {
        disas_a64_legacy(s, insn);
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}

static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(tcg_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(tcg_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(tcg_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}
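
/*
 * These hooks are driven by the generic translator_loop():
 * init_disas_context and tb_start run once per TB, insn_start and
 * translate_insn once per instruction, and tb_stop finishes the TB.
 */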
12863 */ 12864 switch (dc->base.is_jmp) { 12865 default: 12866 gen_a64_update_pc(dc, 4); 12867 /* fall through */ 12868 case DISAS_EXIT: 12869 case DISAS_JUMP: 12870 gen_step_complete_exception(dc); 12871 break; 12872 case DISAS_NORETURN: 12873 break; 12874 } 12875 } else { 12876 switch (dc->base.is_jmp) { 12877 case DISAS_NEXT: 12878 case DISAS_TOO_MANY: 12879 gen_goto_tb(dc, 1, 4); 12880 break; 12881 default: 12882 case DISAS_UPDATE_EXIT: 12883 gen_a64_update_pc(dc, 4); 12884 /* fall through */ 12885 case DISAS_EXIT: 12886 tcg_gen_exit_tb(NULL, 0); 12887 break; 12888 case DISAS_UPDATE_NOCHAIN: 12889 gen_a64_update_pc(dc, 4); 12890 /* fall through */ 12891 case DISAS_JUMP: 12892 tcg_gen_lookup_and_goto_ptr(); 12893 break; 12894 case DISAS_NORETURN: 12895 case DISAS_SWI: 12896 break; 12897 case DISAS_WFE: 12898 gen_a64_update_pc(dc, 4); 12899 gen_helper_wfe(tcg_env); 12900 break; 12901 case DISAS_YIELD: 12902 gen_a64_update_pc(dc, 4); 12903 gen_helper_yield(tcg_env); 12904 break; 12905 case DISAS_WFI: 12906 /* 12907 * This is a special case because we don't want to just halt 12908 * the CPU if trying to debug across a WFI. 12909 */ 12910 gen_a64_update_pc(dc, 4); 12911 gen_helper_wfi(tcg_env, tcg_constant_i32(4)); 12912 /* 12913 * The helper doesn't necessarily throw an exception, but we 12914 * must go back to the main loop to check for interrupts anyway. 12915 */ 12916 tcg_gen_exit_tb(NULL, 0); 12917 break; 12918 } 12919 } 12920 } 12921 12922 const TranslatorOps aarch64_translator_ops = { 12923 .init_disas_context = aarch64_tr_init_disas_context, 12924 .tb_start = aarch64_tr_tb_start, 12925 .insn_start = aarch64_tr_insn_start, 12926 .translate_insn = aarch64_tr_translate_insn, 12927 .tb_stop = aarch64_tr_tb_stop, 12928 }; 12929