/*
 * AArch64 translation
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"
#include "semihosting/semihost.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "cpregs.h"
#include "translate-a64.h"
#include "qemu/atomic128.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

/*
 * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
 */
static int get_a64_user_mem_index(DisasContext *s)
{
    /*
     * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
     * which is the usual mmu_idx for this cpu state.
     */
    ARMMMUIdx useridx = s->mmu_idx;

    if (s->unpriv) {
        /*
         * We have pre-computed the condition for AccType_UNPRIV.
         * Therefore we should never get here with a mmu_idx for
         * which we do not know the corresponding user mmu_idx.
         */
        switch (useridx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
            useridx = ARMMMUIdx_E10_0;
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            useridx = ARMMMUIdx_E20_0;
            break;
        default:
            g_assert_not_reached();
        }
    }
    return arm_to_core_mmu_idx(useridx);
}

static void set_btype_raw(int val)
{
    tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
                   offsetof(CPUARMState, btype));
}

static void set_btype(DisasContext *s, int val)
{
    /* BTYPE is a 2-bit field, and 0 should be done with reset_btype. */
    tcg_debug_assert(val >= 1 && val <= 3);
    set_btype_raw(val);
    s->btype = -1;
}

static void reset_btype(DisasContext *s)
{
    if (s->btype != 0) {
        set_btype_raw(0);
        s->btype = 0;
    }
}

static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
    } else {
        tcg_gen_movi_i64(dest, s->pc_curr + diff);
    }
}

void gen_a64_update_pc(DisasContext *s, target_long diff)
{
    gen_pc_plus_diff(s, cpu_pc, diff);
    s->pc_save = s->pc_curr + diff;
}

/*
 * Handle Top Byte Ignore (TBI) bits.
 *
 * If address tagging is enabled via the TCR TBI bits:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * Here we have concatenated TBI{1,0} into tbi.
 */
static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
                                TCGv_i64 src, int tbi)
{
    if (tbi == 0) {
        /* Load unmodified address */
        tcg_gen_mov_i64(dst, src);
    } else if (!regime_has_2_ranges(s->mmu_idx)) {
        /* Force tag byte to all zero */
        tcg_gen_extract_i64(dst, src, 0, 56);
    } else {
        /* Sign-extend from bit 55. */
        tcg_gen_sextract_i64(dst, src, 0, 56);

        switch (tbi) {
        case 1:
            /* tbi0 but !tbi1: only use the extension if positive */
            tcg_gen_and_i64(dst, dst, src);
            break;
        case 2:
            /* !tbi0 but tbi1: only use the extension if negative */
            tcg_gen_or_i64(dst, dst, src);
            break;
        case 3:
            /* tbi0 and tbi1: always use the extension */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
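/*
 * Worked example for the two-range case (illustrative): with tbi == 1
 * (TBI0 set, TBI1 clear) and bit 55 of src clear, the sextract above
 * produces a zero tag byte and the AND keeps it zero, so bits [63:56]
 * of dst are cleared.  With bit 55 set, the sextract produces an
 * all-ones tag byte and the AND restores the original top byte from
 * src, i.e. the extension is not used.
 */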
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    /*
     * If address tagging is enabled for instructions via the TCR TBI bits,
     * then loading an address into the PC will clear out any tag.
     */
    gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
    s->pc_save = -1;
}

/*
 * Handle MTE and/or TBI.
 *
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode we do not have a TLB with which to implement this, so we must
 * remove the top byte now.
 *
 * Always return a fresh temporary that we can increment independently
 * of the write-back address.
 */

TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
{
    TCGv_i64 clean = new_tmp_a64(s);
#ifdef CONFIG_USER_ONLY
    gen_top_byte_ignore(s, clean, addr, s->tbid);
#else
    tcg_gen_mov_i64(clean, addr);
#endif
    return clean;
}

/* Insert a zero tag into src, with the result at dst. */
static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
}

static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
                             MMUAccessType acc, int log2_size)
{
    gen_helper_probe_access(cpu_env, ptr,
                            tcg_constant_i32(acc),
                            tcg_constant_i32(get_mem_index(s)),
                            tcg_constant_i32(1 << log2_size));
}

/*
 * For MTE, check a single logical or atomic access.  This probes a single
 * address, the exact one specified.  The size and alignment of the access
 * is not relevant to MTE, per se, but watchpoints do require the size,
 * and we want to recognize those before making any other changes to state.
 */
static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
                                      bool is_write, bool tag_checked,
                                      int log2_size, bool is_unpriv,
                                      int core_idx)
{
    if (tag_checked && s->mte_active[is_unpriv]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << log2_size) - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}

TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int log2_size)
{
    return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, log2_size,
                                 false, get_mem_index(s));
}

/*
 * For MTE, check multiple logical sequential accesses.
 */
TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
                        bool tag_checked, int size)
{
    if (tag_checked && s->mte_active[0]) {
        TCGv_i64 ret;
        int desc = 0;

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);

        ret = new_tmp_a64(s);
        gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);

        return ret;
    }
    return clean_data_tbi(s, addr);
}
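/*
 * For illustration: a tag-checked 16-byte store with TBI and TCMA
 * clear packs MIDX = core mmu index, WRITE = 1 and SIZEM1 = 15 into
 * desc; gen_helper_mte_check then performs the tag comparison at
 * runtime and returns the address to use for the actual access.
 */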
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /*
     * Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.
     */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);
}

static void gen_rebuild_hflags(DisasContext *s)
{
    gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
}

static void gen_exception_internal(int excp)
{
    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
}

static void gen_exception_internal_insn(DisasContext *s, int excp)
{
    gen_a64_update_pc(s, 0);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
{
    gen_a64_update_pc(s, 0);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
{
    if (s->ss_active) {
        return false;
    }
    return translator_use_goto_tb(&s->base, dest);
}

static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
{
    if (use_goto_tb(s, s->pc_curr + diff)) {
        /*
         * For pcrel, the pc must always be up-to-date on entry to
         * the linked TB, so that it can use simple additions for all
         * further adjustments.  For !pcrel, the linked TB is compiled
         * to know its full virtual address, so we can delay the
         * update to pc to the unlinked path.  A long chain of links
         * can thus avoid many updates to the PC.
         */
        if (tb_cflags(s->base.tb) & CF_PCREL) {
            gen_a64_update_pc(s, diff);
            tcg_gen_goto_tb(n);
        } else {
            tcg_gen_goto_tb(n);
            gen_a64_update_pc(s, diff);
        }
        tcg_gen_exit_tb(s->base.tb, n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_update_pc(s, diff);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

TCGv_i64 new_tmp_a64(DisasContext *s)
{
    return tcg_temp_new_i64();
}

TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    /* Nop move, with side effect of clearing the tail. */
    tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
}

void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
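/*
 * Note that because write_fp_dreg() ends with clear_vec_high(s, false,
 * reg), a scalar write such as write_fp_dreg(s, 0, v) also zeroes
 * Q0[127:64] (and any SVE tail beyond that), matching the
 * architectural rule described above that scalar FP writes clear the
 * rest of the vector register.
 */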
/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand operation using an out-of-line helper. */
static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int data, gen_helper_gvec_2 *fn)
{
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand operation using an out-of-line helper. */
static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, int data, gen_helper_gvec_3 *fn)
{
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Expand a 3-operand + qc + operation using an out-of-line helper. */
static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
                            int rm, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), qc_ptr,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}
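/*
 * Typical uses (illustrative): an integer vector ADD is expanded with
 * gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size), and a
 * shift-by-immediate with gen_gvec_fn2i(s, is_q, rd, rn, shift,
 * tcg_gen_gvec_shli, size).
 */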
/* Expand a 4-operand operation using an out-of-line helper. */
static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int ra, int data, gen_helper_gvec_4 *fn)
{
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
}

/*
 * Expand a 4-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, int ra, bool is_fp16, int data,
                              gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
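/*
 * The V flag computation above uses the identity
 *     V = (result ^ t0) & ~(t0 ^ t1)
 * i.e. signed overflow occurred iff the operands had the same sign and
 * the result's sign differs.  E.g. for 32-bit 0x7fffffff + 1, the top
 * bit of (result ^ t0) & ~(t0 ^ t1) is set, as required.
 */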
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result = tcg_temp_new_i64();
        TCGv_i64 cf_64 = tcg_temp_new_i64();
        TCGv_i64 vf_64 = tcg_temp_new_i64();
        TCGv_i64 tmp = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_constant_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();
        TCGv_i32 zero = tcg_constant_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
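/*
 * Note on the 64-bit path of gen_adc_CC above: the first add2 computes
 * t0 + CF with its carry in the high limb, and the second adds t1
 * while accumulating that carry.  The two partial carries can never
 * both be set, so the final high limb is exactly the carry-out.
 */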
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, MemOp memop, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, MemOp memop,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                             MemOp memop, bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    memop = finalize_memop(s, memop);
    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && (memop & MO_SIGN)) {
        g_assert((memop & MO_SIZE) <= MO_32);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      (memop & MO_SIZE),
                                      (memop & MO_SIGN) != 0,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      MemOp memop, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    MemOp mop;

    tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        TCGv_i64 tmphi = tcg_temp_new_i64();

        tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);

        tcg_temp_free_i64(tcg_hiaddr);
        tcg_temp_free_i64(tmphi);
    }

    tcg_temp_free_i64(tmplo);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi = NULL;
    MemOp mop;

    if (size < 4) {
        mop = finalize_memop(s, size);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        mop = s->be_data | MO_UQ;
        tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
                            mop | (s->align_mem ? MO_ALIGN_16 : 0));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(be ? tmplo : tmphi, tcg_hiaddr,
                            get_mem_index(s), mop);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_temp_free_i64(tmplo);

    if (tmphi) {
        tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
        tcg_temp_free_i64(tmphi);
    }
    clear_vec_high(s, tmphi != NULL, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch ((unsigned)memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, MemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
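/*
 * For example, read_vec_element(s, t, rn, 1, MO_32) loads the second
 * 32-bit element of Vn (Vn.S[1]) zero-extended into t, and
 * write_vec_element(s, t, rd, 0, MO_64) stores t to Vd.D[0].
 */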
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, MemOp mop)
{
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
    write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static bool fp_access_check_only(DisasContext *s)
{
    if (s->fp_excp_el) {
        assert(!s->fp_access_checked);
        s->fp_access_checked = true;

        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_fp_access_trap(1, 0xe, false, 0),
                              s->fp_excp_el);
        return false;
    }
    s->fp_access_checked = true;
    return true;
}

static bool fp_access_check(DisasContext *s)
{
    if (!fp_access_check_only(s)) {
        return false;
    }
    if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_Streaming, false));
        return false;
    }
    return true;
}

/*
 * Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 * This function corresponds to CheckSVEEnabled().
 */
bool sve_access_check(DisasContext *s)
{
    if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
        assert(dc_isar_feature(aa64_sme, s));
        if (!sme_sm_enabled_check(s)) {
            goto fail_exit;
        }
    } else if (s->sve_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_sve_access_trap(), s->sve_excp_el);
        goto fail_exit;
    }
    s->sve_access_checked = true;
    return fp_access_check(s);

 fail_exit:
    /* Assert that we only raise one exception per instruction. */
    assert(!s->sve_access_checked);
    s->sve_access_checked = true;
    return false;
}

/*
 * Check that SME access is enabled, raise an exception if not.
 * Note that this function corresponds to CheckSMEAccess and is
 * only used directly for cpregs.
 */
static bool sme_access_check(DisasContext *s)
{
    if (s->sme_excp_el) {
        gen_exception_insn_el(s, 0, EXCP_UDEF,
                              syn_smetrap(SME_ET_AccessTrap, false),
                              s->sme_excp_el);
        return false;
    }
    return true;
}

/* This function corresponds to CheckSMEEnabled. */
bool sme_enabled_check(DisasContext *s)
{
    /*
     * Note that unlike sve_excp_el, we have not constrained sme_excp_el
     * to be zero when fp_excp_el has priority.  This is because we need
     * sme_excp_el by itself for cpregs access checks.
     */
    if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
        s->fp_access_checked = true;
        return sme_access_check(s);
    }
    return fp_access_check_only(s);
}
/* Common subroutine for CheckSMEAnd*Enabled. */
bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
{
    if (!sme_enabled_check(s)) {
        return false;
    }
    if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_NotStreaming, false));
        return false;
    }
    if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
        gen_exception_insn(s, 0, EXCP_UDEF,
                           syn_smetrap(SME_ET_InactiveZA, false));
        return false;
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table-based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0).
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
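/*
 * A table is used like this (sketch; the pattern/mask values and the
 * handler name are illustrative, not a real encoding):
 *
 *     static const AArch64DecodeTable table[] = {
 *         { 0x0e200400, 0x9f200400, disas_some_group },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     }
 */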
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    int64_t diff = sextract32(insn, 0, 26) * 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
    }

    /* B Branch / BL Branch with link */
    reset_btype(s);
    gen_goto_tb(s, 0, diff);
}

/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |        imm19        |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    int64_t diff;
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    diff = sextract32(insn, 5, 19) * 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, diff);
}
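/*
 * Encoding example: "CBZ X0, #+8" has sf = 1, op = 0, rt = 0 and
 * imm19 = 2 (branch offsets are stored in units of 4 bytes), so diff
 * above comes out as +8.
 */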
/* Test and branch (immediate)
 *   31  30         25  24  23 19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    int64_t diff;
    DisasLabel match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    diff = sextract32(insn, 5, 14) * 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));

    reset_btype(s);

    match = gen_disas_label(s);
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, match.label);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, 4);
    set_disas_label(s, match);
    gen_goto_tb(s, 1, diff);
}

/* Conditional branch (immediate)
 *  31           25  24  23                  5  4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |        imm19        | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    int64_t diff;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    diff = sextract32(insn, 5, 19) * 4;
    cond = extract32(insn, 0, 4);

    reset_btype(s);
    if (cond < 0x0e) {
        /* genuinely conditional branches */
        DisasLabel match = gen_disas_label(s);
        arm_gen_test_cc(cond, match.label);
        gen_goto_tb(s, 0, 4);
        set_disas_label(s, match);
        gen_goto_tb(s, 1, diff);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, diff);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0b00000: /* NOP */
        break;
    case 0b00011: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        break;
    case 0b00001: /* YIELD */
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 0b00010: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 0b00100: /* SEV */
    case 0b00101: /* SEVL */
    case 0b00110: /* DGH */
        /* we treat all as NOP at least for now */
        break;
    case 0b00111: /* XPACLRI */
        if (s->pauth_active) {
            gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
        }
        break;
    case 0b01000: /* PACIA1716 */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01010: /* PACIB1716 */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01100: /* AUTIA1716 */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b01110: /* AUTIB1716 */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
        }
        break;
    case 0b10000: /* ESB */
        /* Without RAS, we must implement this as NOP. */
        if (dc_isar_feature(aa64_ras, s)) {
            /*
             * QEMU does not have a source of physical SErrors,
             * so we are only concerned with virtual SErrors.
             * The pseudocode in the ARM for this case is
             *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
             *     AArch64.vESBOperation();
             * Most of the condition can be evaluated at translation time.
             * Test for EL2 present, and defer test for SEL2 to runtime.
             */
            if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
                gen_helper_vesb(cpu_env);
            }
        }
        break;
    case 0b11000: /* PACIAZ */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11001: /* PACIASP */
        if (s->pauth_active) {
            gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11010: /* PACIBZ */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11011: /* PACIBSP */
        if (s->pauth_active) {
            gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11100: /* AUTIAZ */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11101: /* AUTIASP */
        if (s->pauth_active) {
            gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    case 0b11110: /* AUTIBZ */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30],
                             new_tmp_a64_zero(s));
        }
        break;
    case 0b11111: /* AUTIBSP */
        if (s->pauth_active) {
            gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
        }
        break;
    default:
        /* default specified as NOP equivalent */
        break;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        reset_btype(s);
        gen_goto_tb(s, 0, 4);
        return;

    case 7: /* SB */
        if (crm != 0 || !dc_isar_feature(aa64_sb, s)) {
            goto do_unallocated;
        }
        /*
         * TODO: There is no speculation barrier opcode for TCG;
         * MB and end the TB instead.
         */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        gen_goto_tb(s, 0, 4);
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}
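/*
 * Note that the shareability domain encoded in CRm bits [3:2] (e.g.
 * ISH vs SY) is not modelled above: TCG only needs the access-type
 * mask in CRm bits [1:0], so for instance DMB ISHLD and DMB LD
 * generate the same TCG barrier.
 */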
static void gen_xaflag(void)
{
    TCGv_i32 z = tcg_temp_new_i32();

    tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);

    /*
     * (!C & !Z) << 31
     * (!(C | Z)) << 31
     * ~((C | Z) << 31)
     * ~-(C | Z)
     * (C | Z) - 1
     */
    tcg_gen_or_i32(cpu_NF, cpu_CF, z);
    tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);

    /* !(Z & C) */
    tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
    tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);

    /* (!C & Z) << 31 -> -(Z & ~C) */
    tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);

    /* C | Z */
    tcg_gen_or_i32(cpu_CF, cpu_CF, z);

    tcg_temp_free_i32(z);
}

static void gen_axflag(void)
{
    tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
    tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */

    /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
    tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);

    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;

    /* End the TB by default, chaining is ok.  */
    s->base.is_jmp = DISAS_TOO_MANY;

    switch (op) {
    case 0x00: /* CFINV */
        if (crm != 0 || !dc_isar_feature(aa64_condm_4, s)) {
            goto do_unallocated;
        }
        tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x01: /* XAFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_xaflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x02: /* AXFlag */
        if (crm != 0 || !dc_isar_feature(aa64_condm_5, s)) {
            goto do_unallocated;
        }
        gen_axflag();
        s->base.is_jmp = DISAS_NEXT;
        break;

    case 0x03: /* UAO */
        if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_UAO);
        } else {
            clear_pstate_bits(PSTATE_UAO);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x04: /* PAN */
        if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_PAN);
        } else {
            clear_pstate_bits(PSTATE_PAN);
        }
        gen_rebuild_hflags(s);
        break;

    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(crm & PSTATE_SP));
        break;

    case 0x19: /* SSBS */
        if (!dc_isar_feature(aa64_ssbs, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_SSBS);
        } else {
            clear_pstate_bits(PSTATE_SSBS);
        }
        /* Don't need to rebuild hflags since SSBS is a nop */
        break;

    case 0x1a: /* DIT */
        if (!dc_isar_feature(aa64_dit, s)) {
            goto do_unallocated;
        }
        if (crm & 1) {
            set_pstate_bits(PSTATE_DIT);
        } else {
            clear_pstate_bits(PSTATE_DIT);
        }
        /* There's no need to rebuild hflags because DIT is a nop */
        break;
    case 0x1e: /* DAIFSet */
        gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(crm));
        break;

    case 0x1f: /* DAIFClear */
        gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(crm));
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
        break;

    case 0x1c: /* TCO */
        if (dc_isar_feature(aa64_mte, s)) {
            /* Full MTE is enabled -- set the TCO bit as directed. */
            if (crm & 1) {
                set_pstate_bits(PSTATE_TCO);
            } else {
                clear_pstate_bits(PSTATE_TCO);
            }
            gen_rebuild_hflags(s);
            /* Many factors, including TCO, go into MTE_ACTIVE. */
            s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
        } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
            /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI. */
            s->base.is_jmp = DISAS_NEXT;
        } else {
            goto do_unallocated;
        }
        break;

    case 0x1b: /* SVCR* */
        if (!dc_isar_feature(aa64_sme, s) || crm < 2 || crm > 7) {
            goto do_unallocated;
        }
        if (sme_access_check(s)) {
            int old = s->pstate_sm | (s->pstate_za << 1);
            int new = (crm & 1) * 3;
            int msk = (crm >> 1) & 3;

            if ((old ^ new) & msk) {
                /* At least one bit changes. */
                gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
                                    tcg_constant_i32(msk));
            } else {
                s->base.is_jmp = DISAS_NEXT;
            }
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
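/*
 * Worked example: after "MSR NZCV, X0" with X0 = 0x60000000, the code
 * above leaves Z and C set and N and V clear; a subsequent
 * "MRS X1, NZCV" via gen_get_nzcv() then reads back 0x60000000, with
 * bits [27:0] of the result zero.
 */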
static void gen_sysreg_undef(DisasContext *s, bool isread,
                             uint8_t op0, uint8_t op1, uint8_t op2,
                             uint8_t crn, uint8_t crm, uint8_t rt)
{
    /*
     * Generate code to emit an UNDEF with correct syndrome
     * information for a failed system register access.
     * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
     * but if FEAT_IDST is implemented then read accesses to registers
     * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
     * syndrome.
     */
    uint32_t syndrome;

    if (isread && dc_isar_feature(aa64_ids, s) &&
        arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
    } else {
        syndrome = syn_uncategorized();
    }
    gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
}

/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                      crn, crm, op0, op1, op2);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
    TCGv_ptr tcg_ri = NULL;
    TCGv_i64 tcg_rt;

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
        return;
    }

    if (ri->accessfn || (ri->fgt && s->fgt_active)) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        uint32_t syndrome;

        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        gen_a64_update_pc(s, 0);
        tcg_ri = tcg_temp_new_ptr();
        gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
                                       tcg_constant_i32(key),
                                       tcg_constant_i32(syndrome),
                                       tcg_constant_i32(isread));
    } else if (ri->type & ARM_CP_RAISES_EXC) {
        /*
         * The readfn or writefn might raise an exception;
         * synchronize the CPU state in case it does.
         */
        gen_a64_update_pc(s, 0);
    }

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        goto exit;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        goto exit;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        goto exit;

    /* Handle special cases first */
    switch (ri->type & ARM_CP_SPECIAL_MASK) {
    case 0:
        break;
    case ARM_CP_NOP:
        goto exit;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        goto exit;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        goto exit;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        if (s->mte_active[0]) {
            int desc = 0;

            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);

            tcg_rt = new_tmp_a64(s);
            gen_helper_mte_check_zva(tcg_rt, cpu_env,
                                     tcg_constant_i32(desc), cpu_reg(s, rt));
        } else {
            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
        }
        gen_helper_dc_zva(cpu_env, tcg_rt);
        goto exit;
    case ARM_CP_DC_GVA:
        {
            TCGv_i64 clean_addr, tag;

            /*
             * DC_GVA, like DC_ZVA, requires that we supply the original
             * pointer for an invalid page. Probe that address first.
             */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        goto exit;
    case ARM_CP_DC_GZVA:
        {
            TCGv_i64 clean_addr, tag;

            /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
            tcg_rt = cpu_reg(s, rt);
            clean_addr = clean_data_tbi(s, tcg_rt);
            gen_helper_dc_zva(cpu_env, clean_addr);

            if (s->ata) {
                /* Extract the tag from the register to match STZGM. */
                tag = tcg_temp_new_i64();
                tcg_gen_shri_i64(tag, tcg_rt, 56);
                gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
                tcg_temp_free_i64(tag);
            }
        }
        goto exit;
    default:
        g_assert_not_reached();
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
        goto exit;
    } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        goto exit;
    } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
        goto exit;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            goto exit;
        } else if (ri->writefn) {
            if (!tcg_ri) {
                tcg_ri = gen_lookup_cp_reg(key);
            }
            gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }
    if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /*
         * A write to any coprocessor register that ends a TB
         * must rebuild the hflags for the next TB.
         */
        gen_rebuild_hflags(s);
        /*
         * We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE_EXIT;
    }

 exit:
    if (tcg_ri) {
        tcg_temp_free_ptr(tcg_ri);
    }
}
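
/*
 * Sizing note for the DC cases above (an assumption for illustration,
 * not spelled out in this file): gen_helper_dc_zva zeroes a block of
 * 4 << dcz_blocksize bytes, so with the common DCZID_EL0.BS value of 4
 * a single DC ZVA clears a 64-byte block.
 */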

/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
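
/*
 * Decode example (illustrative): NOP is 0xd503201f, which extracts as
 * L = 0, op0 = 0, op1 = 3, CRn = 2, CRm = 0, op2 = 0, Rt = 31; op0 == 0
 * with CRn == 2 routes it to handle_hint() above.
 */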

/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    uint32_t syndrome;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            syndrome = syn_aa64_svc(imm16);
            if (s->fgt_svc) {
                gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
                break;
            }
            gen_ss_advance(s);
            gen_exception_insn(s, 4, EXCP_SWI, syndrome);
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_update_pc(s, 0);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_update_pc(s, 0);
            gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(imm16)));
            gen_ss_advance(s);
            gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * the architecture requires when halting debug is disabled: it
         * will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled(s->current_el == 0) && imm16 == 0xf000) {
            gen_exception_internal_insn(s, EXCP_SEMIHOST);
        } else {
            unallocated_encoding(s);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unallocated_encoding(s);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;
    unsigned btype_mod = 2;   /* 0: BR, 1: BLR, 2: other */
    TCGv_i64 dst;
    TCGv_i64 modifier;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op2 != 0x1f) {
        goto do_unallocated;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        btype_mod = opc;
        switch (op3) {
        case 0:
            /* BR, BLR, RET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            dst = cpu_reg(s, rn);
            break;

        case 2:
        case 3:
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (opc == 2) {
                /* RETAA, RETAB */
                if (rn != 0x1f || op4 != 0x1f) {
                    goto do_unallocated;
                }
                rn = 30;
                modifier = cpu_X[31];
            } else {
                /* BRAAZ, BRABZ, BLRAAZ, BLRABZ */
                if (op4 != 0x1f) {
                    goto do_unallocated;
                }
                modifier = new_tmp_a64_zero(s);
            }
            if (s->pauth_active) {
                dst = new_tmp_a64(s);
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
                }
            } else {
                dst = cpu_reg(s, rn);
            }
            break;

        default:
            goto do_unallocated;
        }
        /* BLR also needs to load return address */
        if (opc == 1) {
            TCGv_i64 lr = cpu_reg(s, 30);
            if (dst == lr) {
                TCGv_i64 tmp = new_tmp_a64(s);
                tcg_gen_mov_i64(tmp, dst);
                dst = tmp;
            }
            gen_pc_plus_diff(s, lr, curr_insn_len(s));
        }
        gen_a64_set_pc(s, dst);
        break;

    case 8: /* BRAA */
    case 9: /* BLRAA */
        if (!dc_isar_feature(aa64_pauth, s)) {
            goto do_unallocated;
        }
        if ((op3 & ~1) != 2) {
            goto do_unallocated;
        }
        btype_mod = opc & 1;
        if (s->pauth_active) {
            dst = new_tmp_a64(s);
            modifier = cpu_reg_sp(s, op4);
            if (op3 == 2) {
                gen_helper_autia(dst, cpu_env, cpu_reg(s, rn), modifier);
            } else {
                gen_helper_autib(dst, cpu_env, cpu_reg(s, rn), modifier);
            }
        } else {
            dst = cpu_reg(s, rn);
        }
        /* BLRAA also needs to load return address */
        if (opc == 9) {
            TCGv_i64 lr = cpu_reg(s, 30);
            if (dst == lr) {
                TCGv_i64 tmp = new_tmp_a64(s);
                tcg_gen_mov_i64(tmp, dst);
                dst = tmp;
            }
            gen_pc_plus_diff(s, lr, curr_insn_len(s));
        }
        gen_a64_set_pc(s, dst);
        break;

    case 4: /* ERET */
        if (s->current_el == 0) {
            goto do_unallocated;
        }
        switch (op3) {
        case 0: /* ERET */
            if (op4 != 0) {
                goto do_unallocated;
            }
            if (s->fgt_eret) {
                gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2);
                return;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            break;

        case 2: /* ERETAA */
        case 3: /* ERETAB */
            if (!dc_isar_feature(aa64_pauth, s)) {
                goto do_unallocated;
            }
            if (rn != 0x1f || op4 != 0x1f) {
                goto do_unallocated;
            }
            /* The FGT trap takes precedence over an auth trap. */
            if (s->fgt_eret) {
                gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(op3), 2);
                return;
            }
            dst = tcg_temp_new_i64();
            tcg_gen_ld_i64(dst, cpu_env,
                           offsetof(CPUARMState, elr_el[s->current_el]));
            if (s->pauth_active) {
                modifier = cpu_X[31];
                if (op3 == 2) {
                    gen_helper_autia(dst, cpu_env, dst, modifier);
                } else {
                    gen_helper_autib(dst, cpu_env, dst, modifier);
                }
            }
            break;

        default:
            goto do_unallocated;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }

        gen_helper_exception_return(cpu_env, dst);
        tcg_temp_free_i64(dst);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;

    case 5: /* DRPS */
        if (op3 != 0 || op4 != 0 || rn != 0x1f) {
            goto do_unallocated;
        } else {
            unallocated_encoding(s);
        }
        return;

    default:
    do_unallocated:
        unallocated_encoding(s);
        return;
    }

    switch (btype_mod) {
    case 0: /* BR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BR to {x16,x17} or !guard -> 1, else 3.  */
            set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
        }
        break;

    case 1: /* BLR */
        if (dc_isar_feature(aa64_bti, s)) {
            /* BLR sets BTYPE to 2, regardless of source guarded page.  */
            set_btype(s, 2);
        }
        break;

    default: /* RET or none of the above.  */
        /* BTYPE will be set to 0 by normal end-of-insn processing.  */
        break;
    }

    s->base.is_jmp = DISAS_JUMP;
}

/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            if (extract32(insn, 22, 2) == 0) {
                disas_system(s, insn);
            } else {
                unallocated_encoding(s);
            }
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
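
/*
 * BTI note (illustrative): in disas_uncond_b_reg() above, "BR x16" from a
 * guarded page records BTYPE 1 while "BR x5" records BTYPE 3, and any BLR
 * records BTYPE 2; with a non-zero BTYPE, the next instruction fetched
 * from a guarded page must be a valid BTI landing pad or it faults.
 */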

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed.  This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    MemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /*
             * The pair must be single-copy atomic for *each* doubleword,
             * not the entire quadword; however, it must be quadword aligned.
             */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
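
/*
 * Illustrative guest pattern that the exclusive-monitor emulation above
 * and below is designed to service (an assumed example, not from this
 * file): an atomic increment compiled as
 *
 *     retry:  ldxr  x0, [x1]
 *             add   x0, x0, #1
 *             stxr  w2, x0, [x1]
 *             cbnz  w2, retry
 *
 * The LDXR records the address and value; the STXR succeeds (w2 == 0)
 * only if the cmpxchg still sees the recorded value at that address.
 */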

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else {
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv_i128 c16 = tcg_temp_new_i128();
            TCGv_i64 a, b;

            if (s->be_data == MO_LE) {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
                                        cpu_exclusive_high);
            } else {
                tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
                tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
                                        cpu_exclusive_val);
            }

            tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
                                        get_mem_index(s),
                                        MO_128 | MO_ALIGN | s->be_data);
            tcg_temp_free_i128(c16);

            a = tcg_temp_new_i64();
            b = tcg_temp_new_i64();
            if (s->be_data == MO_LE) {
                tcg_gen_extr_i128_i64(a, b, t16);
            } else {
                tcg_gen_extr_i128_i64(b, a, t16);
            }

            tcg_gen_xor_i64(a, a, cpu_exclusive_val);
            tcg_gen_xor_i64(b, b, cpu_exclusive_high);
            tcg_gen_or_i64(tmp, a, b);
            tcg_temp_free_i64(a);
            tcg_temp_free_i64(b);
            tcg_temp_free_i128(t16);

            tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 clean_addr;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size);
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}

static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 clean_addr;
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* This is a single atomic access, despite the "pair". */
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, size + 1);

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else {
        TCGv_i128 cmp = tcg_temp_new_i128();
        TCGv_i128 val = tcg_temp_new_i128();

        if (s->be_data == MO_LE) {
            tcg_gen_concat_i64_i128(val, t1, t2);
            tcg_gen_concat_i64_i128(cmp, s1, s2);
        } else {
            tcg_gen_concat_i64_i128(val, t2, t1);
            tcg_gen_concat_i64_i128(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx,
                                    MO_128 | MO_ALIGN | s->be_data);
        tcg_temp_free_i128(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr_i128_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr_i128_i64(s2, s1, cmp);
        }
        tcg_temp_free_i128(cmp);
    }
}
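
/*
 * Usage note (illustrative): CASP requires Rs and Rt to be even-numbered
 * register pairs -- the decoder in disas_ldst_excl() below only calls
 * gen_compare_and_swap_pair() after checking ((rt | rs) & 1) == 0, so the
 * rs + 1 / rt + 1 accesses above never index out of range.
 */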

/* Update the Sixty-Four bit (SF) register size. This logic is derived
 * from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}
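
/*
 * Worked examples (illustrative): a plain 32-bit LDR has size == 2 and
 * is_signed == false, giving regsize 32 and iss_sf == false; LDRSW has
 * is_signed == true with opc<0> == 0, giving regsize 64 and iss_sf == true.
 */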

/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int rs = extract32(insn, 16, 5);
    int is_lasr = extract32(insn, 15, 1);
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr;

    switch (o2_L_o1_o0) {
    case 0x0: /* STXR */
    case 0x1: /* STLXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, false);
        return;

    case 0x4: /* LDXR */
    case 0x5: /* LDAXR */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        s->is_ldex = true;
        gen_load_exclusive(s, rt, rt2, clean_addr, size, false);
        if (is_lasr) {
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        }
        return;

    case 0x8: /* STLLR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* StoreLORelease is the same as Store-Release for QEMU.  */
        /* fall through */
    case 0x9: /* STLR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    true, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, true, rt,
                  disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        return;

    case 0xc: /* LDLAR */
        if (!dc_isar_feature(aa64_lor, s)) {
            break;
        }
        /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
        /* fall through */
    case 0xd: /* LDAR */
        /* Generate ISS for non-exclusive accesses including LASR.  */
        if (rn == 31) {
            gen_check_sp_alignment(s);
        }
        clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                    false, rn != 31, size);
        /* TODO: ARMv8.4-LSE SCTLR.nAA */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size | MO_ALIGN, false, true,
                  rt, disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;

    case 0x2: case 0x3: /* CASP / STXP */
        if (size & 2) { /* STXP / STLXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        true, rn != 31, size);
            gen_store_exclusive(s, rs, rt, rt2, clean_addr, size, true);
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASP / CASPL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0x6: case 0x7: /* CASPA / LDXP */
        if (size & 2) { /* LDXP / LDAXP */
            if (rn == 31) {
                gen_check_sp_alignment(s);
            }
            clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn),
                                        false, rn != 31, size);
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, clean_addr, size, true);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
            return;
        }
        if (rt2 == 31
            && ((rt | rs) & 1) == 0
            && dc_isar_feature(aa64_atomics, s)) {
            /* CASPA / CASPAL */
            gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
            return;
        }
        break;

    case 0xa: /* CAS */
    case 0xb: /* CASL */
    case 0xe: /* CASA */
    case 0xf: /* CASAL */
        if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
            gen_compare_and_swap(s, rs, rt, rn, size);
            return;
        }
        break;
    }
    unallocated_encoding(s);
}
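
/*
 * Key-packing example (illustrative): o2_L_o1_o0 is bits o2:L:o1 shifted
 * left once with o0 in the low bit, so LDAXR (o2 = 0, L = 1, o1 = 0,
 * o0 = 1) packs to 0b0101 == 0x5, matching its case label above.
 */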

/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |       imm19       |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, clean_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    clean_addr = new_tmp_a64(s);
    gen_pc_plus_diff(s, clean_addr, imm);
    if (is_vector) {
        do_fp_ld(s, rt, clean_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers.  */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                  false, true, rt, iss_sf, false);
    }
}
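
/*
 * Offset example (illustrative): imm19 is a word offset from the insn,
 * so "LDR x0, <pc + 8>" encodes imm19 == 2 and the sign-extended byte
 * offset computed above is 2 << 2 == 8.
 */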

/*
 * LDNP (Load Pair - non-temporal hint)
 * LDP (Load Pair - non vector)
 * LDPSW (Load Pair Signed Word - non vector)
 * STNP (Store Pair - non-temporal hint)
 * STP (Store Pair - non vector)
 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * LDP (Load Pair of SIMD&FP)
 * STNP (Store Pair of SIMD&FP - non-temporal hint)
 * STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW/STGP               01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    uint64_t offset = sextract64(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;
    bool set_tag = false;

    TCGv_i64 clean_addr, dirty_addr;

    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else if (opc == 1 && !is_load) {
        /* STGP */
        if (!dc_isar_feature(aa64_mte_insn_reg, s) || index == 0) {
            unallocated_encoding(s);
            return;
        }
        size = 3;
        set_tag = true;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    if (is_vector && !fp_access_check(s)) {
        return;
    }

    offset <<= (set_tag ? LOG2_TAG_GRANULE : size);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!postindex) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    }

    if (set_tag) {
        if (!s->ata) {
            /*
             * TODO: We could rely on the stores below, at least for
             * system mode, if we arrange to add MO_ALIGN_16.
             */
            gen_helper_stg_stub(cpu_env, dirty_addr);
        } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
            gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
        } else {
            gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
        }
    }

    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
                                (wback || rn != 31) && !set_tag, 2 << size);

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, clean_addr, size);
        } else {
            do_fp_st(s, rt, clean_addr, size);
        }
        tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
        if (is_load) {
            do_fp_ld(s, rt2, clean_addr, size);
        } else {
            do_fp_st(s, rt2, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);

        if (is_load) {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Do not modify tcg_rt before recognizing any exception
             * from the second load.
             */
            do_gpr_ld(s, tmp, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_ld(s, tcg_rt2, clean_addr, size + is_signed * MO_SIGN,
                      false, false, 0, false, false);

            tcg_gen_mov_i64(tcg_rt, tmp);
            tcg_temp_free_i64(tmp);
        } else {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      false, 0, false, false);
            tcg_gen_addi_i64(clean_addr, clean_addr, 1 << size);
            do_gpr_st(s, tcg_rt2, clean_addr, size,
                      false, 0, false, false);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
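
/*
 * Scaling example (illustrative): "LDP x0, x1, [sp, #16]" encodes
 * imm7 == 2; with size == 3 the shift above turns that into the byte
 * offset 2 << 3 == 16.  For STGP the shift is LOG2_TAG_GRANULE instead,
 * i.e. multiples of the MTE tag granule.
 */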

/*
 * Load/store (immediate post-indexed)
 * Load/store (immediate pre-indexed)
 * Load/store (unscaled immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 *       10 -> unprivileged
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
                                int opc,
                                int size,
                                int rt,
                                bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_unpriv = (idx == 2);
    bool iss_valid;
    bool post_index;
    bool writeback;
    int memidx;

    TCGv_i64 clean_addr, dirty_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4 || is_unpriv) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            if (idx != 0) {
                unallocated_encoding(s);
                return;
            }
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    switch (idx) {
    case 0:
    case 2:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    default:
        g_assert_not_reached();
    }

    iss_valid = !is_vector && !writeback;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    if (!post_index) {
        tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
    }
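
    /*
     * Unprivileged note (illustrative): idx == 2 is the LDTR/STTR form,
     * which performs the access with the user-mode mmu index even when
     * executed at a higher EL; get_a64_user_mem_index() supplies it below.
     */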
    memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
    clean_addr = gen_mte_check1_mmuidx(s, dirty_addr, is_store,
                                       writeback || rn != 31,
                                       size, is_unpriv, memidx);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);

        if (is_store) {
            do_gpr_st_memidx(s, tcg_rt, clean_addr, size, memidx,
                             iss_valid, rt, iss_sf, false);
        } else {
            do_gpr_ld_memidx(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                             is_extended, memidx,
                             iss_valid, rt, iss_sf, false);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(dirty_addr, dirty_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, dirty_addr);
    }
}

/*
 * Load/store (register offset)
 *
 * 31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
                                   int opc,
                                   int size,
                                   int rt,
                                   bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opt = extract32(insn, 13, 3);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;

    TCGv_i64 tcg_rm, clean_addr, dirty_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
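
    /*
     * Extend/scale example (illustrative): "LDR x0, [x1, w2, SXTW #3]"
     * has opt == 6 (SXTW) and S == 1 with size == 3, so the offset used
     * below is the sign-extended 32-bit value of w2 shifted left by 3.
     */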
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(dirty_addr, dirty_addr, tcg_rm);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, true, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}

/*
 * Load/store (unsigned immediate)
 *
 * 31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
                                        int opc,
                                        int size,
                                        int rt,
                                        bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    unsigned int offset;

    TCGv_i64 clean_addr, dirty_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = gen_mte_check1(s, dirty_addr, is_store, rn != 31, size);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, clean_addr, size);
        } else {
            do_fp_ld(s, rt, clean_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
        if (is_store) {
            do_gpr_st(s, tcg_rt, clean_addr, size,
                      true, rt, iss_sf, false);
        } else {
            do_gpr_ld(s, tcg_rt, clean_addr, size + is_signed * MO_SIGN,
                      is_extended, true, rt, iss_sf, false);
        }
    }
}
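
/*
 * Scaling example (illustrative): in the unsigned-immediate form,
 * imm12 is scaled by the access size, so "LDR x0, [x1, #8]" encodes
 * imm12 == 1 and the computed byte offset is 1 << 3 == 8.
 */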

/* Atomic memory operations
 *
 *  31  30      27  26    24    22  21   16   15    12    10    5     0
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * Rs: the source register for the operation
 * V: vector flag (always 0 as of v8.3)
 * A: acquire flag
 * R: release flag
 */
static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
                              int size, int rt, bool is_vector)
{
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
    bool r = extract32(insn, 22, 1);
    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, tcg_rt, clean_addr;
    AtomicThreeOpFn *fn = NULL;
    MemOp mop = s->be_data | size | MO_ALIGN;

    if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
        unallocated_encoding(s);
        return;
    }
    switch (o3_opc) {
    case 000: /* LDADD */
        fn = tcg_gen_atomic_fetch_add_i64;
        break;
    case 001: /* LDCLR */
        fn = tcg_gen_atomic_fetch_and_i64;
        break;
    case 002: /* LDEOR */
        fn = tcg_gen_atomic_fetch_xor_i64;
        break;
    case 003: /* LDSET */
        fn = tcg_gen_atomic_fetch_or_i64;
        break;
    case 004: /* LDSMAX */
        fn = tcg_gen_atomic_fetch_smax_i64;
        mop |= MO_SIGN;
        break;
    case 005: /* LDSMIN */
        fn = tcg_gen_atomic_fetch_smin_i64;
        mop |= MO_SIGN;
        break;
    case 006: /* LDUMAX */
        fn = tcg_gen_atomic_fetch_umax_i64;
        break;
    case 007: /* LDUMIN */
        fn = tcg_gen_atomic_fetch_umin_i64;
        break;
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
    case 014: /* LDAPR, LDAPRH, LDAPRB */
        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
            rs != 31 || a != 1 || r != 0) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), false, rn != 31, size);

    if (o3_opc == 014) {
        /*
         * LDAPR* are a special case because they are a simple load, not a
         * fetch-and-do-something op.
         * The architectural consistency requirements here are weaker than
         * full load-acquire (we only need "load-acquire processor consistent"),
         * but we choose to implement them as full LDAQ.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false,
                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
        return;
    }

    tcg_rs = read_cpu_reg(s, rs, true);
    tcg_rt = cpu_reg(s, rt);

    if (o3_opc == 1) { /* LDCLR */
        tcg_gen_not_i64(tcg_rs, tcg_rs);
    }

    /* The tcg atomic primitives are all full barriers. Therefore we
     * can ignore the Acquire and Release bits of this instruction.
     */
    fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);

    if ((mop & MO_SIGN) && size != MO_64) {
        tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
    }
}
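
/*
 * Semantics note (illustrative): LDCLR stores "mem & ~Rs", which is why
 * the code above complements Rs and then uses the fetch-and primitive;
 * e.g. "LDCLR x2, x0, [x1]" with x2 == 0xff atomically clears the low
 * byte at [x1] and returns the old value in x0.
 */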

/*
 * PAC memory operations
 *
 *  31  30      27  26    24    22  21       12  11  10    5     0
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 * | size | 1 1 1 | V | 0 0 | M S | 1 |  imm9  | W | 1 | Rn |  Rt |
 * +------+-------+---+-----+-----+---+--------+---+---+----+-----+
 *
 * Rt: the result register
 * Rn: base address or SP
 * V: vector flag (always 0 as of v8.3)
 * M: clear for key DA, set for key DB
 * W: pre-indexing flag
 * S: sign for imm9.
 */
static void disas_ldst_pac(DisasContext *s, uint32_t insn,
                           int size, int rt, bool is_vector)
{
    int rn = extract32(insn, 5, 5);
    bool is_wback = extract32(insn, 11, 1);
    bool use_key_a = !extract32(insn, 23, 1);
    int offset;
    TCGv_i64 clean_addr, dirty_addr, tcg_rt;

    if (size != 3 || is_vector || !dc_isar_feature(aa64_pauth, s)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    dirty_addr = read_cpu_reg_sp(s, rn, 1);

    if (s->pauth_active) {
        if (use_key_a) {
            gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        } else {
            gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
                             new_tmp_a64_zero(s));
        }
    }

    /* Form the 10-bit signed, scaled offset.  */
    offset = (extract32(insn, 22, 1) << 9) | extract32(insn, 12, 9);
    offset = sextract32(offset << size, 0, 10 + size);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);

    /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
    clean_addr = gen_mte_check1(s, dirty_addr, false,
                                is_wback || rn != 31, size);

    tcg_rt = cpu_reg(s, rt);
    do_gpr_ld(s, tcg_rt, clean_addr, size,
              /* extend */ false, /* iss_valid */ !is_wback,
              /* iss_srt */ rt, /* iss_sf */ true, /* iss_ar */ false);

    if (is_wback) {
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), dirty_addr);
    }
}
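
/*
 * Offset example (illustrative): size is always 3 here, so S:imm9 forms
 * a 13-bit scaled offset; "LDRAA x0, [x1, #8]" encodes imm9 == 1 with
 * S == 0, giving 1 << 3 == 8, while S == 1 with imm9 == 0x1ff gives -8.
 */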

/*
 * LDAPR/STLR (unscaled immediate)
 *
 *  31  30            24    22  21       12    10    5     0
 * +------+-------------+-----+---+--------+-----+----+-----+
 * | size | 0 1 1 0 0 1 | opc | 0 |  imm9  | 0 0 | Rn |  Rt |
 * +------+-------------+-----+---+--------+-----+----+-----+
 *
 * Rt: source or destination register
 * Rn: base register
 * imm9: unscaled immediate offset
 * opc: 00: STLUR*, 01/10/11: various LDAPUR*
 * size: size of load/store
 */
static void disas_ldst_ldapr_stlr(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int offset = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    TCGv_i64 clean_addr, dirty_addr;
    bool is_store = false;
    bool extend = false;
    bool iss_sf;
    MemOp mop;

    if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
        unallocated_encoding(s);
        return;
    }

    /* TODO: ARMv8.4-LSE SCTLR.nAA */
    mop = size | MO_ALIGN;

    switch (opc) {
    case 0: /* STLURB */
        is_store = true;
        break;
    case 1: /* LDAPUR* */
        break;
    case 2: /* LDAPURS* 64-bit variant */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        break;
    case 3: /* LDAPURS* 32-bit variant */
        if (size > 1) {
            unallocated_encoding(s);
            return;
        }
        mop |= MO_SIGN;
        extend = true; /* zero-extend 32->64 after signed load */
        break;
    default:
        g_assert_not_reached();
    }

    iss_sf = disas_ldst_compute_iss_sf(size, (mop & MO_SIGN) != 0, opc);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    dirty_addr = read_cpu_reg_sp(s, rn, 1);
    tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
    clean_addr = clean_data_tbi(s, dirty_addr);

    if (is_store) {
        /* Store-Release semantics */
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
        do_gpr_st(s, cpu_reg(s, rt), clean_addr, mop, true, rt, iss_sf, true);
    } else {
        /*
         * Load-AcquirePC semantics; we implement as the slightly more
         * restrictive Load-Acquire.
         */
        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, mop,
                  extend, true, rt, iss_sf, true);
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
}

/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int opc = extract32(insn, 22, 2);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);

    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 0) {
            /* Load/store register (unscaled immediate)
             * Load/store immediate pre/post-indexed
             * Load/store register unprivileged
             */
            disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
            return;
        }
        switch (extract32(insn, 10, 2)) {
        case 0:
            disas_ldst_atomic(s, insn, size, rt, is_vector);
            return;
        case 2:
            disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
            return;
        default:
            disas_ldst_pac(s, insn, size, rt, is_vector);
            return;
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
        return;
    }
    unallocated_encoding(s);
}

/* AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
    MemOp endian, align, mop;

    int total;    /* total bytes */
    int elements; /* elements per vector */
    int rpt;      /* num iterations */
    int selem;    /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_postidx && rm != 0) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    /* For our purposes, bytes are always little-endian.  */
    endian = s->be_data;
    if (size == 0) {
        endian = MO_LE;
    }

    total = rpt * selem * (is_q ? 16 : 8);
    tcg_rn = cpu_reg_sp(s, rn);

    /*
     * Issue the MTE check vs the logical repeat count, before we
     * promote consecutive little-endian elements below.
     */
    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
                                total);

    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    align = MO_ALIGN;
    if (selem == 1 && endian == MO_LE) {
        align = pow2_align(size);
        size = 3;
    }
    if (!s->align_mem) {
        align = 0;
    }
    mop = endian | size | align;

    elements = (is_q ? 16 : 8) >> size;
    tcg_ebytes = tcg_constant_i64(1 << size);
    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int xs;
            for (xs = 0; xs < selem; xs++) {
                int tt = (rt + r + xs) % 32;
                if (is_store) {
                    do_vec_st(s, tt, e, clean_addr, mop);
                } else {
                    do_vec_ld(s, tt, e, clean_addr, mop);
                }
                tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
            }
        }
    }
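
    /*
     * Worked example (illustrative): "LD4 {v0.16b-v3.16b}, [x0]" decodes
     * as opcode 0x0 (rpt = 1, selem = 4) with Q = 1, so total == 64 bytes
     * and the loops above interleave 16 elements across v0..v3.
     */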
3857 */ 3858 for (r = 0; r < rpt * selem; r++) { 3859 int tt = (rt + r) % 32; 3860 clear_vec_high(s, is_q, tt); 3861 } 3862 } 3863 3864 if (is_postidx) { 3865 if (rm == 31) { 3866 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 3867 } else { 3868 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); 3869 } 3870 } 3871 } 3872 3873 /* AdvSIMD load/store single structure 3874 * 3875 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 3876 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ 3877 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt | 3878 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ 3879 * 3880 * AdvSIMD load/store single structure (post-indexed) 3881 * 3882 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0 3883 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ 3884 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt | 3885 * +---+---+---------------+-----+-----------+-----+---+------+------+------+ 3886 * 3887 * Rt: first (or only) SIMD&FP register to be transferred 3888 * Rn: base address or SP 3889 * Rm (post-index only): post-index register (when !31) or size dependent #imm 3890 * index = encoded in Q:S:size dependent on size 3891 * 3892 * lane_size = encoded in R, opc 3893 * transfer width = encoded in opc, S, size 3894 */ 3895 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) 3896 { 3897 int rt = extract32(insn, 0, 5); 3898 int rn = extract32(insn, 5, 5); 3899 int rm = extract32(insn, 16, 5); 3900 int size = extract32(insn, 10, 2); 3901 int S = extract32(insn, 12, 1); 3902 int opc = extract32(insn, 13, 3); 3903 int R = extract32(insn, 21, 1); 3904 int is_load = extract32(insn, 22, 1); 3905 int is_postidx = extract32(insn, 23, 1); 3906 int is_q = extract32(insn, 30, 1); 3907 3908 int scale = extract32(opc, 1, 2); 3909 int selem = (extract32(opc, 0, 1) << 1 | R) + 1; 3910 bool replicate = false; 3911 int index = is_q << 3 | S << 2 | size; 3912 int xs, total; 3913 TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; 3914 MemOp mop; 3915 3916 if (extract32(insn, 31, 1)) { 3917 unallocated_encoding(s); 3918 return; 3919 } 3920 if (!is_postidx && rm != 0) { 3921 unallocated_encoding(s); 3922 return; 3923 } 3924 3925 switch (scale) { 3926 case 3: 3927 if (!is_load || S) { 3928 unallocated_encoding(s); 3929 return; 3930 } 3931 scale = size; 3932 replicate = true; 3933 break; 3934 case 0: 3935 break; 3936 case 1: 3937 if (extract32(size, 0, 1)) { 3938 unallocated_encoding(s); 3939 return; 3940 } 3941 index >>= 1; 3942 break; 3943 case 2: 3944 if (extract32(size, 1, 1)) { 3945 unallocated_encoding(s); 3946 return; 3947 } 3948 if (!extract32(size, 0, 1)) { 3949 index >>= 2; 3950 } else { 3951 if (S) { 3952 unallocated_encoding(s); 3953 return; 3954 } 3955 index >>= 3; 3956 scale = 3; 3957 } 3958 break; 3959 default: 3960 g_assert_not_reached(); 3961 } 3962 3963 if (!fp_access_check(s)) { 3964 return; 3965 } 3966 3967 if (rn == 31) { 3968 gen_check_sp_alignment(s); 3969 } 3970 3971 total = selem << scale; 3972 tcg_rn = cpu_reg_sp(s, rn); 3973 3974 clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31, 3975 total); 3976 mop = finalize_memop(s, scale); 3977 3978 tcg_ebytes = tcg_constant_i64(1 << scale); 3979 for (xs = 0; xs < selem; xs++) { 3980 if (replicate) { 3981 /* Load and replicate to all elements */ 3982 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 3983 3984 tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop); 3985 tcg_gen_gvec_dup_i64(scale, 
vec_full_reg_offset(s, rt), 3986 (is_q + 1) * 8, vec_full_reg_size(s), 3987 tcg_tmp); 3988 tcg_temp_free_i64(tcg_tmp); 3989 } else { 3990 /* Load/store one element per register */ 3991 if (is_load) { 3992 do_vec_ld(s, rt, index, clean_addr, mop); 3993 } else { 3994 do_vec_st(s, rt, index, clean_addr, mop); 3995 } 3996 } 3997 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes); 3998 rt = (rt + 1) % 32; 3999 } 4000 4001 if (is_postidx) { 4002 if (rm == 31) { 4003 tcg_gen_addi_i64(tcg_rn, tcg_rn, total); 4004 } else { 4005 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); 4006 } 4007 } 4008 } 4009 4010 /* 4011 * Load/Store memory tags 4012 * 4013 * 31 30 29 24 22 21 12 10 5 0 4014 * +-----+-------------+-----+---+------+-----+------+------+ 4015 * | 1 1 | 0 1 1 0 0 1 | op1 | 1 | imm9 | op2 | Rn | Rt | 4016 * +-----+-------------+-----+---+------+-----+------+------+ 4017 */ 4018 static void disas_ldst_tag(DisasContext *s, uint32_t insn) 4019 { 4020 int rt = extract32(insn, 0, 5); 4021 int rn = extract32(insn, 5, 5); 4022 uint64_t offset = sextract64(insn, 12, 9) << LOG2_TAG_GRANULE; 4023 int op2 = extract32(insn, 10, 2); 4024 int op1 = extract32(insn, 22, 2); 4025 bool is_load = false, is_pair = false, is_zero = false, is_mult = false; 4026 int index = 0; 4027 TCGv_i64 addr, clean_addr, tcg_rt; 4028 4029 /* We checked insn bits [29:24,21] in the caller. */ 4030 if (extract32(insn, 30, 2) != 3) { 4031 goto do_unallocated; 4032 } 4033 4034 /* 4035 * @index is a tri-state variable which has 3 states: 4036 * < 0 : post-index, writeback 4037 * = 0 : signed offset 4038 * > 0 : pre-index, writeback 4039 */ 4040 switch (op1) { 4041 case 0: 4042 if (op2 != 0) { 4043 /* STG */ 4044 index = op2 - 2; 4045 } else { 4046 /* STZGM */ 4047 if (s->current_el == 0 || offset != 0) { 4048 goto do_unallocated; 4049 } 4050 is_mult = is_zero = true; 4051 } 4052 break; 4053 case 1: 4054 if (op2 != 0) { 4055 /* STZG */ 4056 is_zero = true; 4057 index = op2 - 2; 4058 } else { 4059 /* LDG */ 4060 is_load = true; 4061 } 4062 break; 4063 case 2: 4064 if (op2 != 0) { 4065 /* ST2G */ 4066 is_pair = true; 4067 index = op2 - 2; 4068 } else { 4069 /* STGM */ 4070 if (s->current_el == 0 || offset != 0) { 4071 goto do_unallocated; 4072 } 4073 is_mult = true; 4074 } 4075 break; 4076 case 3: 4077 if (op2 != 0) { 4078 /* STZ2G */ 4079 is_pair = is_zero = true; 4080 index = op2 - 2; 4081 } else { 4082 /* LDGM */ 4083 if (s->current_el == 0 || offset != 0) { 4084 goto do_unallocated; 4085 } 4086 is_mult = is_load = true; 4087 } 4088 break; 4089 4090 default: 4091 do_unallocated: 4092 unallocated_encoding(s); 4093 return; 4094 } 4095 4096 if (is_mult 4097 ? !dc_isar_feature(aa64_mte, s) 4098 : !dc_isar_feature(aa64_mte_insn_reg, s)) { 4099 goto do_unallocated; 4100 } 4101 4102 if (rn == 31) { 4103 gen_check_sp_alignment(s); 4104 } 4105 4106 addr = read_cpu_reg_sp(s, rn, true); 4107 if (index >= 0) { 4108 /* pre-index or signed offset */ 4109 tcg_gen_addi_i64(addr, addr, offset); 4110 } 4111 4112 if (is_mult) { 4113 tcg_rt = cpu_reg(s, rt); 4114 4115 if (is_zero) { 4116 int size = 4 << s->dcz_blocksize; 4117 4118 if (s->ata) { 4119 gen_helper_stzgm_tags(cpu_env, addr, tcg_rt); 4120 } 4121 /* 4122 * The non-tags portion of STZGM is mostly like DC_ZVA, 4123 * except the alignment happens before the access. 
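* (Hence the explicit round-down of clean_addr below to the
* DC ZVA block size, 4 << dcz_blocksize bytes, before the
* dc_zva helper is called.)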
4124 */ 4125 clean_addr = clean_data_tbi(s, addr); 4126 tcg_gen_andi_i64(clean_addr, clean_addr, -size); 4127 gen_helper_dc_zva(cpu_env, clean_addr); 4128 } else if (s->ata) { 4129 if (is_load) { 4130 gen_helper_ldgm(tcg_rt, cpu_env, addr); 4131 } else { 4132 gen_helper_stgm(cpu_env, addr, tcg_rt); 4133 } 4134 } else { 4135 MMUAccessType acc = is_load ? MMU_DATA_LOAD : MMU_DATA_STORE; 4136 int size = 4 << GMID_EL1_BS; 4137 4138 clean_addr = clean_data_tbi(s, addr); 4139 tcg_gen_andi_i64(clean_addr, clean_addr, -size); 4140 gen_probe_access(s, clean_addr, acc, size); 4141 4142 if (is_load) { 4143 /* The result tags are zeros. */ 4144 tcg_gen_movi_i64(tcg_rt, 0); 4145 } 4146 } 4147 return; 4148 } 4149 4150 if (is_load) { 4151 tcg_gen_andi_i64(addr, addr, -TAG_GRANULE); 4152 tcg_rt = cpu_reg(s, rt); 4153 if (s->ata) { 4154 gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt); 4155 } else { 4156 clean_addr = clean_data_tbi(s, addr); 4157 gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8); 4158 gen_address_with_allocation_tag0(tcg_rt, addr); 4159 } 4160 } else { 4161 tcg_rt = cpu_reg_sp(s, rt); 4162 if (!s->ata) { 4163 /* 4164 * For STG and ST2G, we need to check alignment and probe memory. 4165 * TODO: For STZG and STZ2G, we could rely on the stores below, 4166 * at least for system mode; user-only won't enforce alignment. 4167 */ 4168 if (is_pair) { 4169 gen_helper_st2g_stub(cpu_env, addr); 4170 } else { 4171 gen_helper_stg_stub(cpu_env, addr); 4172 } 4173 } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { 4174 if (is_pair) { 4175 gen_helper_st2g_parallel(cpu_env, addr, tcg_rt); 4176 } else { 4177 gen_helper_stg_parallel(cpu_env, addr, tcg_rt); 4178 } 4179 } else { 4180 if (is_pair) { 4181 gen_helper_st2g(cpu_env, addr, tcg_rt); 4182 } else { 4183 gen_helper_stg(cpu_env, addr, tcg_rt); 4184 } 4185 } 4186 } 4187 4188 if (is_zero) { 4189 TCGv_i64 clean_addr = clean_data_tbi(s, addr); 4190 TCGv_i64 tcg_zero = tcg_constant_i64(0); 4191 int mem_index = get_mem_index(s); 4192 int i, n = (1 + is_pair) << LOG2_TAG_GRANULE; 4193 4194 tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, 4195 MO_UQ | MO_ALIGN_16); 4196 for (i = 8; i < n; i += 8) { 4197 tcg_gen_addi_i64(clean_addr, clean_addr, 8); 4198 tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ); 4199 } 4200 } 4201 4202 if (index != 0) { 4203 /* pre-index or post-index */ 4204 if (index < 0) { 4205 /* post-index */ 4206 tcg_gen_addi_i64(addr, addr, offset); 4207 } 4208 tcg_gen_mov_i64(cpu_reg_sp(s, rn), addr); 4209 } 4210 } 4211 4212 /* Loads and stores */ 4213 static void disas_ldst(DisasContext *s, uint32_t insn) 4214 { 4215 switch (extract32(insn, 24, 6)) { 4216 case 0x08: /* Load/store exclusive */ 4217 disas_ldst_excl(s, insn); 4218 break; 4219 case 0x18: case 0x1c: /* Load register (literal) */ 4220 disas_ld_lit(s, insn); 4221 break; 4222 case 0x28: case 0x29: 4223 case 0x2c: case 0x2d: /* Load/store pair (all forms) */ 4224 disas_ldst_pair(s, insn); 4225 break; 4226 case 0x38: case 0x39: 4227 case 0x3c: case 0x3d: /* Load/store register (all forms) */ 4228 disas_ldst_reg(s, insn); 4229 break; 4230 case 0x0c: /* AdvSIMD load/store multiple structures */ 4231 disas_ldst_multiple_struct(s, insn); 4232 break; 4233 case 0x0d: /* AdvSIMD load/store single structure */ 4234 disas_ldst_single_struct(s, insn); 4235 break; 4236 case 0x19: 4237 if (extract32(insn, 21, 1) != 0) { 4238 disas_ldst_tag(s, insn); 4239 } else if (extract32(insn, 10, 2) == 0) { 4240 disas_ldst_ldapr_stlr(s, insn); 4241 } else { 4242 unallocated_encoding(s); 4243 } 4244 
break; 4245 default: 4246 unallocated_encoding(s); 4247 break; 4248 } 4249 } 4250 4251 /* PC-rel. addressing 4252 * 31 30 29 28 24 23 5 4 0 4253 * +----+-------+-----------+-------------------+------+ 4254 * | op | immlo | 1 0 0 0 0 | immhi | Rd | 4255 * +----+-------+-----------+-------------------+------+ 4256 */ 4257 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn) 4258 { 4259 unsigned int page, rd; 4260 int64_t offset; 4261 4262 page = extract32(insn, 31, 1); 4263 /* SignExtend(immhi:immlo) -> offset */ 4264 offset = sextract64(insn, 5, 19); 4265 offset = offset << 2 | extract32(insn, 29, 2); 4266 rd = extract32(insn, 0, 5); 4267 4268 if (page) { 4269 /* ADRP (page based) */ 4270 offset <<= 12; 4271 /* The page offset is ok for CF_PCREL. */ 4272 offset -= s->pc_curr & 0xfff; 4273 } 4274 4275 gen_pc_plus_diff(s, cpu_reg(s, rd), offset); 4276 } 4277 4278 /* 4279 * Add/subtract (immediate) 4280 * 4281 * 31 30 29 28 23 22 21 10 9 5 4 0 4282 * +--+--+--+-------------+--+-------------+-----+-----+ 4283 * |sf|op| S| 1 0 0 0 1 0 |sh| imm12 | Rn | Rd | 4284 * +--+--+--+-------------+--+-------------+-----+-----+ 4285 * 4286 * sf: 0 -> 32bit, 1 -> 64bit 4287 * op: 0 -> add , 1 -> sub 4288 * S: 1 -> set flags 4289 * sh: 1 -> LSL imm by 12 4290 */ 4291 static void disas_add_sub_imm(DisasContext *s, uint32_t insn) 4292 { 4293 int rd = extract32(insn, 0, 5); 4294 int rn = extract32(insn, 5, 5); 4295 uint64_t imm = extract32(insn, 10, 12); 4296 bool shift = extract32(insn, 22, 1); 4297 bool setflags = extract32(insn, 29, 1); 4298 bool sub_op = extract32(insn, 30, 1); 4299 bool is_64bit = extract32(insn, 31, 1); 4300 4301 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); 4302 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd); 4303 TCGv_i64 tcg_result; 4304 4305 if (shift) { 4306 imm <<= 12; 4307 } 4308 4309 tcg_result = tcg_temp_new_i64(); 4310 if (!setflags) { 4311 if (sub_op) { 4312 tcg_gen_subi_i64(tcg_result, tcg_rn, imm); 4313 } else { 4314 tcg_gen_addi_i64(tcg_result, tcg_rn, imm); 4315 } 4316 } else { 4317 TCGv_i64 tcg_imm = tcg_constant_i64(imm); 4318 if (sub_op) { 4319 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); 4320 } else { 4321 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm); 4322 } 4323 } 4324 4325 if (is_64bit) { 4326 tcg_gen_mov_i64(tcg_rd, tcg_result); 4327 } else { 4328 tcg_gen_ext32u_i64(tcg_rd, tcg_result); 4329 } 4330 4331 tcg_temp_free_i64(tcg_result); 4332 } 4333 4334 /* 4335 * Add/subtract (immediate, with tags) 4336 * 4337 * 31 30 29 28 23 22 21 16 14 10 9 5 4 0 4338 * +--+--+--+-------------+--+---------+--+-------+-----+-----+ 4339 * |sf|op| S| 1 0 0 0 1 1 |o2| uimm6 |o3| uimm4 | Rn | Rd | 4340 * +--+--+--+-------------+--+---------+--+-------+-----+-----+ 4341 * 4342 * op: 0 -> add, 1 -> sub 4343 */ 4344 static void disas_add_sub_imm_with_tags(DisasContext *s, uint32_t insn) 4345 { 4346 int rd = extract32(insn, 0, 5); 4347 int rn = extract32(insn, 5, 5); 4348 int uimm4 = extract32(insn, 10, 4); 4349 int uimm6 = extract32(insn, 16, 6); 4350 bool sub_op = extract32(insn, 30, 1); 4351 TCGv_i64 tcg_rn, tcg_rd; 4352 int imm; 4353 4354 /* Test all of sf=1, S=0, o2=0, o3=0. 
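* The mask 0xa040c000 selects sf (bit 31), S (bit 29), o2 (bit 22)
* and o3 (bits 15:14); of those only sf may be set, hence the
* expected value 0x80000000.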
*/ 4355 if ((insn & 0xa040c000u) != 0x80000000u || 4356 !dc_isar_feature(aa64_mte_insn_reg, s)) { 4357 unallocated_encoding(s); 4358 return; 4359 } 4360 4361 imm = uimm6 << LOG2_TAG_GRANULE; 4362 if (sub_op) { 4363 imm = -imm; 4364 } 4365 4366 tcg_rn = cpu_reg_sp(s, rn); 4367 tcg_rd = cpu_reg_sp(s, rd); 4368 4369 if (s->ata) { 4370 gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn, 4371 tcg_constant_i32(imm), 4372 tcg_constant_i32(uimm4)); 4373 } else { 4374 tcg_gen_addi_i64(tcg_rd, tcg_rn, imm); 4375 gen_address_with_allocation_tag0(tcg_rd, tcg_rd); 4376 } 4377 } 4378 4379 /* The input should be a value in the bottom e bits (with higher 4380 * bits zero); returns that value replicated into every element 4381 * of size e in a 64 bit integer. 4382 */ 4383 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e) 4384 { 4385 assert(e != 0); 4386 while (e < 64) { 4387 mask |= mask << e; 4388 e *= 2; 4389 } 4390 return mask; 4391 } 4392 4393 /* Return a value with the bottom len bits set (where 0 < len <= 64) */ 4394 static inline uint64_t bitmask64(unsigned int length) 4395 { 4396 assert(length > 0 && length <= 64); 4397 return ~0ULL >> (64 - length); 4398 } 4399 4400 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we 4401 * only require the wmask. Returns false if the imms/immr/immn are a reserved 4402 * value (ie should cause a guest UNDEF exception), and true if they are 4403 * valid, in which case the decoded bit pattern is written to result. 4404 */ 4405 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, 4406 unsigned int imms, unsigned int immr) 4407 { 4408 uint64_t mask; 4409 unsigned e, levels, s, r; 4410 int len; 4411 4412 assert(immn < 2 && imms < 64 && immr < 64); 4413 4414 /* The bit patterns we create here are 64 bit patterns which 4415 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or 4416 * 64 bits each. Each element contains the same value: a run 4417 * of between 1 and e-1 non-zero bits, rotated within the 4418 * element by between 0 and e-1 bits. 4419 * 4420 * The element size and run length are encoded into immn (1 bit) 4421 * and imms (6 bits) as follows: 4422 * 64 bit elements: immn = 1, imms = <length of run - 1> 4423 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1> 4424 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1> 4425 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1> 4426 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1> 4427 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1> 4428 * Notice that immn = 0, imms = 11111x is the only combination 4429 * not covered by one of the above options; this is reserved. 4430 * Further, <length of run - 1> all-ones is a reserved pattern. 4431 * 4432 * In all cases the rotation is by immr % e (and immr is 6 bits). 4433 */ 4434 4435 /* First determine the element size */ 4436 len = 31 - clz32((immn << 6) | (~imms & 0x3f)); 4437 if (len < 1) { 4438 /* This is the immn == 0, imms == 0x11111x case */ 4439 return false; 4440 } 4441 e = 1 << len; 4442 4443 levels = e - 1; 4444 s = imms & levels; 4445 r = immr & levels; 4446 4447 if (s == levels) { 4448 /* <length of run - 1> mustn't be all-ones. */ 4449 return false; 4450 } 4451 4452 /* Create the value of one element: s+1 set bits rotated 4453 * by r within the element (which is e bits wide)... 
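* For example, immn=0, imms=0b111100, immr=0 gives e=2 with a
* run of one bit and no rotation: each element is 0b01 and the
* final wmask is 0x5555555555555555.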
4454 */ 4455 mask = bitmask64(s + 1); 4456 if (r) { 4457 mask = (mask >> r) | (mask << (e - r)); 4458 mask &= bitmask64(e); 4459 } 4460 /* ...then replicate the element over the whole 64 bit value */ 4461 mask = bitfield_replicate(mask, e); 4462 *result = mask; 4463 return true; 4464 } 4465 4466 /* Logical (immediate) 4467 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 4468 * +----+-----+-------------+---+------+------+------+------+ 4469 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd | 4470 * +----+-----+-------------+---+------+------+------+------+ 4471 */ 4472 static void disas_logic_imm(DisasContext *s, uint32_t insn) 4473 { 4474 unsigned int sf, opc, is_n, immr, imms, rn, rd; 4475 TCGv_i64 tcg_rd, tcg_rn; 4476 uint64_t wmask; 4477 bool is_and = false; 4478 4479 sf = extract32(insn, 31, 1); 4480 opc = extract32(insn, 29, 2); 4481 is_n = extract32(insn, 22, 1); 4482 immr = extract32(insn, 16, 6); 4483 imms = extract32(insn, 10, 6); 4484 rn = extract32(insn, 5, 5); 4485 rd = extract32(insn, 0, 5); 4486 4487 if (!sf && is_n) { 4488 unallocated_encoding(s); 4489 return; 4490 } 4491 4492 if (opc == 0x3) { /* ANDS */ 4493 tcg_rd = cpu_reg(s, rd); 4494 } else { 4495 tcg_rd = cpu_reg_sp(s, rd); 4496 } 4497 tcg_rn = cpu_reg(s, rn); 4498 4499 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) { 4500 /* some immediate field values are reserved */ 4501 unallocated_encoding(s); 4502 return; 4503 } 4504 4505 if (!sf) { 4506 wmask &= 0xffffffff; 4507 } 4508 4509 switch (opc) { 4510 case 0x3: /* ANDS */ 4511 case 0x0: /* AND */ 4512 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask); 4513 is_and = true; 4514 break; 4515 case 0x1: /* ORR */ 4516 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask); 4517 break; 4518 case 0x2: /* EOR */ 4519 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask); 4520 break; 4521 default: 4522 assert(FALSE); /* must handle all above */ 4523 break; 4524 } 4525 4526 if (!sf && !is_and) { 4527 /* zero extend final result; we know we can skip this for AND 4528 * since the immediate had the high 32 bits clear. 
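* (wmask was truncated to 32 bits above for !sf, so the AND
* cannot have left anything set in bits [63:32].)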
4529 */ 4530 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4531 } 4532 4533 if (opc == 3) { /* ANDS */ 4534 gen_logic_CC(sf, tcg_rd); 4535 } 4536 } 4537 4538 /* 4539 * Move wide (immediate) 4540 * 4541 * 31 30 29 28 23 22 21 20 5 4 0 4542 * +--+-----+-------------+-----+----------------+------+ 4543 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd | 4544 * +--+-----+-------------+-----+----------------+------+ 4545 * 4546 * sf: 0 -> 32 bit, 1 -> 64 bit 4547 * opc: 00 -> N, 10 -> Z, 11 -> K 4548 * hw: shift/16 (0,16, and sf only 32, 48) 4549 */ 4550 static void disas_movw_imm(DisasContext *s, uint32_t insn) 4551 { 4552 int rd = extract32(insn, 0, 5); 4553 uint64_t imm = extract32(insn, 5, 16); 4554 int sf = extract32(insn, 31, 1); 4555 int opc = extract32(insn, 29, 2); 4556 int pos = extract32(insn, 21, 2) << 4; 4557 TCGv_i64 tcg_rd = cpu_reg(s, rd); 4558 4559 if (!sf && (pos >= 32)) { 4560 unallocated_encoding(s); 4561 return; 4562 } 4563 4564 switch (opc) { 4565 case 0: /* MOVN */ 4566 case 2: /* MOVZ */ 4567 imm <<= pos; 4568 if (opc == 0) { 4569 imm = ~imm; 4570 } 4571 if (!sf) { 4572 imm &= 0xffffffffu; 4573 } 4574 tcg_gen_movi_i64(tcg_rd, imm); 4575 break; 4576 case 3: /* MOVK */ 4577 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_constant_i64(imm), pos, 16); 4578 if (!sf) { 4579 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4580 } 4581 break; 4582 default: 4583 unallocated_encoding(s); 4584 break; 4585 } 4586 } 4587 4588 /* Bitfield 4589 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0 4590 * +----+-----+-------------+---+------+------+------+------+ 4591 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd | 4592 * +----+-----+-------------+---+------+------+------+------+ 4593 */ 4594 static void disas_bitfield(DisasContext *s, uint32_t insn) 4595 { 4596 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len; 4597 TCGv_i64 tcg_rd, tcg_tmp; 4598 4599 sf = extract32(insn, 31, 1); 4600 opc = extract32(insn, 29, 2); 4601 n = extract32(insn, 22, 1); 4602 ri = extract32(insn, 16, 6); 4603 si = extract32(insn, 10, 6); 4604 rn = extract32(insn, 5, 5); 4605 rd = extract32(insn, 0, 5); 4606 bitsize = sf ? 64 : 32; 4607 4608 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) { 4609 unallocated_encoding(s); 4610 return; 4611 } 4612 4613 tcg_rd = cpu_reg(s, rd); 4614 4615 /* Suppress the zero-extend for !sf. Since RI and SI are constrained 4616 to be smaller than bitsize, we'll never reference data outside the 4617 low 32-bits anyway. */ 4618 tcg_tmp = read_cpu_reg(s, rn, 1); 4619 4620 /* Recognize simple(r) extractions. */ 4621 if (si >= ri) { 4622 /* Wd<s-r:0> = Wn<s:r> */ 4623 len = (si - ri) + 1; 4624 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */ 4625 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len); 4626 goto done; 4627 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */ 4628 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len); 4629 return; 4630 } 4631 /* opc == 1, BFXIL fall through to deposit */ 4632 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri); 4633 pos = 0; 4634 } else { 4635 /* Handle the ri > si case with a deposit 4636 * Wd<32+s-r,32-r> = Wn<s:0> 4637 */ 4638 len = si + 1; 4639 pos = (bitsize - ri) & (bitsize - 1); 4640 } 4641 4642 if (opc == 0 && len < ri) { 4643 /* SBFM: sign extend the destination field from len to fill 4644 the balance of the word. Let the deposit below insert all 4645 of those sign bits. 
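E.g. SBFIZ Wd, Wn, #4, #8 arrives here as ri=28, si=7: len is
first 8 and pos is 4, and the field is sign-extended from 8 to
28 bits so that the deposit fills bits [31:4].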
*/ 4646 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len); 4647 len = ri; 4648 } 4649 4650 if (opc == 1) { /* BFM, BFXIL */ 4651 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len); 4652 } else { 4653 /* SBFM or UBFM: We start with zero, and we haven't modified 4654 any bits outside bitsize, therefore the zero-extension 4655 below is unneeded. */ 4656 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len); 4657 return; 4658 } 4659 4660 done: 4661 if (!sf) { /* zero extend final result */ 4662 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4663 } 4664 } 4665 4666 /* Extract 4667 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0 4668 * +----+------+-------------+---+----+------+--------+------+------+ 4669 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd | 4670 * +----+------+-------------+---+----+------+--------+------+------+ 4671 */ 4672 static void disas_extract(DisasContext *s, uint32_t insn) 4673 { 4674 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0; 4675 4676 sf = extract32(insn, 31, 1); 4677 n = extract32(insn, 22, 1); 4678 rm = extract32(insn, 16, 5); 4679 imm = extract32(insn, 10, 6); 4680 rn = extract32(insn, 5, 5); 4681 rd = extract32(insn, 0, 5); 4682 op21 = extract32(insn, 29, 2); 4683 op0 = extract32(insn, 21, 1); 4684 bitsize = sf ? 64 : 32; 4685 4686 if (sf != n || op21 || op0 || imm >= bitsize) { 4687 unallocated_encoding(s); 4688 } else { 4689 TCGv_i64 tcg_rd, tcg_rm, tcg_rn; 4690 4691 tcg_rd = cpu_reg(s, rd); 4692 4693 if (unlikely(imm == 0)) { 4694 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts, 4695 * so an extract from bit 0 is a special case. 4696 */ 4697 if (sf) { 4698 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm)); 4699 } else { 4700 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm)); 4701 } 4702 } else { 4703 tcg_rm = cpu_reg(s, rm); 4704 tcg_rn = cpu_reg(s, rn); 4705 4706 if (sf) { 4707 /* Specialization to ROR happens in EXTRACT2. */ 4708 tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, imm); 4709 } else { 4710 TCGv_i32 t0 = tcg_temp_new_i32(); 4711 4712 tcg_gen_extrl_i64_i32(t0, tcg_rm); 4713 if (rm == rn) { 4714 tcg_gen_rotri_i32(t0, t0, imm); 4715 } else { 4716 TCGv_i32 t1 = tcg_temp_new_i32(); 4717 tcg_gen_extrl_i64_i32(t1, tcg_rn); 4718 tcg_gen_extract2_i32(t0, t0, t1, imm); 4719 tcg_temp_free_i32(t1); 4720 } 4721 tcg_gen_extu_i32_i64(tcg_rd, t0); 4722 tcg_temp_free_i32(t0); 4723 } 4724 } 4725 } 4726 } 4727 4728 /* Data processing - immediate */ 4729 static void disas_data_proc_imm(DisasContext *s, uint32_t insn) 4730 { 4731 switch (extract32(insn, 23, 6)) { 4732 case 0x20: case 0x21: /* PC-rel. addressing */ 4733 disas_pc_rel_adr(s, insn); 4734 break; 4735 case 0x22: /* Add/subtract (immediate) */ 4736 disas_add_sub_imm(s, insn); 4737 break; 4738 case 0x23: /* Add/subtract (immediate, with tags) */ 4739 disas_add_sub_imm_with_tags(s, insn); 4740 break; 4741 case 0x24: /* Logical (immediate) */ 4742 disas_logic_imm(s, insn); 4743 break; 4744 case 0x25: /* Move wide (immediate) */ 4745 disas_movw_imm(s, insn); 4746 break; 4747 case 0x26: /* Bitfield */ 4748 disas_bitfield(s, insn); 4749 break; 4750 case 0x27: /* Extract */ 4751 disas_extract(s, insn); 4752 break; 4753 default: 4754 unallocated_encoding(s); 4755 break; 4756 } 4757 } 4758 4759 /* Shift a TCGv src by TCGv shift_amount, put result in dst. 4760 * Note that it is the caller's responsibility to ensure that the 4761 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM 4762 * mandated semantics for out of range shifts. 
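* (handle_shift_reg below does this for the register-specified
* forms by masking the shift amount with 31 or 63 first.)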
4763 */ 4764 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf, 4765 enum a64_shift_type shift_type, TCGv_i64 shift_amount) 4766 { 4767 switch (shift_type) { 4768 case A64_SHIFT_TYPE_LSL: 4769 tcg_gen_shl_i64(dst, src, shift_amount); 4770 break; 4771 case A64_SHIFT_TYPE_LSR: 4772 tcg_gen_shr_i64(dst, src, shift_amount); 4773 break; 4774 case A64_SHIFT_TYPE_ASR: 4775 if (!sf) { 4776 tcg_gen_ext32s_i64(dst, src); 4777 } 4778 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount); 4779 break; 4780 case A64_SHIFT_TYPE_ROR: 4781 if (sf) { 4782 tcg_gen_rotr_i64(dst, src, shift_amount); 4783 } else { 4784 TCGv_i32 t0, t1; 4785 t0 = tcg_temp_new_i32(); 4786 t1 = tcg_temp_new_i32(); 4787 tcg_gen_extrl_i64_i32(t0, src); 4788 tcg_gen_extrl_i64_i32(t1, shift_amount); 4789 tcg_gen_rotr_i32(t0, t0, t1); 4790 tcg_gen_extu_i32_i64(dst, t0); 4791 tcg_temp_free_i32(t0); 4792 tcg_temp_free_i32(t1); 4793 } 4794 break; 4795 default: 4796 assert(FALSE); /* all shift types should be handled */ 4797 break; 4798 } 4799 4800 if (!sf) { /* zero extend final result */ 4801 tcg_gen_ext32u_i64(dst, dst); 4802 } 4803 } 4804 4805 /* Shift a TCGv src by immediate, put result in dst. 4806 * The shift amount must be in range (this should always be true as the 4807 * relevant instructions will UNDEF on bad shift immediates). 4808 */ 4809 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf, 4810 enum a64_shift_type shift_type, unsigned int shift_i) 4811 { 4812 assert(shift_i < (sf ? 64 : 32)); 4813 4814 if (shift_i == 0) { 4815 tcg_gen_mov_i64(dst, src); 4816 } else { 4817 shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i)); 4818 } 4819 } 4820 4821 /* Logical (shifted register) 4822 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 4823 * +----+-----+-----------+-------+---+------+--------+------+------+ 4824 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd | 4825 * +----+-----+-----------+-------+---+------+--------+------+------+ 4826 */ 4827 static void disas_logic_reg(DisasContext *s, uint32_t insn) 4828 { 4829 TCGv_i64 tcg_rd, tcg_rn, tcg_rm; 4830 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd; 4831 4832 sf = extract32(insn, 31, 1); 4833 opc = extract32(insn, 29, 2); 4834 shift_type = extract32(insn, 22, 2); 4835 invert = extract32(insn, 21, 1); 4836 rm = extract32(insn, 16, 5); 4837 shift_amount = extract32(insn, 10, 6); 4838 rn = extract32(insn, 5, 5); 4839 rd = extract32(insn, 0, 5); 4840 4841 if (!sf && (shift_amount & (1 << 5))) { 4842 unallocated_encoding(s); 4843 return; 4844 } 4845 4846 tcg_rd = cpu_reg(s, rd); 4847 4848 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) { 4849 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for 4850 * register-register MOV and MVN, so it is worth special casing. 
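* (e.g. "MOV x0, x1" assembles to "ORR x0, xzr, x1", so this
* avoids emitting a useless OR with zero on a very hot pattern.)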
4851 */ 4852 tcg_rm = cpu_reg(s, rm); 4853 if (invert) { 4854 tcg_gen_not_i64(tcg_rd, tcg_rm); 4855 if (!sf) { 4856 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4857 } 4858 } else { 4859 if (sf) { 4860 tcg_gen_mov_i64(tcg_rd, tcg_rm); 4861 } else { 4862 tcg_gen_ext32u_i64(tcg_rd, tcg_rm); 4863 } 4864 } 4865 return; 4866 } 4867 4868 tcg_rm = read_cpu_reg(s, rm, sf); 4869 4870 if (shift_amount) { 4871 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount); 4872 } 4873 4874 tcg_rn = cpu_reg(s, rn); 4875 4876 switch (opc | (invert << 2)) { 4877 case 0: /* AND */ 4878 case 3: /* ANDS */ 4879 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm); 4880 break; 4881 case 1: /* ORR */ 4882 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm); 4883 break; 4884 case 2: /* EOR */ 4885 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm); 4886 break; 4887 case 4: /* BIC */ 4888 case 7: /* BICS */ 4889 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm); 4890 break; 4891 case 5: /* ORN */ 4892 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm); 4893 break; 4894 case 6: /* EON */ 4895 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm); 4896 break; 4897 default: 4898 assert(FALSE); 4899 break; 4900 } 4901 4902 if (!sf) { 4903 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 4904 } 4905 4906 if (opc == 3) { 4907 gen_logic_CC(sf, tcg_rd); 4908 } 4909 } 4910 4911 /* 4912 * Add/subtract (extended register) 4913 * 4914 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0| 4915 * +--+--+--+-----------+-----+--+-------+------+------+----+----+ 4916 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd | 4917 * +--+--+--+-----------+-----+--+-------+------+------+----+----+ 4918 * 4919 * sf: 0 -> 32bit, 1 -> 64bit 4920 * op: 0 -> add , 1 -> sub 4921 * S: 1 -> set flags 4922 * opt: 00 4923 * option: extension type (see DecodeRegExtend) 4924 * imm3: optional shift to Rm 4925 * 4926 * Rd = Rn + LSL(extend(Rm), amount) 4927 */ 4928 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn) 4929 { 4930 int rd = extract32(insn, 0, 5); 4931 int rn = extract32(insn, 5, 5); 4932 int imm3 = extract32(insn, 10, 3); 4933 int option = extract32(insn, 13, 3); 4934 int rm = extract32(insn, 16, 5); 4935 int opt = extract32(insn, 22, 2); 4936 bool setflags = extract32(insn, 29, 1); 4937 bool sub_op = extract32(insn, 30, 1); 4938 bool sf = extract32(insn, 31, 1); 4939 4940 TCGv_i64 tcg_rm, tcg_rn; /* temps */ 4941 TCGv_i64 tcg_rd; 4942 TCGv_i64 tcg_result; 4943 4944 if (imm3 > 4 || opt != 0) { 4945 unallocated_encoding(s); 4946 return; 4947 } 4948 4949 /* non-flag setting ops may use SP */ 4950 if (!setflags) { 4951 tcg_rd = cpu_reg_sp(s, rd); 4952 } else { 4953 tcg_rd = cpu_reg(s, rd); 4954 } 4955 tcg_rn = read_cpu_reg_sp(s, rn, sf); 4956 4957 tcg_rm = read_cpu_reg(s, rm, sf); 4958 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3); 4959 4960 tcg_result = tcg_temp_new_i64(); 4961 4962 if (!setflags) { 4963 if (sub_op) { 4964 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); 4965 } else { 4966 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); 4967 } 4968 } else { 4969 if (sub_op) { 4970 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm); 4971 } else { 4972 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm); 4973 } 4974 } 4975 4976 if (sf) { 4977 tcg_gen_mov_i64(tcg_rd, tcg_result); 4978 } else { 4979 tcg_gen_ext32u_i64(tcg_rd, tcg_result); 4980 } 4981 4982 tcg_temp_free_i64(tcg_result); 4983 } 4984 4985 /* 4986 * Add/subtract (shifted register) 4987 * 4988 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0 4989 * +--+--+--+-----------+-----+--+-------+---------+------+------+ 4990 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd | 4991 
* +--+--+--+-----------+-----+--+-------+---------+------+------+ 4992 * 4993 * sf: 0 -> 32bit, 1 -> 64bit 4994 * op: 0 -> add , 1 -> sub 4995 * S: 1 -> set flags 4996 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED 4997 * imm6: Shift amount to apply to Rm before the add/sub 4998 */ 4999 static void disas_add_sub_reg(DisasContext *s, uint32_t insn) 5000 { 5001 int rd = extract32(insn, 0, 5); 5002 int rn = extract32(insn, 5, 5); 5003 int imm6 = extract32(insn, 10, 6); 5004 int rm = extract32(insn, 16, 5); 5005 int shift_type = extract32(insn, 22, 2); 5006 bool setflags = extract32(insn, 29, 1); 5007 bool sub_op = extract32(insn, 30, 1); 5008 bool sf = extract32(insn, 31, 1); 5009 5010 TCGv_i64 tcg_rd = cpu_reg(s, rd); 5011 TCGv_i64 tcg_rn, tcg_rm; 5012 TCGv_i64 tcg_result; 5013 5014 if ((shift_type == 3) || (!sf && (imm6 > 31))) { 5015 unallocated_encoding(s); 5016 return; 5017 } 5018 5019 tcg_rn = read_cpu_reg(s, rn, sf); 5020 tcg_rm = read_cpu_reg(s, rm, sf); 5021 5022 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6); 5023 5024 tcg_result = tcg_temp_new_i64(); 5025 5026 if (!setflags) { 5027 if (sub_op) { 5028 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm); 5029 } else { 5030 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm); 5031 } 5032 } else { 5033 if (sub_op) { 5034 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm); 5035 } else { 5036 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm); 5037 } 5038 } 5039 5040 if (sf) { 5041 tcg_gen_mov_i64(tcg_rd, tcg_result); 5042 } else { 5043 tcg_gen_ext32u_i64(tcg_rd, tcg_result); 5044 } 5045 5046 tcg_temp_free_i64(tcg_result); 5047 } 5048 5049 /* Data-processing (3 source) 5050 * 5051 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0 5052 * +--+------+-----------+------+------+----+------+------+------+ 5053 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd | 5054 * +--+------+-----------+------+------+----+------+------+------+ 5055 */ 5056 static void disas_data_proc_3src(DisasContext *s, uint32_t insn) 5057 { 5058 int rd = extract32(insn, 0, 5); 5059 int rn = extract32(insn, 5, 5); 5060 int ra = extract32(insn, 10, 5); 5061 int rm = extract32(insn, 16, 5); 5062 int op_id = (extract32(insn, 29, 3) << 4) | 5063 (extract32(insn, 21, 3) << 1) | 5064 extract32(insn, 15, 1); 5065 bool sf = extract32(insn, 31, 1); 5066 bool is_sub = extract32(op_id, 0, 1); 5067 bool is_high = extract32(op_id, 2, 1); 5068 bool is_signed = false; 5069 TCGv_i64 tcg_op1; 5070 TCGv_i64 tcg_op2; 5071 TCGv_i64 tcg_tmp; 5072 5073 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */ 5074 switch (op_id) { 5075 case 0x42: /* SMADDL */ 5076 case 0x43: /* SMSUBL */ 5077 case 0x44: /* SMULH */ 5078 is_signed = true; 5079 break; 5080 case 0x0: /* MADD (32bit) */ 5081 case 0x1: /* MSUB (32bit) */ 5082 case 0x40: /* MADD (64bit) */ 5083 case 0x41: /* MSUB (64bit) */ 5084 case 0x4a: /* UMADDL */ 5085 case 0x4b: /* UMSUBL */ 5086 case 0x4c: /* UMULH */ 5087 break; 5088 default: 5089 unallocated_encoding(s); 5090 return; 5091 } 5092 5093 if (is_high) { 5094 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */ 5095 TCGv_i64 tcg_rd = cpu_reg(s, rd); 5096 TCGv_i64 tcg_rn = cpu_reg(s, rn); 5097 TCGv_i64 tcg_rm = cpu_reg(s, rm); 5098 5099 if (is_signed) { 5100 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); 5101 } else { 5102 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm); 5103 } 5104 5105 tcg_temp_free_i64(low_bits); 5106 return; 5107 } 5108 5109 tcg_op1 = tcg_temp_new_i64(); 5110 tcg_op2 = tcg_temp_new_i64(); 5111 tcg_tmp = tcg_temp_new_i64(); 5112 5113 
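/*
 * op_id values below 0x42 are the plain 32-bit/64-bit MADD/MSUB
 * encodings, which use their operands at full register width.
 * The remaining cases are the widening SMADDL/SMSUBL/UMADDL/UMSUBL
 * forms (SMULH/UMULH already returned above), whose 32-bit source
 * operands must be extended to 64 bits first.
 */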
if (op_id < 0x42) { 5114 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn)); 5115 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm)); 5116 } else { 5117 if (is_signed) { 5118 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn)); 5119 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm)); 5120 } else { 5121 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn)); 5122 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm)); 5123 } 5124 } 5125 5126 if (ra == 31 && !is_sub) { 5127 /* Special-case MADD with rA == XZR; it is the standard MUL alias */ 5128 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2); 5129 } else { 5130 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2); 5131 if (is_sub) { 5132 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); 5133 } else { 5134 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp); 5135 } 5136 } 5137 5138 if (!sf) { 5139 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd)); 5140 } 5141 5142 tcg_temp_free_i64(tcg_op1); 5143 tcg_temp_free_i64(tcg_op2); 5144 tcg_temp_free_i64(tcg_tmp); 5145 } 5146 5147 /* Add/subtract (with carry) 5148 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0 5149 * +--+--+--+------------------------+------+-------------+------+-----+ 5150 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | 0 0 0 0 0 0 | Rn | Rd | 5151 * +--+--+--+------------------------+------+-------------+------+-----+ 5152 */ 5153 5154 static void disas_adc_sbc(DisasContext *s, uint32_t insn) 5155 { 5156 unsigned int sf, op, setflags, rm, rn, rd; 5157 TCGv_i64 tcg_y, tcg_rn, tcg_rd; 5158 5159 sf = extract32(insn, 31, 1); 5160 op = extract32(insn, 30, 1); 5161 setflags = extract32(insn, 29, 1); 5162 rm = extract32(insn, 16, 5); 5163 rn = extract32(insn, 5, 5); 5164 rd = extract32(insn, 0, 5); 5165 5166 tcg_rd = cpu_reg(s, rd); 5167 tcg_rn = cpu_reg(s, rn); 5168 5169 if (op) { 5170 tcg_y = new_tmp_a64(s); 5171 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm)); 5172 } else { 5173 tcg_y = cpu_reg(s, rm); 5174 } 5175 5176 if (setflags) { 5177 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y); 5178 } else { 5179 gen_adc(sf, tcg_rd, tcg_rn, tcg_y); 5180 } 5181 } 5182 5183 /* 5184 * Rotate right into flags 5185 * 31 30 29 21 15 10 5 4 0 5186 * +--+--+--+-----------------+--------+-----------+------+--+------+ 5187 * |sf|op| S| 1 1 0 1 0 0 0 0 | imm6 | 0 0 0 0 1 | Rn |o2| mask | 5188 * +--+--+--+-----------------+--------+-----------+------+--+------+ 5189 */ 5190 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn) 5191 { 5192 int mask = extract32(insn, 0, 4); 5193 int o2 = extract32(insn, 4, 1); 5194 int rn = extract32(insn, 5, 5); 5195 int imm6 = extract32(insn, 15, 6); 5196 int sf_op_s = extract32(insn, 29, 3); 5197 TCGv_i64 tcg_rn; 5198 TCGv_i32 nzcv; 5199 5200 if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) { 5201 unallocated_encoding(s); 5202 return; 5203 } 5204 5205 tcg_rn = read_cpu_reg(s, rn, 1); 5206 tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6); 5207 5208 nzcv = tcg_temp_new_i32(); 5209 tcg_gen_extrl_i64_i32(nzcv, tcg_rn); 5210 5211 if (mask & 8) { /* N */ 5212 tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3); 5213 } 5214 if (mask & 4) { /* Z */ 5215 tcg_gen_not_i32(cpu_ZF, nzcv); 5216 tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4); 5217 } 5218 if (mask & 2) { /* C */ 5219 tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1); 5220 } 5221 if (mask & 1) { /* V */ 5222 tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0); 5223 } 5224 5225 tcg_temp_free_i32(nzcv); 5226 } 5227 5228 /* 5229 * Evaluate into flags 5230 * 31 30 29 21 15 14 10 5 4 0 5231 * +--+--+--+-----------------+---------+----+---------+------+--+------+ 5232 * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 
0 1 0 | Rn |o3| mask | 5233 * +--+--+--+-----------------+---------+----+---------+------+--+------+ 5234 */ 5235 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn) 5236 { 5237 int o3_mask = extract32(insn, 0, 5); 5238 int rn = extract32(insn, 5, 5); 5239 int o2 = extract32(insn, 15, 6); 5240 int sz = extract32(insn, 14, 1); 5241 int sf_op_s = extract32(insn, 29, 3); 5242 TCGv_i32 tmp; 5243 int shift; 5244 5245 if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd || 5246 !dc_isar_feature(aa64_condm_4, s)) { 5247 unallocated_encoding(s); 5248 return; 5249 } 5250 shift = sz ? 16 : 24; /* SETF16 or SETF8 */ 5251 5252 tmp = tcg_temp_new_i32(); 5253 tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn)); 5254 tcg_gen_shli_i32(cpu_NF, tmp, shift); 5255 tcg_gen_shli_i32(cpu_VF, tmp, shift - 1); 5256 tcg_gen_mov_i32(cpu_ZF, cpu_NF); 5257 tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF); 5258 tcg_temp_free_i32(tmp); 5259 } 5260 5261 /* Conditional compare (immediate / register) 5262 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0 5263 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ 5264 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv | 5265 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+ 5266 * [1] y [0] [0] 5267 */ 5268 static void disas_cc(DisasContext *s, uint32_t insn) 5269 { 5270 unsigned int sf, op, y, cond, rn, nzcv, is_imm; 5271 TCGv_i32 tcg_t0, tcg_t1, tcg_t2; 5272 TCGv_i64 tcg_tmp, tcg_y, tcg_rn; 5273 DisasCompare c; 5274 5275 if (!extract32(insn, 29, 1)) { 5276 unallocated_encoding(s); 5277 return; 5278 } 5279 if (insn & (1 << 10 | 1 << 4)) { 5280 unallocated_encoding(s); 5281 return; 5282 } 5283 sf = extract32(insn, 31, 1); 5284 op = extract32(insn, 30, 1); 5285 is_imm = extract32(insn, 11, 1); 5286 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */ 5287 cond = extract32(insn, 12, 4); 5288 rn = extract32(insn, 5, 5); 5289 nzcv = extract32(insn, 0, 4); 5290 5291 /* Set T0 = !COND. */ 5292 tcg_t0 = tcg_temp_new_i32(); 5293 arm_test_cc(&c, cond); 5294 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0); 5295 5296 /* Load the arguments for the new comparison. */ 5297 if (is_imm) { 5298 tcg_y = new_tmp_a64(s); 5299 tcg_gen_movi_i64(tcg_y, y); 5300 } else { 5301 tcg_y = cpu_reg(s, y); 5302 } 5303 tcg_rn = cpu_reg(s, rn); 5304 5305 /* Set the flags for the new comparison. */ 5306 tcg_tmp = tcg_temp_new_i64(); 5307 if (op) { 5308 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y); 5309 } else { 5310 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y); 5311 } 5312 tcg_temp_free_i64(tcg_tmp); 5313 5314 /* If COND was false, force the flags to #nzcv. Compute two masks 5315 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0). 5316 * For tcg hosts that support ANDC, we can make do with just T1. 5317 * In either case, allow the tcg optimizer to delete any unused mask. 
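* For instance, forcing N to 1 when COND is false is NF |= T1,
* and forcing it to 0 is NF &= T2 (or NF = NF ANDC T1), as done
* below.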
5318 */ 5319 tcg_t1 = tcg_temp_new_i32(); 5320 tcg_t2 = tcg_temp_new_i32(); 5321 tcg_gen_neg_i32(tcg_t1, tcg_t0); 5322 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1); 5323 5324 if (nzcv & 8) { /* N */ 5325 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1); 5326 } else { 5327 if (TCG_TARGET_HAS_andc_i32) { 5328 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1); 5329 } else { 5330 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2); 5331 } 5332 } 5333 if (nzcv & 4) { /* Z */ 5334 if (TCG_TARGET_HAS_andc_i32) { 5335 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1); 5336 } else { 5337 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2); 5338 } 5339 } else { 5340 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0); 5341 } 5342 if (nzcv & 2) { /* C */ 5343 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0); 5344 } else { 5345 if (TCG_TARGET_HAS_andc_i32) { 5346 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1); 5347 } else { 5348 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2); 5349 } 5350 } 5351 if (nzcv & 1) { /* V */ 5352 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1); 5353 } else { 5354 if (TCG_TARGET_HAS_andc_i32) { 5355 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1); 5356 } else { 5357 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2); 5358 } 5359 } 5360 tcg_temp_free_i32(tcg_t0); 5361 tcg_temp_free_i32(tcg_t1); 5362 tcg_temp_free_i32(tcg_t2); 5363 } 5364 5365 /* Conditional select 5366 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0 5367 * +----+----+---+-----------------+------+------+-----+------+------+ 5368 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd | 5369 * +----+----+---+-----------------+------+------+-----+------+------+ 5370 */ 5371 static void disas_cond_select(DisasContext *s, uint32_t insn) 5372 { 5373 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; 5374 TCGv_i64 tcg_rd, zero; 5375 DisasCompare64 c; 5376 5377 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { 5378 /* S == 1 or op2<1> == 1 */ 5379 unallocated_encoding(s); 5380 return; 5381 } 5382 sf = extract32(insn, 31, 1); 5383 else_inv = extract32(insn, 30, 1); 5384 rm = extract32(insn, 16, 5); 5385 cond = extract32(insn, 12, 4); 5386 else_inc = extract32(insn, 10, 1); 5387 rn = extract32(insn, 5, 5); 5388 rd = extract32(insn, 0, 5); 5389 5390 tcg_rd = cpu_reg(s, rd); 5391 5392 a64_test_cc(&c, cond); 5393 zero = tcg_constant_i64(0); 5394 5395 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) { 5396 /* CSET & CSETM. 
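CSET Rd, c is the alias of CSINC Rd, XZR, XZR, invert(c), and
CSETM of CSINV, so setcond of the inverted encoded condition
yields the 0/1 result directly; CSETM then negates it to 0/-1.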
*/ 5397 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero); 5398 if (else_inv) { 5399 tcg_gen_neg_i64(tcg_rd, tcg_rd); 5400 } 5401 } else { 5402 TCGv_i64 t_true = cpu_reg(s, rn); 5403 TCGv_i64 t_false = read_cpu_reg(s, rm, 1); 5404 if (else_inv && else_inc) { 5405 tcg_gen_neg_i64(t_false, t_false); 5406 } else if (else_inv) { 5407 tcg_gen_not_i64(t_false, t_false); 5408 } else if (else_inc) { 5409 tcg_gen_addi_i64(t_false, t_false, 1); 5410 } 5411 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false); 5412 } 5413 5414 if (!sf) { 5415 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 5416 } 5417 } 5418 5419 static void handle_clz(DisasContext *s, unsigned int sf, 5420 unsigned int rn, unsigned int rd) 5421 { 5422 TCGv_i64 tcg_rd, tcg_rn; 5423 tcg_rd = cpu_reg(s, rd); 5424 tcg_rn = cpu_reg(s, rn); 5425 5426 if (sf) { 5427 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64); 5428 } else { 5429 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); 5430 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); 5431 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32); 5432 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); 5433 tcg_temp_free_i32(tcg_tmp32); 5434 } 5435 } 5436 5437 static void handle_cls(DisasContext *s, unsigned int sf, 5438 unsigned int rn, unsigned int rd) 5439 { 5440 TCGv_i64 tcg_rd, tcg_rn; 5441 tcg_rd = cpu_reg(s, rd); 5442 tcg_rn = cpu_reg(s, rn); 5443 5444 if (sf) { 5445 tcg_gen_clrsb_i64(tcg_rd, tcg_rn); 5446 } else { 5447 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); 5448 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); 5449 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32); 5450 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); 5451 tcg_temp_free_i32(tcg_tmp32); 5452 } 5453 } 5454 5455 static void handle_rbit(DisasContext *s, unsigned int sf, 5456 unsigned int rn, unsigned int rd) 5457 { 5458 TCGv_i64 tcg_rd, tcg_rn; 5459 tcg_rd = cpu_reg(s, rd); 5460 tcg_rn = cpu_reg(s, rn); 5461 5462 if (sf) { 5463 gen_helper_rbit64(tcg_rd, tcg_rn); 5464 } else { 5465 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32(); 5466 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn); 5467 gen_helper_rbit(tcg_tmp32, tcg_tmp32); 5468 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32); 5469 tcg_temp_free_i32(tcg_tmp32); 5470 } 5471 } 5472 5473 /* REV with sf==1, opcode==3 ("REV64") */ 5474 static void handle_rev64(DisasContext *s, unsigned int sf, 5475 unsigned int rn, unsigned int rd) 5476 { 5477 if (!sf) { 5478 unallocated_encoding(s); 5479 return; 5480 } 5481 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn)); 5482 } 5483 5484 /* REV with sf==0, opcode==2 5485 * REV32 (sf==1, opcode==2) 5486 */ 5487 static void handle_rev32(DisasContext *s, unsigned int sf, 5488 unsigned int rn, unsigned int rd) 5489 { 5490 TCGv_i64 tcg_rd = cpu_reg(s, rd); 5491 TCGv_i64 tcg_rn = cpu_reg(s, rn); 5492 5493 if (sf) { 5494 tcg_gen_bswap64_i64(tcg_rd, tcg_rn); 5495 tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32); 5496 } else { 5497 tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ); 5498 } 5499 } 5500 5501 /* REV16 (opcode==1) */ 5502 static void handle_rev16(DisasContext *s, unsigned int sf, 5503 unsigned int rn, unsigned int rd) 5504 { 5505 TCGv_i64 tcg_rd = cpu_reg(s, rd); 5506 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 5507 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); 5508 TCGv_i64 mask = tcg_constant_i64(sf ? 
0x00ff00ff00ff00ffull : 0x00ff00ff); 5509 5510 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8); 5511 tcg_gen_and_i64(tcg_rd, tcg_rn, mask); 5512 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); 5513 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8); 5514 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp); 5515 5516 tcg_temp_free_i64(tcg_tmp); 5517 } 5518 5519 /* Data-processing (1 source) 5520 * 31 30 29 28 21 20 16 15 10 9 5 4 0 5521 * +----+---+---+-----------------+---------+--------+------+------+ 5522 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd | 5523 * +----+---+---+-----------------+---------+--------+------+------+ 5524 */ 5525 static void disas_data_proc_1src(DisasContext *s, uint32_t insn) 5526 { 5527 unsigned int sf, opcode, opcode2, rn, rd; 5528 TCGv_i64 tcg_rd; 5529 5530 if (extract32(insn, 29, 1)) { 5531 unallocated_encoding(s); 5532 return; 5533 } 5534 5535 sf = extract32(insn, 31, 1); 5536 opcode = extract32(insn, 10, 6); 5537 opcode2 = extract32(insn, 16, 5); 5538 rn = extract32(insn, 5, 5); 5539 rd = extract32(insn, 0, 5); 5540 5541 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7)) 5542 5543 switch (MAP(sf, opcode2, opcode)) { 5544 case MAP(0, 0x00, 0x00): /* RBIT */ 5545 case MAP(1, 0x00, 0x00): 5546 handle_rbit(s, sf, rn, rd); 5547 break; 5548 case MAP(0, 0x00, 0x01): /* REV16 */ 5549 case MAP(1, 0x00, 0x01): 5550 handle_rev16(s, sf, rn, rd); 5551 break; 5552 case MAP(0, 0x00, 0x02): /* REV/REV32 */ 5553 case MAP(1, 0x00, 0x02): 5554 handle_rev32(s, sf, rn, rd); 5555 break; 5556 case MAP(1, 0x00, 0x03): /* REV64 */ 5557 handle_rev64(s, sf, rn, rd); 5558 break; 5559 case MAP(0, 0x00, 0x04): /* CLZ */ 5560 case MAP(1, 0x00, 0x04): 5561 handle_clz(s, sf, rn, rd); 5562 break; 5563 case MAP(0, 0x00, 0x05): /* CLS */ 5564 case MAP(1, 0x00, 0x05): 5565 handle_cls(s, sf, rn, rd); 5566 break; 5567 case MAP(1, 0x01, 0x00): /* PACIA */ 5568 if (s->pauth_active) { 5569 tcg_rd = cpu_reg(s, rd); 5570 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5571 } else if (!dc_isar_feature(aa64_pauth, s)) { 5572 goto do_unallocated; 5573 } 5574 break; 5575 case MAP(1, 0x01, 0x01): /* PACIB */ 5576 if (s->pauth_active) { 5577 tcg_rd = cpu_reg(s, rd); 5578 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5579 } else if (!dc_isar_feature(aa64_pauth, s)) { 5580 goto do_unallocated; 5581 } 5582 break; 5583 case MAP(1, 0x01, 0x02): /* PACDA */ 5584 if (s->pauth_active) { 5585 tcg_rd = cpu_reg(s, rd); 5586 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5587 } else if (!dc_isar_feature(aa64_pauth, s)) { 5588 goto do_unallocated; 5589 } 5590 break; 5591 case MAP(1, 0x01, 0x03): /* PACDB */ 5592 if (s->pauth_active) { 5593 tcg_rd = cpu_reg(s, rd); 5594 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5595 } else if (!dc_isar_feature(aa64_pauth, s)) { 5596 goto do_unallocated; 5597 } 5598 break; 5599 case MAP(1, 0x01, 0x04): /* AUTIA */ 5600 if (s->pauth_active) { 5601 tcg_rd = cpu_reg(s, rd); 5602 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5603 } else if (!dc_isar_feature(aa64_pauth, s)) { 5604 goto do_unallocated; 5605 } 5606 break; 5607 case MAP(1, 0x01, 0x05): /* AUTIB */ 5608 if (s->pauth_active) { 5609 tcg_rd = cpu_reg(s, rd); 5610 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5611 } else if (!dc_isar_feature(aa64_pauth, s)) { 5612 goto do_unallocated; 5613 } 5614 break; 5615 case MAP(1, 0x01, 0x06): /* AUTDA */ 5616 if (s->pauth_active) { 5617 tcg_rd = cpu_reg(s, rd); 5618 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, 
rn)); 5619 } else if (!dc_isar_feature(aa64_pauth, s)) { 5620 goto do_unallocated; 5621 } 5622 break; 5623 case MAP(1, 0x01, 0x07): /* AUTDB */ 5624 if (s->pauth_active) { 5625 tcg_rd = cpu_reg(s, rd); 5626 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn)); 5627 } else if (!dc_isar_feature(aa64_pauth, s)) { 5628 goto do_unallocated; 5629 } 5630 break; 5631 case MAP(1, 0x01, 0x08): /* PACIZA */ 5632 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5633 goto do_unallocated; 5634 } else if (s->pauth_active) { 5635 tcg_rd = cpu_reg(s, rd); 5636 gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5637 } 5638 break; 5639 case MAP(1, 0x01, 0x09): /* PACIZB */ 5640 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5641 goto do_unallocated; 5642 } else if (s->pauth_active) { 5643 tcg_rd = cpu_reg(s, rd); 5644 gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5645 } 5646 break; 5647 case MAP(1, 0x01, 0x0a): /* PACDZA */ 5648 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5649 goto do_unallocated; 5650 } else if (s->pauth_active) { 5651 tcg_rd = cpu_reg(s, rd); 5652 gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5653 } 5654 break; 5655 case MAP(1, 0x01, 0x0b): /* PACDZB */ 5656 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5657 goto do_unallocated; 5658 } else if (s->pauth_active) { 5659 tcg_rd = cpu_reg(s, rd); 5660 gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5661 } 5662 break; 5663 case MAP(1, 0x01, 0x0c): /* AUTIZA */ 5664 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5665 goto do_unallocated; 5666 } else if (s->pauth_active) { 5667 tcg_rd = cpu_reg(s, rd); 5668 gen_helper_autia(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5669 } 5670 break; 5671 case MAP(1, 0x01, 0x0d): /* AUTIZB */ 5672 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5673 goto do_unallocated; 5674 } else if (s->pauth_active) { 5675 tcg_rd = cpu_reg(s, rd); 5676 gen_helper_autib(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5677 } 5678 break; 5679 case MAP(1, 0x01, 0x0e): /* AUTDZA */ 5680 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5681 goto do_unallocated; 5682 } else if (s->pauth_active) { 5683 tcg_rd = cpu_reg(s, rd); 5684 gen_helper_autda(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5685 } 5686 break; 5687 case MAP(1, 0x01, 0x0f): /* AUTDZB */ 5688 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5689 goto do_unallocated; 5690 } else if (s->pauth_active) { 5691 tcg_rd = cpu_reg(s, rd); 5692 gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, new_tmp_a64_zero(s)); 5693 } 5694 break; 5695 case MAP(1, 0x01, 0x10): /* XPACI */ 5696 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5697 goto do_unallocated; 5698 } else if (s->pauth_active) { 5699 tcg_rd = cpu_reg(s, rd); 5700 gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd); 5701 } 5702 break; 5703 case MAP(1, 0x01, 0x11): /* XPACD */ 5704 if (!dc_isar_feature(aa64_pauth, s) || rn != 31) { 5705 goto do_unallocated; 5706 } else if (s->pauth_active) { 5707 tcg_rd = cpu_reg(s, rd); 5708 gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd); 5709 } 5710 break; 5711 default: 5712 do_unallocated: 5713 unallocated_encoding(s); 5714 break; 5715 } 5716 5717 #undef MAP 5718 } 5719 5720 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf, 5721 unsigned int rm, unsigned int rn, unsigned int rd) 5722 { 5723 TCGv_i64 tcg_n, tcg_m, tcg_rd; 5724 tcg_rd = cpu_reg(s, rd); 5725 5726 if (!sf && is_signed) { 5727 tcg_n = new_tmp_a64(s); 5728 tcg_m = new_tmp_a64(s); 5729 tcg_gen_ext32s_i64(tcg_n, 
cpu_reg(s, rn)); 5730 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm)); 5731 } else { 5732 tcg_n = read_cpu_reg(s, rn, sf); 5733 tcg_m = read_cpu_reg(s, rm, sf); 5734 } 5735 5736 if (is_signed) { 5737 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m); 5738 } else { 5739 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m); 5740 } 5741 5742 if (!sf) { /* zero extend final result */ 5743 tcg_gen_ext32u_i64(tcg_rd, tcg_rd); 5744 } 5745 } 5746 5747 /* LSLV, LSRV, ASRV, RORV */ 5748 static void handle_shift_reg(DisasContext *s, 5749 enum a64_shift_type shift_type, unsigned int sf, 5750 unsigned int rm, unsigned int rn, unsigned int rd) 5751 { 5752 TCGv_i64 tcg_shift = tcg_temp_new_i64(); 5753 TCGv_i64 tcg_rd = cpu_reg(s, rd); 5754 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf); 5755 5756 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31); 5757 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift); 5758 tcg_temp_free_i64(tcg_shift); 5759 } 5760 5761 /* CRC32[BHWX], CRC32C[BHWX] */ 5762 static void handle_crc32(DisasContext *s, 5763 unsigned int sf, unsigned int sz, bool crc32c, 5764 unsigned int rm, unsigned int rn, unsigned int rd) 5765 { 5766 TCGv_i64 tcg_acc, tcg_val; 5767 TCGv_i32 tcg_bytes; 5768 5769 if (!dc_isar_feature(aa64_crc32, s) 5770 || (sf == 1 && sz != 3) 5771 || (sf == 0 && sz == 3)) { 5772 unallocated_encoding(s); 5773 return; 5774 } 5775 5776 if (sz == 3) { 5777 tcg_val = cpu_reg(s, rm); 5778 } else { 5779 uint64_t mask; 5780 switch (sz) { 5781 case 0: 5782 mask = 0xFF; 5783 break; 5784 case 1: 5785 mask = 0xFFFF; 5786 break; 5787 case 2: 5788 mask = 0xFFFFFFFF; 5789 break; 5790 default: 5791 g_assert_not_reached(); 5792 } 5793 tcg_val = new_tmp_a64(s); 5794 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask); 5795 } 5796 5797 tcg_acc = cpu_reg(s, rn); 5798 tcg_bytes = tcg_constant_i32(1 << sz); 5799 5800 if (crc32c) { 5801 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); 5802 } else { 5803 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes); 5804 } 5805 } 5806 5807 /* Data-processing (2 source) 5808 * 31 30 29 28 21 20 16 15 10 9 5 4 0 5809 * +----+---+---+-----------------+------+--------+------+------+ 5810 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd | 5811 * +----+---+---+-----------------+------+--------+------+------+ 5812 */ 5813 static void disas_data_proc_2src(DisasContext *s, uint32_t insn) 5814 { 5815 unsigned int sf, rm, opcode, rn, rd, setflag; 5816 sf = extract32(insn, 31, 1); 5817 setflag = extract32(insn, 29, 1); 5818 rm = extract32(insn, 16, 5); 5819 opcode = extract32(insn, 10, 6); 5820 rn = extract32(insn, 5, 5); 5821 rd = extract32(insn, 0, 5); 5822 5823 if (setflag && opcode != 0) { 5824 unallocated_encoding(s); 5825 return; 5826 } 5827 5828 switch (opcode) { 5829 case 0: /* SUBP(S) */ 5830 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { 5831 goto do_unallocated; 5832 } else { 5833 TCGv_i64 tcg_n, tcg_m, tcg_d; 5834 5835 tcg_n = read_cpu_reg_sp(s, rn, true); 5836 tcg_m = read_cpu_reg_sp(s, rm, true); 5837 tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56); 5838 tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56); 5839 tcg_d = cpu_reg(s, rd); 5840 5841 if (setflag) { 5842 gen_sub_CC(true, tcg_d, tcg_n, tcg_m); 5843 } else { 5844 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m); 5845 } 5846 } 5847 break; 5848 case 2: /* UDIV */ 5849 handle_div(s, false, sf, rm, rn, rd); 5850 break; 5851 case 3: /* SDIV */ 5852 handle_div(s, true, sf, rm, rn, rd); 5853 break; 5854 case 4: /* IRG */ 5855 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { 5856 goto do_unallocated; 
5857 } 5858 if (s->ata) { 5859 gen_helper_irg(cpu_reg_sp(s, rd), cpu_env, 5860 cpu_reg_sp(s, rn), cpu_reg(s, rm)); 5861 } else { 5862 gen_address_with_allocation_tag0(cpu_reg_sp(s, rd), 5863 cpu_reg_sp(s, rn)); 5864 } 5865 break; 5866 case 5: /* GMI */ 5867 if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) { 5868 goto do_unallocated; 5869 } else { 5870 TCGv_i64 t = tcg_temp_new_i64(); 5871 5872 tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4); 5873 tcg_gen_shl_i64(t, tcg_constant_i64(1), t); 5874 tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t); 5875 5876 tcg_temp_free_i64(t); 5877 } 5878 break; 5879 case 8: /* LSLV */ 5880 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd); 5881 break; 5882 case 9: /* LSRV */ 5883 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd); 5884 break; 5885 case 10: /* ASRV */ 5886 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd); 5887 break; 5888 case 11: /* RORV */ 5889 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd); 5890 break; 5891 case 12: /* PACGA */ 5892 if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) { 5893 goto do_unallocated; 5894 } 5895 gen_helper_pacga(cpu_reg(s, rd), cpu_env, 5896 cpu_reg(s, rn), cpu_reg_sp(s, rm)); 5897 break; 5898 case 16: 5899 case 17: 5900 case 18: 5901 case 19: 5902 case 20: 5903 case 21: 5904 case 22: 5905 case 23: /* CRC32 */ 5906 { 5907 int sz = extract32(opcode, 0, 2); 5908 bool crc32c = extract32(opcode, 2, 1); 5909 handle_crc32(s, sf, sz, crc32c, rm, rn, rd); 5910 break; 5911 } 5912 default: 5913 do_unallocated: 5914 unallocated_encoding(s); 5915 break; 5916 } 5917 } 5918 5919 /* 5920 * Data processing - register 5921 * 31 30 29 28 25 21 20 16 10 0 5922 * +--+---+--+---+-------+-----+-------+-------+---------+ 5923 * | |op0| |op1| 1 0 1 | op2 | | op3 | | 5924 * +--+---+--+---+-------+-----+-------+-------+---------+ 5925 */ 5926 static void disas_data_proc_reg(DisasContext *s, uint32_t insn) 5927 { 5928 int op0 = extract32(insn, 30, 1); 5929 int op1 = extract32(insn, 28, 1); 5930 int op2 = extract32(insn, 21, 4); 5931 int op3 = extract32(insn, 10, 6); 5932 5933 if (!op1) { 5934 if (op2 & 8) { 5935 if (op2 & 1) { 5936 /* Add/sub (extended register) */ 5937 disas_add_sub_ext_reg(s, insn); 5938 } else { 5939 /* Add/sub (shifted register) */ 5940 disas_add_sub_reg(s, insn); 5941 } 5942 } else { 5943 /* Logical (shifted register) */ 5944 disas_logic_reg(s, insn); 5945 } 5946 return; 5947 } 5948 5949 switch (op2) { 5950 case 0x0: 5951 switch (op3) { 5952 case 0x00: /* Add/subtract (with carry) */ 5953 disas_adc_sbc(s, insn); 5954 break; 5955 5956 case 0x01: /* Rotate right into flags */ 5957 case 0x21: 5958 disas_rotate_right_into_flags(s, insn); 5959 break; 5960 5961 case 0x02: /* Evaluate into flags */ 5962 case 0x12: 5963 case 0x22: 5964 case 0x32: 5965 disas_evaluate_into_flags(s, insn); 5966 break; 5967 5968 default: 5969 goto do_unallocated; 5970 } 5971 break; 5972 5973 case 0x2: /* Conditional compare */ 5974 disas_cc(s, insn); /* both imm and reg forms */ 5975 break; 5976 5977 case 0x4: /* Conditional select */ 5978 disas_cond_select(s, insn); 5979 break; 5980 5981 case 0x6: /* Data-processing */ 5982 if (op0) { /* (1 source) */ 5983 disas_data_proc_1src(s, insn); 5984 } else { /* (2 source) */ 5985 disas_data_proc_2src(s, insn); 5986 } 5987 break; 5988 case 0x8 ... 
    case 0x8 ... 0xf: /* (3 source) */
        disas_data_proc_3src(s, insn);
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

static void handle_fp_compare(DisasContext *s, int size,
                              unsigned int rn, unsigned int rm,
                              bool cmp_with_zero, bool signal_all_nans)
{
    TCGv_i64 tcg_flags = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    if (size == MO_64) {
        TCGv_i64 tcg_vn, tcg_vm;

        tcg_vn = read_fp_dreg(s, rn);
        if (cmp_with_zero) {
            tcg_vm = tcg_constant_i64(0);
        } else {
            tcg_vm = read_fp_dreg(s, rm);
        }
        if (signal_all_nans) {
            gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        } else {
            gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
        }
        tcg_temp_free_i64(tcg_vn);
        tcg_temp_free_i64(tcg_vm);
    } else {
        TCGv_i32 tcg_vn = tcg_temp_new_i32();
        TCGv_i32 tcg_vm = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_vn, rn, 0, size);
        if (cmp_with_zero) {
            tcg_gen_movi_i32(tcg_vm, 0);
        } else {
            read_vec_element_i32(s, tcg_vm, rm, 0, size);
        }

        switch (size) {
        case MO_32:
            if (signal_all_nans) {
                gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        case MO_16:
            if (signal_all_nans) {
                gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            } else {
                gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
            }
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_vn);
        tcg_temp_free_i32(tcg_vm);
    }

    tcg_temp_free_ptr(fpst);

    gen_set_nzcv(tcg_flags);

    tcg_temp_free_i64(tcg_flags);
}

/* Floating point compare
 *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
 */
static void disas_fp_compare(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, op, rn, opc, op2r;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    op = extract32(insn, 14, 2);
    rn = extract32(insn, 5, 5);
    opc = extract32(insn, 3, 2);
    op2r = extract32(insn, 0, 3);

    if (mos || op || op2r) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
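    /*
     * opc<0> selects comparison against zero rather than Rm; opc<1>
     * selects the signaling form (FCMPE), which raises Invalid
     * Operation for quiet NaN operands as well.
     */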
    handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
}

/* Floating point conditional compare
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
 */
static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, op, nzcv;
    TCGLabel *label_continue = NULL;
    int size;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    op = extract32(insn, 4, 1);
    nzcv = extract32(insn, 0, 4);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        size = MO_32;
        break;
    case 1:
        size = MO_64;
        break;
    case 3:
        size = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
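    /*
     * cond values 0xe and 0xf both encode "always"; only a real
     * condition needs the branch below, which substitutes the
     * immediate nzcv value when the condition fails to hold.
     */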
    if (cond < 0x0e) { /* not always */
        TCGLabel *label_match = gen_new_label();
        label_continue = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        /* nomatch: */
        gen_set_nzcv(tcg_constant_i64(nzcv << 28));
        tcg_gen_br(label_continue);
        gen_set_label(label_match);
    }

    handle_fp_compare(s, size, rn, rm, false, op);

    if (cond < 0x0e) {
        gen_set_label(label_continue);
    }
}

/* Floating point conditional select
 *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+------+-----+------+------+
 */
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
    unsigned int mos, type, rm, cond, rn, rd;
    TCGv_i64 t_true, t_false;
    DisasCompare64 c;
    MemOp sz;

    mos = extract32(insn, 29, 3);
    type = extract32(insn, 22, 2);
    rm = extract32(insn, 16, 5);
    cond = extract32(insn, 12, 4);
    rn = extract32(insn, 5, 5);
    rd = extract32(insn, 0, 5);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* Zero extend sreg & hreg inputs to 64 bits now. */
    t_true = tcg_temp_new_i64();
    t_false = tcg_temp_new_i64();
    read_vec_element(s, t_true, rn, 0, sz);
    read_vec_element(s, t_false, rm, 0, sz);

    a64_test_cc(&c, cond);
    tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
                        t_true, t_false);
    tcg_temp_free_i64(t_false);

    /* Note that sregs & hregs write back zeros to the high bits,
       and we've already done the zero-extension. */
    write_fp_dreg(s, rd, t_true);
    tcg_temp_free_i64(t_true);
}

/* Floating-point data-processing (1 source) - half precision */
static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
{
    TCGv_ptr fpst = NULL;
    TCGv_i32 tcg_op = read_fp_hreg(s, rn);
    TCGv_i32 tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        break;
    case 0x1: /* FABS */
        tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
        break;
    case 0x2: /* FNEG */
        tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
        break;
    case 0x3: /* FSQRT */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
    {
        TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
        fpst = fpstatus_ptr(FPST_FPCR_F16);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
        break;
    }
    case 0xe: /* FRINTX */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
        break;
    case 0xf: /* FRINTI */
        fpst = fpstatus_ptr(FPST_FPCR_F16);
        gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}

/* Floating-point data-processing (1 source) - single precision */
static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
    TCGv_i32 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    tcg_op = read_fp_sreg(s, rn);
    tcg_res = tcg_temp_new_i32();

    switch (opcode) {
    case 0x0: /* FMOV */
        tcg_gen_mov_i32(tcg_res, tcg_op);
        goto done;
    case 0x1: /* FABS */
        gen_helper_vfp_abss(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negs(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x6: /* BFCVT */
        gen_fpst = gen_helper_bfcvt;
        break;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rints;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rints_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rints;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_s;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_s;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_s;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
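        /*
         * set_rmode installs the requested rounding mode in fpst and
         * returns the previous one in its destination, so the second
         * call below with the same temp restores the original FPCR
         * rounding mode around the single gen_fpst operation.
         */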
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_sreg(s, rd, tcg_res);
    tcg_temp_free_i32(tcg_op);
    tcg_temp_free_i32(tcg_res);
}

/* Floating-point data-processing (1 source) - double precision */
static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
{
    void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
    TCGv_i64 tcg_op, tcg_res;
    TCGv_ptr fpst;
    int rmode = -1;

    switch (opcode) {
    case 0x0: /* FMOV */
        gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
        return;
    }

    tcg_op = read_fp_dreg(s, rn);
    tcg_res = tcg_temp_new_i64();

    switch (opcode) {
    case 0x1: /* FABS */
        gen_helper_vfp_absd(tcg_res, tcg_op);
        goto done;
    case 0x2: /* FNEG */
        gen_helper_vfp_negd(tcg_res, tcg_op);
        goto done;
    case 0x3: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
        goto done;
    case 0x8: /* FRINTN */
    case 0x9: /* FRINTP */
    case 0xa: /* FRINTM */
    case 0xb: /* FRINTZ */
    case 0xc: /* FRINTA */
        rmode = arm_rmode_to_sf(opcode & 7);
        gen_fpst = gen_helper_rintd;
        break;
    case 0xe: /* FRINTX */
        gen_fpst = gen_helper_rintd_exact;
        break;
    case 0xf: /* FRINTI */
        gen_fpst = gen_helper_rintd;
        break;
    case 0x10: /* FRINT32Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x11: /* FRINT32X */
        gen_fpst = gen_helper_frint32_d;
        break;
    case 0x12: /* FRINT64Z */
        rmode = float_round_to_zero;
        gen_fpst = gen_helper_frint64_d;
        break;
    case 0x13: /* FRINT64X */
        gen_fpst = gen_helper_frint64_d;
        break;
    default:
        g_assert_not_reached();
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    if (rmode >= 0) {
        TCGv_i32 tcg_rmode = tcg_const_i32(rmode);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        gen_fpst(tcg_res, tcg_op, fpst);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
        tcg_temp_free_i32(tcg_rmode);
    } else {
        gen_fpst(tcg_res, tcg_op, fpst);
    }
    tcg_temp_free_ptr(fpst);

 done:
    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_op);
    tcg_temp_free_i64(tcg_res);
}
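/*
 * For FCVT between precisions below, ntype selects the source precision
 * and dtype the destination; the half-precision cases pass the AHP flag
 * so that FPCR.AHP "alternative half-precision" format is honoured.
 */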
static void handle_fp_fcvt(DisasContext *s, int opcode,
                           int rd, int rn, int dtype, int ntype)
{
    switch (ntype) {
    case 0x0:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        if (dtype == 1) {
            /* Single to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        } else {
            /* Single to half */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            TCGv_i32 ahp = get_ahp_flag();
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

            gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
            tcg_temp_free_i32(ahp);
            tcg_temp_free_ptr(fpst);
        }
        tcg_temp_free_i32(tcg_rn);
        break;
    }
    case 0x1:
    {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        if (dtype == 0) {
            /* Double to single */
            gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
        } else {
            TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
            TCGv_i32 ahp = get_ahp_flag();
            /* Double to half */
            gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
            /* write_fp_sreg is OK here because top half of tcg_rd is zero */
            tcg_temp_free_ptr(fpst);
            tcg_temp_free_i32(ahp);
        }
        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        break;
    }
    case 0x3:
    {
        TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
        TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
        TCGv_i32 tcg_ahp = get_ahp_flag();
        tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
        if (dtype == 0) {
            /* Half to single */
            TCGv_i32 tcg_rd = tcg_temp_new_i32();
            gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_sreg(s, rd, tcg_rd);
            tcg_temp_free_i32(tcg_rd);
        } else {
            /* Half to double */
            TCGv_i64 tcg_rd = tcg_temp_new_i64();
            gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
            write_fp_dreg(s, rd, tcg_rd);
            tcg_temp_free_i64(tcg_rd);
        }
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_ptr(tcg_fpst);
        tcg_temp_free_i32(tcg_ahp);
        break;
    }
    default:
        g_assert_not_reached();
    }
}

/* Floating point data-processing (1 source)
 *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+--------+-----------+------+------+
 */
static void disas_fp_1src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int opcode = extract32(insn, 15, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (mos) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 0x4: case 0x5: case 0x7:
    {
        /* FCVT between half, single and double precision */
        int dtype = extract32(opcode, 0, 2);
        if (type == 2 || dtype == type) {
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }

        handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
        break;
    }

    case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
        if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
            goto do_unallocated;
        }
        /* fall through */
    case 0x0 ... 0x3:
    case 0x8 ... 0xc:
    case 0xe ... 0xf:
        /* 32-to-32 and 64-to-64 ops */
        switch (type) {
        case 0:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        case 1:
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_double(s, opcode, rd, rn);
            break;
        case 3:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }

            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_half(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    case 0x6:
        switch (type) {
        case 1: /* BFCVT */
            if (!dc_isar_feature(aa64_bf16, s)) {
                goto do_unallocated;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_fp_1src_single(s, opcode, rd, rn);
            break;
        default:
            goto do_unallocated;
        }
        break;

    default:
    do_unallocated:
        unallocated_encoding(s);
        break;
    }
}

/* Floating-point data-processing (2 source) - single precision */
static void handle_fp_2src_single(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negs(tcg_res, tcg_res);
        break;
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}

/* Floating-point data-processing (2 source) - double precision */
static void handle_fp_2src_double(DisasContext *s, int opcode,
                                  int rd, int rn, int rm)
{
    TCGv_i64 tcg_op1;
    TCGv_i64 tcg_op2;
    TCGv_i64 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
        gen_helper_vfp_negd(tcg_res, tcg_res);
        break;
    }

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res);
}

/* Floating-point data-processing (2 source) - half precision */
static void handle_fp_2src_half(DisasContext *s, int opcode,
                                int rd, int rn, int rm)
{
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;
    TCGv_ptr fpst;

    tcg_res = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);

    switch (opcode) {
    case 0x0: /* FMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1: /* FDIV */
        gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x2: /* FADD */
        gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x3: /* FSUB */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x4: /* FMAX */
        gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x5: /* FMIN */
        gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x6: /* FMAXNM */
        gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x7: /* FMINNM */
        gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x8: /* FNMUL */
        gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_res);
}

/* Floating point data-processing (2 source)
 *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_fp_2src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int opcode = extract32(insn, 12, 4);

    if (opcode > 8 || mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_single(s, opcode, rd, rn, rm);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_double(s, opcode, rd, rn, rm);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_2src_half(s, opcode, rd, rn, rm);
        break;
    default:
        unallocated_encoding(s);
    }
}
/* Floating-point data-processing (3 source) - single precision */
static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_sreg(s, rn);
    tcg_op2 = read_fp_sreg(s, rm);
    tcg_op3 = read_fp_sreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1 == true) {
        gen_helper_vfp_negs(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negs(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}

/* Floating-point data-processing (3 source) - double precision */
static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
                                  int rd, int rn, int rm, int ra)
{
    TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i64 tcg_res = tcg_temp_new_i64();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    tcg_op1 = read_fp_dreg(s, rn);
    tcg_op2 = read_fp_dreg(s, rm);
    tcg_op3 = read_fp_dreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1 == true) {
        gen_helper_vfp_negd(tcg_op3, tcg_op3);
    }

    if (o0 != o1) {
        gen_helper_vfp_negd(tcg_op1, tcg_op1);
    }

    gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_dreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_op3);
    tcg_temp_free_i64(tcg_res);
}

/* Floating-point data-processing (3 source) - half precision */
static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
                                int rd, int rn, int rm, int ra)
{
    TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
    TCGv_i32 tcg_res = tcg_temp_new_i32();
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_op3 = read_fp_hreg(s, ra);

    /* These are fused multiply-add, and must be done as one
     * floating point operation with no rounding between the
     * multiplication and addition steps.
     * NB that doing the negations here as separate steps is
     * correct: an input NaN should come out with its sign bit
     * flipped if it is a negated input.
     */
    if (o1 == true) {
        tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
    }

    if (o0 != o1) {
        tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
    }

    gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_i32(tcg_op3);
    tcg_temp_free_i32(tcg_res);
}

/* Floating point data-processing (3 source)
 *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
 * +---+---+---+-----------+------+----+------+----+------+------+------+
 */
static void disas_fp_3src(DisasContext *s, uint32_t insn)
{
    int mos = extract32(insn, 29, 3);
    int type = extract32(insn, 22, 2);
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int ra = extract32(insn, 10, 5);
    int rm = extract32(insn, 16, 5);
    bool o0 = extract32(insn, 15, 1);
    bool o1 = extract32(insn, 21, 1);

    if (mos) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
        break;
    case 1:
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
        break;
    case 3:
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
        break;
    default:
        unallocated_encoding(s);
    }
}

/* Floating point immediate
 *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
 * +---+---+---+-----------+------+---+------------+-------+------+------+
 */
static void disas_fp_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int imm5 = extract32(insn, 5, 5);
    int imm8 = extract32(insn, 13, 8);
    int type = extract32(insn, 22, 2);
    int mos = extract32(insn, 29, 3);
    uint64_t imm;
    MemOp sz;

    if (mos || imm5) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0:
        sz = MO_32;
        break;
    case 1:
        sz = MO_64;
        break;
    case 3:
        sz = MO_16;
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    imm = vfp_expand_imm(sz, imm8);
    write_fp_dreg(s, rd, tcg_constant_i64(imm));
}

/* Handle floating point <=> fixed point conversions. Note that we can
 * also deal with fp <=> integer conversions as a special case (scale == 64)
 * OPTME: consider handling that special case specially or at least skipping
 * the call to scalbn in the helpers for zero shifts.
 */
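/*
 * The shift passed to the helpers is 64 - scale, i.e. the number of
 * fractional bits of the fixed-point value; the int <-> fp callers pass
 * scale == 64 so that the shift degenerates to zero.
 */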
static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
                           bool itof, int rmode, int scale, int sf, int type)
{
    bool is_signed = !(opcode & 1);
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_shift, tcg_single;
    TCGv_i64 tcg_double;

    tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_shift = tcg_constant_i32(64 - scale);

    if (itof) {
        TCGv_i64 tcg_int = cpu_reg(s, rn);
        if (!sf) {
            TCGv_i64 tcg_extend = new_tmp_a64(s);

            if (is_signed) {
                tcg_gen_ext32s_i64(tcg_extend, tcg_int);
            } else {
                tcg_gen_ext32u_i64(tcg_extend, tcg_int);
            }

            tcg_int = tcg_extend;
        }

        switch (type) {
        case 1: /* float64 */
            tcg_double = tcg_temp_new_i64();
            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_dreg(s, rd, tcg_double);
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtos(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = tcg_temp_new_i32();
            if (is_signed) {
                gen_helper_vfp_sqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_uqtoh(tcg_single, tcg_int,
                                     tcg_shift, tcg_fpstatus);
            }
            write_fp_sreg(s, rd, tcg_single);
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_int = cpu_reg(s, rd);
        TCGv_i32 tcg_rmode;

        if (extract32(opcode, 2, 1)) {
            /* There are too many rounding modes to all fit into rmode,
             * so FCVTA[US] is a special case.
             */
            rmode = FPROUNDING_TIEAWAY;
        }
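        /*
         * From here on rmode holds the FPROUNDING_* constant to apply;
         * it is installed for the duration of the conversion and the
         * previous rounding mode is restored by the second set_rmode
         * call at the end of this branch.
         */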
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);

        switch (type) {
        case 1: /* float64 */
            tcg_double = read_fp_dreg(s, rn);
            if (is_signed) {
                if (!sf) {
                    gen_helper_vfp_tosld(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_tosqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                if (!sf) {
                    gen_helper_vfp_tould(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqd(tcg_int, tcg_double,
                                         tcg_shift, tcg_fpstatus);
                }
            }
            if (!sf) {
                tcg_gen_ext32u_i64(tcg_int, tcg_int);
            }
            tcg_temp_free_i64(tcg_double);
            break;

        case 0: /* float32 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqs(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_tosls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touls(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        case 3: /* float16 */
            tcg_single = read_fp_sreg(s, rn);
            if (sf) {
                if (is_signed) {
                    gen_helper_vfp_tosqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_touqh(tcg_int, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
            } else {
                TCGv_i32 tcg_dest = tcg_temp_new_i32();
                if (is_signed) {
                    gen_helper_vfp_toslh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                } else {
                    gen_helper_vfp_toulh(tcg_dest, tcg_single,
                                         tcg_shift, tcg_fpstatus);
                }
                tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
                tcg_temp_free_i32(tcg_dest);
            }
            tcg_temp_free_i32(tcg_single);
            break;

        default:
            g_assert_not_reached();
        }

        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    tcg_temp_free_ptr(tcg_fpstatus);
}

/* Floating point <-> fixed point conversions
 *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
 */
static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int scale = extract32(insn, 10, 6);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof;

    if (sbit || (!sf && scale < 32)) {
        unallocated_encoding(s);
        return;
    }

    switch (type) {
    case 0: /* float32 */
    case 1: /* float64 */
        break;
    case 3: /* float16 */
        if (dc_isar_feature(aa64_fp16, s)) {
            break;
        }
        /* fallthru */
    default:
        unallocated_encoding(s);
        return;
    }

    switch ((rmode << 3) | opcode) {
    case 0x2: /* SCVTF */
    case 0x3: /* UCVTF */
        itof = true;
        break;
    case 0x18: /* FCVTZS */
    case 0x19: /* FCVTZU */
        itof = false;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}

static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
    /* FMOV: gpr to or from float, double, or top half of quad fp reg,
     * without conversion.
     */

    if (itof) {
        TCGv_i64 tcg_rn = cpu_reg(s, rn);
        TCGv_i64 tmp;

        switch (type) {
        case 0:
            /* 32 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        case 1:
            /* 64 bit */
            write_fp_dreg(s, rd, tcg_rn);
            break;
        case 2:
            /* 64 bit to top half. */
            tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
            clear_vec_high(s, true, rd);
            break;
        case 3:
            /* 16 bit */
            tmp = tcg_temp_new_i64();
            tcg_gen_ext16u_i64(tmp, tcg_rn);
            write_fp_dreg(s, rd, tmp);
            tcg_temp_free_i64(tmp);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        TCGv_i64 tcg_rd = cpu_reg(s, rd);

        switch (type) {
        case 0:
            /* 32 bit */
            tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
            break;
        case 1:
            /* 64 bit */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
            break;
        case 2:
            /* 64 bits from top half */
            tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
            break;
        case 3:
            /* 16 bit */
            tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
{
    TCGv_i64 t = read_fp_dreg(s, rn);
    TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);

    gen_helper_fjcvtzs(t, t, fpstatus);

    tcg_temp_free_ptr(fpstatus);
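    /*
     * The helper returns the 32-bit converted result in the low half
     * of t and the value for ZF in the high half; FJCVTZS leaves N, C
     * and V all clear, which the three movi ops below implement.
     */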
    tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
    tcg_gen_extrh_i64_i32(cpu_ZF, t);
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_NF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);

    tcg_temp_free_i64(t);
}

/* Floating point <-> integer conversions
 *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
 */
static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 16, 3);
    int rmode = extract32(insn, 19, 2);
    int type = extract32(insn, 22, 2);
    bool sbit = extract32(insn, 29, 1);
    bool sf = extract32(insn, 31, 1);
    bool itof = false;

    if (sbit) {
        goto do_unallocated;
    }

    switch (opcode) {
    case 2: /* SCVTF */
    case 3: /* UCVTF */
        itof = true;
        /* fallthru */
    case 4: /* FCVTAS */
    case 5: /* FCVTAU */
        if (rmode != 0) {
            goto do_unallocated;
        }
        /* fallthru */
    case 0: /* FCVT[NPMZ]S */
    case 1: /* FCVT[NPMZ]U */
        switch (type) {
        case 0: /* float32 */
        case 1: /* float64 */
            break;
        case 3: /* float16 */
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            break;
        default:
            goto do_unallocated;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
        break;

    default:
        switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
        case 0b01100110: /* FMOV half <-> 32-bit int */
        case 0b01100111:
        case 0b11100110: /* FMOV half <-> 64-bit int */
        case 0b11100111:
            if (!dc_isar_feature(aa64_fp16, s)) {
                goto do_unallocated;
            }
            /* fallthru */
        case 0b00000110: /* FMOV 32-bit */
        case 0b00000111:
        case 0b10100110: /* FMOV 64-bit */
        case 0b10100111:
        case 0b11001110: /* FMOV top half of 128-bit */
        case 0b11001111:
            if (!fp_access_check(s)) {
                return;
            }
            itof = opcode & 1;
            handle_fmov(s, rd, rn, type, itof);
            break;

        case 0b00111110: /* FJCVTZS */
            if (!dc_isar_feature(aa64_jscvt, s)) {
                goto do_unallocated;
            } else if (fp_access_check(s)) {
                handle_fjcvtzs(s, rd, rn);
            }
            break;

        default:
        do_unallocated:
            unallocated_encoding(s);
            return;
        }
        break;
    }
}

/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
 *   31  30  29 28     25 24                          0
 * +---+---+---+---------+-----------------------------+
 * |   | 0 |   | 1 1 1 1 |                             |
 * +---+---+---+---------+-----------------------------+
 */
static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 24, 1)) {
        /* Floating point data-processing (3 source) */
        disas_fp_3src(s, insn);
    } else if (extract32(insn, 21, 1) == 0) {
        /* Floating point to fixed point conversions */
        disas_fp_fixed_conv(s, insn);
    } else {
        switch (extract32(insn, 10, 2)) {
        case 1:
            /* Floating point conditional compare */
            disas_fp_ccomp(s, insn);
            break;
        case 2:
            /* Floating point data-processing (2 source) */
            disas_fp_2src(s, insn);
            break;
        case 3:
            /* Floating point conditional select */
            disas_fp_csel(s, insn);
            break;
        case 0:
            switch (ctz32(extract32(insn, 12, 4))) {
            case 0: /* [15:12] == xxx1 */
                /* Floating point immediate */
                disas_fp_imm(s, insn);
                break;
            case 1: /* [15:12] == xx10 */
                /* Floating point compare */
                disas_fp_compare(s, insn);
                break;
            case 2: /* [15:12] == x100 */
                /* Floating point data-processing (1 source) */
                disas_fp_1src(s, insn);
                break;
            case 3: /* [15:12] == 1000 */
                unallocated_encoding(s);
                break;
            default: /* [15:12] == 0000 */
                /* Floating point <-> integer conversions */
                disas_fp_int_conv(s, insn);
                break;
            }
            break;
        }
    }
}

static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
                     int pos)
{
    /* Extract 64 bits from the middle of two concatenated 64 bit
     * vector register slices left:right. The extracted bits start
     * at 'pos' bits into the right (least significant) side.
     * We return the result in tcg_right, and guarantee not to
     * trash tcg_left.
     */
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    assert(pos > 0 && pos < 64);

    tcg_gen_shri_i64(tcg_right, tcg_right, pos);
    tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
    tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
}

/* EXT
 *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+------+---+------+------+
 */
static void disas_simd_ext(DisasContext *s, uint32_t insn)
{
    int is_q = extract32(insn, 30, 1);
    int op2 = extract32(insn, 22, 2);
    int imm4 = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int pos = imm4 << 3;
    TCGv_i64 tcg_resl, tcg_resh;

    if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resh = tcg_temp_new_i64();
    tcg_resl = tcg_temp_new_i64();

    /* Vd gets bits starting at pos bits into Vm:Vn. This is
     * either extracting 128 bits from a 128:128 concatenation, or
     * extracting 64 bits from a 64:64 concatenation.
     */
    if (!is_q) {
        read_vec_element(s, tcg_resl, rn, 0, MO_64);
        if (pos != 0) {
            read_vec_element(s, tcg_resh, rm, 0, MO_64);
            do_ext64(s, tcg_resh, tcg_resl, pos);
        }
    } else {
        TCGv_i64 tcg_hh;
        typedef struct {
            int reg;
            int elt;
        } EltPosns;
        EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
        EltPosns *elt = eltposns;

        if (pos >= 64) {
            elt++;
            pos -= 64;
        }

        read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
        elt++;
        read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
        elt++;
        if (pos != 0) {
            do_ext64(s, tcg_resh, tcg_resl, pos);
            tcg_hh = tcg_temp_new_i64();
            read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
            do_ext64(s, tcg_hh, tcg_resh, pos);
            tcg_temp_free_i64(tcg_hh);
        }
    }

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);
    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
    }
    tcg_temp_free_i64(tcg_resh);
    clear_vec_high(s, is_q, rd);
}

/* TBL/TBX
 *   31  30 29         24 23 22  21 20  16 15 14 13  12  11 10 9    5 4    0
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
 */
static void disas_simd_tb(DisasContext *s, uint32_t insn)
{
    int op2 = extract32(insn, 22, 2);
    int is_q = extract32(insn, 30, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int is_tbx = extract32(insn, 12, 1);
    int len = (extract32(insn, 13, 2) + 1) * 16;

    if (op2 != 0) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
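    /*
     * The helper gets the table length in bytes (16 per table register),
     * the TBX flag and the first table register index packed into the
     * simd_data immediate; Vm supplies the indices and Vd the result.
     */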
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s),
                       (len << 6) | (is_tbx << 5) | rn,
                       gen_helper_simd_tblx);
}

/* ZIP/UZP/TRN
 *   31  30 29         24 23  22  21 20  16 15 14 12 11 10 9    5 4    0
 * +---+---+-------------+------+---+------+---+------------------+------+
 * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
 * +---+---+-------------+------+---+------+---+------------------+------+
 */
static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    /* opc field bits [1:0] indicate ZIP/UZP/TRN;
     * bit 2 indicates 1 vs 2 variant of the insn.
     */
    int opcode = extract32(insn, 12, 2);
    bool part = extract32(insn, 14, 1);
    bool is_q = extract32(insn, 30, 1);
    int esize = 8 << size;
    int i, ofs;
    int datasize = is_q ? 128 : 64;
    int elements = datasize / esize;
    TCGv_i64 tcg_res, tcg_resl, tcg_resh;

    if (opcode == 0 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_resl = tcg_const_i64(0);
    tcg_resh = is_q ? tcg_const_i64(0) : NULL;
    tcg_res = tcg_temp_new_i64();

    for (i = 0; i < elements; i++) {
        switch (opcode) {
        case 1: /* UZP1/2 */
        {
            int midpoint = elements / 2;
            if (i < midpoint) {
                read_vec_element(s, tcg_res, rn, 2 * i + part, size);
            } else {
                read_vec_element(s, tcg_res, rm,
                                 2 * (i - midpoint) + part, size);
            }
            break;
        }
        case 2: /* TRN1/2 */
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
            } else {
                read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
            }
            break;
        case 3: /* ZIP1/2 */
        {
            int base = part * elements / 2;
            if (i & 1) {
                read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
            } else {
                read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
            }
            break;
        }
        default:
            g_assert_not_reached();
        }

        ofs = i * esize;
        if (ofs < 64) {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
            tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
        } else {
            tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
            tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
        }
    }

    tcg_temp_free_i64(tcg_res);

    write_vec_element(s, tcg_resl, rd, 0, MO_64);
    tcg_temp_free_i64(tcg_resl);

    if (is_q) {
        write_vec_element(s, tcg_resh, rd, 1, MO_64);
        tcg_temp_free_i64(tcg_resh);
    }
    clear_vec_high(s, is_q, rd);
}

/*
 * do_reduction_op helper
 *
 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
 * important for correct NaN propagation that we do these
 * operations in exactly the order specified by the pseudocode.
 *
 * This is a recursive function, TCG temps should be freed by the
 * calling function once it is done with the values.
 */
static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
                                int esize, int size, int vmap, TCGv_ptr fpst)
{
    if (esize == size) {
        int element;
        MemOp msize = esize == 16 ? MO_16 : MO_32;
        TCGv_i32 tcg_elem;

        /* We should have one register left here */
        assert(ctpop8(vmap) == 1);
        element = ctz32(vmap);
        assert(element < 8);

        tcg_elem = tcg_temp_new_i32();
        read_vec_element_i32(s, tcg_elem, rn, element, msize);
        return tcg_elem;
    } else {
        int bits = size / 2;
        int shift = ctpop8(vmap) / 2;
        int vmap_lo = (vmap >> shift) & vmap;
        int vmap_hi = (vmap & ~vmap_lo);
        TCGv_i32 tcg_hi, tcg_lo, tcg_res;

        tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
        tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
        tcg_res = tcg_temp_new_i32();

        switch (fpopcode) {
        case 0x0c: /* fmaxnmv half-precision */
            gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x0f: /* fmaxv half-precision */
            gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1c: /* fminnmv half-precision */
            gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x1f: /* fminv half-precision */
            gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2c: /* fmaxnmv */
            gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x2f: /* fmaxv */
            gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3c: /* fminnmv */
            gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        case 0x3f: /* fminv */
            gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        tcg_temp_free_i32(tcg_hi);
        tcg_temp_free_i32(tcg_lo);
        return tcg_res;
    }
}

/* AdvSIMD across lanes
 *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    bool is_q = extract32(insn, 30, 1);
    bool is_u = extract32(insn, 29, 1);
    bool is_fp = false;
    bool is_min = false;
    int esize;
    int elements;
    int i;
    TCGv_i64 tcg_res, tcg_elt;

    switch (opcode) {
    case 0x1b: /* ADDV */
        if (is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x3: /* SADDLV, UADDLV */
    case 0xa: /* SMAXV, UMAXV */
    case 0x1a: /* SMINV, UMINV */
        if (size == 3 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0xc: /* FMAXNMV, FMINNMV */
    case 0xf: /* FMAXV, FMINV */
        /* Bit 1 of size field encodes min vs max and the actual size
         * depends on the encoding of the U bit. If not set (and FP16
         * enabled) then we do half-precision float instead of single
         * precision.
         */
        is_min = extract32(size, 1, 1);
        is_fp = true;
        if (!is_u && dc_isar_feature(aa64_fp16, s)) {
            size = 1;
        } else if (!is_u || !is_q || extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        } else {
            size = 2;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    esize = 8 << size;
    elements = (is_q ? 128 : 64) / esize;

    tcg_res = tcg_temp_new_i64();
    tcg_elt = tcg_temp_new_i64();

    /* These instructions operate across all lanes of a vector
     * to produce a single result. We can guarantee that a 64
     * bit intermediate is sufficient:
     * + for [US]ADDLV the maximum element size is 32 bits, and
     *   the result type is 64 bits
     * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
     *   same as the element size, which is 32 bits at most
     * For the integer operations we can choose to work at 64
     * or 32 bits and truncate at the end; for simplicity
     * we use 64 bits always. The floating point
     * ops do require 32 bit intermediates, though.
     */
    if (!is_fp) {
        read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));

        for (i = 1; i < elements; i++) {
            read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));

            switch (opcode) {
            case 0x03: /* SADDLV / UADDLV */
            case 0x1b: /* ADDV */
                tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
                break;
            case 0x0a: /* SMAXV / UMAXV */
                if (is_u) {
                    tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            case 0x1a: /* SMINV / UMINV */
                if (is_u) {
                    tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
                } else {
                    tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
                }
                break;
            default:
                g_assert_not_reached();
            }

        }
    } else {
        /* Floating point vector reduction ops which work across 32
         * bit (single) or 16 bit (half-precision) intermediates.
         * Note that correct NaN propagation requires that we do these
         * operations in exactly the order specified by the pseudocode.
         */
        TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        int fpopcode = opcode | is_min << 4 | is_u << 5;
        int vmap = (1 << elements) - 1;
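        /*
         * vmap starts with one bit set per element; each recursion
         * level of do_reduction_op splits it in half, so the reduction
         * tree matches the pairwise Reduce() of the pseudocode.
         */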
        TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
                                             (is_q ? 128 : 64), vmap, fpst);
        tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
        tcg_temp_free_i32(tcg_res32);
        tcg_temp_free_ptr(fpst);
    }

    tcg_temp_free_i64(tcg_elt);

    /* Now truncate the result to the width required for the final output */
    if (opcode == 0x03) {
        /* SADDLV, UADDLV: result is 2*esize */
        size++;
    }

    switch (size) {
    case 0:
        tcg_gen_ext8u_i64(tcg_res, tcg_res);
        break;
    case 1:
        tcg_gen_ext16u_i64(tcg_res, tcg_res);
        break;
    case 2:
        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_dreg(s, rd, tcg_res);
    tcg_temp_free_i64(tcg_res);
}

/* DUP (Element, Vector)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    int index;

    if (size > 3 || (size == 3 && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);
    tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
                         vec_reg_offset(s, rn, index, size),
                         is_q ? 16 : 8, vec_full_reg_size(s));
}

/* DUP (element, scalar)
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 */
static void handle_simd_dupes(DisasContext *s, int rd, int rn,
                              int imm5)
{
    int size = ctz32(imm5);
    int index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    index = imm5 >> (size + 1);

    /* This instruction just extracts the specified element and
     * zero-extends it into the bottom of the destination register.
     */
    tmp = tcg_temp_new_i64();
    read_vec_element(s, tmp, rn, index, size);
    write_fp_dreg(s, rd, tmp);
    tcg_temp_free_i64(tmp);
}

/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
/* DUP (General)
 *
 *  31  30   29              21 20    16 15        10  9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
                             int imm5)
{
    int size = ctz32(imm5);
    uint32_t dofs, oprsz, maxsz;

    if (size > 3 || ((size == 3) && !is_q)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dofs = vec_full_reg_offset(s, rd);
    oprsz = is_q ? 16 : 8;
    maxsz = vec_full_reg_size(s);

    tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
}

/* INS (Element)
 *
 *  31                   21 20    16 15  14    11 10  9    5 4    0
 * +-----------------------+--------+------------+---+------+------+
 * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
 * +-----------------------+--------+------------+---+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_inse(DisasContext *s, int rd, int rn,
                             int imm4, int imm5)
{
    int size = ctz32(imm5);
    int src_index, dst_index;
    TCGv_i64 tmp;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    dst_index = extract32(imm5, 1+size, 5);
    src_index = extract32(imm4, size, 4);

    tmp = tcg_temp_new_i64();

    read_vec_element(s, tmp, rn, src_index, size);
    write_vec_element(s, tmp, rd, dst_index, size);

    tcg_temp_free_i64(tmp);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}


/* INS (General)
 *
 *  31                   21 20    16 15        10  9    5 4    0
 * +-----------------------+--------+-------------+------+------+
 * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
 * +-----------------------+--------+-------------+------+------+
 *
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 * index: encoded in imm5<4:size+1>
 */
static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
{
    int size = ctz32(imm5);
    int idx;

    if (size > 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    idx = extract32(imm5, 1 + size, 4 - size);
    write_vec_element(s, cpu_reg(s, rn), rd, idx, size);

    /* INS is considered a 128-bit write for SVE. */
    clear_vec_high(s, true, rd);
}

/*
 * UMOV (General)
 * SMOV (General)
 *
 *  31  30   29              21 20    16 15    12   10 9    5 4    0
 * +---+---+-------------------+--------+-------------+------+------+
 * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
 * +---+---+-------------------+--------+-------------+------+------+
 *
 * U: unsigned when set
 * size: encoded in imm5 (see ARM ARM LowestSetBit())
 */
static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
                                  int rn, int rd, int imm5)
{
    int size = ctz32(imm5);
    int element;
    TCGv_i64 tcg_rd;

    /* Check for UnallocatedEncodings */
    if (is_signed) {
        if (size > 2 || (size == 2 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        if (size > 3
            || (size < 3 && is_q)
            || (size == 3 && !is_q)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    element = extract32(imm5, 1+size, 4);

    tcg_rd = cpu_reg(s, rd);
    read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
    if (is_signed && !is_q) {
        tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
    }
}

/* AdvSIMD copy
 *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +---+---+----+-----------------+------+---+------+---+------+------+
 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +---+---+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int op = extract32(insn, 29, 1);
    int is_q = extract32(insn, 30, 1);
    int imm5 = extract32(insn, 16, 5);

    if (op) {
        if (is_q) {
            /* INS (element) */
            handle_simd_inse(s, rd, rn, imm4, imm5);
        } else {
            unallocated_encoding(s);
        }
    } else {
        switch (imm4) {
        case 0:
            /* DUP (element - vector) */
            handle_simd_dupe(s, is_q, rd, rn, imm5);
            break;
        case 1:
            /* DUP (general) */
            handle_simd_dupg(s, is_q, rd, rn, imm5);
            break;
        case 3:
            if (is_q) {
                /* INS (general) */
                handle_simd_insg(s, rd, rn, imm5);
            } else {
                unallocated_encoding(s);
            }
            break;
        case 5:
        case 7:
            /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
            handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
    }
}
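/*
 * Illustrative note (added in editing, not from the original source):
 * the abcdefgh/cmode expansion performed by asimd_imm_const() in the
 * modified-immediate decode below turns the 8 immediate bits into a
 * 64-bit replicated pattern.  In one of the cmode forms (cmode == 0xe
 * with the op bit set), each immediate bit expands into a whole byte,
 * so abcdefgh = 0b10000001 becomes 0xFF000000000000FF.  The simpler
 * shifted-immediate forms instead place abcdefgh at a byte offset
 * within each 16/32-bit lane and replicate the lane across the
 * register.
 */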
/* AdvSIMD modified immediate
 *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
 *
 * There are a number of operations that can be carried out here:
 *   MOVI - move (shifted) imm into register
 *   MVNI - move inverted (shifted) imm into register
 *   ORR  - bitwise OR of (shifted) imm with register
 *   BIC  - bitwise clear of (shifted) imm with register
 * With ARMv8.2 we also have:
 *   FMOV half-precision
 */
static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int cmode = extract32(insn, 12, 4);
    int o2 = extract32(insn, 11, 1);
    uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
    bool is_neg = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);
    uint64_t imm = 0;

    if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
        /* Check for FMOV (vector, immediate) - half-precision */
        if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (cmode == 15 && o2 && !is_neg) {
        /* FMOV (vector, immediate) - half-precision */
        imm = vfp_expand_imm(MO_16, abcdefgh);
        /* now duplicate across the lanes */
        imm = dup_const(MO_16, imm);
    } else {
        imm = asimd_imm_const(abcdefgh, cmode, is_neg);
    }

    if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
        /* MOVI or MVNI, with MVNI negation handled above. */
        tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
                             vec_full_reg_size(s), imm);
    } else {
        /* ORR or BIC, with BIC negation to AND handled above. */
        if (is_neg) {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
        } else {
            gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
        }
    }
}

/* AdvSIMD scalar copy
 *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
 * +-----+----+-----------------+------+---+------+---+------+------+
 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
 * +-----+----+-----------------+------+---+------+---+------+------+
 */
static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm4 = extract32(insn, 11, 4);
    int imm5 = extract32(insn, 16, 5);
    int op = extract32(insn, 29, 1);

    if (op != 0 || imm4 != 0) {
        unallocated_encoding(s);
        return;
    }

    /* DUP (element, scalar) */
    handle_simd_dupes(s, rd, rn, imm5);
}

/* AdvSIMD scalar pairwise
 *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
{
    int u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr fpst;

    /* For some ops (the FP ones), size[1] is part of the encoding.
     * For ADDP strictly it is not but size[1] is always 1 for valid
     * encodings.
     */
    opcode |= (extract32(size, 1, 1) << 5);

    switch (opcode) {
    case 0x3b: /* ADDP */
        if (u || size != 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }

        fpst = NULL;
        break;
    case 0xc: /* FMAXNMP */
    case 0xd: /* FADDP */
    case 0xf: /* FMAXP */
    case 0x2c: /* FMINNMP */
    case 0x2f: /* FMINP */
        /* FP op, size[0] is 32 or 64 bit */
        if (!u) {
            if (!dc_isar_feature(aa64_fp16, s)) {
                unallocated_encoding(s);
                return;
            } else {
                size = MO_16;
            }
        } else {
            size = extract32(size, 0, 1) ? MO_64 : MO_32;
        }

        if (!fp_access_check(s)) {
            return;
        }

        fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == MO_64) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_64);
        read_vec_element(s, tcg_op2, rn, 1, MO_64);

        switch (opcode) {
        case 0x3b: /* ADDP */
            tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
            break;
        case 0xc: /* FMAXNMP */
            gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xd: /* FADDP */
            gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0xf: /* FMAXP */
            gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2c: /* FMINNMP */
            gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        case 0x2f: /* FMINP */
            gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op1, rn, 0, size);
        read_vec_element_i32(s, tcg_op2, rn, 1, size);

        if (size == MO_16) {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0xc: /* FMAXNMP */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xd: /* FADDP */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0xf: /* FMAXP */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2c: /* FMINNMP */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x2f: /* FMINP */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }
        }

        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_res);
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
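/*
 * Illustrative note (added in editing, not from the original source):
 * the scalar pairwise forms always combine elements 0 and 1 of the
 * source vector, e.g. ADDP Dd, Vn.2D computes Vn.D[0] + Vn.D[1] and
 * writes the 64-bit sum to Dd.  The fpst == NULL case above is the
 * sentinel for the one integer op (ADDP), which needs no FP status.
 */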
/*
 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 *
 * This code handles the common shift logic and is used by both
 * the vector and scalar code.
 */
static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
                                    TCGv_i64 tcg_rnd, bool accumulate,
                                    bool is_u, int size, int shift)
{
    bool extended_result = false;
    bool round = tcg_rnd != NULL;
    int ext_lshift = 0;
    TCGv_i64 tcg_src_hi;

    if (round && size == 3) {
        extended_result = true;
        ext_lshift = 64 - shift;
        tcg_src_hi = tcg_temp_new_i64();
    } else if (shift == 64) {
        if (!accumulate && is_u) {
            /* result is zero */
            tcg_gen_movi_i64(tcg_res, 0);
            return;
        }
    }

    /* Deal with the rounding step */
    if (round) {
        if (extended_result) {
            TCGv_i64 tcg_zero = tcg_constant_i64(0);
            if (!is_u) {
                /* take care of sign extending tcg_res */
                tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_src_hi,
                                 tcg_rnd, tcg_zero);
            } else {
                tcg_gen_add2_i64(tcg_src, tcg_src_hi,
                                 tcg_src, tcg_zero,
                                 tcg_rnd, tcg_zero);
            }
        } else {
            tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
        }
    }

    /* Now do the shift right */
    if (round && extended_result) {
        /* extended case, >64 bit precision required */
        if (ext_lshift == 0) {
            /* special case, only high bits matter */
            tcg_gen_mov_i64(tcg_src, tcg_src_hi);
        } else {
            tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
            tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
        }
    } else {
        if (is_u) {
            if (shift == 64) {
                /* essentially shifting in 64 zeros */
                tcg_gen_movi_i64(tcg_src, 0);
            } else {
                tcg_gen_shri_i64(tcg_src, tcg_src, shift);
            }
        } else {
            if (shift == 64) {
                /* effectively extending the sign-bit */
                tcg_gen_sari_i64(tcg_src, tcg_src, 63);
            } else {
                tcg_gen_sari_i64(tcg_src, tcg_src, shift);
            }
        }
    }

    if (accumulate) {
        tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
    } else {
        tcg_gen_mov_i64(tcg_res, tcg_src);
    }

    if (extended_result) {
        tcg_temp_free_i64(tcg_src_hi);
    }
}
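/*
 * Illustrative note (added in editing, not from the original source):
 * the rounding step above implements res = (src + (1 << (shift - 1)))
 * >> shift.  For a 64-bit element the addition of the rounding
 * constant can carry out of bit 63, which is why the size == 3 case
 * widens to a 128-bit intermediate with tcg_gen_add2_i64 and then
 * reassembles the shifted result from the high and low halves.
 * E.g. SRSHR with shift = 4 on 0x18 yields (0x18 + 0x8) >> 4 = 2,
 * where a plain arithmetic shift would give 1.
 */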
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
                                    bool is_u, int immh, int immb,
                                    int opcode, int rn, int rd)
{
    const int size = 3;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    bool round = false;
    bool insert = false;
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;
    TCGv_i64 tcg_round;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        accumulate = true;
        break;
    case 0x04: /* SRSHR / URSHR (rounding) */
        round = true;
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = round = true;
        break;
    case 0x08: /* SRI */
        insert = true;
        break;
    }

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        /* shift count same as element size is valid but does nothing;
         * special case to avoid potential shift by 64.
         */
        int esize = 8 << size;
        if (shift != esize) {
            tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
            tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
        }
    } else {
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}

/* SHL/SLI - Scalar shift left */
static void handle_scalar_simd_shli(DisasContext *s, bool insert,
                                    int immh, int immb, int opcode,
                                    int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    TCGv_i64 tcg_rn;
    TCGv_i64 tcg_rd;

    if (!extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = read_fp_dreg(s, rn);
    tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();

    if (insert) {
        tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
    } else {
        tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
}
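/*
 * Illustrative note (added in editing, not from the original source):
 * the immh:immb field encodes the shift amount relative to the element
 * size.  For right shifts, shift = (2 * esize) - immhb, so in the
 * scalar 64-bit forms (immh<3> set, esize = 64) immh:immb = 127
 * encodes a shift of 1 and immh:immb = 64 encodes a shift of 64.
 * For left shifts the sense is inverted: shift = immhb - esize, so
 * 64 maps to 0 and 127 maps to 63.
 */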
/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
 * (signed/unsigned) narrowing */
static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
                                   bool is_u_shift, bool is_u_narrow,
                                   int immh, int immb, int opcode,
                                   int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int esize = 8 << size;
    int shift = (2 * esize) - immhb;
    int elements = is_scalar ? 1 : (64 / esize);
    bool round = extract32(opcode, 0, 1);
    MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
    TCGv_i32 tcg_rd_narrowed;
    TCGv_i64 tcg_final;

    static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
        { gen_helper_neon_narrow_sat_s8,
          gen_helper_neon_unarrow_sat8 },
        { gen_helper_neon_narrow_sat_s16,
          gen_helper_neon_unarrow_sat16 },
        { gen_helper_neon_narrow_sat_s32,
          gen_helper_neon_unarrow_sat32 },
        { NULL, NULL },
    };
    static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
        gen_helper_neon_narrow_sat_u8,
        gen_helper_neon_narrow_sat_u16,
        gen_helper_neon_narrow_sat_u32,
        NULL
    };
    NeonGenNarrowEnvFn *narrowfn;

    int i;

    assert(size < 4);

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_u_shift) {
        narrowfn = unsigned_narrow_fns[size];
    } else {
        narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_rd_narrowed = tcg_temp_new_i32();
    tcg_final = tcg_const_i64(0);

    if (round) {
        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, ldop);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, is_u_shift, size+1, shift);
        narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }

    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i32(tcg_rd_narrowed);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}
/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
                             bool src_unsigned, bool dst_unsigned,
                             int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int shift = immhb - (8 << size);
    int pass;

    assert(immh != 0);
    assert(!(scalar && is_q));

    if (!scalar) {
        if (!is_q && extract32(immh, 3, 1)) {
            unallocated_encoding(s);
            return;
        }

        /* Since we use the variable-shift helpers we must
         * replicate the shift count into each element of
         * the tcg_shift value.
         */
        switch (size) {
        case 0:
            shift |= shift << 8;
            /* fall through */
        case 1:
            shift |= shift << 16;
            break;
        case 2:
        case 3:
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_shift = tcg_constant_i64(shift);
        static NeonGenTwo64OpEnvFn * const fns[2][2] = {
            { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
            { NULL, gen_helper_neon_qshl_u64 },
        };
        NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
        int maxpass = is_q ? 2 : 1;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            write_vec_element(s, tcg_op, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        TCGv_i32 tcg_shift = tcg_constant_i32(shift);
        static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
            {
                { gen_helper_neon_qshl_s8,
                  gen_helper_neon_qshl_s16,
                  gen_helper_neon_qshl_s32 },
                { gen_helper_neon_qshlu_s8,
                  gen_helper_neon_qshlu_s16,
                  gen_helper_neon_qshlu_s32 }
            }, {
                { NULL, NULL, NULL },
                { gen_helper_neon_qshl_u8,
                  gen_helper_neon_qshl_u16,
                  gen_helper_neon_qshl_u32 }
            }
        };
        NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
        MemOp memop = scalar ? size : MO_32;
        int maxpass = scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, memop);
            genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
            if (scalar) {
                switch (size) {
                case 0:
                    tcg_gen_ext8u_i32(tcg_op, tcg_op);
                    break;
                case 1:
                    tcg_gen_ext16u_i32(tcg_op, tcg_op);
                    break;
                case 2:
                    break;
                default:
                    g_assert_not_reached();
                }
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
        }

        if (!scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
}
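/*
 * Illustrative note (added in editing, not from the original source):
 * the shift-count replication above prepares the constant for the
 * variable-shift Neon helpers, which expect a per-element count.
 * For size == 0 (bytes) a count of e.g. 3 becomes 0x03030303: the
 * first OR copies it into bits [15:8], then the fall-through into
 * case 1 copies the 16-bit pattern into bits [31:16].
 */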
/* Common vector code for handling integer to FP conversion */
static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
                                   int elements, int is_signed,
                                   int fracbits, int size)
{
    TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    TCGv_i32 tcg_shift = NULL;

    MemOp mop = size | (is_signed ? MO_SIGN : 0);
    int pass;

    if (fracbits || size == MO_64) {
        tcg_shift = tcg_constant_i32(fracbits);
    }

    if (size == MO_64) {
        TCGv_i64 tcg_int64 = tcg_temp_new_i64();
        TCGv_i64 tcg_double = tcg_temp_new_i64();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element(s, tcg_int64, rn, pass, mop);

            if (is_signed) {
                gen_helper_vfp_sqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            } else {
                gen_helper_vfp_uqtod(tcg_double, tcg_int64,
                                     tcg_shift, tcg_fpst);
            }
            if (elements == 1) {
                write_fp_dreg(s, rd, tcg_double);
            } else {
                write_vec_element(s, tcg_double, rd, pass, MO_64);
            }
        }

        tcg_temp_free_i64(tcg_int64);
        tcg_temp_free_i64(tcg_double);

    } else {
        TCGv_i32 tcg_int32 = tcg_temp_new_i32();
        TCGv_i32 tcg_float = tcg_temp_new_i32();

        for (pass = 0; pass < elements; pass++) {
            read_vec_element_i32(s, tcg_int32, rn, pass, mop);

            switch (size) {
            case MO_32:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultos(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            case MO_16:
                if (fracbits) {
                    if (is_signed) {
                        gen_helper_vfp_sltoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    } else {
                        gen_helper_vfp_ultoh(tcg_float, tcg_int32,
                                             tcg_shift, tcg_fpst);
                    }
                } else {
                    if (is_signed) {
                        gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
                    } else {
                        gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
                    }
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                write_fp_sreg(s, rd, tcg_float);
            } else {
                write_vec_element_i32(s, tcg_float, rd, pass, size);
            }
        }

        tcg_temp_free_i32(tcg_int32);
        tcg_temp_free_i32(tcg_float);
    }

    tcg_temp_free_ptr(tcg_fpst);

    clear_vec_high(s, elements << size == 16, rd);
}

/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    int size, elements, fracbits;
    int immhb = immh << 3 | immb;

    if (immh & 8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 4) {
        size = MO_32;
    } else if (immh & 2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* immh == 0 would be a failure of the decode logic */
        g_assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = (8 << is_q) >> size;
    }
    fracbits = (16 << size) - immhb;

    if (!fp_access_check(s)) {
        return;
    }

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}
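/*
 * Illustrative note (added in editing, not from the original source):
 * a fixed-point convert with fracbits = f treats the integer source as
 * a value scaled by 2^f, i.e. result = x * 2^-f.  The encoding gives
 * fracbits = (16 << size) - immhb, so for a 32-bit element
 * (16 << MO_32 == 64) immh:immb = 60 means 4 fraction bits: SCVTF of
 * the integer 24 with fracbits = 4 yields 24 / 16 = 1.5.
 */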
/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int pass, size, fracbits;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (immh & 0x8) {
        size = MO_64;
        if (!is_scalar && !is_q) {
            unallocated_encoding(s);
            return;
        }
    } else if (immh & 0x4) {
        size = MO_32;
    } else if (immh & 0x2) {
        size = MO_16;
        if (!dc_isar_feature(aa64_fp16, s)) {
            unallocated_encoding(s);
            return;
        }
    } else {
        /* Should have split out AdvSIMD modified immediate earlier. */
        assert(immh == 1);
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    fracbits = (16 << size) - immhb;
    tcg_shift = tcg_constant_i32(fracbits);

    if (size == MO_64) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
        int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);

        switch (size) {
        case MO_16:
            if (is_u) {
                fn = gen_helper_vfp_touhh;
            } else {
                fn = gen_helper_vfp_toshh;
            }
            break;
        case MO_32:
            if (is_u) {
                fn = gen_helper_vfp_touls;
            } else {
                fn = gen_helper_vfp_tosls;
            }
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, size);
            fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, size);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_rmode);
}

/* AdvSIMD scalar shift by immediate
 *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it operates on fixed-size registers.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* AdvSIMD scalar three different
 *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
        TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}
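/*
 * Illustrative note (added in editing, not from the original source):
 * the "doubling" in SQDMULL is implemented above by computing the
 * widening product once and then saturating-adding it to itself, so
 * 2 * (a * b) saturates correctly.  With 16-bit inputs
 * a = b = 0x8000 (-32768) the product is 0x40000000; doubling it
 * overflows the signed 32-bit range, so the result saturates to
 * 0x7FFFFFFF, as the architecture requires.
 */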
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
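/*
 * Illustrative note (added in editing, not from the original source):
 * the setcond/neg idiom in the comparison cases works because
 * tcg_gen_setcond_i64 produces 0 or 1, and negating 1 in two's
 * complement gives 0xFFFFFFFFFFFFFFFF, i.e. the all-ones element the
 * AdvSIMD compare instructions are defined to produce on success.
 */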
/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the encoding it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}
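/*
 * Illustrative note (added in editing, not from the original source):
 * elements * (size ? 8 : 4) is the number of bytes actually written,
 * so the clear_vec_high() condition is true exactly for the 128-bit
 * (Q = 1) forms, e.g. 4 single-precision elements (16 bytes) but not
 * 2 (8 bytes); for the 64-bit and scalar forms clear_vec_high() also
 * zeroes bits [127:64] of Vd, as a write to the low half requires.
 */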
/* AdvSIMD scalar three same
 *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}

/* AdvSIMD scalar three same FP16
 *  31 30  29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!dc_isar_feature(aa64_fp16, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);

    tcg_op1 = read_fp_hreg(s, rn);
    tcg_op2 = read_fp_hreg(s, rm);
    tcg_res = tcg_temp_new_i32();

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_i32(tcg_res);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_ptr(fpst);
}
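/*
 * Illustrative note (added in editing, not from the original source):
 * FABD above has no dedicated half-precision helper, so it is built
 * from a subtract followed by clearing bit 15: for an IEEE binary16
 * value the sign bit is bit 15, so ANDing with 0x7fff computes the
 * absolute value of the difference.
 */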
/* AdvSIMD scalar three same extra
 *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
 */
static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
                                                   uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 4);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i32 ele1, ele2, ele3;
    TCGv_i64 res;
    bool feature;

    switch (u * 16 + opcode) {
    case 0x10: /* SQRDMLAH (vector) */
    case 0x11: /* SQRDMLSH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        feature = dc_isar_feature(aa64_rdm, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
    if (!feature) {
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {
        return;
    }

    /* Do a single operation on the lowest element in the vector.
     * We use the standard Neon helpers and rely on 0 OP 0 == 0
     * with no side effects for all these operations.
     * OPTME: special-purpose helpers would avoid doing some
     * unnecessary work in the helper for the 16 bit cases.
     */
    ele1 = tcg_temp_new_i32();
    ele2 = tcg_temp_new_i32();
    ele3 = tcg_temp_new_i32();

    read_vec_element_i32(s, ele1, rn, 0, size);
    read_vec_element_i32(s, ele2, rm, 0, size);
    read_vec_element_i32(s, ele3, rd, 0, size);

    switch (opcode) {
    case 0x0: /* SQRDMLAH */
        if (size == 1) {
            gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    case 0x1: /* SQRDMLSH */
        if (size == 1) {
            gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
        } else {
            gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
        }
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i32(ele1);
    tcg_temp_free_i32(ele2);

    res = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(res, ele3);
    tcg_temp_free_i32(ele3);

    write_fp_dreg(s, rd, res);
    tcg_temp_free_i64(res);
}

static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller need only provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement this using setcond (test)
         * and then negating, matching the code generated below.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            tcg_gen_abs_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
        break;
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1e: /* FRINT32Z */
    case 0x5e: /* FRINT32X */
        gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x1f: /* FRINT64Z */
    case 0x5f: /* FRINT64X */
        gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}
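/*
 * Illustrative note (added in editing, not from the original source):
 * the five FCVT*S opcodes (and likewise FCVT*U) above all funnel into
 * a single helper with zero fraction bits because they differ only in
 * rounding mode; the caller is expected to have loaded the appropriate
 * mode into the FP status via tcg_rmode before invoking
 * handle_2misc_64, and to restore the previous mode afterwards.
 */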
1 : 2); pass++) { 10041 read_vec_element(s, tcg_op, rn, pass, MO_64); 10042 if (swap) { 10043 genfn(tcg_res, tcg_zero, tcg_op, fpst); 10044 } else { 10045 genfn(tcg_res, tcg_op, tcg_zero, fpst); 10046 } 10047 write_vec_element(s, tcg_res, rd, pass, MO_64); 10048 } 10049 tcg_temp_free_i64(tcg_res); 10050 tcg_temp_free_i64(tcg_op); 10051 10052 clear_vec_high(s, !is_scalar, rd); 10053 } else { 10054 TCGv_i32 tcg_op = tcg_temp_new_i32(); 10055 TCGv_i32 tcg_zero = tcg_constant_i32(0); 10056 TCGv_i32 tcg_res = tcg_temp_new_i32(); 10057 NeonGenTwoSingleOpFn *genfn; 10058 bool swap = false; 10059 int pass, maxpasses; 10060 10061 if (size == MO_16) { 10062 switch (opcode) { 10063 case 0x2e: /* FCMLT (zero) */ 10064 swap = true; 10065 /* fall through */ 10066 case 0x2c: /* FCMGT (zero) */ 10067 genfn = gen_helper_advsimd_cgt_f16; 10068 break; 10069 case 0x2d: /* FCMEQ (zero) */ 10070 genfn = gen_helper_advsimd_ceq_f16; 10071 break; 10072 case 0x6d: /* FCMLE (zero) */ 10073 swap = true; 10074 /* fall through */ 10075 case 0x6c: /* FCMGE (zero) */ 10076 genfn = gen_helper_advsimd_cge_f16; 10077 break; 10078 default: 10079 g_assert_not_reached(); 10080 } 10081 } else { 10082 switch (opcode) { 10083 case 0x2e: /* FCMLT (zero) */ 10084 swap = true; 10085 /* fall through */ 10086 case 0x2c: /* FCMGT (zero) */ 10087 genfn = gen_helper_neon_cgt_f32; 10088 break; 10089 case 0x2d: /* FCMEQ (zero) */ 10090 genfn = gen_helper_neon_ceq_f32; 10091 break; 10092 case 0x6d: /* FCMLE (zero) */ 10093 swap = true; 10094 /* fall through */ 10095 case 0x6c: /* FCMGE (zero) */ 10096 genfn = gen_helper_neon_cge_f32; 10097 break; 10098 default: 10099 g_assert_not_reached(); 10100 } 10101 } 10102 10103 if (is_scalar) { 10104 maxpasses = 1; 10105 } else { 10106 int vector_size = 8 << is_q; 10107 maxpasses = vector_size >> size; 10108 } 10109 10110 for (pass = 0; pass < maxpasses; pass++) { 10111 read_vec_element_i32(s, tcg_op, rn, pass, size); 10112 if (swap) { 10113 genfn(tcg_res, tcg_zero, tcg_op, fpst); 10114 } else { 10115 genfn(tcg_res, tcg_op, tcg_zero, fpst); 10116 } 10117 if (is_scalar) { 10118 write_fp_sreg(s, rd, tcg_res); 10119 } else { 10120 write_vec_element_i32(s, tcg_res, rd, pass, size); 10121 } 10122 } 10123 tcg_temp_free_i32(tcg_res); 10124 tcg_temp_free_i32(tcg_op); 10125 if (!is_scalar) { 10126 clear_vec_high(s, is_q, rd); 10127 } 10128 } 10129 10130 tcg_temp_free_ptr(fpst); 10131 } 10132 10133 static void handle_2misc_reciprocal(DisasContext *s, int opcode, 10134 bool is_scalar, bool is_u, bool is_q, 10135 int size, int rn, int rd) 10136 { 10137 bool is_double = (size == 3); 10138 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 10139 10140 if (is_double) { 10141 TCGv_i64 tcg_op = tcg_temp_new_i64(); 10142 TCGv_i64 tcg_res = tcg_temp_new_i64(); 10143 int pass; 10144 10145 for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { 10146 read_vec_element(s, tcg_op, rn, pass, MO_64); 10147 switch (opcode) { 10148 case 0x3d: /* FRECPE */ 10149 gen_helper_recpe_f64(tcg_res, tcg_op, fpst); 10150 break; 10151 case 0x3f: /* FRECPX */ 10152 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst); 10153 break; 10154 case 0x7d: /* FRSQRTE */ 10155 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst); 10156 break; 10157 default: 10158 g_assert_not_reached(); 10159 } 10160 write_vec_element(s, tcg_res, rd, pass, MO_64); 10161 } 10162 tcg_temp_free_i64(tcg_res); 10163 tcg_temp_free_i64(tcg_op); 10164 clear_vec_high(s, !is_scalar, rd); 10165 } else { 10166 TCGv_i32 tcg_op = tcg_temp_new_i32(); 10167 TCGv_i32 tcg_res = tcg_temp_new_i32(); 10168 int pass, maxpasses; 10169 10170 if (is_scalar) { 10171 maxpasses = 1; 10172 } else { 10173 maxpasses = is_q ? 4 : 2; 10174 } 10175 10176 for (pass = 0; pass < maxpasses; pass++) { 10177 read_vec_element_i32(s, tcg_op, rn, pass, MO_32); 10178 10179 switch (opcode) { 10180 case 0x3c: /* URECPE */ 10181 gen_helper_recpe_u32(tcg_res, tcg_op); 10182 break; 10183 case 0x3d: /* FRECPE */ 10184 gen_helper_recpe_f32(tcg_res, tcg_op, fpst); 10185 break; 10186 case 0x3f: /* FRECPX */ 10187 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst); 10188 break; 10189 case 0x7d: /* FRSQRTE */ 10190 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst); 10191 break; 10192 default: 10193 g_assert_not_reached(); 10194 } 10195 10196 if (is_scalar) { 10197 write_fp_sreg(s, rd, tcg_res); 10198 } else { 10199 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 10200 } 10201 } 10202 tcg_temp_free_i32(tcg_res); 10203 tcg_temp_free_i32(tcg_op); 10204 if (!is_scalar) { 10205 clear_vec_high(s, is_q, rd); 10206 } 10207 } 10208 tcg_temp_free_ptr(fpst); 10209 } 10210 10211 static void handle_2misc_narrow(DisasContext *s, bool scalar, 10212 int opcode, bool u, bool is_q, 10213 int size, int rn, int rd) 10214 { 10215 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element 10216 * in the source becomes a size element in the destination). 10217 */ 10218 int pass; 10219 TCGv_i32 tcg_res[2]; 10220 int destelt = is_q ? 2 : 0; 10221 int passes = scalar ? 
1 : 2; 10222 10223 if (scalar) { 10224 tcg_res[1] = tcg_constant_i32(0); 10225 } 10226 10227 for (pass = 0; pass < passes; pass++) { 10228 TCGv_i64 tcg_op = tcg_temp_new_i64(); 10229 NeonGenNarrowFn *genfn = NULL; 10230 NeonGenNarrowEnvFn *genenvfn = NULL; 10231 10232 if (scalar) { 10233 read_vec_element(s, tcg_op, rn, pass, size + 1); 10234 } else { 10235 read_vec_element(s, tcg_op, rn, pass, MO_64); 10236 } 10237 tcg_res[pass] = tcg_temp_new_i32(); 10238 10239 switch (opcode) { 10240 case 0x12: /* XTN, SQXTUN */ 10241 { 10242 static NeonGenNarrowFn * const xtnfns[3] = { 10243 gen_helper_neon_narrow_u8, 10244 gen_helper_neon_narrow_u16, 10245 tcg_gen_extrl_i64_i32, 10246 }; 10247 static NeonGenNarrowEnvFn * const sqxtunfns[3] = { 10248 gen_helper_neon_unarrow_sat8, 10249 gen_helper_neon_unarrow_sat16, 10250 gen_helper_neon_unarrow_sat32, 10251 }; 10252 if (u) { 10253 genenvfn = sqxtunfns[size]; 10254 } else { 10255 genfn = xtnfns[size]; 10256 } 10257 break; 10258 } 10259 case 0x14: /* SQXTN, UQXTN */ 10260 { 10261 static NeonGenNarrowEnvFn * const fns[3][2] = { 10262 { gen_helper_neon_narrow_sat_s8, 10263 gen_helper_neon_narrow_sat_u8 }, 10264 { gen_helper_neon_narrow_sat_s16, 10265 gen_helper_neon_narrow_sat_u16 }, 10266 { gen_helper_neon_narrow_sat_s32, 10267 gen_helper_neon_narrow_sat_u32 }, 10268 }; 10269 genenvfn = fns[size][u]; 10270 break; 10271 } 10272 case 0x16: /* FCVTN, FCVTN2 */ 10273 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */ 10274 if (size == 2) { 10275 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env); 10276 } else { 10277 TCGv_i32 tcg_lo = tcg_temp_new_i32(); 10278 TCGv_i32 tcg_hi = tcg_temp_new_i32(); 10279 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 10280 TCGv_i32 ahp = get_ahp_flag(); 10281 10282 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op); 10283 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp); 10284 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp); 10285 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16); 10286 tcg_temp_free_i32(tcg_lo); 10287 tcg_temp_free_i32(tcg_hi); 10288 tcg_temp_free_ptr(fpst); 10289 tcg_temp_free_i32(ahp); 10290 } 10291 break; 10292 case 0x36: /* BFCVTN, BFCVTN2 */ 10293 { 10294 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 10295 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst); 10296 tcg_temp_free_ptr(fpst); 10297 } 10298 break; 10299 case 0x56: /* FCVTXN, FCVTXN2 */ 10300 /* 64 bit to 32 bit float conversion 10301 * with von Neumann rounding (round to odd) 10302 */ 10303 assert(size == 2); 10304 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env); 10305 break; 10306 default: 10307 g_assert_not_reached(); 10308 } 10309 10310 if (genfn) { 10311 genfn(tcg_res[pass], tcg_op); 10312 } else if (genenvfn) { 10313 genenvfn(tcg_res[pass], cpu_env, tcg_op); 10314 } 10315 10316 tcg_temp_free_i64(tcg_op); 10317 } 10318 10319 for (pass = 0; pass < 2; pass++) { 10320 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32); 10321 tcg_temp_free_i32(tcg_res[pass]); 10322 } 10323 clear_vec_high(s, is_q, rd); 10324 } 10325 10326 /* Remaining saturating accumulating ops */ 10327 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u, 10328 bool is_q, int size, int rn, int rd) 10329 { 10330 bool is_double = (size == 3); 10331 10332 if (is_double) { 10333 TCGv_i64 tcg_rn = tcg_temp_new_i64(); 10334 TCGv_i64 tcg_rd = tcg_temp_new_i64(); 10335 int pass; 10336 10337 for (pass = 0; pass < (is_scalar ? 
1 : 2); pass++) { 10338 read_vec_element(s, tcg_rn, rn, pass, MO_64); 10339 read_vec_element(s, tcg_rd, rd, pass, MO_64); 10340 10341 if (is_u) { /* USQADD */ 10342 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10343 } else { /* SUQADD */ 10344 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10345 } 10346 write_vec_element(s, tcg_rd, rd, pass, MO_64); 10347 } 10348 tcg_temp_free_i64(tcg_rd); 10349 tcg_temp_free_i64(tcg_rn); 10350 clear_vec_high(s, !is_scalar, rd); 10351 } else { 10352 TCGv_i32 tcg_rn = tcg_temp_new_i32(); 10353 TCGv_i32 tcg_rd = tcg_temp_new_i32(); 10354 int pass, maxpasses; 10355 10356 if (is_scalar) { 10357 maxpasses = 1; 10358 } else { 10359 maxpasses = is_q ? 4 : 2; 10360 } 10361 10362 for (pass = 0; pass < maxpasses; pass++) { 10363 if (is_scalar) { 10364 read_vec_element_i32(s, tcg_rn, rn, pass, size); 10365 read_vec_element_i32(s, tcg_rd, rd, pass, size); 10366 } else { 10367 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32); 10368 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32); 10369 } 10370 10371 if (is_u) { /* USQADD */ 10372 switch (size) { 10373 case 0: 10374 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10375 break; 10376 case 1: 10377 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10378 break; 10379 case 2: 10380 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10381 break; 10382 default: 10383 g_assert_not_reached(); 10384 } 10385 } else { /* SUQADD */ 10386 switch (size) { 10387 case 0: 10388 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10389 break; 10390 case 1: 10391 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10392 break; 10393 case 2: 10394 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd); 10395 break; 10396 default: 10397 g_assert_not_reached(); 10398 } 10399 } 10400 10401 if (is_scalar) { 10402 write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64); 10403 } 10404 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32); 10405 } 10406 tcg_temp_free_i32(tcg_rd); 10407 tcg_temp_free_i32(tcg_rn); 10408 clear_vec_high(s, is_q, rd); 10409 } 10410 } 10411 10412 /* AdvSIMD scalar two reg misc 10413 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 10414 * +-----+---+-----------+------+-----------+--------+-----+------+------+ 10415 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | 10416 * +-----+---+-----------+------+-----------+--------+-----+------+------+ 10417 */ 10418 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) 10419 { 10420 int rd = extract32(insn, 0, 5); 10421 int rn = extract32(insn, 5, 5); 10422 int opcode = extract32(insn, 12, 5); 10423 int size = extract32(insn, 22, 2); 10424 bool u = extract32(insn, 29, 1); 10425 bool is_fcvt = false; 10426 int rmode; 10427 TCGv_i32 tcg_rmode; 10428 TCGv_ptr tcg_fpstatus; 10429 10430 switch (opcode) { 10431 case 0x3: /* USQADD / SUQADD*/ 10432 if (!fp_access_check(s)) { 10433 return; 10434 } 10435 handle_2misc_satacc(s, true, u, false, size, rn, rd); 10436 return; 10437 case 0x7: /* SQABS / SQNEG */ 10438 break; 10439 case 0xa: /* CMLT */ 10440 if (u) { 10441 unallocated_encoding(s); 10442 return; 10443 } 10444 /* fall through */ 10445 case 0x8: /* CMGT, CMGE */ 10446 case 0x9: /* CMEQ, CMLE */ 10447 case 0xb: /* ABS, NEG */ 10448 if (size != 3) { 10449 unallocated_encoding(s); 10450 return; 10451 } 10452 break; 10453 case 0x12: /* SQXTUN */ 10454 if (!u) { 10455 unallocated_encoding(s); 10456 return; 10457 } 10458 /* fall through */ 10459 case 0x14: /* SQXTN, UQXTN 
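 * (saturating narrow, 2*size -> size; SQXTUN above is the
 * signed-to-unsigned variant)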
*/ 10460 if (size == 3) { 10461 unallocated_encoding(s); 10462 return; 10463 } 10464 if (!fp_access_check(s)) { 10465 return; 10466 } 10467 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd); 10468 return; 10469 case 0xc ... 0xf: 10470 case 0x16 ... 0x1d: 10471 case 0x1f: 10472 /* Floating point: U, size[1] and opcode indicate operation; 10473 * size[0] indicates single or double precision. 10474 */ 10475 opcode |= (extract32(size, 1, 1) << 5) | (u << 6); 10476 size = extract32(size, 0, 1) ? 3 : 2; 10477 switch (opcode) { 10478 case 0x2c: /* FCMGT (zero) */ 10479 case 0x2d: /* FCMEQ (zero) */ 10480 case 0x2e: /* FCMLT (zero) */ 10481 case 0x6c: /* FCMGE (zero) */ 10482 case 0x6d: /* FCMLE (zero) */ 10483 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd); 10484 return; 10485 case 0x1d: /* SCVTF */ 10486 case 0x5d: /* UCVTF */ 10487 { 10488 bool is_signed = (opcode == 0x1d); 10489 if (!fp_access_check(s)) { 10490 return; 10491 } 10492 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size); 10493 return; 10494 } 10495 case 0x3d: /* FRECPE */ 10496 case 0x3f: /* FRECPX */ 10497 case 0x7d: /* FRSQRTE */ 10498 if (!fp_access_check(s)) { 10499 return; 10500 } 10501 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd); 10502 return; 10503 case 0x1a: /* FCVTNS */ 10504 case 0x1b: /* FCVTMS */ 10505 case 0x3a: /* FCVTPS */ 10506 case 0x3b: /* FCVTZS */ 10507 case 0x5a: /* FCVTNU */ 10508 case 0x5b: /* FCVTMU */ 10509 case 0x7a: /* FCVTPU */ 10510 case 0x7b: /* FCVTZU */ 10511 is_fcvt = true; 10512 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); 10513 break; 10514 case 0x1c: /* FCVTAS */ 10515 case 0x5c: /* FCVTAU */ 10516 /* TIEAWAY doesn't fit in the usual rounding mode encoding */ 10517 is_fcvt = true; 10518 rmode = FPROUNDING_TIEAWAY; 10519 break; 10520 case 0x56: /* FCVTXN, FCVTXN2 */ 10521 if (size == 2) { 10522 unallocated_encoding(s); 10523 return; 10524 } 10525 if (!fp_access_check(s)) { 10526 return; 10527 } 10528 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd); 10529 return; 10530 default: 10531 unallocated_encoding(s); 10532 return; 10533 } 10534 break; 10535 default: 10536 unallocated_encoding(s); 10537 return; 10538 } 10539 10540 if (!fp_access_check(s)) { 10541 return; 10542 } 10543 10544 if (is_fcvt) { 10545 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); 10546 tcg_fpstatus = fpstatus_ptr(FPST_FPCR); 10547 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 10548 } else { 10549 tcg_rmode = NULL; 10550 tcg_fpstatus = NULL; 10551 } 10552 10553 if (size == 3) { 10554 TCGv_i64 tcg_rn = read_fp_dreg(s, rn); 10555 TCGv_i64 tcg_rd = tcg_temp_new_i64(); 10556 10557 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus); 10558 write_fp_dreg(s, rd, tcg_rd); 10559 tcg_temp_free_i64(tcg_rd); 10560 tcg_temp_free_i64(tcg_rn); 10561 } else { 10562 TCGv_i32 tcg_rn = tcg_temp_new_i32(); 10563 TCGv_i32 tcg_rd = tcg_temp_new_i32(); 10564 10565 read_vec_element_i32(s, tcg_rn, rn, 0, size); 10566 10567 switch (opcode) { 10568 case 0x7: /* SQABS, SQNEG */ 10569 { 10570 NeonGenOneOpEnvFn *genfn; 10571 static NeonGenOneOpEnvFn * const fns[3][2] = { 10572 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, 10573 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, 10574 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 }, 10575 }; 10576 genfn = fns[size][u]; 10577 genfn(tcg_rd, cpu_env, tcg_rn); 10578 break; 10579 } 10580 case 0x1a: /* FCVTNS */ 10581 case 0x1b: /* FCVTMS */ 10582 case 0x1c: /* FCVTAS */ 
10583 case 0x3a: /* FCVTPS */ 10584 case 0x3b: /* FCVTZS */ 10585 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0), 10586 tcg_fpstatus); 10587 break; 10588 case 0x5a: /* FCVTNU */ 10589 case 0x5b: /* FCVTMU */ 10590 case 0x5c: /* FCVTAU */ 10591 case 0x7a: /* FCVTPU */ 10592 case 0x7b: /* FCVTZU */ 10593 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0), 10594 tcg_fpstatus); 10595 break; 10596 default: 10597 g_assert_not_reached(); 10598 } 10599 10600 write_fp_sreg(s, rd, tcg_rd); 10601 tcg_temp_free_i32(tcg_rd); 10602 tcg_temp_free_i32(tcg_rn); 10603 } 10604 10605 if (is_fcvt) { 10606 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 10607 tcg_temp_free_i32(tcg_rmode); 10608 tcg_temp_free_ptr(tcg_fpstatus); 10609 } 10610 } 10611 10612 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ 10613 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, 10614 int immh, int immb, int opcode, int rn, int rd) 10615 { 10616 int size = 32 - clz32(immh) - 1; 10617 int immhb = immh << 3 | immb; 10618 int shift = 2 * (8 << size) - immhb; 10619 GVecGen2iFn *gvec_fn; 10620 10621 if (extract32(immh, 3, 1) && !is_q) { 10622 unallocated_encoding(s); 10623 return; 10624 } 10625 tcg_debug_assert(size <= 3); 10626 10627 if (!fp_access_check(s)) { 10628 return; 10629 } 10630 10631 switch (opcode) { 10632 case 0x02: /* SSRA / USRA (accumulate) */ 10633 gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra; 10634 break; 10635 10636 case 0x08: /* SRI */ 10637 gvec_fn = gen_gvec_sri; 10638 break; 10639 10640 case 0x00: /* SSHR / USHR */ 10641 if (is_u) { 10642 if (shift == 8 << size) { 10643 /* Shift count the same size as element size produces zero. */ 10644 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd), 10645 is_q ? 16 : 8, vec_full_reg_size(s), 0); 10646 return; 10647 } 10648 gvec_fn = tcg_gen_gvec_shri; 10649 } else { 10650 /* Shift count the same size as element size produces all sign. */ 10651 if (shift == 8 << size) { 10652 shift -= 1; 10653 } 10654 gvec_fn = tcg_gen_gvec_sari; 10655 } 10656 break; 10657 10658 case 0x04: /* SRSHR / URSHR (rounding) */ 10659 gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr; 10660 break; 10661 10662 case 0x06: /* SRSRA / URSRA (accum + rounding) */ 10663 gvec_fn = is_u ? 
gen_gvec_ursra : gen_gvec_srsra; 10664 break; 10665 10666 default: 10667 g_assert_not_reached(); 10668 } 10669 10670 gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size); 10671 } 10672 10673 /* SHL/SLI - Vector shift left */ 10674 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, 10675 int immh, int immb, int opcode, int rn, int rd) 10676 { 10677 int size = 32 - clz32(immh) - 1; 10678 int immhb = immh << 3 | immb; 10679 int shift = immhb - (8 << size); 10680 10681 /* Range of size is limited by decode: immh is a non-zero 4 bit field */ 10682 assert(size >= 0 && size <= 3); 10683 10684 if (extract32(immh, 3, 1) && !is_q) { 10685 unallocated_encoding(s); 10686 return; 10687 } 10688 10689 if (!fp_access_check(s)) { 10690 return; 10691 } 10692 10693 if (insert) { 10694 gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size); 10695 } else { 10696 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); 10697 } 10698 } 10699 10700 /* SSHLL/USHLL - Vector shift left with widening */ 10701 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u, 10702 int immh, int immb, int opcode, int rn, int rd) 10703 { 10704 int size = 32 - clz32(immh) - 1; 10705 int immhb = immh << 3 | immb; 10706 int shift = immhb - (8 << size); 10707 int dsize = 64; 10708 int esize = 8 << size; 10709 int elements = dsize / esize; 10710 TCGv_i64 tcg_rn = new_tmp_a64(s); 10711 TCGv_i64 tcg_rd = new_tmp_a64(s); 10712 int i; 10713 10714 if (size >= 3) { 10715 unallocated_encoding(s); 10716 return; 10717 } 10718 10719 if (!fp_access_check(s)) { 10720 return; 10721 } 10722 10723 /* For the LL variants the store is larger than the load, 10724 * so if rd == rn we would overwrite parts of our input. 10725 * Therefore load everything up front and use shifts in the main loop. 10726 */ 10727 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64); 10728 10729 for (i = 0; i < elements; i++) { 10730 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize); 10731 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0); 10732 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift); 10733 write_vec_element(s, tcg_rd, rd, i, size + 1); 10734 } 10735 } 10736 10737 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */ 10738 static void handle_vec_simd_shrn(DisasContext *s, bool is_q, 10739 int immh, int immb, int opcode, int rn, int rd) 10740 { 10741 int immhb = immh << 3 | immb; 10742 int size = 32 - clz32(immh) - 1; 10743 int dsize = 64; 10744 int esize = 8 << size; 10745 int elements = dsize / esize; 10746 int shift = (2 * esize) - immhb; 10747 bool round = extract32(opcode, 0, 1); 10748 TCGv_i64 tcg_rn, tcg_rd, tcg_final; 10749 TCGv_i64 tcg_round; 10750 int i; 10751 10752 if (extract32(immh, 3, 1)) { 10753 unallocated_encoding(s); 10754 return; 10755 } 10756 10757 if (!fp_access_check(s)) { 10758 return; 10759 } 10760 10761 tcg_rn = tcg_temp_new_i64(); 10762 tcg_rd = tcg_temp_new_i64(); 10763 tcg_final = tcg_temp_new_i64(); 10764 read_vec_element(s, tcg_final, rd, is_q ?
1 : 0, MO_64); 10765 10766 if (round) { 10767 tcg_round = tcg_constant_i64(1ULL << (shift - 1)); 10768 } else { 10769 tcg_round = NULL; 10770 } 10771 10772 for (i = 0; i < elements; i++) { 10773 read_vec_element(s, tcg_rn, rn, i, size + 1); 10774 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round, 10775 false, true, size + 1, shift); 10776 10777 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize); 10778 } 10779 10780 if (!is_q) { 10781 write_vec_element(s, tcg_final, rd, 0, MO_64); 10782 } else { 10783 write_vec_element(s, tcg_final, rd, 1, MO_64); 10784 } 10785 tcg_temp_free_i64(tcg_rn); 10786 tcg_temp_free_i64(tcg_rd); 10787 tcg_temp_free_i64(tcg_final); 10788 10789 clear_vec_high(s, is_q, rd); 10790 } 10791 10792 10793 /* AdvSIMD shift by immediate 10794 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0 10795 * +---+---+---+-------------+------+------+--------+---+------+------+ 10796 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd | 10797 * +---+---+---+-------------+------+------+--------+---+------+------+ 10798 */ 10799 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn) 10800 { 10801 int rd = extract32(insn, 0, 5); 10802 int rn = extract32(insn, 5, 5); 10803 int opcode = extract32(insn, 11, 5); 10804 int immb = extract32(insn, 16, 3); 10805 int immh = extract32(insn, 19, 4); 10806 bool is_u = extract32(insn, 29, 1); 10807 bool is_q = extract32(insn, 30, 1); 10808 10809 /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */ 10810 assert(immh != 0); 10811 10812 switch (opcode) { 10813 case 0x08: /* SRI */ 10814 if (!is_u) { 10815 unallocated_encoding(s); 10816 return; 10817 } 10818 /* fall through */ 10819 case 0x00: /* SSHR / USHR */ 10820 case 0x02: /* SSRA / USRA (accumulate) */ 10821 case 0x04: /* SRSHR / URSHR (rounding) */ 10822 case 0x06: /* SRSRA / URSRA (accum + rounding) */ 10823 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd); 10824 break; 10825 case 0x0a: /* SHL / SLI */ 10826 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd); 10827 break; 10828 case 0x10: /* SHRN / SQSHRUN */ 10829 case 0x11: /* RSHRN / SQRSHRUN */ 10830 if (is_u) { 10831 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb, 10832 opcode, rn, rd); 10833 } else { 10834 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd); 10835 } 10836 break; 10837 case 0x12: /* SQSHRN / UQSHRN */ 10838 case 0x13: /* SQRSHRN / UQRSHRN */ 10839 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb, 10840 opcode, rn, rd); 10841 break; 10842 case 0x14: /* SSHLL / USHLL */ 10843 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd); 10844 break; 10845 case 0x1c: /* SCVTF / UCVTF */ 10846 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb, 10847 opcode, rn, rd); 10848 break; 10849 case 0xc: /* SQSHLU */ 10850 if (!is_u) { 10851 unallocated_encoding(s); 10852 return; 10853 } 10854 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd); 10855 break; 10856 case 0xe: /* SQSHL, UQSHL */ 10857 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd); 10858 break; 10859 case 0x1f: /* FCVTZS / FCVTZU */ 10860 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd); 10861 return; 10862 default: 10863 unallocated_encoding(s); 10864 return; 10865 } 10866 } 10867 10868 /* Generate code to do a "long" addition or subtraction, i.e. one done in 10869 * TCGv_i64 on vector lanes twice the width specified by size.
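 * For example, with size == 1 each TCGv_i64 holds two 32-bit lanes (the
 * widened 16-bit elements), and gen_helper_neon_addl_u32 adds or subtracts
 * them pairwise with no carry between lanes; with size == 2 this
 * degenerates to a single plain 64-bit add/subtract.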
10870 */ 10871 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res, 10872 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2) 10873 { 10874 static NeonGenTwo64OpFn * const fns[3][2] = { 10875 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 }, 10876 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 }, 10877 { tcg_gen_add_i64, tcg_gen_sub_i64 }, 10878 }; 10879 NeonGenTwo64OpFn *genfn; 10880 assert(size < 3); 10881 10882 genfn = fns[size][is_sub]; 10883 genfn(tcg_res, tcg_op1, tcg_op2); 10884 } 10885 10886 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, 10887 int opcode, int rd, int rn, int rm) 10888 { 10889 /* 3-reg-different widening insns: 64 x 64 -> 128 */ 10890 TCGv_i64 tcg_res[2]; 10891 int pass, accop; 10892 10893 tcg_res[0] = tcg_temp_new_i64(); 10894 tcg_res[1] = tcg_temp_new_i64(); 10895 10896 /* Does this op do an adding accumulate, a subtracting accumulate, 10897 * or no accumulate at all? 10898 */ 10899 switch (opcode) { 10900 case 5: 10901 case 8: 10902 case 9: 10903 accop = 1; 10904 break; 10905 case 10: 10906 case 11: 10907 accop = -1; 10908 break; 10909 default: 10910 accop = 0; 10911 break; 10912 } 10913 10914 if (accop != 0) { 10915 read_vec_element(s, tcg_res[0], rd, 0, MO_64); 10916 read_vec_element(s, tcg_res[1], rd, 1, MO_64); 10917 } 10918 10919 /* size == 2 means two 32x32->64 operations; this is worth special 10920 * casing because we can generally handle it inline. 10921 */ 10922 if (size == 2) { 10923 for (pass = 0; pass < 2; pass++) { 10924 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 10925 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 10926 TCGv_i64 tcg_passres; 10927 MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN); 10928 10929 int elt = pass + is_q * 2; 10930 10931 read_vec_element(s, tcg_op1, rn, elt, memop); 10932 read_vec_element(s, tcg_op2, rm, elt, memop); 10933 10934 if (accop == 0) { 10935 tcg_passres = tcg_res[pass]; 10936 } else { 10937 tcg_passres = tcg_temp_new_i64(); 10938 } 10939 10940 switch (opcode) { 10941 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 10942 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2); 10943 break; 10944 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 10945 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2); 10946 break; 10947 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 10948 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 10949 { 10950 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64(); 10951 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64(); 10952 10953 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2); 10954 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1); 10955 tcg_gen_movcond_i64(is_u ? 
TCG_COND_GEU : TCG_COND_GE, 10956 tcg_passres, 10957 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2); 10958 tcg_temp_free_i64(tcg_tmp1); 10959 tcg_temp_free_i64(tcg_tmp2); 10960 break; 10961 } 10962 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 10963 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 10964 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ 10965 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2); 10966 break; 10967 case 9: /* SQDMLAL, SQDMLAL2 */ 10968 case 11: /* SQDMLSL, SQDMLSL2 */ 10969 case 13: /* SQDMULL, SQDMULL2 */ 10970 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2); 10971 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env, 10972 tcg_passres, tcg_passres); 10973 break; 10974 default: 10975 g_assert_not_reached(); 10976 } 10977 10978 if (opcode == 9 || opcode == 11) { 10979 /* saturating accumulate ops */ 10980 if (accop < 0) { 10981 tcg_gen_neg_i64(tcg_passres, tcg_passres); 10982 } 10983 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env, 10984 tcg_res[pass], tcg_passres); 10985 } else if (accop > 0) { 10986 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 10987 } else if (accop < 0) { 10988 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 10989 } 10990 10991 if (accop != 0) { 10992 tcg_temp_free_i64(tcg_passres); 10993 } 10994 10995 tcg_temp_free_i64(tcg_op1); 10996 tcg_temp_free_i64(tcg_op2); 10997 } 10998 } else { 10999 /* size 0 or 1, generally helper functions */ 11000 for (pass = 0; pass < 2; pass++) { 11001 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 11002 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 11003 TCGv_i64 tcg_passres; 11004 int elt = pass + is_q * 2; 11005 11006 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32); 11007 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32); 11008 11009 if (accop == 0) { 11010 tcg_passres = tcg_res[pass]; 11011 } else { 11012 tcg_passres = tcg_temp_new_i64(); 11013 } 11014 11015 switch (opcode) { 11016 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 11017 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 11018 { 11019 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64(); 11020 static NeonGenWidenFn * const widenfns[2][2] = { 11021 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, 11022 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, 11023 }; 11024 NeonGenWidenFn *widenfn = widenfns[size][is_u]; 11025 11026 widenfn(tcg_op2_64, tcg_op2); 11027 widenfn(tcg_passres, tcg_op1); 11028 gen_neon_addl(size, (opcode == 2), tcg_passres, 11029 tcg_passres, tcg_op2_64); 11030 tcg_temp_free_i64(tcg_op2_64); 11031 break; 11032 } 11033 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 11034 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 11035 if (size == 0) { 11036 if (is_u) { 11037 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2); 11038 } else { 11039 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2); 11040 } 11041 } else { 11042 if (is_u) { 11043 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2); 11044 } else { 11045 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2); 11046 } 11047 } 11048 break; 11049 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 11050 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 11051 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */ 11052 if (size == 0) { 11053 if (is_u) { 11054 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2); 11055 } else { 11056 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2); 11057 } 11058 } else { 11059 if (is_u) { 11060 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2); 11061 } else { 11062 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2); 11063 } 11064 } 11065 break; 11066 case 9: 
/* SQDMLAL, SQDMLAL2 */ 11067 case 11: /* SQDMLSL, SQDMLSL2 */ 11068 case 13: /* SQDMULL, SQDMULL2 */ 11069 assert(size == 1); 11070 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2); 11071 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env, 11072 tcg_passres, tcg_passres); 11073 break; 11074 default: 11075 g_assert_not_reached(); 11076 } 11077 tcg_temp_free_i32(tcg_op1); 11078 tcg_temp_free_i32(tcg_op2); 11079 11080 if (accop != 0) { 11081 if (opcode == 9 || opcode == 11) { 11082 /* saturating accumulate ops */ 11083 if (accop < 0) { 11084 gen_helper_neon_negl_u32(tcg_passres, tcg_passres); 11085 } 11086 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env, 11087 tcg_res[pass], 11088 tcg_passres); 11089 } else { 11090 gen_neon_addl(size, (accop < 0), tcg_res[pass], 11091 tcg_res[pass], tcg_passres); 11092 } 11093 tcg_temp_free_i64(tcg_passres); 11094 } 11095 } 11096 } 11097 11098 write_vec_element(s, tcg_res[0], rd, 0, MO_64); 11099 write_vec_element(s, tcg_res[1], rd, 1, MO_64); 11100 tcg_temp_free_i64(tcg_res[0]); 11101 tcg_temp_free_i64(tcg_res[1]); 11102 } 11103 11104 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size, 11105 int opcode, int rd, int rn, int rm) 11106 { 11107 TCGv_i64 tcg_res[2]; 11108 int part = is_q ? 2 : 0; 11109 int pass; 11110 11111 for (pass = 0; pass < 2; pass++) { 11112 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 11113 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 11114 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64(); 11115 static NeonGenWidenFn * const widenfns[3][2] = { 11116 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 }, 11117 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 }, 11118 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 }, 11119 }; 11120 NeonGenWidenFn *widenfn = widenfns[size][is_u]; 11121 11122 read_vec_element(s, tcg_op1, rn, pass, MO_64); 11123 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32); 11124 widenfn(tcg_op2_wide, tcg_op2); 11125 tcg_temp_free_i32(tcg_op2); 11126 tcg_res[pass] = tcg_temp_new_i64(); 11127 gen_neon_addl(size, (opcode == 3), 11128 tcg_res[pass], tcg_op1, tcg_op2_wide); 11129 tcg_temp_free_i64(tcg_op1); 11130 tcg_temp_free_i64(tcg_op2_wide); 11131 } 11132 11133 for (pass = 0; pass < 2; pass++) { 11134 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 11135 tcg_temp_free_i64(tcg_res[pass]); 11136 } 11137 } 11138 11139 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in) 11140 { 11141 tcg_gen_addi_i64(in, in, 1U << 31); 11142 tcg_gen_extrh_i64_i32(res, in); 11143 } 11144 11145 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, 11146 int opcode, int rd, int rn, int rm) 11147 { 11148 TCGv_i32 tcg_res[2]; 11149 int part = is_q ? 
2 : 0; 11150 int pass; 11151 11152 for (pass = 0; pass < 2; pass++) { 11153 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 11154 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 11155 TCGv_i64 tcg_wideres = tcg_temp_new_i64(); 11156 static NeonGenNarrowFn * const narrowfns[3][2] = { 11157 { gen_helper_neon_narrow_high_u8, 11158 gen_helper_neon_narrow_round_high_u8 }, 11159 { gen_helper_neon_narrow_high_u16, 11160 gen_helper_neon_narrow_round_high_u16 }, 11161 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 }, 11162 }; 11163 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u]; 11164 11165 read_vec_element(s, tcg_op1, rn, pass, MO_64); 11166 read_vec_element(s, tcg_op2, rm, pass, MO_64); 11167 11168 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2); 11169 11170 tcg_temp_free_i64(tcg_op1); 11171 tcg_temp_free_i64(tcg_op2); 11172 11173 tcg_res[pass] = tcg_temp_new_i32(); 11174 gennarrow(tcg_res[pass], tcg_wideres); 11175 tcg_temp_free_i64(tcg_wideres); 11176 } 11177 11178 for (pass = 0; pass < 2; pass++) { 11179 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32); 11180 tcg_temp_free_i32(tcg_res[pass]); 11181 } 11182 clear_vec_high(s, is_q, rd); 11183 } 11184 11185 /* AdvSIMD three different 11186 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 11187 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ 11188 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd | 11189 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ 11190 */ 11191 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) 11192 { 11193 /* Instructions in this group fall into three basic classes 11194 * (in each case with the operation working on each element in 11195 * the input vectors): 11196 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra 11197 * 128 bit input) 11198 * (2) wide 64 x 128 -> 128 11199 * (3) narrowing 128 x 128 -> 64 11200 * Here we do initial decode, catch unallocated cases and 11201 * dispatch to separate functions for each class. 11202 */ 11203 int is_q = extract32(insn, 30, 1); 11204 int is_u = extract32(insn, 29, 1); 11205 int size = extract32(insn, 22, 2); 11206 int opcode = extract32(insn, 12, 4); 11207 int rm = extract32(insn, 16, 5); 11208 int rn = extract32(insn, 5, 5); 11209 int rd = extract32(insn, 0, 5); 11210 11211 switch (opcode) { 11212 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */ 11213 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */ 11214 /* 64 x 128 -> 128 */ 11215 if (size == 3) { 11216 unallocated_encoding(s); 11217 return; 11218 } 11219 if (!fp_access_check(s)) { 11220 return; 11221 } 11222 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm); 11223 break; 11224 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */ 11225 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */ 11226 /* 128 x 128 -> 64 */ 11227 if (size == 3) { 11228 unallocated_encoding(s); 11229 return; 11230 } 11231 if (!fp_access_check(s)) { 11232 return; 11233 } 11234 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); 11235 break; 11236 case 14: /* PMULL, PMULL2 */ 11237 if (is_u) { 11238 unallocated_encoding(s); 11239 return; 11240 } 11241 switch (size) { 11242 case 0: /* PMULL.P8 */ 11243 if (!fp_access_check(s)) { 11244 return; 11245 } 11246 /* The Q field specifies lo/hi half input for this insn. 
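 * (Q == 0 is PMULL, which uses the low 64 bits of Vn/Vm; Q == 1 is
 * PMULL2, which uses the high 64 bits.)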
*/ 11247 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, 11248 gen_helper_neon_pmull_h); 11249 break; 11250 11251 case 3: /* PMULL.P64 */ 11252 if (!dc_isar_feature(aa64_pmull, s)) { 11253 unallocated_encoding(s); 11254 return; 11255 } 11256 if (!fp_access_check(s)) { 11257 return; 11258 } 11259 /* The Q field specifies lo/hi half input for this insn. */ 11260 gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, 11261 gen_helper_gvec_pmull_q); 11262 break; 11263 11264 default: 11265 unallocated_encoding(s); 11266 break; 11267 } 11268 return; 11269 case 9: /* SQDMLAL, SQDMLAL2 */ 11270 case 11: /* SQDMLSL, SQDMLSL2 */ 11271 case 13: /* SQDMULL, SQDMULL2 */ 11272 if (is_u || size == 0) { 11273 unallocated_encoding(s); 11274 return; 11275 } 11276 /* fall through */ 11277 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */ 11278 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */ 11279 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */ 11280 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */ 11281 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 11282 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 11283 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */ 11284 /* 64 x 64 -> 128 */ 11285 if (size == 3) { 11286 unallocated_encoding(s); 11287 return; 11288 } 11289 if (!fp_access_check(s)) { 11290 return; 11291 } 11292 11293 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm); 11294 break; 11295 default: 11296 /* opcode 15 not allocated */ 11297 unallocated_encoding(s); 11298 break; 11299 } 11300 } 11301 11302 /* Logic op (opcode == 3) subgroup of C3.6.16. */ 11303 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) 11304 { 11305 int rd = extract32(insn, 0, 5); 11306 int rn = extract32(insn, 5, 5); 11307 int rm = extract32(insn, 16, 5); 11308 int size = extract32(insn, 22, 2); 11309 bool is_u = extract32(insn, 29, 1); 11310 bool is_q = extract32(insn, 30, 1); 11311 11312 if (!fp_access_check(s)) { 11313 return; 11314 } 11315 11316 switch (size + 4 * is_u) { 11317 case 0: /* AND */ 11318 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0); 11319 return; 11320 case 1: /* BIC */ 11321 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0); 11322 return; 11323 case 2: /* ORR */ 11324 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0); 11325 return; 11326 case 3: /* ORN */ 11327 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0); 11328 return; 11329 case 4: /* EOR */ 11330 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0); 11331 return; 11332 11333 case 5: /* BSL, bitwise select */ 11334 gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0); 11335 return; 11336 case 6: /* BIT, bitwise insert if true */ 11337 gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0); 11338 return; 11339 case 7: /* BIF, bitwise insert if false */ 11340 gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0); 11341 return; 11342 11343 default: 11344 g_assert_not_reached(); 11345 } 11346 } 11347 11348 /* Pairwise op subgroup of C3.6.16. 11349 * 11350 * This is called directly for the integer pairwise ops, and from 11351 * disas_simd_3same_float for the float pairwise operations, where the opcode and size are calculated differently.
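 * For example, a 128-bit ADDP with 32-bit elements computes
 * { Vn[0]+Vn[1], Vn[2]+Vn[3], Vm[0]+Vm[1], Vm[2]+Vm[3] }.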
11352 */ 11353 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode, 11354 int size, int rn, int rm, int rd) 11355 { 11356 TCGv_ptr fpst; 11357 int pass; 11358 11359 /* Floating point operations need fpst */ 11360 if (opcode >= 0x58) { 11361 fpst = fpstatus_ptr(FPST_FPCR); 11362 } else { 11363 fpst = NULL; 11364 } 11365 11366 if (!fp_access_check(s)) { 11367 return; 11368 } 11369 11370 /* These operations work on the concatenated rm:rn, with each pair of 11371 * adjacent elements being operated on to produce an element in the result. 11372 */ 11373 if (size == 3) { 11374 TCGv_i64 tcg_res[2]; 11375 11376 for (pass = 0; pass < 2; pass++) { 11377 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 11378 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 11379 int passreg = (pass == 0) ? rn : rm; 11380 11381 read_vec_element(s, tcg_op1, passreg, 0, MO_64); 11382 read_vec_element(s, tcg_op2, passreg, 1, MO_64); 11383 tcg_res[pass] = tcg_temp_new_i64(); 11384 11385 switch (opcode) { 11386 case 0x17: /* ADDP */ 11387 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2); 11388 break; 11389 case 0x58: /* FMAXNMP */ 11390 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11391 break; 11392 case 0x5a: /* FADDP */ 11393 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11394 break; 11395 case 0x5e: /* FMAXP */ 11396 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11397 break; 11398 case 0x78: /* FMINNMP */ 11399 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11400 break; 11401 case 0x7e: /* FMINP */ 11402 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11403 break; 11404 default: 11405 g_assert_not_reached(); 11406 } 11407 11408 tcg_temp_free_i64(tcg_op1); 11409 tcg_temp_free_i64(tcg_op2); 11410 } 11411 11412 for (pass = 0; pass < 2; pass++) { 11413 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 11414 tcg_temp_free_i64(tcg_res[pass]); 11415 } 11416 } else { 11417 int maxpass = is_q ? 4 : 2; 11418 TCGv_i32 tcg_res[4]; 11419 11420 for (pass = 0; pass < maxpass; pass++) { 11421 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 11422 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 11423 NeonGenTwoOpFn *genfn = NULL; 11424 int passreg = pass < (maxpass / 2) ? rn : rm; 11425 int passelt = (is_q && (pass & 1)) ? 
2 : 0; 11426 11427 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32); 11428 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32); 11429 tcg_res[pass] = tcg_temp_new_i32(); 11430 11431 switch (opcode) { 11432 case 0x17: /* ADDP */ 11433 { 11434 static NeonGenTwoOpFn * const fns[3] = { 11435 gen_helper_neon_padd_u8, 11436 gen_helper_neon_padd_u16, 11437 tcg_gen_add_i32, 11438 }; 11439 genfn = fns[size]; 11440 break; 11441 } 11442 case 0x14: /* SMAXP, UMAXP */ 11443 { 11444 static NeonGenTwoOpFn * const fns[3][2] = { 11445 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 }, 11446 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 }, 11447 { tcg_gen_smax_i32, tcg_gen_umax_i32 }, 11448 }; 11449 genfn = fns[size][u]; 11450 break; 11451 } 11452 case 0x15: /* SMINP, UMINP */ 11453 { 11454 static NeonGenTwoOpFn * const fns[3][2] = { 11455 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 }, 11456 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 }, 11457 { tcg_gen_smin_i32, tcg_gen_umin_i32 }, 11458 }; 11459 genfn = fns[size][u]; 11460 break; 11461 } 11462 /* The FP operations are all on single floats (32 bit) */ 11463 case 0x58: /* FMAXNMP */ 11464 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11465 break; 11466 case 0x5a: /* FADDP */ 11467 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11468 break; 11469 case 0x5e: /* FMAXP */ 11470 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11471 break; 11472 case 0x78: /* FMINNMP */ 11473 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11474 break; 11475 case 0x7e: /* FMINP */ 11476 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst); 11477 break; 11478 default: 11479 g_assert_not_reached(); 11480 } 11481 11482 /* FP ops called directly, otherwise call now */ 11483 if (genfn) { 11484 genfn(tcg_res[pass], tcg_op1, tcg_op2); 11485 } 11486 11487 tcg_temp_free_i32(tcg_op1); 11488 tcg_temp_free_i32(tcg_op2); 11489 } 11490 11491 for (pass = 0; pass < maxpass; pass++) { 11492 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); 11493 tcg_temp_free_i32(tcg_res[pass]); 11494 } 11495 clear_vec_high(s, is_q, rd); 11496 } 11497 11498 if (fpst) { 11499 tcg_temp_free_ptr(fpst); 11500 } 11501 } 11502 11503 /* Floating point op subgroup of C3.6.16. */ 11504 static void disas_simd_3same_float(DisasContext *s, uint32_t insn) 11505 { 11506 /* For floating point ops, the U, size[1] and opcode bits 11507 * together indicate the operation. size[0] indicates single 11508 * or double. 11509 */ 11510 int fpopcode = extract32(insn, 11, 5) 11511 | (extract32(insn, 23, 1) << 5) 11512 | (extract32(insn, 29, 1) << 6); 11513 int is_q = extract32(insn, 30, 1); 11514 int size = extract32(insn, 22, 1); 11515 int rm = extract32(insn, 16, 5); 11516 int rn = extract32(insn, 5, 5); 11517 int rd = extract32(insn, 0, 5); 11518 11519 int datasize = is_q ? 128 : 64; 11520 int esize = 32 << size; 11521 int elements = datasize / esize; 11522 11523 if (size == 1 && !is_q) { 11524 unallocated_encoding(s); 11525 return; 11526 } 11527 11528 switch (fpopcode) { 11529 case 0x58: /* FMAXNMP */ 11530 case 0x5a: /* FADDP */ 11531 case 0x5e: /* FMAXP */ 11532 case 0x78: /* FMINNMP */ 11533 case 0x7e: /* FMINP */ 11534 if (size && !is_q) { 11535 unallocated_encoding(s); 11536 return; 11537 } 11538 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? 
MO_64 : MO_32, 11539 rn, rm, rd); 11540 return; 11541 case 0x1b: /* FMULX */ 11542 case 0x1f: /* FRECPS */ 11543 case 0x3f: /* FRSQRTS */ 11544 case 0x5d: /* FACGE */ 11545 case 0x7d: /* FACGT */ 11546 case 0x19: /* FMLA */ 11547 case 0x39: /* FMLS */ 11548 case 0x18: /* FMAXNM */ 11549 case 0x1a: /* FADD */ 11550 case 0x1c: /* FCMEQ */ 11551 case 0x1e: /* FMAX */ 11552 case 0x38: /* FMINNM */ 11553 case 0x3a: /* FSUB */ 11554 case 0x3e: /* FMIN */ 11555 case 0x5b: /* FMUL */ 11556 case 0x5c: /* FCMGE */ 11557 case 0x5f: /* FDIV */ 11558 case 0x7a: /* FABD */ 11559 case 0x7c: /* FCMGT */ 11560 if (!fp_access_check(s)) { 11561 return; 11562 } 11563 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm); 11564 return; 11565 11566 case 0x1d: /* FMLAL */ 11567 case 0x3d: /* FMLSL */ 11568 case 0x59: /* FMLAL2 */ 11569 case 0x79: /* FMLSL2 */ 11570 if (size & 1 || !dc_isar_feature(aa64_fhm, s)) { 11571 unallocated_encoding(s); 11572 return; 11573 } 11574 if (fp_access_check(s)) { 11575 int is_s = extract32(insn, 23, 1); 11576 int is_2 = extract32(insn, 29, 1); 11577 int data = (is_2 << 1) | is_s; 11578 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 11579 vec_full_reg_offset(s, rn), 11580 vec_full_reg_offset(s, rm), cpu_env, 11581 is_q ? 16 : 8, vec_full_reg_size(s), 11582 data, gen_helper_gvec_fmlal_a64); 11583 } 11584 return; 11585 11586 default: 11587 unallocated_encoding(s); 11588 return; 11589 } 11590 } 11591 11592 /* Integer op subgroup of C3.6.16. */ 11593 static void disas_simd_3same_int(DisasContext *s, uint32_t insn) 11594 { 11595 int is_q = extract32(insn, 30, 1); 11596 int u = extract32(insn, 29, 1); 11597 int size = extract32(insn, 22, 2); 11598 int opcode = extract32(insn, 11, 5); 11599 int rm = extract32(insn, 16, 5); 11600 int rn = extract32(insn, 5, 5); 11601 int rd = extract32(insn, 0, 5); 11602 int pass; 11603 TCGCond cond; 11604 11605 switch (opcode) { 11606 case 0x13: /* MUL, PMUL */ 11607 if (u && size != 0) { 11608 unallocated_encoding(s); 11609 return; 11610 } 11611 /* fall through */ 11612 case 0x0: /* SHADD, UHADD */ 11613 case 0x2: /* SRHADD, URHADD */ 11614 case 0x4: /* SHSUB, UHSUB */ 11615 case 0xc: /* SMAX, UMAX */ 11616 case 0xd: /* SMIN, UMIN */ 11617 case 0xe: /* SABD, UABD */ 11618 case 0xf: /* SABA, UABA */ 11619 case 0x12: /* MLA, MLS */ 11620 if (size == 3) { 11621 unallocated_encoding(s); 11622 return; 11623 } 11624 break; 11625 case 0x16: /* SQDMULH, SQRDMULH */ 11626 if (size == 0 || size == 3) { 11627 unallocated_encoding(s); 11628 return; 11629 } 11630 break; 11631 default: 11632 if (size == 3 && !is_q) { 11633 unallocated_encoding(s); 11634 return; 11635 } 11636 break; 11637 } 11638 11639 if (!fp_access_check(s)) { 11640 return; 11641 } 11642 11643 switch (opcode) { 11644 case 0x01: /* SQADD, UQADD */ 11645 if (u) { 11646 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size); 11647 } else { 11648 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size); 11649 } 11650 return; 11651 case 0x05: /* SQSUB, UQSUB */ 11652 if (u) { 11653 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size); 11654 } else { 11655 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size); 11656 } 11657 return; 11658 case 0x08: /* SSHL, USHL */ 11659 if (u) { 11660 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size); 11661 } else { 11662 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size); 11663 } 11664 return; 11665 case 0x0c: /* SMAX, UMAX */ 11666 if (u) { 11667 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size); 11668 } else { 11669 
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size); 11670 } 11671 return; 11672 case 0x0d: /* SMIN, UMIN */ 11673 if (u) { 11674 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size); 11675 } else { 11676 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size); 11677 } 11678 return; 11679 case 0xe: /* SABD, UABD */ 11680 if (u) { 11681 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size); 11682 } else { 11683 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size); 11684 } 11685 return; 11686 case 0xf: /* SABA, UABA */ 11687 if (u) { 11688 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size); 11689 } else { 11690 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size); 11691 } 11692 return; 11693 case 0x10: /* ADD, SUB */ 11694 if (u) { 11695 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size); 11696 } else { 11697 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size); 11698 } 11699 return; 11700 case 0x13: /* MUL, PMUL */ 11701 if (!u) { /* MUL */ 11702 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size); 11703 } else { /* PMUL */ 11704 gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b); 11705 } 11706 return; 11707 case 0x12: /* MLA, MLS */ 11708 if (u) { 11709 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size); 11710 } else { 11711 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size); 11712 } 11713 return; 11714 case 0x16: /* SQDMULH, SQRDMULH */ 11715 { 11716 static gen_helper_gvec_3_ptr * const fns[2][2] = { 11717 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h }, 11718 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s }, 11719 }; 11720 gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]); 11721 } 11722 return; 11723 case 0x11: 11724 if (!u) { /* CMTST */ 11725 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size); 11726 return; 11727 } 11728 /* else CMEQ */ 11729 cond = TCG_COND_EQ; 11730 goto do_gvec_cmp; 11731 case 0x06: /* CMGT, CMHI */ 11732 cond = u ? TCG_COND_GTU : TCG_COND_GT; 11733 goto do_gvec_cmp; 11734 case 0x07: /* CMGE, CMHS */ 11735 cond = u ? TCG_COND_GEU : TCG_COND_GE; 11736 do_gvec_cmp: 11737 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd), 11738 vec_full_reg_offset(s, rn), 11739 vec_full_reg_offset(s, rm), 11740 is_q ? 16 : 8, vec_full_reg_size(s)); 11741 return; 11742 } 11743 11744 if (size == 3) { 11745 assert(is_q); 11746 for (pass = 0; pass < 2; pass++) { 11747 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 11748 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 11749 TCGv_i64 tcg_res = tcg_temp_new_i64(); 11750 11751 read_vec_element(s, tcg_op1, rn, pass, MO_64); 11752 read_vec_element(s, tcg_op2, rm, pass, MO_64); 11753 11754 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2); 11755 11756 write_vec_element(s, tcg_res, rd, pass, MO_64); 11757 11758 tcg_temp_free_i64(tcg_res); 11759 tcg_temp_free_i64(tcg_op1); 11760 tcg_temp_free_i64(tcg_op2); 11761 } 11762 } else { 11763 for (pass = 0; pass < (is_q ? 
4 : 2); pass++) { 11764 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 11765 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 11766 TCGv_i32 tcg_res = tcg_temp_new_i32(); 11767 NeonGenTwoOpFn *genfn = NULL; 11768 NeonGenTwoOpEnvFn *genenvfn = NULL; 11769 11770 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32); 11771 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32); 11772 11773 switch (opcode) { 11774 case 0x0: /* SHADD, UHADD */ 11775 { 11776 static NeonGenTwoOpFn * const fns[3][2] = { 11777 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 }, 11778 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 }, 11779 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 }, 11780 }; 11781 genfn = fns[size][u]; 11782 break; 11783 } 11784 case 0x2: /* SRHADD, URHADD */ 11785 { 11786 static NeonGenTwoOpFn * const fns[3][2] = { 11787 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 }, 11788 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 }, 11789 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 }, 11790 }; 11791 genfn = fns[size][u]; 11792 break; 11793 } 11794 case 0x4: /* SHSUB, UHSUB */ 11795 { 11796 static NeonGenTwoOpFn * const fns[3][2] = { 11797 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 }, 11798 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 }, 11799 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 }, 11800 }; 11801 genfn = fns[size][u]; 11802 break; 11803 } 11804 case 0x9: /* SQSHL, UQSHL */ 11805 { 11806 static NeonGenTwoOpEnvFn * const fns[3][2] = { 11807 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 }, 11808 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 }, 11809 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 }, 11810 }; 11811 genenvfn = fns[size][u]; 11812 break; 11813 } 11814 case 0xa: /* SRSHL, URSHL */ 11815 { 11816 static NeonGenTwoOpFn * const fns[3][2] = { 11817 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 }, 11818 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 }, 11819 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 }, 11820 }; 11821 genfn = fns[size][u]; 11822 break; 11823 } 11824 case 0xb: /* SQRSHL, UQRSHL */ 11825 { 11826 static NeonGenTwoOpEnvFn * const fns[3][2] = { 11827 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 }, 11828 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 }, 11829 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 }, 11830 }; 11831 genenvfn = fns[size][u]; 11832 break; 11833 } 11834 default: 11835 g_assert_not_reached(); 11836 } 11837 11838 if (genenvfn) { 11839 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2); 11840 } else { 11841 genfn(tcg_res, tcg_op1, tcg_op2); 11842 } 11843 11844 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 11845 11846 tcg_temp_free_i32(tcg_res); 11847 tcg_temp_free_i32(tcg_op1); 11848 tcg_temp_free_i32(tcg_op2); 11849 } 11850 } 11851 clear_vec_high(s, is_q, rd); 11852 } 11853 11854 /* AdvSIMD three same 11855 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0 11856 * +---+---+---+-----------+------+---+------+--------+---+------+------+ 11857 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd | 11858 * +---+---+---+-----------+------+---+------+--------+---+------+------+ 11859 */ 11860 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn) 11861 { 11862 int opcode = extract32(insn, 11, 5); 11863 11864 switch (opcode) { 11865 case 0x3: /* logic ops */ 11866 disas_simd_3same_logic(s, insn); 11867 break; 11868 case 0x17: /* ADDP */ 11869 case 0x14: /* SMAXP, UMAXP */ 11870 case 0x15: /* SMINP, UMINP */ 11871 { 11872 /* Pairwise operations 
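 * (ADDP, SMAXP/UMAXP, SMINP/UMINP), sharing handle_simd_3same_pair with
 * the FP pairwise ops: ADDP requires U == 0 and permits size == 3 only in
 * the Q form, while the max/min ops never permit size == 3 (checked below).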
*/ 11873 int is_q = extract32(insn, 30, 1); 11874 int u = extract32(insn, 29, 1); 11875 int size = extract32(insn, 22, 2); 11876 int rm = extract32(insn, 16, 5); 11877 int rn = extract32(insn, 5, 5); 11878 int rd = extract32(insn, 0, 5); 11879 if (opcode == 0x17) { 11880 if (u || (size == 3 && !is_q)) { 11881 unallocated_encoding(s); 11882 return; 11883 } 11884 } else { 11885 if (size == 3) { 11886 unallocated_encoding(s); 11887 return; 11888 } 11889 } 11890 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd); 11891 break; 11892 } 11893 case 0x18 ... 0x31: 11894 /* floating point ops, sz[1] and U are part of opcode */ 11895 disas_simd_3same_float(s, insn); 11896 break; 11897 default: 11898 disas_simd_3same_int(s, insn); 11899 break; 11900 } 11901 } 11902 11903 /* 11904 * Advanced SIMD three same (ARMv8.2 FP16 variants) 11905 * 11906 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0 11907 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ 11908 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd | 11909 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+ 11910 * 11911 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE 11912 * (register), FACGE, FABD, FCMGT (register) and FACGT. 11913 * 11914 */ 11915 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) 11916 { 11917 int opcode = extract32(insn, 11, 3); 11918 int u = extract32(insn, 29, 1); 11919 int a = extract32(insn, 23, 1); 11920 int is_q = extract32(insn, 30, 1); 11921 int rm = extract32(insn, 16, 5); 11922 int rn = extract32(insn, 5, 5); 11923 int rd = extract32(insn, 0, 5); 11924 /* 11925 * For these floating point ops, the U, a and opcode bits 11926 * together indicate the operation. 11927 */ 11928 int fpopcode = opcode | (a << 3) | (u << 4); 11929 int datasize = is_q ? 128 : 64; 11930 int elements = datasize / 16; 11931 bool pairwise; 11932 TCGv_ptr fpst; 11933 int pass; 11934 11935 switch (fpopcode) { 11936 case 0x0: /* FMAXNM */ 11937 case 0x1: /* FMLA */ 11938 case 0x2: /* FADD */ 11939 case 0x3: /* FMULX */ 11940 case 0x4: /* FCMEQ */ 11941 case 0x6: /* FMAX */ 11942 case 0x7: /* FRECPS */ 11943 case 0x8: /* FMINNM */ 11944 case 0x9: /* FMLS */ 11945 case 0xa: /* FSUB */ 11946 case 0xe: /* FMIN */ 11947 case 0xf: /* FRSQRTS */ 11948 case 0x13: /* FMUL */ 11949 case 0x14: /* FCMGE */ 11950 case 0x15: /* FACGE */ 11951 case 0x17: /* FDIV */ 11952 case 0x1a: /* FABD */ 11953 case 0x1c: /* FCMGT */ 11954 case 0x1d: /* FACGT */ 11955 pairwise = false; 11956 break; 11957 case 0x10: /* FMAXNMP */ 11958 case 0x12: /* FADDP */ 11959 case 0x16: /* FMAXP */ 11960 case 0x18: /* FMINNMP */ 11961 case 0x1e: /* FMINP */ 11962 pairwise = true; 11963 break; 11964 default: 11965 unallocated_encoding(s); 11966 return; 11967 } 11968 11969 if (!dc_isar_feature(aa64_fp16, s)) { 11970 unallocated_encoding(s); 11971 return; 11972 } 11973 11974 if (!fp_access_check(s)) { 11975 return; 11976 } 11977 11978 fpst = fpstatus_ptr(FPST_FPCR_F16); 11979 11980 if (pairwise) { 11981 int maxpass = is_q ? 8 : 4; 11982 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 11983 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 11984 TCGv_i32 tcg_res[8]; 11985 11986 for (pass = 0; pass < maxpass; pass++) { 11987 int passreg = pass < (maxpass / 2) ? 
rn : rm; 11988 int passelt = (pass << 1) & (maxpass - 1); 11989 11990 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16); 11991 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16); 11992 tcg_res[pass] = tcg_temp_new_i32(); 11993 11994 switch (fpopcode) { 11995 case 0x10: /* FMAXNMP */ 11996 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2, 11997 fpst); 11998 break; 11999 case 0x12: /* FADDP */ 12000 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst); 12001 break; 12002 case 0x16: /* FMAXP */ 12003 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst); 12004 break; 12005 case 0x18: /* FMINNMP */ 12006 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2, 12007 fpst); 12008 break; 12009 case 0x1e: /* FMINP */ 12010 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst); 12011 break; 12012 default: 12013 g_assert_not_reached(); 12014 } 12015 } 12016 12017 for (pass = 0; pass < maxpass; pass++) { 12018 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16); 12019 tcg_temp_free_i32(tcg_res[pass]); 12020 } 12021 12022 tcg_temp_free_i32(tcg_op1); 12023 tcg_temp_free_i32(tcg_op2); 12024 12025 } else { 12026 for (pass = 0; pass < elements; pass++) { 12027 TCGv_i32 tcg_op1 = tcg_temp_new_i32(); 12028 TCGv_i32 tcg_op2 = tcg_temp_new_i32(); 12029 TCGv_i32 tcg_res = tcg_temp_new_i32(); 12030 12031 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16); 12032 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16); 12033 12034 switch (fpopcode) { 12035 case 0x0: /* FMAXNM */ 12036 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst); 12037 break; 12038 case 0x1: /* FMLA */ 12039 read_vec_element_i32(s, tcg_res, rd, pass, MO_16); 12040 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res, 12041 fpst); 12042 break; 12043 case 0x2: /* FADD */ 12044 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst); 12045 break; 12046 case 0x3: /* FMULX */ 12047 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst); 12048 break; 12049 case 0x4: /* FCMEQ */ 12050 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12051 break; 12052 case 0x6: /* FMAX */ 12053 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst); 12054 break; 12055 case 0x7: /* FRECPS */ 12056 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12057 break; 12058 case 0x8: /* FMINNM */ 12059 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst); 12060 break; 12061 case 0x9: /* FMLS */ 12062 /* As usual for ARM, separate negation for fused multiply-add */ 12063 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000); 12064 read_vec_element_i32(s, tcg_res, rd, pass, MO_16); 12065 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res, 12066 fpst); 12067 break; 12068 case 0xa: /* FSUB */ 12069 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst); 12070 break; 12071 case 0xe: /* FMIN */ 12072 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst); 12073 break; 12074 case 0xf: /* FRSQRTS */ 12075 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12076 break; 12077 case 0x13: /* FMUL */ 12078 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst); 12079 break; 12080 case 0x14: /* FCMGE */ 12081 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12082 break; 12083 case 0x15: /* FACGE */ 12084 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12085 break; 12086 case 0x17: /* FDIV */ 12087 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst); 12088 break; 12089 case 0x1a: /* FABD */ 12090 gen_helper_advsimd_subh(tcg_res, tcg_op1, 
tcg_op2, fpst); 12091 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff); 12092 break; 12093 case 0x1c: /* FCMGT */ 12094 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12095 break; 12096 case 0x1d: /* FACGT */ 12097 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst); 12098 break; 12099 default: 12100 g_assert_not_reached(); 12101 } 12102 12103 write_vec_element_i32(s, tcg_res, rd, pass, MO_16); 12104 tcg_temp_free_i32(tcg_res); 12105 tcg_temp_free_i32(tcg_op1); 12106 tcg_temp_free_i32(tcg_op2); 12107 } 12108 } 12109 12110 tcg_temp_free_ptr(fpst); 12111 12112 clear_vec_high(s, is_q, rd); 12113 } 12114 12115 /* AdvSIMD three same extra 12116 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0 12117 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ 12118 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd | 12119 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+ 12120 */ 12121 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) 12122 { 12123 int rd = extract32(insn, 0, 5); 12124 int rn = extract32(insn, 5, 5); 12125 int opcode = extract32(insn, 11, 4); 12126 int rm = extract32(insn, 16, 5); 12127 int size = extract32(insn, 22, 2); 12128 bool u = extract32(insn, 29, 1); 12129 bool is_q = extract32(insn, 30, 1); 12130 bool feature; 12131 int rot; 12132 12133 switch (u * 16 + opcode) { 12134 case 0x10: /* SQRDMLAH (vector) */ 12135 case 0x11: /* SQRDMLSH (vector) */ 12136 if (size != 1 && size != 2) { 12137 unallocated_encoding(s); 12138 return; 12139 } 12140 feature = dc_isar_feature(aa64_rdm, s); 12141 break; 12142 case 0x02: /* SDOT (vector) */ 12143 case 0x12: /* UDOT (vector) */ 12144 if (size != MO_32) { 12145 unallocated_encoding(s); 12146 return; 12147 } 12148 feature = dc_isar_feature(aa64_dp, s); 12149 break; 12150 case 0x03: /* USDOT */ 12151 if (size != MO_32) { 12152 unallocated_encoding(s); 12153 return; 12154 } 12155 feature = dc_isar_feature(aa64_i8mm, s); 12156 break; 12157 case 0x04: /* SMMLA */ 12158 case 0x14: /* UMMLA */ 12159 case 0x05: /* USMMLA */ 12160 if (!is_q || size != MO_32) { 12161 unallocated_encoding(s); 12162 return; 12163 } 12164 feature = dc_isar_feature(aa64_i8mm, s); 12165 break; 12166 case 0x18: /* FCMLA, #0 */ 12167 case 0x19: /* FCMLA, #90 */ 12168 case 0x1a: /* FCMLA, #180 */ 12169 case 0x1b: /* FCMLA, #270 */ 12170 case 0x1c: /* FCADD, #90 */ 12171 case 0x1e: /* FCADD, #270 */ 12172 if (size == 0 12173 || (size == 1 && !dc_isar_feature(aa64_fp16, s)) 12174 || (size == 3 && !is_q)) { 12175 unallocated_encoding(s); 12176 return; 12177 } 12178 feature = dc_isar_feature(aa64_fcma, s); 12179 break; 12180 case 0x1d: /* BFMMLA */ 12181 if (size != MO_16 || !is_q) { 12182 unallocated_encoding(s); 12183 return; 12184 } 12185 feature = dc_isar_feature(aa64_bf16, s); 12186 break; 12187 case 0x1f: 12188 switch (size) { 12189 case 1: /* BFDOT */ 12190 case 3: /* BFMLAL{B,T} */ 12191 feature = dc_isar_feature(aa64_bf16, s); 12192 break; 12193 default: 12194 unallocated_encoding(s); 12195 return; 12196 } 12197 break; 12198 default: 12199 unallocated_encoding(s); 12200 return; 12201 } 12202 if (!feature) { 12203 unallocated_encoding(s); 12204 return; 12205 } 12206 if (!fp_access_check(s)) { 12207 return; 12208 } 12209 12210 switch (opcode) { 12211 case 0x0: /* SQRDMLAH (vector) */ 12212 gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size); 12213 return; 12214 12215 case 0x1: /* SQRDMLSH (vector) */ 12216 gen_gvec_fn3(s, is_q, rd, rn, rm, 
gen_gvec_sqrdmlsh_qc, size);
12217         return;
12218 
12219     case 0x2: /* SDOT / UDOT */
12220         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
12221                          u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
12222         return;
12223 
12224     case 0x3: /* USDOT */
12225         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
12226         return;
12227 
12228     case 0x04: /* SMMLA, UMMLA */
12229         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
12230                          u ? gen_helper_gvec_ummla_b
12231                          : gen_helper_gvec_smmla_b);
12232         return;
12233     case 0x05: /* USMMLA */
12234         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
12235         return;
12236 
12237     case 0x8: /* FCMLA, #0 */
12238     case 0x9: /* FCMLA, #90 */
12239     case 0xa: /* FCMLA, #180 */
12240     case 0xb: /* FCMLA, #270 */
12241         rot = extract32(opcode, 0, 2);
12242         switch (size) {
12243         case 1:
12244             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
12245                               gen_helper_gvec_fcmlah);
12246             break;
12247         case 2:
12248             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12249                               gen_helper_gvec_fcmlas);
12250             break;
12251         case 3:
12252             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
12253                               gen_helper_gvec_fcmlad);
12254             break;
12255         default:
12256             g_assert_not_reached();
12257         }
12258         return;
12259 
12260     case 0xc: /* FCADD, #90 */
12261     case 0xe: /* FCADD, #270 */
12262         rot = extract32(opcode, 1, 1);
12263         switch (size) {
12264         case 1:
12265             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12266                               gen_helper_gvec_fcaddh);
12267             break;
12268         case 2:
12269             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12270                               gen_helper_gvec_fcadds);
12271             break;
12272         case 3:
12273             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
12274                               gen_helper_gvec_fcaddd);
12275             break;
12276         default:
12277             g_assert_not_reached();
12278         }
12279         return;
12280 
12281     case 0xd: /* BFMMLA */
12282         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
12283         return;
12284     case 0xf:
12285         switch (size) {
12286         case 1: /* BFDOT */
12287             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
12288             break;
12289         case 3: /* BFMLAL{B,T} */
12290             gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
12291                               gen_helper_gvec_bfmlal);
12292             break;
12293         default:
12294             g_assert_not_reached();
12295         }
12296         return;
12297 
12298     default:
12299         g_assert_not_reached();
12300     }
12301 }
12302 
12303 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
12304                                   int size, int rn, int rd)
12305 {
12306     /* Handle 2-reg-misc ops which are widening (so each size element
12307      * in the source becomes a 2*size element in the destination).
12308      * The only instruction like this is FCVTL.
12309      */
12310     int pass;
12311 
12312     if (size == 3) {
12313         /* 32 -> 64 bit fp conversion */
12314         TCGv_i64 tcg_res[2];
12315         int srcelt = is_q ? 2 : 0;
12316 
12317         for (pass = 0; pass < 2; pass++) {
12318             TCGv_i32 tcg_op = tcg_temp_new_i32();
12319             tcg_res[pass] = tcg_temp_new_i64();
12320 
12321             read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
12322             gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
12323             tcg_temp_free_i32(tcg_op);
12324         }
12325         for (pass = 0; pass < 2; pass++) {
12326             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
12327             tcg_temp_free_i64(tcg_res[pass]);
12328         }
12329     } else {
12330         /* 16 -> 32 bit fp conversion */
12331         int srcelt = is_q ?
4 : 0; 12332 TCGv_i32 tcg_res[4]; 12333 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR); 12334 TCGv_i32 ahp = get_ahp_flag(); 12335 12336 for (pass = 0; pass < 4; pass++) { 12337 tcg_res[pass] = tcg_temp_new_i32(); 12338 12339 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16); 12340 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass], 12341 fpst, ahp); 12342 } 12343 for (pass = 0; pass < 4; pass++) { 12344 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32); 12345 tcg_temp_free_i32(tcg_res[pass]); 12346 } 12347 12348 tcg_temp_free_ptr(fpst); 12349 tcg_temp_free_i32(ahp); 12350 } 12351 } 12352 12353 static void handle_rev(DisasContext *s, int opcode, bool u, 12354 bool is_q, int size, int rn, int rd) 12355 { 12356 int op = (opcode << 1) | u; 12357 int opsz = op + size; 12358 int grp_size = 3 - opsz; 12359 int dsize = is_q ? 128 : 64; 12360 int i; 12361 12362 if (opsz >= 3) { 12363 unallocated_encoding(s); 12364 return; 12365 } 12366 12367 if (!fp_access_check(s)) { 12368 return; 12369 } 12370 12371 if (size == 0) { 12372 /* Special case bytes, use bswap op on each group of elements */ 12373 int groups = dsize / (8 << grp_size); 12374 12375 for (i = 0; i < groups; i++) { 12376 TCGv_i64 tcg_tmp = tcg_temp_new_i64(); 12377 12378 read_vec_element(s, tcg_tmp, rn, i, grp_size); 12379 switch (grp_size) { 12380 case MO_16: 12381 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); 12382 break; 12383 case MO_32: 12384 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ); 12385 break; 12386 case MO_64: 12387 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp); 12388 break; 12389 default: 12390 g_assert_not_reached(); 12391 } 12392 write_vec_element(s, tcg_tmp, rd, i, grp_size); 12393 tcg_temp_free_i64(tcg_tmp); 12394 } 12395 clear_vec_high(s, is_q, rd); 12396 } else { 12397 int revmask = (1 << grp_size) - 1; 12398 int esize = 8 << size; 12399 int elements = dsize / esize; 12400 TCGv_i64 tcg_rn = tcg_temp_new_i64(); 12401 TCGv_i64 tcg_rd = tcg_const_i64(0); 12402 TCGv_i64 tcg_rd_hi = tcg_const_i64(0); 12403 12404 for (i = 0; i < elements; i++) { 12405 int e_rev = (i & 0xf) ^ revmask; 12406 int off = e_rev * esize; 12407 read_vec_element(s, tcg_rn, rn, i, size); 12408 if (off >= 64) { 12409 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi, 12410 tcg_rn, off - 64, esize); 12411 } else { 12412 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize); 12413 } 12414 } 12415 write_vec_element(s, tcg_rd, rd, 0, MO_64); 12416 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64); 12417 12418 tcg_temp_free_i64(tcg_rd_hi); 12419 tcg_temp_free_i64(tcg_rd); 12420 tcg_temp_free_i64(tcg_rn); 12421 } 12422 } 12423 12424 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u, 12425 bool is_q, int size, int rn, int rd) 12426 { 12427 /* Implement the pairwise operations from 2-misc: 12428 * SADDLP, UADDLP, SADALP, UADALP. 12429 * These all add pairs of elements in the input to produce a 12430 * double-width result element in the output (possibly accumulating). 12431 */ 12432 bool accum = (opcode == 0x6); 12433 int maxpass = is_q ? 2 : 1; 12434 int pass; 12435 TCGv_i64 tcg_res[2]; 12436 12437 if (size == 2) { 12438 /* 32 + 32 -> 64 op */ 12439 MemOp memop = size + (u ? 
0 : MO_SIGN); 12440 12441 for (pass = 0; pass < maxpass; pass++) { 12442 TCGv_i64 tcg_op1 = tcg_temp_new_i64(); 12443 TCGv_i64 tcg_op2 = tcg_temp_new_i64(); 12444 12445 tcg_res[pass] = tcg_temp_new_i64(); 12446 12447 read_vec_element(s, tcg_op1, rn, pass * 2, memop); 12448 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop); 12449 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2); 12450 if (accum) { 12451 read_vec_element(s, tcg_op1, rd, pass, MO_64); 12452 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1); 12453 } 12454 12455 tcg_temp_free_i64(tcg_op1); 12456 tcg_temp_free_i64(tcg_op2); 12457 } 12458 } else { 12459 for (pass = 0; pass < maxpass; pass++) { 12460 TCGv_i64 tcg_op = tcg_temp_new_i64(); 12461 NeonGenOne64OpFn *genfn; 12462 static NeonGenOne64OpFn * const fns[2][2] = { 12463 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 }, 12464 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 }, 12465 }; 12466 12467 genfn = fns[size][u]; 12468 12469 tcg_res[pass] = tcg_temp_new_i64(); 12470 12471 read_vec_element(s, tcg_op, rn, pass, MO_64); 12472 genfn(tcg_res[pass], tcg_op); 12473 12474 if (accum) { 12475 read_vec_element(s, tcg_op, rd, pass, MO_64); 12476 if (size == 0) { 12477 gen_helper_neon_addl_u16(tcg_res[pass], 12478 tcg_res[pass], tcg_op); 12479 } else { 12480 gen_helper_neon_addl_u32(tcg_res[pass], 12481 tcg_res[pass], tcg_op); 12482 } 12483 } 12484 tcg_temp_free_i64(tcg_op); 12485 } 12486 } 12487 if (!is_q) { 12488 tcg_res[1] = tcg_constant_i64(0); 12489 } 12490 for (pass = 0; pass < 2; pass++) { 12491 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 12492 tcg_temp_free_i64(tcg_res[pass]); 12493 } 12494 } 12495 12496 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd) 12497 { 12498 /* Implement SHLL and SHLL2 */ 12499 int pass; 12500 int part = is_q ? 
2 : 0; 12501 TCGv_i64 tcg_res[2]; 12502 12503 for (pass = 0; pass < 2; pass++) { 12504 static NeonGenWidenFn * const widenfns[3] = { 12505 gen_helper_neon_widen_u8, 12506 gen_helper_neon_widen_u16, 12507 tcg_gen_extu_i32_i64, 12508 }; 12509 NeonGenWidenFn *widenfn = widenfns[size]; 12510 TCGv_i32 tcg_op = tcg_temp_new_i32(); 12511 12512 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32); 12513 tcg_res[pass] = tcg_temp_new_i64(); 12514 widenfn(tcg_res[pass], tcg_op); 12515 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size); 12516 12517 tcg_temp_free_i32(tcg_op); 12518 } 12519 12520 for (pass = 0; pass < 2; pass++) { 12521 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 12522 tcg_temp_free_i64(tcg_res[pass]); 12523 } 12524 } 12525 12526 /* AdvSIMD two reg misc 12527 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0 12528 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 12529 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd | 12530 * +---+---+---+-----------+------+-----------+--------+-----+------+------+ 12531 */ 12532 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn) 12533 { 12534 int size = extract32(insn, 22, 2); 12535 int opcode = extract32(insn, 12, 5); 12536 bool u = extract32(insn, 29, 1); 12537 bool is_q = extract32(insn, 30, 1); 12538 int rn = extract32(insn, 5, 5); 12539 int rd = extract32(insn, 0, 5); 12540 bool need_fpstatus = false; 12541 bool need_rmode = false; 12542 int rmode = -1; 12543 TCGv_i32 tcg_rmode; 12544 TCGv_ptr tcg_fpstatus; 12545 12546 switch (opcode) { 12547 case 0x0: /* REV64, REV32 */ 12548 case 0x1: /* REV16 */ 12549 handle_rev(s, opcode, u, is_q, size, rn, rd); 12550 return; 12551 case 0x5: /* CNT, NOT, RBIT */ 12552 if (u && size == 0) { 12553 /* NOT */ 12554 break; 12555 } else if (u && size == 1) { 12556 /* RBIT */ 12557 break; 12558 } else if (!u && size == 0) { 12559 /* CNT */ 12560 break; 12561 } 12562 unallocated_encoding(s); 12563 return; 12564 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */ 12565 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */ 12566 if (size == 3) { 12567 unallocated_encoding(s); 12568 return; 12569 } 12570 if (!fp_access_check(s)) { 12571 return; 12572 } 12573 12574 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd); 12575 return; 12576 case 0x4: /* CLS, CLZ */ 12577 if (size == 3) { 12578 unallocated_encoding(s); 12579 return; 12580 } 12581 break; 12582 case 0x2: /* SADDLP, UADDLP */ 12583 case 0x6: /* SADALP, UADALP */ 12584 if (size == 3) { 12585 unallocated_encoding(s); 12586 return; 12587 } 12588 if (!fp_access_check(s)) { 12589 return; 12590 } 12591 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd); 12592 return; 12593 case 0x13: /* SHLL, SHLL2 */ 12594 if (u == 0 || size == 3) { 12595 unallocated_encoding(s); 12596 return; 12597 } 12598 if (!fp_access_check(s)) { 12599 return; 12600 } 12601 handle_shll(s, is_q, size, rn, rd); 12602 return; 12603 case 0xa: /* CMLT */ 12604 if (u == 1) { 12605 unallocated_encoding(s); 12606 return; 12607 } 12608 /* fall through */ 12609 case 0x8: /* CMGT, CMGE */ 12610 case 0x9: /* CMEQ, CMLE */ 12611 case 0xb: /* ABS, NEG */ 12612 if (size == 3 && !is_q) { 12613 unallocated_encoding(s); 12614 return; 12615 } 12616 break; 12617 case 0x3: /* SUQADD, USQADD */ 12618 if (size == 3 && !is_q) { 12619 unallocated_encoding(s); 12620 return; 12621 } 12622 if (!fp_access_check(s)) { 12623 return; 12624 } 12625 handle_2misc_satacc(s, false, u, is_q, size, rn, rd); 12626 return; 12627 case 0x7: /* SQABS, SQNEG */ 
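        /*
         * Saturating ops: for size == 3 these go via handle_2misc_64
         * in the shared 64-bit loop below; smaller element sizes use
         * the per-element neon_qabs/qneg helpers, which take cpu_env
         * so that they can set QC on saturation.
         */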
12628         if (size == 3 && !is_q) {
12629             unallocated_encoding(s);
12630             return;
12631         }
12632         break;
12633     case 0xc ... 0xf:
12634     case 0x16 ... 0x1f:
12635     {
12636         /* Floating point: U, size[1] and opcode indicate operation;
12637          * size[0] indicates single or double precision.
12638          */
12639         int is_double = extract32(size, 0, 1);
12640         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
12641         size = is_double ? 3 : 2;
12642         switch (opcode) {
12643         case 0x2f: /* FABS */
12644         case 0x6f: /* FNEG */
12645             if (size == 3 && !is_q) {
12646                 unallocated_encoding(s);
12647                 return;
12648             }
12649             break;
12650         case 0x1d: /* SCVTF */
12651         case 0x5d: /* UCVTF */
12652         {
12653             bool is_signed = (opcode == 0x1d);
12654             int elements = is_double ? 2 : is_q ? 4 : 2;
12655             if (is_double && !is_q) {
12656                 unallocated_encoding(s);
12657                 return;
12658             }
12659             if (!fp_access_check(s)) {
12660                 return;
12661             }
12662             handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
12663             return;
12664         }
12665         case 0x2c: /* FCMGT (zero) */
12666         case 0x2d: /* FCMEQ (zero) */
12667         case 0x2e: /* FCMLT (zero) */
12668         case 0x6c: /* FCMGE (zero) */
12669         case 0x6d: /* FCMLE (zero) */
12670             if (size == 3 && !is_q) {
12671                 unallocated_encoding(s);
12672                 return;
12673             }
12674             handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
12675             return;
12676         case 0x7f: /* FSQRT */
12677             if (size == 3 && !is_q) {
12678                 unallocated_encoding(s);
12679                 return;
12680             }
12681             break;
12682         case 0x1a: /* FCVTNS */
12683         case 0x1b: /* FCVTMS */
12684         case 0x3a: /* FCVTPS */
12685         case 0x3b: /* FCVTZS */
12686         case 0x5a: /* FCVTNU */
12687         case 0x5b: /* FCVTMU */
12688         case 0x7a: /* FCVTPU */
12689         case 0x7b: /* FCVTZU */
12690             need_fpstatus = true;
12691             need_rmode = true;
12692             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
12693             if (size == 3 && !is_q) {
12694                 unallocated_encoding(s);
12695                 return;
12696             }
12697             break;
12698         case 0x5c: /* FCVTAU */
12699         case 0x1c: /* FCVTAS */
12700             need_fpstatus = true;
12701             need_rmode = true;
12702             rmode = FPROUNDING_TIEAWAY;
12703             if (size == 3 && !is_q) {
12704                 unallocated_encoding(s);
12705                 return;
12706             }
12707             break;
12708         case 0x3c: /* URECPE */
12709             if (size == 3) {
12710                 unallocated_encoding(s);
12711                 return;
12712             }
12713             /* fall through */
12714         case 0x3d: /* FRECPE */
12715         case 0x7d: /* FRSQRTE */
12716             if (size == 3 && !is_q) {
12717                 unallocated_encoding(s);
12718                 return;
12719             }
12720             if (!fp_access_check(s)) {
12721                 return;
12722             }
12723             handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
12724             return;
12725         case 0x56: /* FCVTXN, FCVTXN2 */
12726             if (size == 2) {
12727                 unallocated_encoding(s);
12728                 return;
12729             }
12730             /* fall through */
12731         case 0x16: /* FCVTN, FCVTN2 */
12732             /* handle_2misc_narrow does a 2*size -> size operation, but these
12733              * instructions encode the source size rather than dest size.
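             * E.g. FCVTN with sz == 1 narrows 64-bit source elements,
             * so size is 3 here, and passing size - 1 (MO_32) below
             * makes handle_2misc_narrow do the 64 -> 32 bit conversion.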
12734 */ 12735 if (!fp_access_check(s)) { 12736 return; 12737 } 12738 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); 12739 return; 12740 case 0x36: /* BFCVTN, BFCVTN2 */ 12741 if (!dc_isar_feature(aa64_bf16, s) || size != 2) { 12742 unallocated_encoding(s); 12743 return; 12744 } 12745 if (!fp_access_check(s)) { 12746 return; 12747 } 12748 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd); 12749 return; 12750 case 0x17: /* FCVTL, FCVTL2 */ 12751 if (!fp_access_check(s)) { 12752 return; 12753 } 12754 handle_2misc_widening(s, opcode, is_q, size, rn, rd); 12755 return; 12756 case 0x18: /* FRINTN */ 12757 case 0x19: /* FRINTM */ 12758 case 0x38: /* FRINTP */ 12759 case 0x39: /* FRINTZ */ 12760 need_rmode = true; 12761 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1); 12762 /* fall through */ 12763 case 0x59: /* FRINTX */ 12764 case 0x79: /* FRINTI */ 12765 need_fpstatus = true; 12766 if (size == 3 && !is_q) { 12767 unallocated_encoding(s); 12768 return; 12769 } 12770 break; 12771 case 0x58: /* FRINTA */ 12772 need_rmode = true; 12773 rmode = FPROUNDING_TIEAWAY; 12774 need_fpstatus = true; 12775 if (size == 3 && !is_q) { 12776 unallocated_encoding(s); 12777 return; 12778 } 12779 break; 12780 case 0x7c: /* URSQRTE */ 12781 if (size == 3) { 12782 unallocated_encoding(s); 12783 return; 12784 } 12785 break; 12786 case 0x1e: /* FRINT32Z */ 12787 case 0x1f: /* FRINT64Z */ 12788 need_rmode = true; 12789 rmode = FPROUNDING_ZERO; 12790 /* fall through */ 12791 case 0x5e: /* FRINT32X */ 12792 case 0x5f: /* FRINT64X */ 12793 need_fpstatus = true; 12794 if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) { 12795 unallocated_encoding(s); 12796 return; 12797 } 12798 break; 12799 default: 12800 unallocated_encoding(s); 12801 return; 12802 } 12803 break; 12804 } 12805 default: 12806 unallocated_encoding(s); 12807 return; 12808 } 12809 12810 if (!fp_access_check(s)) { 12811 return; 12812 } 12813 12814 if (need_fpstatus || need_rmode) { 12815 tcg_fpstatus = fpstatus_ptr(FPST_FPCR); 12816 } else { 12817 tcg_fpstatus = NULL; 12818 } 12819 if (need_rmode) { 12820 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); 12821 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 12822 } else { 12823 tcg_rmode = NULL; 12824 } 12825 12826 switch (opcode) { 12827 case 0x5: 12828 if (u && size == 0) { /* NOT */ 12829 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0); 12830 return; 12831 } 12832 break; 12833 case 0x8: /* CMGT, CMGE */ 12834 if (u) { 12835 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size); 12836 } else { 12837 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size); 12838 } 12839 return; 12840 case 0x9: /* CMEQ, CMLE */ 12841 if (u) { 12842 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size); 12843 } else { 12844 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size); 12845 } 12846 return; 12847 case 0xa: /* CMLT */ 12848 gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size); 12849 return; 12850 case 0xb: 12851 if (u) { /* ABS, NEG */ 12852 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size); 12853 } else { 12854 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size); 12855 } 12856 return; 12857 } 12858 12859 if (size == 3) { 12860 /* All 64-bit element operations can be shared with scalar 2misc */ 12861 int pass; 12862 12863 /* Coverity claims (size == 3 && !is_q) has been eliminated 12864 * from all paths leading to here. 
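         * The tcg_debug_assert below makes that invariant explicit,
         * both for readers and for static analysis.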
12865          */
12866         tcg_debug_assert(is_q);
12867         for (pass = 0; pass < 2; pass++) {
12868             TCGv_i64 tcg_op = tcg_temp_new_i64();
12869             TCGv_i64 tcg_res = tcg_temp_new_i64();
12870 
12871             read_vec_element(s, tcg_op, rn, pass, MO_64);
12872 
12873             handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12874                             tcg_rmode, tcg_fpstatus);
12875 
12876             write_vec_element(s, tcg_res, rd, pass, MO_64);
12877 
12878             tcg_temp_free_i64(tcg_res);
12879             tcg_temp_free_i64(tcg_op);
12880         }
12881     } else {
12882         int pass;
12883 
12884         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12885             TCGv_i32 tcg_op = tcg_temp_new_i32();
12886             TCGv_i32 tcg_res = tcg_temp_new_i32();
12887 
12888             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12889 
12890             if (size == 2) {
12891                 /* Special cases for 32 bit elements */
12892                 switch (opcode) {
12893                 case 0x4: /* CLS, CLZ */
12894                     if (u) {
12895                         tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12896                     } else {
12897                         tcg_gen_clrsb_i32(tcg_res, tcg_op);
12898                     }
12899                     break;
12900                 case 0x7: /* SQABS, SQNEG */
12901                     if (u) {
12902                         gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12903                     } else {
12904                         gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12905                     }
12906                     break;
12907                 case 0x2f: /* FABS */
12908                     gen_helper_vfp_abss(tcg_res, tcg_op);
12909                     break;
12910                 case 0x6f: /* FNEG */
12911                     gen_helper_vfp_negs(tcg_res, tcg_op);
12912                     break;
12913                 case 0x7f: /* FSQRT */
12914                     gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12915                     break;
12916                 case 0x1a: /* FCVTNS */
12917                 case 0x1b: /* FCVTMS */
12918                 case 0x1c: /* FCVTAS */
12919                 case 0x3a: /* FCVTPS */
12920                 case 0x3b: /* FCVTZS */
12921                     gen_helper_vfp_tosls(tcg_res, tcg_op,
12922                                          tcg_constant_i32(0), tcg_fpstatus);
12923                     break;
12924                 case 0x5a: /* FCVTNU */
12925                 case 0x5b: /* FCVTMU */
12926                 case 0x5c: /* FCVTAU */
12927                 case 0x7a: /* FCVTPU */
12928                 case 0x7b: /* FCVTZU */
12929                     gen_helper_vfp_touls(tcg_res, tcg_op,
12930                                          tcg_constant_i32(0), tcg_fpstatus);
12931                     break;
12932                 case 0x18: /* FRINTN */
12933                 case 0x19: /* FRINTM */
12934                 case 0x38: /* FRINTP */
12935                 case 0x39: /* FRINTZ */
12936                 case 0x58: /* FRINTA */
12937                 case 0x79: /* FRINTI */
12938                     gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12939                     break;
12940                 case 0x59: /* FRINTX */
12941                     gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12942                     break;
12943                 case 0x7c: /* URSQRTE */
12944                     gen_helper_rsqrte_u32(tcg_res, tcg_op);
12945                     break;
12946                 case 0x1e: /* FRINT32Z */
12947                 case 0x5e: /* FRINT32X */
12948                     gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12949                     break;
12950                 case 0x1f: /* FRINT64Z */
12951                 case 0x5f: /* FRINT64X */
12952                     gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12953                     break;
12954                 default:
12955                     g_assert_not_reached();
12956                 }
12957             } else {
12958                 /* Use helpers for 8 and 16 bit elements */
12959                 switch (opcode) {
12960                 case 0x5: /* CNT, RBIT */
12961                     /* For these two insns size is part of the opcode specifier
12962                      * (handled earlier); they always operate on byte elements.
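                     * (CNT is encoded with U == 0, size == 0; RBIT with
                     * U == 1, size == 1, but both work on byte elements.)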
12963 */ 12964 if (u) { 12965 gen_helper_neon_rbit_u8(tcg_res, tcg_op); 12966 } else { 12967 gen_helper_neon_cnt_u8(tcg_res, tcg_op); 12968 } 12969 break; 12970 case 0x7: /* SQABS, SQNEG */ 12971 { 12972 NeonGenOneOpEnvFn *genfn; 12973 static NeonGenOneOpEnvFn * const fns[2][2] = { 12974 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 }, 12975 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 }, 12976 }; 12977 genfn = fns[size][u]; 12978 genfn(tcg_res, cpu_env, tcg_op); 12979 break; 12980 } 12981 case 0x4: /* CLS, CLZ */ 12982 if (u) { 12983 if (size == 0) { 12984 gen_helper_neon_clz_u8(tcg_res, tcg_op); 12985 } else { 12986 gen_helper_neon_clz_u16(tcg_res, tcg_op); 12987 } 12988 } else { 12989 if (size == 0) { 12990 gen_helper_neon_cls_s8(tcg_res, tcg_op); 12991 } else { 12992 gen_helper_neon_cls_s16(tcg_res, tcg_op); 12993 } 12994 } 12995 break; 12996 default: 12997 g_assert_not_reached(); 12998 } 12999 } 13000 13001 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 13002 13003 tcg_temp_free_i32(tcg_res); 13004 tcg_temp_free_i32(tcg_op); 13005 } 13006 } 13007 clear_vec_high(s, is_q, rd); 13008 13009 if (need_rmode) { 13010 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 13011 tcg_temp_free_i32(tcg_rmode); 13012 } 13013 if (need_fpstatus) { 13014 tcg_temp_free_ptr(tcg_fpstatus); 13015 } 13016 } 13017 13018 /* AdvSIMD [scalar] two register miscellaneous (FP16) 13019 * 13020 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0 13021 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ 13022 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd | 13023 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+ 13024 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00 13025 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800 13026 * 13027 * This actually covers two groups where scalar access is governed by 13028 * bit 28. A bunch of the instructions (float to integral) only exist 13029 * in the vector form and are un-allocated for the scalar decode. Also 13030 * in the scalar decode Q is always 1. 13031 */ 13032 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) 13033 { 13034 int fpop, opcode, a, u; 13035 int rn, rd; 13036 bool is_q; 13037 bool is_scalar; 13038 bool only_in_vector = false; 13039 13040 int pass; 13041 TCGv_i32 tcg_rmode = NULL; 13042 TCGv_ptr tcg_fpstatus = NULL; 13043 bool need_rmode = false; 13044 bool need_fpst = true; 13045 int rmode; 13046 13047 if (!dc_isar_feature(aa64_fp16, s)) { 13048 unallocated_encoding(s); 13049 return; 13050 } 13051 13052 rd = extract32(insn, 0, 5); 13053 rn = extract32(insn, 5, 5); 13054 13055 a = extract32(insn, 23, 1); 13056 u = extract32(insn, 29, 1); 13057 is_scalar = extract32(insn, 28, 1); 13058 is_q = extract32(insn, 30, 1); 13059 13060 opcode = extract32(insn, 12, 5); 13061 fpop = deposit32(opcode, 5, 1, a); 13062 fpop = deposit32(fpop, 6, 1, u); 13063 13064 switch (fpop) { 13065 case 0x1d: /* SCVTF */ 13066 case 0x5d: /* UCVTF */ 13067 { 13068 int elements; 13069 13070 if (is_scalar) { 13071 elements = 1; 13072 } else { 13073 elements = (is_q ? 
8 : 4); 13074 } 13075 13076 if (!fp_access_check(s)) { 13077 return; 13078 } 13079 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16); 13080 return; 13081 } 13082 break; 13083 case 0x2c: /* FCMGT (zero) */ 13084 case 0x2d: /* FCMEQ (zero) */ 13085 case 0x2e: /* FCMLT (zero) */ 13086 case 0x6c: /* FCMGE (zero) */ 13087 case 0x6d: /* FCMLE (zero) */ 13088 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd); 13089 return; 13090 case 0x3d: /* FRECPE */ 13091 case 0x3f: /* FRECPX */ 13092 break; 13093 case 0x18: /* FRINTN */ 13094 need_rmode = true; 13095 only_in_vector = true; 13096 rmode = FPROUNDING_TIEEVEN; 13097 break; 13098 case 0x19: /* FRINTM */ 13099 need_rmode = true; 13100 only_in_vector = true; 13101 rmode = FPROUNDING_NEGINF; 13102 break; 13103 case 0x38: /* FRINTP */ 13104 need_rmode = true; 13105 only_in_vector = true; 13106 rmode = FPROUNDING_POSINF; 13107 break; 13108 case 0x39: /* FRINTZ */ 13109 need_rmode = true; 13110 only_in_vector = true; 13111 rmode = FPROUNDING_ZERO; 13112 break; 13113 case 0x58: /* FRINTA */ 13114 need_rmode = true; 13115 only_in_vector = true; 13116 rmode = FPROUNDING_TIEAWAY; 13117 break; 13118 case 0x59: /* FRINTX */ 13119 case 0x79: /* FRINTI */ 13120 only_in_vector = true; 13121 /* current rounding mode */ 13122 break; 13123 case 0x1a: /* FCVTNS */ 13124 need_rmode = true; 13125 rmode = FPROUNDING_TIEEVEN; 13126 break; 13127 case 0x1b: /* FCVTMS */ 13128 need_rmode = true; 13129 rmode = FPROUNDING_NEGINF; 13130 break; 13131 case 0x1c: /* FCVTAS */ 13132 need_rmode = true; 13133 rmode = FPROUNDING_TIEAWAY; 13134 break; 13135 case 0x3a: /* FCVTPS */ 13136 need_rmode = true; 13137 rmode = FPROUNDING_POSINF; 13138 break; 13139 case 0x3b: /* FCVTZS */ 13140 need_rmode = true; 13141 rmode = FPROUNDING_ZERO; 13142 break; 13143 case 0x5a: /* FCVTNU */ 13144 need_rmode = true; 13145 rmode = FPROUNDING_TIEEVEN; 13146 break; 13147 case 0x5b: /* FCVTMU */ 13148 need_rmode = true; 13149 rmode = FPROUNDING_NEGINF; 13150 break; 13151 case 0x5c: /* FCVTAU */ 13152 need_rmode = true; 13153 rmode = FPROUNDING_TIEAWAY; 13154 break; 13155 case 0x7a: /* FCVTPU */ 13156 need_rmode = true; 13157 rmode = FPROUNDING_POSINF; 13158 break; 13159 case 0x7b: /* FCVTZU */ 13160 need_rmode = true; 13161 rmode = FPROUNDING_ZERO; 13162 break; 13163 case 0x2f: /* FABS */ 13164 case 0x6f: /* FNEG */ 13165 need_fpst = false; 13166 break; 13167 case 0x7d: /* FRSQRTE */ 13168 case 0x7f: /* FSQRT (vector) */ 13169 break; 13170 default: 13171 unallocated_encoding(s); 13172 return; 13173 } 13174 13175 13176 /* Check additional constraints for the scalar encoding */ 13177 if (is_scalar) { 13178 if (!is_q) { 13179 unallocated_encoding(s); 13180 return; 13181 } 13182 /* FRINTxx is only in the vector form */ 13183 if (only_in_vector) { 13184 unallocated_encoding(s); 13185 return; 13186 } 13187 } 13188 13189 if (!fp_access_check(s)) { 13190 return; 13191 } 13192 13193 if (need_rmode || need_fpst) { 13194 tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16); 13195 } 13196 13197 if (need_rmode) { 13198 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode)); 13199 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 13200 } 13201 13202 if (is_scalar) { 13203 TCGv_i32 tcg_op = read_fp_hreg(s, rn); 13204 TCGv_i32 tcg_res = tcg_temp_new_i32(); 13205 13206 switch (fpop) { 13207 case 0x1a: /* FCVTNS */ 13208 case 0x1b: /* FCVTMS */ 13209 case 0x1c: /* FCVTAS */ 13210 case 0x3a: /* FCVTPS */ 13211 case 0x3b: /* FCVTZS */ 13212 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); 
13213 break; 13214 case 0x3d: /* FRECPE */ 13215 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); 13216 break; 13217 case 0x3f: /* FRECPX */ 13218 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus); 13219 break; 13220 case 0x5a: /* FCVTNU */ 13221 case 0x5b: /* FCVTMU */ 13222 case 0x5c: /* FCVTAU */ 13223 case 0x7a: /* FCVTPU */ 13224 case 0x7b: /* FCVTZU */ 13225 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); 13226 break; 13227 case 0x6f: /* FNEG */ 13228 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); 13229 break; 13230 case 0x7d: /* FRSQRTE */ 13231 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); 13232 break; 13233 default: 13234 g_assert_not_reached(); 13235 } 13236 13237 /* limit any sign extension going on */ 13238 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff); 13239 write_fp_sreg(s, rd, tcg_res); 13240 13241 tcg_temp_free_i32(tcg_res); 13242 tcg_temp_free_i32(tcg_op); 13243 } else { 13244 for (pass = 0; pass < (is_q ? 8 : 4); pass++) { 13245 TCGv_i32 tcg_op = tcg_temp_new_i32(); 13246 TCGv_i32 tcg_res = tcg_temp_new_i32(); 13247 13248 read_vec_element_i32(s, tcg_op, rn, pass, MO_16); 13249 13250 switch (fpop) { 13251 case 0x1a: /* FCVTNS */ 13252 case 0x1b: /* FCVTMS */ 13253 case 0x1c: /* FCVTAS */ 13254 case 0x3a: /* FCVTPS */ 13255 case 0x3b: /* FCVTZS */ 13256 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus); 13257 break; 13258 case 0x3d: /* FRECPE */ 13259 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus); 13260 break; 13261 case 0x5a: /* FCVTNU */ 13262 case 0x5b: /* FCVTMU */ 13263 case 0x5c: /* FCVTAU */ 13264 case 0x7a: /* FCVTPU */ 13265 case 0x7b: /* FCVTZU */ 13266 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus); 13267 break; 13268 case 0x18: /* FRINTN */ 13269 case 0x19: /* FRINTM */ 13270 case 0x38: /* FRINTP */ 13271 case 0x39: /* FRINTZ */ 13272 case 0x58: /* FRINTA */ 13273 case 0x79: /* FRINTI */ 13274 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus); 13275 break; 13276 case 0x59: /* FRINTX */ 13277 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus); 13278 break; 13279 case 0x2f: /* FABS */ 13280 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff); 13281 break; 13282 case 0x6f: /* FNEG */ 13283 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000); 13284 break; 13285 case 0x7d: /* FRSQRTE */ 13286 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus); 13287 break; 13288 case 0x7f: /* FSQRT */ 13289 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus); 13290 break; 13291 default: 13292 g_assert_not_reached(); 13293 } 13294 13295 write_vec_element_i32(s, tcg_res, rd, pass, MO_16); 13296 13297 tcg_temp_free_i32(tcg_res); 13298 tcg_temp_free_i32(tcg_op); 13299 } 13300 13301 clear_vec_high(s, is_q, rd); 13302 } 13303 13304 if (tcg_rmode) { 13305 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus); 13306 tcg_temp_free_i32(tcg_rmode); 13307 } 13308 13309 if (tcg_fpstatus) { 13310 tcg_temp_free_ptr(tcg_fpstatus); 13311 } 13312 } 13313 13314 /* AdvSIMD scalar x indexed element 13315 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 13316 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ 13317 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd | 13318 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+ 13319 * AdvSIMD vector x indexed element 13320 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0 13321 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ 13322 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd 
| 13323 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+ 13324 */ 13325 static void disas_simd_indexed(DisasContext *s, uint32_t insn) 13326 { 13327 /* This encoding has two kinds of instruction: 13328 * normal, where we perform elt x idxelt => elt for each 13329 * element in the vector 13330 * long, where we perform elt x idxelt and generate a result of 13331 * double the width of the input element 13332 * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs). 13333 */ 13334 bool is_scalar = extract32(insn, 28, 1); 13335 bool is_q = extract32(insn, 30, 1); 13336 bool u = extract32(insn, 29, 1); 13337 int size = extract32(insn, 22, 2); 13338 int l = extract32(insn, 21, 1); 13339 int m = extract32(insn, 20, 1); 13340 /* Note that the Rm field here is only 4 bits, not 5 as it usually is */ 13341 int rm = extract32(insn, 16, 4); 13342 int opcode = extract32(insn, 12, 4); 13343 int h = extract32(insn, 11, 1); 13344 int rn = extract32(insn, 5, 5); 13345 int rd = extract32(insn, 0, 5); 13346 bool is_long = false; 13347 int is_fp = 0; 13348 bool is_fp16 = false; 13349 int index; 13350 TCGv_ptr fpst; 13351 13352 switch (16 * u + opcode) { 13353 case 0x08: /* MUL */ 13354 case 0x10: /* MLA */ 13355 case 0x14: /* MLS */ 13356 if (is_scalar) { 13357 unallocated_encoding(s); 13358 return; 13359 } 13360 break; 13361 case 0x02: /* SMLAL, SMLAL2 */ 13362 case 0x12: /* UMLAL, UMLAL2 */ 13363 case 0x06: /* SMLSL, SMLSL2 */ 13364 case 0x16: /* UMLSL, UMLSL2 */ 13365 case 0x0a: /* SMULL, SMULL2 */ 13366 case 0x1a: /* UMULL, UMULL2 */ 13367 if (is_scalar) { 13368 unallocated_encoding(s); 13369 return; 13370 } 13371 is_long = true; 13372 break; 13373 case 0x03: /* SQDMLAL, SQDMLAL2 */ 13374 case 0x07: /* SQDMLSL, SQDMLSL2 */ 13375 case 0x0b: /* SQDMULL, SQDMULL2 */ 13376 is_long = true; 13377 break; 13378 case 0x0c: /* SQDMULH */ 13379 case 0x0d: /* SQRDMULH */ 13380 break; 13381 case 0x01: /* FMLA */ 13382 case 0x05: /* FMLS */ 13383 case 0x09: /* FMUL */ 13384 case 0x19: /* FMULX */ 13385 is_fp = 1; 13386 break; 13387 case 0x1d: /* SQRDMLAH */ 13388 case 0x1f: /* SQRDMLSH */ 13389 if (!dc_isar_feature(aa64_rdm, s)) { 13390 unallocated_encoding(s); 13391 return; 13392 } 13393 break; 13394 case 0x0e: /* SDOT */ 13395 case 0x1e: /* UDOT */ 13396 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) { 13397 unallocated_encoding(s); 13398 return; 13399 } 13400 break; 13401 case 0x0f: 13402 switch (size) { 13403 case 0: /* SUDOT */ 13404 case 2: /* USDOT */ 13405 if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) { 13406 unallocated_encoding(s); 13407 return; 13408 } 13409 size = MO_32; 13410 break; 13411 case 1: /* BFDOT */ 13412 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) { 13413 unallocated_encoding(s); 13414 return; 13415 } 13416 size = MO_32; 13417 break; 13418 case 3: /* BFMLAL{B,T} */ 13419 if (is_scalar || !dc_isar_feature(aa64_bf16, s)) { 13420 unallocated_encoding(s); 13421 return; 13422 } 13423 /* can't set is_fp without other incorrect size checks */ 13424 size = MO_16; 13425 break; 13426 default: 13427 unallocated_encoding(s); 13428 return; 13429 } 13430 break; 13431 case 0x11: /* FCMLA #0 */ 13432 case 0x13: /* FCMLA #90 */ 13433 case 0x15: /* FCMLA #180 */ 13434 case 0x17: /* FCMLA #270 */ 13435 if (is_scalar || !dc_isar_feature(aa64_fcma, s)) { 13436 unallocated_encoding(s); 13437 return; 13438 } 13439 is_fp = 2; 13440 break; 13441 case 0x00: /* FMLAL */ 13442 case 0x04: /* FMLSL */ 13443 case 0x18: /* FMLAL2 */ 13444 case 0x1c: /* FMLSL2 */ 
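        /*
         * Widening fp16 -> fp32 multiply-accumulate: the size field
         * must be MO_32 (the single-precision accumulator size), and
         * size is then forced to MO_16 below so that the element index
         * is computed for the half-precision source elements.
         */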
13445 if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) { 13446 unallocated_encoding(s); 13447 return; 13448 } 13449 size = MO_16; 13450 /* is_fp, but we pass cpu_env not fp_status. */ 13451 break; 13452 default: 13453 unallocated_encoding(s); 13454 return; 13455 } 13456 13457 switch (is_fp) { 13458 case 1: /* normal fp */ 13459 /* convert insn encoded size to MemOp size */ 13460 switch (size) { 13461 case 0: /* half-precision */ 13462 size = MO_16; 13463 is_fp16 = true; 13464 break; 13465 case MO_32: /* single precision */ 13466 case MO_64: /* double precision */ 13467 break; 13468 default: 13469 unallocated_encoding(s); 13470 return; 13471 } 13472 break; 13473 13474 case 2: /* complex fp */ 13475 /* Each indexable element is a complex pair. */ 13476 size += 1; 13477 switch (size) { 13478 case MO_32: 13479 if (h && !is_q) { 13480 unallocated_encoding(s); 13481 return; 13482 } 13483 is_fp16 = true; 13484 break; 13485 case MO_64: 13486 break; 13487 default: 13488 unallocated_encoding(s); 13489 return; 13490 } 13491 break; 13492 13493 default: /* integer */ 13494 switch (size) { 13495 case MO_8: 13496 case MO_64: 13497 unallocated_encoding(s); 13498 return; 13499 } 13500 break; 13501 } 13502 if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) { 13503 unallocated_encoding(s); 13504 return; 13505 } 13506 13507 /* Given MemOp size, adjust register and indexing. */ 13508 switch (size) { 13509 case MO_16: 13510 index = h << 2 | l << 1 | m; 13511 break; 13512 case MO_32: 13513 index = h << 1 | l; 13514 rm |= m << 4; 13515 break; 13516 case MO_64: 13517 if (l || !is_q) { 13518 unallocated_encoding(s); 13519 return; 13520 } 13521 index = h; 13522 rm |= m << 4; 13523 break; 13524 default: 13525 g_assert_not_reached(); 13526 } 13527 13528 if (!fp_access_check(s)) { 13529 return; 13530 } 13531 13532 if (is_fp) { 13533 fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR); 13534 } else { 13535 fpst = NULL; 13536 } 13537 13538 switch (16 * u + opcode) { 13539 case 0x0e: /* SDOT */ 13540 case 0x1e: /* UDOT */ 13541 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index, 13542 u ? gen_helper_gvec_udot_idx_b 13543 : gen_helper_gvec_sdot_idx_b); 13544 return; 13545 case 0x0f: 13546 switch (extract32(insn, 22, 2)) { 13547 case 0: /* SUDOT */ 13548 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index, 13549 gen_helper_gvec_sudot_idx_b); 13550 return; 13551 case 1: /* BFDOT */ 13552 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index, 13553 gen_helper_gvec_bfdot_idx); 13554 return; 13555 case 2: /* USDOT */ 13556 gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index, 13557 gen_helper_gvec_usdot_idx_b); 13558 return; 13559 case 3: /* BFMLAL{B,T} */ 13560 gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q, 13561 gen_helper_gvec_bfmlal_idx); 13562 return; 13563 } 13564 g_assert_not_reached(); 13565 case 0x11: /* FCMLA #0 */ 13566 case 0x13: /* FCMLA #90 */ 13567 case 0x15: /* FCMLA #180 */ 13568 case 0x17: /* FCMLA #270 */ 13569 { 13570 int rot = extract32(insn, 13, 2); 13571 int data = (index << 2) | rot; 13572 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd), 13573 vec_full_reg_offset(s, rn), 13574 vec_full_reg_offset(s, rm), 13575 vec_full_reg_offset(s, rd), fpst, 13576 is_q ? 16 : 8, vec_full_reg_size(s), data, 13577 size == MO_64 13578 ? 
gen_helper_gvec_fcmlas_idx 13579 : gen_helper_gvec_fcmlah_idx); 13580 tcg_temp_free_ptr(fpst); 13581 } 13582 return; 13583 13584 case 0x00: /* FMLAL */ 13585 case 0x04: /* FMLSL */ 13586 case 0x18: /* FMLAL2 */ 13587 case 0x1c: /* FMLSL2 */ 13588 { 13589 int is_s = extract32(opcode, 2, 1); 13590 int is_2 = u; 13591 int data = (index << 2) | (is_2 << 1) | is_s; 13592 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd), 13593 vec_full_reg_offset(s, rn), 13594 vec_full_reg_offset(s, rm), cpu_env, 13595 is_q ? 16 : 8, vec_full_reg_size(s), 13596 data, gen_helper_gvec_fmlal_idx_a64); 13597 } 13598 return; 13599 13600 case 0x08: /* MUL */ 13601 if (!is_long && !is_scalar) { 13602 static gen_helper_gvec_3 * const fns[3] = { 13603 gen_helper_gvec_mul_idx_h, 13604 gen_helper_gvec_mul_idx_s, 13605 gen_helper_gvec_mul_idx_d, 13606 }; 13607 tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd), 13608 vec_full_reg_offset(s, rn), 13609 vec_full_reg_offset(s, rm), 13610 is_q ? 16 : 8, vec_full_reg_size(s), 13611 index, fns[size - 1]); 13612 return; 13613 } 13614 break; 13615 13616 case 0x10: /* MLA */ 13617 if (!is_long && !is_scalar) { 13618 static gen_helper_gvec_4 * const fns[3] = { 13619 gen_helper_gvec_mla_idx_h, 13620 gen_helper_gvec_mla_idx_s, 13621 gen_helper_gvec_mla_idx_d, 13622 }; 13623 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 13624 vec_full_reg_offset(s, rn), 13625 vec_full_reg_offset(s, rm), 13626 vec_full_reg_offset(s, rd), 13627 is_q ? 16 : 8, vec_full_reg_size(s), 13628 index, fns[size - 1]); 13629 return; 13630 } 13631 break; 13632 13633 case 0x14: /* MLS */ 13634 if (!is_long && !is_scalar) { 13635 static gen_helper_gvec_4 * const fns[3] = { 13636 gen_helper_gvec_mls_idx_h, 13637 gen_helper_gvec_mls_idx_s, 13638 gen_helper_gvec_mls_idx_d, 13639 }; 13640 tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd), 13641 vec_full_reg_offset(s, rn), 13642 vec_full_reg_offset(s, rm), 13643 vec_full_reg_offset(s, rd), 13644 is_q ? 16 : 8, vec_full_reg_size(s), 13645 index, fns[size - 1]); 13646 return; 13647 } 13648 break; 13649 } 13650 13651 if (size == 3) { 13652 TCGv_i64 tcg_idx = tcg_temp_new_i64(); 13653 int pass; 13654 13655 assert(is_fp && is_q && !is_long); 13656 13657 read_vec_element(s, tcg_idx, rm, index, MO_64); 13658 13659 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { 13660 TCGv_i64 tcg_op = tcg_temp_new_i64(); 13661 TCGv_i64 tcg_res = tcg_temp_new_i64(); 13662 13663 read_vec_element(s, tcg_op, rn, pass, MO_64); 13664 13665 switch (16 * u + opcode) { 13666 case 0x05: /* FMLS */ 13667 /* As usual for ARM, separate negation for fused multiply-add */ 13668 gen_helper_vfp_negd(tcg_op, tcg_op); 13669 /* fall through */ 13670 case 0x01: /* FMLA */ 13671 read_vec_element(s, tcg_res, rd, pass, MO_64); 13672 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst); 13673 break; 13674 case 0x09: /* FMUL */ 13675 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst); 13676 break; 13677 case 0x19: /* FMULX */ 13678 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst); 13679 break; 13680 default: 13681 g_assert_not_reached(); 13682 } 13683 13684 write_vec_element(s, tcg_res, rd, pass, MO_64); 13685 tcg_temp_free_i64(tcg_op); 13686 tcg_temp_free_i64(tcg_res); 13687 } 13688 13689 tcg_temp_free_i64(tcg_idx); 13690 clear_vec_high(s, !is_scalar, rd); 13691 } else if (!is_long) { 13692 /* 32 bit floating point, or 16 or 32 bit integer. 13693 * For the 16 bit scalar case we use the usual Neon helpers and 13694 * rely on the fact that 0 op 0 == 0 with no side effects. 
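         * (The scalar operand is read zero-extended into a 32-bit temp,
         * so the unused high lane of the 2x16 helpers computes 0 op 0
         * and the 32-bit write back leaves the top half clear.)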
13695 */ 13696 TCGv_i32 tcg_idx = tcg_temp_new_i32(); 13697 int pass, maxpasses; 13698 13699 if (is_scalar) { 13700 maxpasses = 1; 13701 } else { 13702 maxpasses = is_q ? 4 : 2; 13703 } 13704 13705 read_vec_element_i32(s, tcg_idx, rm, index, size); 13706 13707 if (size == 1 && !is_scalar) { 13708 /* The simplest way to handle the 16x16 indexed ops is to duplicate 13709 * the index into both halves of the 32 bit tcg_idx and then use 13710 * the usual Neon helpers. 13711 */ 13712 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16); 13713 } 13714 13715 for (pass = 0; pass < maxpasses; pass++) { 13716 TCGv_i32 tcg_op = tcg_temp_new_i32(); 13717 TCGv_i32 tcg_res = tcg_temp_new_i32(); 13718 13719 read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32); 13720 13721 switch (16 * u + opcode) { 13722 case 0x08: /* MUL */ 13723 case 0x10: /* MLA */ 13724 case 0x14: /* MLS */ 13725 { 13726 static NeonGenTwoOpFn * const fns[2][2] = { 13727 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 }, 13728 { tcg_gen_add_i32, tcg_gen_sub_i32 }, 13729 }; 13730 NeonGenTwoOpFn *genfn; 13731 bool is_sub = opcode == 0x4; 13732 13733 if (size == 1) { 13734 gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx); 13735 } else { 13736 tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx); 13737 } 13738 if (opcode == 0x8) { 13739 break; 13740 } 13741 read_vec_element_i32(s, tcg_op, rd, pass, MO_32); 13742 genfn = fns[size - 1][is_sub]; 13743 genfn(tcg_res, tcg_op, tcg_res); 13744 break; 13745 } 13746 case 0x05: /* FMLS */ 13747 case 0x01: /* FMLA */ 13748 read_vec_element_i32(s, tcg_res, rd, pass, 13749 is_scalar ? size : MO_32); 13750 switch (size) { 13751 case 1: 13752 if (opcode == 0x5) { 13753 /* As usual for ARM, separate negation for fused 13754 * multiply-add */ 13755 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000); 13756 } 13757 if (is_scalar) { 13758 gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx, 13759 tcg_res, fpst); 13760 } else { 13761 gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx, 13762 tcg_res, fpst); 13763 } 13764 break; 13765 case 2: 13766 if (opcode == 0x5) { 13767 /* As usual for ARM, separate negation for 13768 * fused multiply-add */ 13769 tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000); 13770 } 13771 gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, 13772 tcg_res, fpst); 13773 break; 13774 default: 13775 g_assert_not_reached(); 13776 } 13777 break; 13778 case 0x09: /* FMUL */ 13779 switch (size) { 13780 case 1: 13781 if (is_scalar) { 13782 gen_helper_advsimd_mulh(tcg_res, tcg_op, 13783 tcg_idx, fpst); 13784 } else { 13785 gen_helper_advsimd_mul2h(tcg_res, tcg_op, 13786 tcg_idx, fpst); 13787 } 13788 break; 13789 case 2: 13790 gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst); 13791 break; 13792 default: 13793 g_assert_not_reached(); 13794 } 13795 break; 13796 case 0x19: /* FMULX */ 13797 switch (size) { 13798 case 1: 13799 if (is_scalar) { 13800 gen_helper_advsimd_mulxh(tcg_res, tcg_op, 13801 tcg_idx, fpst); 13802 } else { 13803 gen_helper_advsimd_mulx2h(tcg_res, tcg_op, 13804 tcg_idx, fpst); 13805 } 13806 break; 13807 case 2: 13808 gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst); 13809 break; 13810 default: 13811 g_assert_not_reached(); 13812 } 13813 break; 13814 case 0x0c: /* SQDMULH */ 13815 if (size == 1) { 13816 gen_helper_neon_qdmulh_s16(tcg_res, cpu_env, 13817 tcg_op, tcg_idx); 13818 } else { 13819 gen_helper_neon_qdmulh_s32(tcg_res, cpu_env, 13820 tcg_op, tcg_idx); 13821 } 13822 break; 13823 case 0x0d: /* SQRDMULH */ 13824 if (size == 1) { 13825 gen_helper_neon_qrdmulh_s16(tcg_res, 
cpu_env, 13826 tcg_op, tcg_idx); 13827 } else { 13828 gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env, 13829 tcg_op, tcg_idx); 13830 } 13831 break; 13832 case 0x1d: /* SQRDMLAH */ 13833 read_vec_element_i32(s, tcg_res, rd, pass, 13834 is_scalar ? size : MO_32); 13835 if (size == 1) { 13836 gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env, 13837 tcg_op, tcg_idx, tcg_res); 13838 } else { 13839 gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env, 13840 tcg_op, tcg_idx, tcg_res); 13841 } 13842 break; 13843 case 0x1f: /* SQRDMLSH */ 13844 read_vec_element_i32(s, tcg_res, rd, pass, 13845 is_scalar ? size : MO_32); 13846 if (size == 1) { 13847 gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env, 13848 tcg_op, tcg_idx, tcg_res); 13849 } else { 13850 gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env, 13851 tcg_op, tcg_idx, tcg_res); 13852 } 13853 break; 13854 default: 13855 g_assert_not_reached(); 13856 } 13857 13858 if (is_scalar) { 13859 write_fp_sreg(s, rd, tcg_res); 13860 } else { 13861 write_vec_element_i32(s, tcg_res, rd, pass, MO_32); 13862 } 13863 13864 tcg_temp_free_i32(tcg_op); 13865 tcg_temp_free_i32(tcg_res); 13866 } 13867 13868 tcg_temp_free_i32(tcg_idx); 13869 clear_vec_high(s, is_q, rd); 13870 } else { 13871 /* long ops: 16x16->32 or 32x32->64 */ 13872 TCGv_i64 tcg_res[2]; 13873 int pass; 13874 bool satop = extract32(opcode, 0, 1); 13875 MemOp memop = MO_32; 13876 13877 if (satop || !u) { 13878 memop |= MO_SIGN; 13879 } 13880 13881 if (size == 2) { 13882 TCGv_i64 tcg_idx = tcg_temp_new_i64(); 13883 13884 read_vec_element(s, tcg_idx, rm, index, memop); 13885 13886 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { 13887 TCGv_i64 tcg_op = tcg_temp_new_i64(); 13888 TCGv_i64 tcg_passres; 13889 int passelt; 13890 13891 if (is_scalar) { 13892 passelt = 0; 13893 } else { 13894 passelt = pass + (is_q * 2); 13895 } 13896 13897 read_vec_element(s, tcg_op, rn, passelt, memop); 13898 13899 tcg_res[pass] = tcg_temp_new_i64(); 13900 13901 if (opcode == 0xa || opcode == 0xb) { 13902 /* Non-accumulating ops */ 13903 tcg_passres = tcg_res[pass]; 13904 } else { 13905 tcg_passres = tcg_temp_new_i64(); 13906 } 13907 13908 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx); 13909 tcg_temp_free_i64(tcg_op); 13910 13911 if (satop) { 13912 /* saturating, doubling */ 13913 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env, 13914 tcg_passres, tcg_passres); 13915 } 13916 13917 if (opcode == 0xa || opcode == 0xb) { 13918 continue; 13919 } 13920 13921 /* Accumulating op: handle accumulate step */ 13922 read_vec_element(s, tcg_res[pass], rd, pass, MO_64); 13923 13924 switch (opcode) { 13925 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 13926 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 13927 break; 13928 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 13929 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres); 13930 break; 13931 case 0x7: /* SQDMLSL, SQDMLSL2 */ 13932 tcg_gen_neg_i64(tcg_passres, tcg_passres); 13933 /* fall through */ 13934 case 0x3: /* SQDMLAL, SQDMLAL2 */ 13935 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env, 13936 tcg_res[pass], 13937 tcg_passres); 13938 break; 13939 default: 13940 g_assert_not_reached(); 13941 } 13942 tcg_temp_free_i64(tcg_passres); 13943 } 13944 tcg_temp_free_i64(tcg_idx); 13945 13946 clear_vec_high(s, !is_scalar, rd); 13947 } else { 13948 TCGv_i32 tcg_idx = tcg_temp_new_i32(); 13949 13950 assert(size == 1); 13951 read_vec_element_i32(s, tcg_idx, rm, index, size); 13952 13953 if (!is_scalar) { 13954 /* The simplest way to handle the 16x16 indexed ops is to 13955 * 
duplicate the index into both halves of the 32 bit tcg_idx 13956 * and then use the usual Neon helpers. 13957 */ 13958 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16); 13959 } 13960 13961 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) { 13962 TCGv_i32 tcg_op = tcg_temp_new_i32(); 13963 TCGv_i64 tcg_passres; 13964 13965 if (is_scalar) { 13966 read_vec_element_i32(s, tcg_op, rn, pass, size); 13967 } else { 13968 read_vec_element_i32(s, tcg_op, rn, 13969 pass + (is_q * 2), MO_32); 13970 } 13971 13972 tcg_res[pass] = tcg_temp_new_i64(); 13973 13974 if (opcode == 0xa || opcode == 0xb) { 13975 /* Non-accumulating ops */ 13976 tcg_passres = tcg_res[pass]; 13977 } else { 13978 tcg_passres = tcg_temp_new_i64(); 13979 } 13980 13981 if (memop & MO_SIGN) { 13982 gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx); 13983 } else { 13984 gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx); 13985 } 13986 if (satop) { 13987 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env, 13988 tcg_passres, tcg_passres); 13989 } 13990 tcg_temp_free_i32(tcg_op); 13991 13992 if (opcode == 0xa || opcode == 0xb) { 13993 continue; 13994 } 13995 13996 /* Accumulating op: handle accumulate step */ 13997 read_vec_element(s, tcg_res[pass], rd, pass, MO_64); 13998 13999 switch (opcode) { 14000 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */ 14001 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass], 14002 tcg_passres); 14003 break; 14004 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */ 14005 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass], 14006 tcg_passres); 14007 break; 14008 case 0x7: /* SQDMLSL, SQDMLSL2 */ 14009 gen_helper_neon_negl_u32(tcg_passres, tcg_passres); 14010 /* fall through */ 14011 case 0x3: /* SQDMLAL, SQDMLAL2 */ 14012 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env, 14013 tcg_res[pass], 14014 tcg_passres); 14015 break; 14016 default: 14017 g_assert_not_reached(); 14018 } 14019 tcg_temp_free_i64(tcg_passres); 14020 } 14021 tcg_temp_free_i32(tcg_idx); 14022 14023 if (is_scalar) { 14024 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]); 14025 } 14026 } 14027 14028 if (is_scalar) { 14029 tcg_res[1] = tcg_constant_i64(0); 14030 } 14031 14032 for (pass = 0; pass < 2; pass++) { 14033 write_vec_element(s, tcg_res[pass], rd, pass, MO_64); 14034 tcg_temp_free_i64(tcg_res[pass]); 14035 } 14036 } 14037 14038 if (fpst) { 14039 tcg_temp_free_ptr(fpst); 14040 } 14041 } 14042 14043 /* Crypto AES 14044 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0 14045 * +-----------------+------+-----------+--------+-----+------+------+ 14046 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd | 14047 * +-----------------+------+-----------+--------+-----+------+------+ 14048 */ 14049 static void disas_crypto_aes(DisasContext *s, uint32_t insn) 14050 { 14051 int size = extract32(insn, 22, 2); 14052 int opcode = extract32(insn, 12, 5); 14053 int rn = extract32(insn, 5, 5); 14054 int rd = extract32(insn, 0, 5); 14055 int decrypt; 14056 gen_helper_gvec_2 *genfn2 = NULL; 14057 gen_helper_gvec_3 *genfn3 = NULL; 14058 14059 if (!dc_isar_feature(aa64_aes, s) || size != 0) { 14060 unallocated_encoding(s); 14061 return; 14062 } 14063 14064 switch (opcode) { 14065 case 0x4: /* AESE */ 14066 decrypt = 0; 14067 genfn3 = gen_helper_crypto_aese; 14068 break; 14069 case 0x6: /* AESMC */ 14070 decrypt = 0; 14071 genfn2 = gen_helper_crypto_aesmc; 14072 break; 14073 case 0x5: /* AESD */ 14074 decrypt = 1; 14075 genfn3 = gen_helper_crypto_aese; 14076 break; 14077 case 0x7: /* AESIMC */ 14078 decrypt = 1; 14079 genfn2 = 
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_3 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
        genfn = gen_helper_crypto_sha1c;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 1: /* SHA1P */
        genfn = gen_helper_crypto_sha1p;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 2: /* SHA1M */
        genfn = gen_helper_crypto_sha1m;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 3: /* SHA1SU0 */
        genfn = gen_helper_crypto_sha1su0;
        feature = dc_isar_feature(aa64_sha1, s);
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        feature = dc_isar_feature(aa64_sha256, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
}

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    gen_helper_gvec_2 *genfn;
    bool feature;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = dc_isar_feature(aa64_sha1, s);
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = dc_isar_feature(aa64_sha256, s);
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }
    gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
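/*
 * RAX1 (from FEAT_SHA3) computes Vd = Vn ^ ROL(Vm, 1) on each 64-bit
 * lane.  The GVecGen3 descriptor built below gives the gvec expander
 * three strategies, tried roughly in this order: the fniv expansion
 * when the host vector backend supports rotli_vec, the fni8 expansion
 * as scalar 64-bit tcg ops, and the fno out-of-line helper as a last
 * resort.
 */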
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
{
    tcg_gen_rotli_i64(d, m, 1);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
{
    tcg_gen_rotli_vec(vece, d, m, 1);
    tcg_gen_xor_vec(vece, d, d, n);
}

void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3 op = {
        .fni8 = gen_rax1_i64,
        .fniv = gen_rax1_vec,
        .opt_opc = vecop_list,
        .fno = gen_helper_crypto_rax1,
        .vece = MO_64,
    };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
}

/* Crypto three-reg SHA512
 *  31                   21 20  16 15 14 13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;
    gen_helper_gvec_3 *oolfn = NULL;
    GVecGen3Fn *gvecfn = NULL;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = dc_isar_feature(aa64_sha512, s);
            oolfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = dc_isar_feature(aa64_sha3, s);
            gvecfn = gen_gvec_rax1;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = dc_isar_feature(aa64_sm3, s);
            oolfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = dc_isar_feature(aa64_sm4, s);
            oolfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (oolfn) {
        gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
    } else {
        gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
    }
}

/* Crypto two-reg SHA512
 *  31                                     12 11 10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = dc_isar_feature(aa64_sha512, s);
        break;
    case 1: /* SM4E */
        feature = dc_isar_feature(aa64_sm4, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0: /* SHA512SU0 */
        gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
        break;
    case 1: /* SM4E */
        gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
        break;
    default:
        g_assert_not_reached();
    }
}
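/*
 * Semantics of the four-register crypto group below, as a reminder
 * (per the FEAT_SHA3 and FEAT_SM3 definitions):
 *   EOR3:   Vd = Vn ^ Vm ^ Va             (128 bits, lane-agnostic)
 *   BCAX:   Vd = Vn ^ (Vm & ~Va)          (128 bits, lane-agnostic)
 *   SM3SS1: Vd.S[3] = ROL(ROL(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7),
 *           with the other three 32-bit lanes of Vd cleared to zero.
 * The rotate-right amounts of 20 and 25 used in the code are the same
 * rotations written as ROR: ROL(x, 12) == ROR(x, 20), ROL(x, 7) ==
 * ROR(x, 25).
 */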
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = dc_isar_feature(aa64_sha3, s);
        break;
    case 2: /* SM3SS1 */
        feature = dc_isar_feature(aa64_sm3, s);
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!feature) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_constant_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
    }
}
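/*
 * XAR below is xor-and-rotate: Vd = ROR(Vn ^ Vm, imm6) on each 64-bit
 * lane.  Note that the 16 passed to gen_gvec_xar is the operation size
 * in bytes (one 128-bit vector), distinct from vec_full_reg_size(s),
 * which may be larger when SVE is implemented; the gvec machinery
 * zeroes the excess.
 */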
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sha3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
                 vec_full_reg_offset(s, rn),
                 vec_full_reg_offset(s, rm), imm6, 16,
                 vec_full_reg_size(s));
}

/* Crypto three-reg imm2
 *  31                   21 20  16 15 14 13 12  11 10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
        gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
    };
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (!dc_isar_feature(aa64_sm3, s)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
}
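/*
 * In the SM3TT group above, opcode picks one of the four helpers and
 * imm2 rides along as the gvec immediate; the helper uses it to select
 * which 32-bit element of Vm participates in the round function.
 */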
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
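/*
 * An insn matches a table entry when (insn & mask) == pattern; entries
 * are tried in order and the first match wins, so overlapping encodings
 * must be listed most-specific first (see the simd_mod_imm note above).
 * The all-zero entry is the table terminator.
 */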
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/*
 * Include the generated SME FA64 decoder.
 */

#include "decode-sme-fa64.c.inc"

static bool trans_OK(DisasContext *s, arg_OK *a)
{
    return true;
}

static bool trans_FAIL(DisasContext *s, arg_OK *a)
{
    s->is_nonstreaming = true;
    return true;
}
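/*
 * Note on trans_OK/trans_FAIL: the generated decode-sme-fa64 decoder
 * does not emit any code itself.  It classifies each insn, and for
 * those that are illegal in streaming SVE mode without FEAT_SME_FA64
 * it lands on trans_FAIL, which only flags s->is_nonstreaming; the
 * actual trap is raised later, when the insn's access checks run.
 */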
/**
 * is_guarded_page:
 * @env: The cpu environment
 * @s: The DisasContext
 *
 * Return true if the page is guarded.
 */
static bool is_guarded_page(CPUARMState *env, DisasContext *s)
{
    uint64_t addr = s->base.pc_first;
#ifdef CONFIG_USER_ONLY
    return page_get_flags(addr) & PAGE_BTI;
#else
    CPUTLBEntryFull *full;
    void *host;
    int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
    int flags;

    /*
     * We test this immediately after reading an insn, which means
     * that the TLB entry must be present and valid, and thus this
     * access will never raise an exception.
     */
    flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
                              false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));

    return full->guarded;
#endif
}

/**
 * btype_destination_ok:
 * @insn: The instruction at the branch destination
 * @bt: SCTLR_ELx.BT
 * @btype: PSTATE.BTYPE, and is non-zero
 *
 * On a guarded page, there are a limited number of insns
 * that may be present at the branch target:
 *  - branch target identifiers,
 *  - paciasp, pacibsp,
 *  - BRK insn
 *  - HLT insn
 * Anything else causes a Branch Target Exception.
 *
 * Return true if the branch is compatible, false to raise BTITRAP.
 */
static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
{
    if ((insn & 0xfffff01fu) == 0xd503201fu) {
        /* HINT space */
        switch (extract32(insn, 5, 7)) {
        case 0b011001: /* PACIASP */
        case 0b011011: /* PACIBSP */
            /*
             * If SCTLR_ELx.BT, then PACI*SP are not compatible
             * with btype == 3.  Otherwise all btype are ok.
             */
            return !bt || btype != 3;
        case 0b100000: /* BTI */
            /* Not compatible with any btype. */
            return false;
        case 0b100010: /* BTI c */
            /* Not compatible with btype == 3 */
            return btype != 3;
        case 0b100100: /* BTI j */
            /* Not compatible with btype == 2 */
            return btype != 2;
        case 0b100110: /* BTI jc */
            /* Compatible with any btype. */
            return true;
        }
    } else {
        switch (insn & 0xffe0001fu) {
        case 0xd4200000u: /* BRK */
        case 0xd4400000u: /* HLT */
            /* Give priority to the breakpoint exception. */
            return true;
        }
    }
    return false;
}

static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = env_archcpu(env);
    CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
    int bound, core_mmu_idx;

    dc->isar = &arm_cpu->isar;
    dc->condjmp = 0;
    dc->pc_save = dc->base.pc_first;
    dc->aarch64 = true;
    dc->thumb = false;
    dc->sctlr_b = 0;
    dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
    dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
    dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
    dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
    dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
    dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
    dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
    dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
    dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
    dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
    dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
    dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
    dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
    dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
    dc->bt = EX_TBFLAG_A64(tb_flags, BT);
    dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
    dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
    dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
    dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
    dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
    dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
    dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
    dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;
    dc->dcz_blocksize = arm_cpu->dcz_blocksize;

#ifdef CONFIG_USER_ONLY
    /* In sve_probe_page, we assume TBI is enabled. */
    tcg_debug_assert(dc->tbid & 1);
#endif

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
    dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
    dc->is_ldex = false;

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
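    /*
     * (TARGET_PAGE_MASK is sign-extended, so pc_first | TARGET_PAGE_MASK
     * is a negative number whose low bits are the page offset; negating
     * it yields the number of bytes from pc_first to the end of the
     * page, and each A64 insn is 4 bytes.)
     */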
    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, 0, 0);
    dc->insn_start = tcg_last_op();
}
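/*
 * Translate one insn.  Note the ordering of the checks below, which
 * mirrors the architectural exception priorities: software step, then
 * PC alignment, then instruction abort (from arm_ldl_code), then
 * illegal execution state, then the Branch Target Exception.
 */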
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint64_t pc = s->base.pc_next;
    uint32_t insn;

    /* Singlestep exceptions have the highest priority. */
    if (s->ss_active && !s->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(s->base.num_insns == 1);
        gen_swstep_exception(s, 0, 0);
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = pc + 4;
        return;
    }

    if (pc & 3) {
        /*
         * PC alignment fault.  This has priority over the instruction abort
         * that we would receive from a translation fault via arm_ldl_code.
         * This should only be possible after an indirect branch, at the
         * start of the TB.
         */
        assert(s->base.num_insns == 1);
        gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
        s->base.is_jmp = DISAS_NORETURN;
        s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
        return;
    }

    s->pc_curr = pc;
    insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
    s->insn = insn;
    s->base.pc_next = pc + 4;

    s->fp_access_checked = false;
    s->sve_access_checked = false;

    if (s->pstate_il) {
        /*
         * Illegal execution state. This has priority over BTI
         * exceptions, but comes after instruction abort exceptions.
         */
        gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
        return;
    }

    if (dc_isar_feature(aa64_bti, s)) {
        if (s->base.num_insns == 1) {
            /*
             * At the first insn of the TB, compute s->guarded_page.
             * We delayed computing this until successfully reading
             * the first insn of the TB, above.  This (mostly) ensures
             * that the softmmu tlb entry has been populated, and the
             * page table GP bit is available.
             *
             * Note that we need to compute this even if btype == 0,
             * because this value is used for BR instructions later
             * where ENV is not available.
             */
            s->guarded_page = is_guarded_page(env, s);

            /* First insn can have btype set to non-zero. */
            tcg_debug_assert(s->btype >= 0);

            /*
             * Note that the Branch Target Exception has fairly high
             * priority -- below debugging exceptions but above most
             * everything else.  This allows us to handle this now
             * instead of waiting until the insn is otherwise decoded.
             */
            if (s->btype != 0
                && s->guarded_page
                && !btype_destination_ok(insn, s->bt, s->btype)) {
                gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
                return;
            }
        } else {
            /* Not the first insn: btype must be 0. */
            tcg_debug_assert(s->btype == 0);
        }
    }

    s->is_nonstreaming = false;
    if (s->sme_trap_nonstreaming) {
        disas_sme_fa64(s, insn);
    }

    switch (extract32(insn, 25, 4)) {
    case 0x0:
        if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x1: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x2:
        if (!disas_sve(s, insn)) {
            unallocated_encoding(s);
        }
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /*
     * After execution of most insns, btype is reset to 0.
     * Note that we set btype == -1 when the insn sets btype.
     */
    if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
        reset_btype(s);
    }
}
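/*
 * Emit the end-of-TB code sequence.  With ss_active every TB is at most
 * one insn long (see init_disas_context above) and ends in a software
 * step exception rather than chaining to the next TB.
 */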
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            gen_step_complete_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, 4);
            break;
        default:
        case DISAS_UPDATE_EXIT:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_a64_update_pc(dc, 4);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_update_pc(dc, 4);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_update_pc(dc, 4);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
            /*
             * This is a special case because we don't want to just halt
             * the CPU if trying to debug across a WFI.
             */
            gen_a64_update_pc(dc, 4);
            gen_helper_wfi(cpu_env, tcg_constant_i32(4));
            /*
             * The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
    }
}

static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};