xref: /openbmc/qemu/target/arm/tcg/translate-a64.c (revision 3d7b8974)
1 /*
2  *  AArch64 translation
3  *
4  *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "qemu/osdep.h"
20 
21 #include "translate.h"
22 #include "translate-a64.h"
23 #include "qemu/log.h"
24 #include "disas/disas.h"
25 #include "arm_ldst.h"
26 #include "semihosting/semihost.h"
27 #include "cpregs.h"
28 
29 static TCGv_i64 cpu_X[32];
30 static TCGv_i64 cpu_pc;
31 
32 /* Load/store exclusive handling */
33 static TCGv_i64 cpu_exclusive_high;
34 
35 static const char *regnames[] = {
36     "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
37     "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
38     "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
39     "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
40 };
41 
42 enum a64_shift_type {
43     A64_SHIFT_TYPE_LSL = 0,
44     A64_SHIFT_TYPE_LSR = 1,
45     A64_SHIFT_TYPE_ASR = 2,
46     A64_SHIFT_TYPE_ROR = 3
47 };
48 
49 /*
50  * Helpers for extracting complex instruction fields
51  */
52 
53 /*
54  * For load/store with an unsigned 12 bit immediate scaled by the element
55  * size. The input has the immediate field in bits [14:3] and the element
56  * size in [2:0].
57  */
58 static int uimm_scaled(DisasContext *s, int x)
59 {
60     unsigned imm = x >> 3;
61     unsigned scale = extract32(x, 0, 3);
62     return imm << scale;
63 }
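
/*
 * Worked example: for LDR Xt, [Xn, #24] the element size is 8 bytes, so the
 * decoder passes x with scale = 3 in bits [2:0] and imm = 24 >> 3 = 3 in
 * bits [14:3]; the byte offset returned is then 3 << 3 = 24.
 */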
64 
65 /* For load/store memory tags: scale offset by LOG2_TAG_GRANULE */
66 static int scale_by_log2_tag_granule(DisasContext *s, int x)
67 {
68     return x << LOG2_TAG_GRANULE;
69 }
70 
71 /*
72  * Include the generated decoders.
73  */
74 
75 #include "decode-sme-fa64.c.inc"
76 #include "decode-a64.c.inc"
77 
78 /* Table based decoder typedefs - used when the relevant bits for decode
79  * are too awkwardly scattered across the instruction (eg SIMD).
80  */
81 typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
82 
83 typedef struct AArch64DecodeTable {
84     uint32_t pattern;
85     uint32_t mask;
86     AArch64DecodeFn *disas_fn;
87 } AArch64DecodeTable;
88 
89 /* initialize TCG globals.  */
90 void a64_translate_init(void)
91 {
92     int i;
93 
94     cpu_pc = tcg_global_mem_new_i64(cpu_env,
95                                     offsetof(CPUARMState, pc),
96                                     "pc");
97     for (i = 0; i < 32; i++) {
98         cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
99                                           offsetof(CPUARMState, xregs[i]),
100                                           regnames[i]);
101     }
102 
103     cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
104         offsetof(CPUARMState, exclusive_high), "exclusive_high");
105 }
106 
107 /*
108  * Return the core mmu_idx to use for A64 "unprivileged load/store" insns
109  */
110 static int get_a64_user_mem_index(DisasContext *s)
111 {
112     /*
113      * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL,
114      * which is the usual mmu_idx for this cpu state.
115      */
116     ARMMMUIdx useridx = s->mmu_idx;
117 
118     if (s->unpriv) {
119         /*
120          * We have pre-computed the condition for AccType_UNPRIV.
121          * Therefore we should never get here with a mmu_idx for
122          * which we do not know the corresponding user mmu_idx.
123          */
124         switch (useridx) {
125         case ARMMMUIdx_E10_1:
126         case ARMMMUIdx_E10_1_PAN:
127             useridx = ARMMMUIdx_E10_0;
128             break;
129         case ARMMMUIdx_E20_2:
130         case ARMMMUIdx_E20_2_PAN:
131             useridx = ARMMMUIdx_E20_0;
132             break;
133         default:
134             g_assert_not_reached();
135         }
136     }
137     return arm_to_core_mmu_idx(useridx);
138 }
139 
140 static void set_btype_raw(int val)
141 {
142     tcg_gen_st_i32(tcg_constant_i32(val), cpu_env,
143                    offsetof(CPUARMState, btype));
144 }
145 
146 static void set_btype(DisasContext *s, int val)
147 {
148     /* BTYPE is a 2-bit field, and 0 should be done with reset_btype.  */
149     tcg_debug_assert(val >= 1 && val <= 3);
150     set_btype_raw(val);
151     s->btype = -1;
152 }
153 
154 static void reset_btype(DisasContext *s)
155 {
156     if (s->btype != 0) {
157         set_btype_raw(0);
158         s->btype = 0;
159     }
160 }
161 
162 static void gen_pc_plus_diff(DisasContext *s, TCGv_i64 dest, target_long diff)
163 {
164     assert(s->pc_save != -1);
165     if (tb_cflags(s->base.tb) & CF_PCREL) {
166         tcg_gen_addi_i64(dest, cpu_pc, (s->pc_curr - s->pc_save) + diff);
167     } else {
168         tcg_gen_movi_i64(dest, s->pc_curr + diff);
169     }
170 }
171 
172 void gen_a64_update_pc(DisasContext *s, target_long diff)
173 {
174     gen_pc_plus_diff(s, cpu_pc, diff);
175     s->pc_save = s->pc_curr + diff;
176 }
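
/*
 * In other words: pc_save is the translation-time PC that corresponds to
 * whatever the generated code currently holds in cpu_pc.  With CF_PCREL
 * the new PC is therefore produced as cpu_pc + (pc_curr - pc_save) + diff,
 * a pure offset from the run-time PC, so no absolute guest address is
 * baked into the translated code; without CF_PCREL the absolute value
 * pc_curr + diff is simply loaded as an immediate.
 */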
177 
178 /*
179  * Handle Top Byte Ignore (TBI) bits.
180  *
181  * If address tagging is enabled via the TCR TBI bits:
182  *  + for EL2 and EL3 there is only one TBI bit, and if it is set
183  *    then the address is zero-extended, clearing bits [63:56]
184  *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
185  *    and TBI1 controls addresses with bit 55 == 1.
186  *    If the appropriate TBI bit is set for the address then
187  *    the address is sign-extended from bit 55 into bits [63:56]
188  *
189  * Here we have concatenated TBI{1,0} into tbi.
190  */
191 static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
192                                 TCGv_i64 src, int tbi)
193 {
194     if (tbi == 0) {
195         /* Load unmodified address */
196         tcg_gen_mov_i64(dst, src);
197     } else if (!regime_has_2_ranges(s->mmu_idx)) {
198         /* Force tag byte to all zero */
199         tcg_gen_extract_i64(dst, src, 0, 56);
200     } else {
201         /* Sign-extend from bit 55.  */
202         tcg_gen_sextract_i64(dst, src, 0, 56);
203 
204         switch (tbi) {
205         case 1:
206             /* tbi0 but !tbi1: only use the extension if positive */
207             tcg_gen_and_i64(dst, dst, src);
208             break;
209         case 2:
210             /* !tbi0 but tbi1: only use the extension if negative */
211             tcg_gen_or_i64(dst, dst, src);
212             break;
213         case 3:
214             /* tbi0 and tbi1: always use the extension */
215             break;
216         default:
217             g_assert_not_reached();
218         }
219     }
220 }
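
/*
 * For example, with src = 0xAA00123456789ABC (tag byte 0xAA, bit 55 clear):
 *  - tbi == 1 (TBI0 only): the sign-extension from bit 55 is all-zeroes and
 *    the AND keeps it, giving 0x0000123456789ABC;
 *  - tbi == 2 (TBI1 only): bit 55 == 0 is not covered, so the OR restores
 *    the original tag byte and the address is unchanged;
 *  - tbi == 3: the plain sign-extension is used, giving 0x0000123456789ABC.
 * An address with bit 55 set behaves symmetrically, with bits [63:56]
 * forced to ones wherever the relevant TBI bit applies.
 */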
221 
222 static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
223 {
224     /*
225      * If address tagging is enabled for instructions via the TCR TBI bits,
226      * then loading an address into the PC will clear out any tag.
227      */
228     gen_top_byte_ignore(s, cpu_pc, src, s->tbii);
229     s->pc_save = -1;
230 }
231 
232 /*
233  * Handle MTE and/or TBI.
234  *
235  * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
236  * for the tag to be present in the FAR_ELx register.  But for user-only
237  * mode we do not have a TLB with which to implement this, so we must
238  * remove the top byte now.
239  *
240  * Always return a fresh temporary that we can increment independently
241  * of the write-back address.
242  */
243 
244 TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr)
245 {
246     TCGv_i64 clean = tcg_temp_new_i64();
247 #ifdef CONFIG_USER_ONLY
248     gen_top_byte_ignore(s, clean, addr, s->tbid);
249 #else
250     tcg_gen_mov_i64(clean, addr);
251 #endif
252     return clean;
253 }
254 
255 /* Insert a zero tag into src, with the result at dst. */
256 static void gen_address_with_allocation_tag0(TCGv_i64 dst, TCGv_i64 src)
257 {
258     tcg_gen_andi_i64(dst, src, ~MAKE_64BIT_MASK(56, 4));
259 }
260 
261 static void gen_probe_access(DisasContext *s, TCGv_i64 ptr,
262                              MMUAccessType acc, int log2_size)
263 {
264     gen_helper_probe_access(cpu_env, ptr,
265                             tcg_constant_i32(acc),
266                             tcg_constant_i32(get_mem_index(s)),
267                             tcg_constant_i32(1 << log2_size));
268 }
269 
270 /*
271  * For MTE, check a single logical or atomic access.  This probes a single
272  * address, the exact one specified.  The size and alignment of the access
273  * is not relevant to MTE, per se, but watchpoints do require the size,
274  * and we want to recognize those before making any other changes to state.
275  */
276 static TCGv_i64 gen_mte_check1_mmuidx(DisasContext *s, TCGv_i64 addr,
277                                       bool is_write, bool tag_checked,
278                                       MemOp memop, bool is_unpriv,
279                                       int core_idx)
280 {
281     if (tag_checked && s->mte_active[is_unpriv]) {
282         TCGv_i64 ret;
283         int desc = 0;
284 
285         desc = FIELD_DP32(desc, MTEDESC, MIDX, core_idx);
286         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
287         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
288         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
289         desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(memop));
290         desc = FIELD_DP32(desc, MTEDESC, SIZEM1, memop_size(memop) - 1);
291 
292         ret = tcg_temp_new_i64();
293         gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
294 
295         return ret;
296     }
297     return clean_data_tbi(s, addr);
298 }
299 
300 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
301                         bool tag_checked, MemOp memop)
302 {
303     return gen_mte_check1_mmuidx(s, addr, is_write, tag_checked, memop,
304                                  false, get_mem_index(s));
305 }
306 
307 /*
308  * For MTE, check multiple logical sequential accesses.
309  */
310 TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
311                         bool tag_checked, int total_size, MemOp single_mop)
312 {
313     if (tag_checked && s->mte_active[0]) {
314         TCGv_i64 ret;
315         int desc = 0;
316 
317         desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
318         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
319         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
320         desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
321         desc = FIELD_DP32(desc, MTEDESC, ALIGN, get_alignment_bits(single_mop));
322         desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
323 
324         ret = tcg_temp_new_i64();
325         gen_helper_mte_check(ret, cpu_env, tcg_constant_i32(desc), addr);
326 
327         return ret;
328     }
329     return clean_data_tbi(s, addr);
330 }
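
/*
 * Sketch of a typical caller (illustrative only; clean_addr and dirty_addr
 * are placeholder names): a tag-checked LDP of two 8-byte registers would do
 *
 *     clean_addr = gen_mte_checkN(s, dirty_addr, false, true, 16, MO_64);
 *
 * so total_size covers the whole 16-byte transfer (SIZEM1 packed as 15)
 * while the alignment field is derived from the single 8-byte element.
 */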
331 
332 /*
333  * Generate the special alignment check that applies to AccType_ATOMIC
334  * and AccType_ORDERED insns under FEAT_LSE2: the access need not be
335  * naturally aligned, but it must not cross a 16-byte boundary.
336  * See AArch64.CheckAlignment().
337  */
338 static void check_lse2_align(DisasContext *s, int rn, int imm,
339                              bool is_write, MemOp mop)
340 {
341     TCGv_i32 tmp;
342     TCGv_i64 addr;
343     TCGLabel *over_label;
344     MMUAccessType type;
345     int mmu_idx;
346 
347     tmp = tcg_temp_new_i32();
348     tcg_gen_extrl_i64_i32(tmp, cpu_reg_sp(s, rn));
349     tcg_gen_addi_i32(tmp, tmp, imm & 15);
350     tcg_gen_andi_i32(tmp, tmp, 15);
351     tcg_gen_addi_i32(tmp, tmp, memop_size(mop));
352 
353     over_label = gen_new_label();
354     tcg_gen_brcondi_i32(TCG_COND_LEU, tmp, 16, over_label);
355 
356     addr = tcg_temp_new_i64();
357     tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm);
358 
359     type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
360     mmu_idx = get_mem_index(s);
361     gen_helper_unaligned_access(cpu_env, addr, tcg_constant_i32(type),
362                                 tcg_constant_i32(mmu_idx));
363 
364     gen_set_label(over_label);
365 
366 }
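
/*
 * The generated check is ((addr + imm) % 16) + access_size <= 16.  For an
 * 8-byte access whose effective address has low bits 0b1010 (10), this is
 * 10 + 8 = 18 > 16, so the access crosses a 16-byte boundary and the
 * unaligned-access helper raises the fault; at low bits 0b1000 it is
 * 8 + 8 = 16 and the access is allowed.
 */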
367 
368 /* Handle the alignment check for AccType_ATOMIC instructions. */
369 static MemOp check_atomic_align(DisasContext *s, int rn, MemOp mop)
370 {
371     MemOp size = mop & MO_SIZE;
372 
373     if (size == MO_8) {
374         return mop;
375     }
376 
377     /*
378      * If size == MO_128, this is a LDXP, and the operation is single-copy
379      * atomic for each doubleword, not the entire quadword; it still must
380      * be quadword aligned.
381      */
382     if (size == MO_128) {
383         return finalize_memop_atom(s, MO_128 | MO_ALIGN,
384                                    MO_ATOM_IFALIGN_PAIR);
385     }
386     if (dc_isar_feature(aa64_lse2, s)) {
387         check_lse2_align(s, rn, 0, true, mop);
388     } else {
389         mop |= MO_ALIGN;
390     }
391     return finalize_memop(s, mop);
392 }
393 
394 /* Handle the alignment check for AccType_ORDERED instructions. */
395 static MemOp check_ordered_align(DisasContext *s, int rn, int imm,
396                                  bool is_write, MemOp mop)
397 {
398     MemOp size = mop & MO_SIZE;
399 
400     if (size == MO_8) {
401         return mop;
402     }
403     if (size == MO_128) {
404         return finalize_memop_atom(s, MO_128 | MO_ALIGN,
405                                    MO_ATOM_IFALIGN_PAIR);
406     }
407     if (!dc_isar_feature(aa64_lse2, s)) {
408         mop |= MO_ALIGN;
409     } else if (!s->naa) {
410         check_lse2_align(s, rn, imm, is_write, mop);
411     }
412     return finalize_memop(s, mop);
413 }
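
/*
 * To summarise the non-byte, non-quadword cases: without FEAT_LSE2 the
 * access must be naturally aligned (MO_ALIGN); with FEAT_LSE2 natural
 * alignment is not required and the 16-byte-boundary check above is
 * emitted instead, unless the nAA relaxation (s->naa) applies, in which
 * case no check is generated at all.
 */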
414 
415 typedef struct DisasCompare64 {
416     TCGCond cond;
417     TCGv_i64 value;
418 } DisasCompare64;
419 
420 static void a64_test_cc(DisasCompare64 *c64, int cc)
421 {
422     DisasCompare c32;
423 
424     arm_test_cc(&c32, cc);
425 
426     /*
427      * Sign-extend the 32-bit value so that the GE/LT comparisons work
428      * properly.  The NE/EQ comparisons are also fine with this choice.
429      */
430     c64->cond = c32.cond;
431     c64->value = tcg_temp_new_i64();
432     tcg_gen_ext_i32_i64(c64->value, c32.value);
433 }
434 
435 static void gen_rebuild_hflags(DisasContext *s)
436 {
437     gen_helper_rebuild_hflags_a64(cpu_env, tcg_constant_i32(s->current_el));
438 }
439 
440 static void gen_exception_internal(int excp)
441 {
442     assert(excp_is_internal(excp));
443     gen_helper_exception_internal(cpu_env, tcg_constant_i32(excp));
444 }
445 
446 static void gen_exception_internal_insn(DisasContext *s, int excp)
447 {
448     gen_a64_update_pc(s, 0);
449     gen_exception_internal(excp);
450     s->base.is_jmp = DISAS_NORETURN;
451 }
452 
453 static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syndrome)
454 {
455     gen_a64_update_pc(s, 0);
456     gen_helper_exception_bkpt_insn(cpu_env, tcg_constant_i32(syndrome));
457     s->base.is_jmp = DISAS_NORETURN;
458 }
459 
460 static void gen_step_complete_exception(DisasContext *s)
461 {
462     /* We just completed a step of an insn. Move from Active-not-pending
463      * to Active-pending, and then also take the swstep exception.
464      * This corresponds to making the (IMPDEF) choice to prioritize
465      * swstep exceptions over asynchronous exceptions taken to an exception
466      * level where debug is disabled. This choice has the advantage that
467      * we do not need to maintain internal state corresponding to the
468      * ISV/EX syndrome bits between completion of the step and generation
469      * of the exception, and our syndrome information is always correct.
470      */
471     gen_ss_advance(s);
472     gen_swstep_exception(s, 1, s->is_ldex);
473     s->base.is_jmp = DISAS_NORETURN;
474 }
475 
476 static inline bool use_goto_tb(DisasContext *s, uint64_t dest)
477 {
478     if (s->ss_active) {
479         return false;
480     }
481     return translator_use_goto_tb(&s->base, dest);
482 }
483 
484 static void gen_goto_tb(DisasContext *s, int n, int64_t diff)
485 {
486     if (use_goto_tb(s, s->pc_curr + diff)) {
487         /*
488          * For pcrel, the pc must always be up-to-date on entry to
489          * the linked TB, so that it can use simple additions for all
490          * further adjustments.  For !pcrel, the linked TB is compiled
491          * to know its full virtual address, so we can delay the
492          * update to pc to the unlinked path.  A long chain of links
493          * can thus avoid many updates to the PC.
494          */
495         if (tb_cflags(s->base.tb) & CF_PCREL) {
496             gen_a64_update_pc(s, diff);
497             tcg_gen_goto_tb(n);
498         } else {
499             tcg_gen_goto_tb(n);
500             gen_a64_update_pc(s, diff);
501         }
502         tcg_gen_exit_tb(s->base.tb, n);
503         s->base.is_jmp = DISAS_NORETURN;
504     } else {
505         gen_a64_update_pc(s, diff);
506         if (s->ss_active) {
507             gen_step_complete_exception(s);
508         } else {
509             tcg_gen_lookup_and_goto_ptr();
510             s->base.is_jmp = DISAS_NORETURN;
511         }
512     }
513 }
514 
515 /*
516  * Register access functions
517  *
518  * These functions are used for directly accessing a register in cases where
519  * changes to the final register value are likely to be made. If you
520  * need to use a register for temporary calculation (e.g. index type
521  * operations) use the read_* form.
522  *
523  * B1.2.1 Register mappings
524  *
525  * In instruction register encoding 31 can refer to ZR (zero register) or
526  * the SP (stack pointer) depending on context. In QEMU's case we map SP
527  * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
528  * This is the point of the _sp forms.
529  */
530 TCGv_i64 cpu_reg(DisasContext *s, int reg)
531 {
532     if (reg == 31) {
533         TCGv_i64 t = tcg_temp_new_i64();
534         tcg_gen_movi_i64(t, 0);
535         return t;
536     } else {
537         return cpu_X[reg];
538     }
539 }
540 
541 /* register access for when 31 == SP */
542 TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
543 {
544     return cpu_X[reg];
545 }
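
/*
 * For example, the immediate form ADD X0, SP, #16 reads its base via
 * cpu_reg_sp(s, 31) and sees the real stack pointer, while the shifted
 * register form ADD X0, X1, XZR reads operand 31 via cpu_reg(s, 31) and
 * gets a fresh zero temporary; likewise a write to register 31 obtained
 * from cpu_reg() lands in a discarded temporary, not in cpu_X[31].
 */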
546 
547 /* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
548  * representing the register contents. This TCGv is an auto-freed
549  * temporary so it need not be explicitly freed, and may be modified.
550  */
551 TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
552 {
553     TCGv_i64 v = tcg_temp_new_i64();
554     if (reg != 31) {
555         if (sf) {
556             tcg_gen_mov_i64(v, cpu_X[reg]);
557         } else {
558             tcg_gen_ext32u_i64(v, cpu_X[reg]);
559         }
560     } else {
561         tcg_gen_movi_i64(v, 0);
562     }
563     return v;
564 }
565 
566 TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
567 {
568     TCGv_i64 v = tcg_temp_new_i64();
569     if (sf) {
570         tcg_gen_mov_i64(v, cpu_X[reg]);
571     } else {
572         tcg_gen_ext32u_i64(v, cpu_X[reg]);
573     }
574     return v;
575 }
576 
577 /* Return the offset into CPUARMState of a slice (from
578  * the least significant end) of FP register Qn (ie
579  * Dn, Sn, Hn or Bn).
580  * (Note that this is not the same mapping as for A32; see cpu.h)
581  */
582 static inline int fp_reg_offset(DisasContext *s, int regno, MemOp size)
583 {
584     return vec_reg_offset(s, regno, 0, size);
585 }
586 
587 /* Offset of the high half of the 128 bit vector Qn */
588 static inline int fp_reg_hi_offset(DisasContext *s, int regno)
589 {
590     return vec_reg_offset(s, regno, 1, MO_64);
591 }
592 
593 /* Convenience accessors for reading and writing single and double
594  * FP registers. Writing clears the upper parts of the associated
595  * 128 bit vector register, as required by the architecture.
596  * Note that the read functions return a fresh temporary holding a copy
597  * of the register value, so the returned value may be freely modified.
598  */
599 static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
600 {
601     TCGv_i64 v = tcg_temp_new_i64();
602 
603     tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
604     return v;
605 }
606 
607 static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
608 {
609     TCGv_i32 v = tcg_temp_new_i32();
610 
611     tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
612     return v;
613 }
614 
615 static TCGv_i32 read_fp_hreg(DisasContext *s, int reg)
616 {
617     TCGv_i32 v = tcg_temp_new_i32();
618 
619     tcg_gen_ld16u_i32(v, cpu_env, fp_reg_offset(s, reg, MO_16));
620     return v;
621 }
622 
623 /* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
624  * If SVE is not enabled, then there are only 128 bits in the vector.
625  */
626 static void clear_vec_high(DisasContext *s, bool is_q, int rd)
627 {
628     unsigned ofs = fp_reg_offset(s, rd, MO_64);
629     unsigned vsz = vec_full_reg_size(s);
630 
631     /* Nop move, with side effect of clearing the tail. */
632     tcg_gen_gvec_mov(MO_64, ofs, ofs, is_q ? 16 : 8, vsz);
633 }
634 
635 void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
636 {
637     unsigned ofs = fp_reg_offset(s, reg, MO_64);
638 
639     tcg_gen_st_i64(v, cpu_env, ofs);
640     clear_vec_high(s, false, reg);
641 }
642 
643 static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
644 {
645     TCGv_i64 tmp = tcg_temp_new_i64();
646 
647     tcg_gen_extu_i32_i64(tmp, v);
648     write_fp_dreg(s, reg, tmp);
649 }
650 
651 /* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
652 static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
653                          GVecGen2Fn *gvec_fn, int vece)
654 {
655     gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
656             is_q ? 16 : 8, vec_full_reg_size(s));
657 }
658 
659 /* Expand a 2-operand + immediate AdvSIMD vector operation using
660  * an expander function.
661  */
662 static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
663                           int64_t imm, GVecGen2iFn *gvec_fn, int vece)
664 {
665     gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
666             imm, is_q ? 16 : 8, vec_full_reg_size(s));
667 }
668 
669 /* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
670 static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
671                          GVecGen3Fn *gvec_fn, int vece)
672 {
673     gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
674             vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
675 }
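
/*
 * Typical use (sketch of a caller, assuming rd/rn/rm and the element size
 * have already been decoded): a whole-vector integer ADD expands as
 *
 *     gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
 *
 * with the expander doing the per-element work for both the 64-bit and
 * 128-bit cases.
 */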
676 
677 /* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
678 static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
679                          int rx, GVecGen4Fn *gvec_fn, int vece)
680 {
681     gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
682             vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
683             is_q ? 16 : 8, vec_full_reg_size(s));
684 }
685 
686 /* Expand a 2-operand operation using an out-of-line helper.  */
687 static void gen_gvec_op2_ool(DisasContext *s, bool is_q, int rd,
688                              int rn, int data, gen_helper_gvec_2 *fn)
689 {
690     tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
691                        vec_full_reg_offset(s, rn),
692                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
693 }
694 
695 /* Expand a 3-operand operation using an out-of-line helper.  */
696 static void gen_gvec_op3_ool(DisasContext *s, bool is_q, int rd,
697                              int rn, int rm, int data, gen_helper_gvec_3 *fn)
698 {
699     tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
700                        vec_full_reg_offset(s, rn),
701                        vec_full_reg_offset(s, rm),
702                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
703 }
704 
705 /* Expand a 3-operand + fpstatus pointer + simd data value operation using
706  * an out-of-line helper.
707  */
708 static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
709                               int rm, bool is_fp16, int data,
710                               gen_helper_gvec_3_ptr *fn)
711 {
712     TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
713     tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
714                        vec_full_reg_offset(s, rn),
715                        vec_full_reg_offset(s, rm), fpst,
716                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
717 }
718 
719 /* Expand a 3-operand + qc + operation using an out-of-line helper.  */
720 static void gen_gvec_op3_qc(DisasContext *s, bool is_q, int rd, int rn,
721                             int rm, gen_helper_gvec_3_ptr *fn)
722 {
723     TCGv_ptr qc_ptr = tcg_temp_new_ptr();
724 
725     tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
726     tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
727                        vec_full_reg_offset(s, rn),
728                        vec_full_reg_offset(s, rm), qc_ptr,
729                        is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
730 }
731 
732 /* Expand a 4-operand operation using an out-of-line helper.  */
733 static void gen_gvec_op4_ool(DisasContext *s, bool is_q, int rd, int rn,
734                              int rm, int ra, int data, gen_helper_gvec_4 *fn)
735 {
736     tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
737                        vec_full_reg_offset(s, rn),
738                        vec_full_reg_offset(s, rm),
739                        vec_full_reg_offset(s, ra),
740                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
741 }
742 
743 /*
744  * Expand a 4-operand + fpstatus pointer + simd data value operation using
745  * an out-of-line helper.
746  */
747 static void gen_gvec_op4_fpst(DisasContext *s, bool is_q, int rd, int rn,
748                               int rm, int ra, bool is_fp16, int data,
749                               gen_helper_gvec_4_ptr *fn)
750 {
751     TCGv_ptr fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
752     tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
753                        vec_full_reg_offset(s, rn),
754                        vec_full_reg_offset(s, rm),
755                        vec_full_reg_offset(s, ra), fpst,
756                        is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
757 }
758 
759 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
760  * than the 32 bit equivalent.
761  */
762 static inline void gen_set_NZ64(TCGv_i64 result)
763 {
764     tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
765     tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
766 }
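
/*
 * The extract splits the result into cpu_ZF = result[31:0] and
 * cpu_NF = result[63:32]; OR-ing the halves into cpu_ZF makes it zero
 * exactly when the full 64-bit result is zero, while cpu_NF already has
 * the 64-bit sign in its bit 31, which is all the flag readers use.
 */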
767 
768 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
769 static inline void gen_logic_CC(int sf, TCGv_i64 result)
770 {
771     if (sf) {
772         gen_set_NZ64(result);
773     } else {
774         tcg_gen_extrl_i64_i32(cpu_ZF, result);
775         tcg_gen_mov_i32(cpu_NF, cpu_ZF);
776     }
777     tcg_gen_movi_i32(cpu_CF, 0);
778     tcg_gen_movi_i32(cpu_VF, 0);
779 }
780 
781 /* dest = T0 + T1; compute C, N, V and Z flags */
782 static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
783 {
784     TCGv_i64 result, flag, tmp;
785     result = tcg_temp_new_i64();
786     flag = tcg_temp_new_i64();
787     tmp = tcg_temp_new_i64();
788 
789     tcg_gen_movi_i64(tmp, 0);
790     tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
791 
792     tcg_gen_extrl_i64_i32(cpu_CF, flag);
793 
794     gen_set_NZ64(result);
795 
796     tcg_gen_xor_i64(flag, result, t0);
797     tcg_gen_xor_i64(tmp, t0, t1);
798     tcg_gen_andc_i64(flag, flag, tmp);
799     tcg_gen_extrh_i64_i32(cpu_VF, flag);
800 
801     tcg_gen_mov_i64(dest, result);
802 }
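
/*
 * The carry is the high word of the widening add2 (0 or 1 in cpu_CF).
 * Overflow uses the standard identity V = (result ^ t0) & ~(t0 ^ t1):
 * the operands had the same sign but the result's sign differs; taking
 * the high half of that 64-bit value puts bit 63 into bit 31 of cpu_VF,
 * where the V flag is expected.
 */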
803 
804 static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
805 {
806     TCGv_i32 t0_32 = tcg_temp_new_i32();
807     TCGv_i32 t1_32 = tcg_temp_new_i32();
808     TCGv_i32 tmp = tcg_temp_new_i32();
809 
810     tcg_gen_movi_i32(tmp, 0);
811     tcg_gen_extrl_i64_i32(t0_32, t0);
812     tcg_gen_extrl_i64_i32(t1_32, t1);
813     tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
814     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
815     tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
816     tcg_gen_xor_i32(tmp, t0_32, t1_32);
817     tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
818     tcg_gen_extu_i32_i64(dest, cpu_NF);
819 }
820 
821 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
822 {
823     if (sf) {
824         gen_add64_CC(dest, t0, t1);
825     } else {
826         gen_add32_CC(dest, t0, t1);
827     }
828 }
829 
830 /* dest = T0 - T1; compute C, N, V and Z flags */
831 static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
832 {
833     /* 64 bit arithmetic */
834     TCGv_i64 result, flag, tmp;
835 
836     result = tcg_temp_new_i64();
837     flag = tcg_temp_new_i64();
838     tcg_gen_sub_i64(result, t0, t1);
839 
840     gen_set_NZ64(result);
841 
842     tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
843     tcg_gen_extrl_i64_i32(cpu_CF, flag);
844 
845     tcg_gen_xor_i64(flag, result, t0);
846     tmp = tcg_temp_new_i64();
847     tcg_gen_xor_i64(tmp, t0, t1);
848     tcg_gen_and_i64(flag, flag, tmp);
849     tcg_gen_extrh_i64_i32(cpu_VF, flag);
850     tcg_gen_mov_i64(dest, result);
851 }
852 
853 static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
854 {
855     /* 32 bit arithmetic */
856     TCGv_i32 t0_32 = tcg_temp_new_i32();
857     TCGv_i32 t1_32 = tcg_temp_new_i32();
858     TCGv_i32 tmp;
859 
860     tcg_gen_extrl_i64_i32(t0_32, t0);
861     tcg_gen_extrl_i64_i32(t1_32, t1);
862     tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
863     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
864     tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
865     tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
866     tmp = tcg_temp_new_i32();
867     tcg_gen_xor_i32(tmp, t0_32, t1_32);
868     tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
869     tcg_gen_extu_i32_i64(dest, cpu_NF);
870 }
871 
872 static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
873 {
874     if (sf) {
875         gen_sub64_CC(dest, t0, t1);
876     } else {
877         gen_sub32_CC(dest, t0, t1);
878     }
879 }
880 
881 /* dest = T0 + T1 + CF; do not compute flags. */
882 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
883 {
884     TCGv_i64 flag = tcg_temp_new_i64();
885     tcg_gen_extu_i32_i64(flag, cpu_CF);
886     tcg_gen_add_i64(dest, t0, t1);
887     tcg_gen_add_i64(dest, dest, flag);
888 
889     if (!sf) {
890         tcg_gen_ext32u_i64(dest, dest);
891     }
892 }
893 
894 /* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
895 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
896 {
897     if (sf) {
898         TCGv_i64 result = tcg_temp_new_i64();
899         TCGv_i64 cf_64 = tcg_temp_new_i64();
900         TCGv_i64 vf_64 = tcg_temp_new_i64();
901         TCGv_i64 tmp = tcg_temp_new_i64();
902         TCGv_i64 zero = tcg_constant_i64(0);
903 
904         tcg_gen_extu_i32_i64(cf_64, cpu_CF);
905         tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
906         tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
907         tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
908         gen_set_NZ64(result);
909 
910         tcg_gen_xor_i64(vf_64, result, t0);
911         tcg_gen_xor_i64(tmp, t0, t1);
912         tcg_gen_andc_i64(vf_64, vf_64, tmp);
913         tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
914 
915         tcg_gen_mov_i64(dest, result);
916     } else {
917         TCGv_i32 t0_32 = tcg_temp_new_i32();
918         TCGv_i32 t1_32 = tcg_temp_new_i32();
919         TCGv_i32 tmp = tcg_temp_new_i32();
920         TCGv_i32 zero = tcg_constant_i32(0);
921 
922         tcg_gen_extrl_i64_i32(t0_32, t0);
923         tcg_gen_extrl_i64_i32(t1_32, t1);
924         tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
925         tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
926 
927         tcg_gen_mov_i32(cpu_ZF, cpu_NF);
928         tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
929         tcg_gen_xor_i32(tmp, t0_32, t1_32);
930         tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
931         tcg_gen_extu_i32_i64(dest, cpu_NF);
932     }
933 }
934 
935 /*
936  * Load/Store generators
937  */
938 
939 /*
940  * Store from GPR register to memory.
941  */
942 static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
943                              TCGv_i64 tcg_addr, MemOp memop, int memidx,
944                              bool iss_valid,
945                              unsigned int iss_srt,
946                              bool iss_sf, bool iss_ar)
947 {
948     tcg_gen_qemu_st_i64(source, tcg_addr, memidx, memop);
949 
950     if (iss_valid) {
951         uint32_t syn;
952 
953         syn = syn_data_abort_with_iss(0,
954                                       (memop & MO_SIZE),
955                                       false,
956                                       iss_srt,
957                                       iss_sf,
958                                       iss_ar,
959                                       0, 0, 0, 0, 0, false);
960         disas_set_insn_syndrome(s, syn);
961     }
962 }
963 
964 static void do_gpr_st(DisasContext *s, TCGv_i64 source,
965                       TCGv_i64 tcg_addr, MemOp memop,
966                       bool iss_valid,
967                       unsigned int iss_srt,
968                       bool iss_sf, bool iss_ar)
969 {
970     do_gpr_st_memidx(s, source, tcg_addr, memop, get_mem_index(s),
971                      iss_valid, iss_srt, iss_sf, iss_ar);
972 }
973 
974 /*
975  * Load from memory to GPR register
976  */
977 static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
978                              MemOp memop, bool extend, int memidx,
979                              bool iss_valid, unsigned int iss_srt,
980                              bool iss_sf, bool iss_ar)
981 {
982     tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
983 
984     if (extend && (memop & MO_SIGN)) {
985         g_assert((memop & MO_SIZE) <= MO_32);
986         tcg_gen_ext32u_i64(dest, dest);
987     }
988 
989     if (iss_valid) {
990         uint32_t syn;
991 
992         syn = syn_data_abort_with_iss(0,
993                                       (memop & MO_SIZE),
994                                       (memop & MO_SIGN) != 0,
995                                       iss_srt,
996                                       iss_sf,
997                                       iss_ar,
998                                       0, 0, 0, 0, 0, false);
999         disas_set_insn_syndrome(s, syn);
1000     }
1001 }
1002 
1003 static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
1004                       MemOp memop, bool extend,
1005                       bool iss_valid, unsigned int iss_srt,
1006                       bool iss_sf, bool iss_ar)
1007 {
1008     do_gpr_ld_memidx(s, dest, tcg_addr, memop, extend, get_mem_index(s),
1009                      iss_valid, iss_srt, iss_sf, iss_ar);
1010 }
1011 
1012 /*
1013  * Store from FP register to memory
1014  */
1015 static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, MemOp mop)
1016 {
1017     /* This writes the bottom N bits of a 128 bit wide vector to memory */
1018     TCGv_i64 tmplo = tcg_temp_new_i64();
1019 
1020     tcg_gen_ld_i64(tmplo, cpu_env, fp_reg_offset(s, srcidx, MO_64));
1021 
1022     if ((mop & MO_SIZE) < MO_128) {
1023         tcg_gen_qemu_st_i64(tmplo, tcg_addr, get_mem_index(s), mop);
1024     } else {
1025         TCGv_i64 tmphi = tcg_temp_new_i64();
1026         TCGv_i128 t16 = tcg_temp_new_i128();
1027 
1028         tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
1029         tcg_gen_concat_i64_i128(t16, tmplo, tmphi);
1030 
1031         tcg_gen_qemu_st_i128(t16, tcg_addr, get_mem_index(s), mop);
1032     }
1033 }
1034 
1035 /*
1036  * Load from memory to FP register
1037  */
1038 static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, MemOp mop)
1039 {
1040     /* This always zero-extends and writes to a full 128 bit wide vector */
1041     TCGv_i64 tmplo = tcg_temp_new_i64();
1042     TCGv_i64 tmphi = NULL;
1043 
1044     if ((mop & MO_SIZE) < MO_128) {
1045         tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), mop);
1046     } else {
1047         TCGv_i128 t16 = tcg_temp_new_i128();
1048 
1049         tcg_gen_qemu_ld_i128(t16, tcg_addr, get_mem_index(s), mop);
1050 
1051         tmphi = tcg_temp_new_i64();
1052         tcg_gen_extr_i128_i64(tmplo, tmphi, t16);
1053     }
1054 
1055     tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
1056 
1057     if (tmphi) {
1058         tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
1059     }
1060     clear_vec_high(s, tmphi != NULL, destidx);
1061 }
1062 
1063 /*
1064  * Vector load/store helpers.
1065  *
1066  * The principal difference between this and a FP load is that we don't
1067  * zero extend as we are filling a partial chunk of the vector register.
1068  * These functions don't support 128 bit loads/stores, which would be
1069  * normal load/store operations.
1070  *
1071  * The _i32 versions are useful when operating on 32 bit quantities
1072  * (eg for floating point single or using Neon helper functions).
1073  */
1074 
1075 /* Get value of an element within a vector register */
1076 static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
1077                              int element, MemOp memop)
1078 {
1079     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1080     switch ((unsigned)memop) {
1081     case MO_8:
1082         tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
1083         break;
1084     case MO_16:
1085         tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
1086         break;
1087     case MO_32:
1088         tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
1089         break;
1090     case MO_8|MO_SIGN:
1091         tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
1092         break;
1093     case MO_16|MO_SIGN:
1094         tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
1095         break;
1096     case MO_32|MO_SIGN:
1097         tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
1098         break;
1099     case MO_64:
1100     case MO_64|MO_SIGN:
1101         tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
1102         break;
1103     default:
1104         g_assert_not_reached();
1105     }
1106 }
1107 
1108 static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
1109                                  int element, MemOp memop)
1110 {
1111     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
1112     switch (memop) {
1113     case MO_8:
1114         tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
1115         break;
1116     case MO_16:
1117         tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
1118         break;
1119     case MO_8|MO_SIGN:
1120         tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
1121         break;
1122     case MO_16|MO_SIGN:
1123         tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
1124         break;
1125     case MO_32:
1126     case MO_32|MO_SIGN:
1127         tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
1128         break;
1129     default:
1130         g_assert_not_reached();
1131     }
1132 }
1133 
1134 /* Set value of an element within a vector register */
1135 static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
1136                               int element, MemOp memop)
1137 {
1138     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1139     switch (memop) {
1140     case MO_8:
1141         tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
1142         break;
1143     case MO_16:
1144         tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
1145         break;
1146     case MO_32:
1147         tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
1148         break;
1149     case MO_64:
1150         tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
1151         break;
1152     default:
1153         g_assert_not_reached();
1154     }
1155 }
1156 
1157 static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
1158                                   int destidx, int element, MemOp memop)
1159 {
1160     int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
1161     switch (memop) {
1162     case MO_8:
1163         tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
1164         break;
1165     case MO_16:
1166         tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
1167         break;
1168     case MO_32:
1169         tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
1170         break;
1171     default:
1172         g_assert_not_reached();
1173     }
1174 }
1175 
1176 /* Store from vector register to memory */
1177 static void do_vec_st(DisasContext *s, int srcidx, int element,
1178                       TCGv_i64 tcg_addr, MemOp mop)
1179 {
1180     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1181 
1182     read_vec_element(s, tcg_tmp, srcidx, element, mop & MO_SIZE);
1183     tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1184 }
1185 
1186 /* Load from memory to vector register */
1187 static void do_vec_ld(DisasContext *s, int destidx, int element,
1188                       TCGv_i64 tcg_addr, MemOp mop)
1189 {
1190     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
1191 
1192     tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), mop);
1193     write_vec_element(s, tcg_tmp, destidx, element, mop & MO_SIZE);
1194 }
1195 
1196 /* Check that FP/Neon access is enabled. If it is, return
1197  * true. If not, emit code to generate an appropriate exception,
1198  * and return false; the caller should not emit any code for
1199  * the instruction. Note that this check must happen after all
1200  * unallocated-encoding checks (otherwise the syndrome information
1201  * for the resulting exception will be incorrect).
1202  */
1203 static bool fp_access_check_only(DisasContext *s)
1204 {
1205     if (s->fp_excp_el) {
1206         assert(!s->fp_access_checked);
1207         s->fp_access_checked = true;
1208 
1209         gen_exception_insn_el(s, 0, EXCP_UDEF,
1210                               syn_fp_access_trap(1, 0xe, false, 0),
1211                               s->fp_excp_el);
1212         return false;
1213     }
1214     s->fp_access_checked = true;
1215     return true;
1216 }
1217 
1218 static bool fp_access_check(DisasContext *s)
1219 {
1220     if (!fp_access_check_only(s)) {
1221         return false;
1222     }
1223     if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
1224         gen_exception_insn(s, 0, EXCP_UDEF,
1225                            syn_smetrap(SME_ET_Streaming, false));
1226         return false;
1227     }
1228     return true;
1229 }
1230 
1231 /*
1232  * Check that SVE access is enabled.  If it is, return true.
1233  * If not, emit code to generate an appropriate exception and return false.
1234  * This function corresponds to CheckSVEEnabled().
1235  */
1236 bool sve_access_check(DisasContext *s)
1237 {
1238     if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
1239         assert(dc_isar_feature(aa64_sme, s));
1240         if (!sme_sm_enabled_check(s)) {
1241             goto fail_exit;
1242         }
1243     } else if (s->sve_excp_el) {
1244         gen_exception_insn_el(s, 0, EXCP_UDEF,
1245                               syn_sve_access_trap(), s->sve_excp_el);
1246         goto fail_exit;
1247     }
1248     s->sve_access_checked = true;
1249     return fp_access_check(s);
1250 
1251  fail_exit:
1252     /* Assert that we only raise one exception per instruction. */
1253     assert(!s->sve_access_checked);
1254     s->sve_access_checked = true;
1255     return false;
1256 }
1257 
1258 /*
1259  * Check that SME access is enabled, raise an exception if not.
1260  * Note that this function corresponds to CheckSMEAccess and is
1261  * only used directly for cpregs.
1262  */
1263 static bool sme_access_check(DisasContext *s)
1264 {
1265     if (s->sme_excp_el) {
1266         gen_exception_insn_el(s, 0, EXCP_UDEF,
1267                               syn_smetrap(SME_ET_AccessTrap, false),
1268                               s->sme_excp_el);
1269         return false;
1270     }
1271     return true;
1272 }
1273 
1274 /* This function corresponds to CheckSMEEnabled. */
1275 bool sme_enabled_check(DisasContext *s)
1276 {
1277     /*
1278      * Note that unlike sve_excp_el, we have not constrained sme_excp_el
1279      * to be zero when fp_excp_el has priority.  This is because we need
1280      * sme_excp_el by itself for cpregs access checks.
1281      */
1282     if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
1283         s->fp_access_checked = true;
1284         return sme_access_check(s);
1285     }
1286     return fp_access_check_only(s);
1287 }
1288 
1289 /* Common subroutine for CheckSMEAnd*Enabled. */
1290 bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req)
1291 {
1292     if (!sme_enabled_check(s)) {
1293         return false;
1294     }
1295     if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) {
1296         gen_exception_insn(s, 0, EXCP_UDEF,
1297                            syn_smetrap(SME_ET_NotStreaming, false));
1298         return false;
1299     }
1300     if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) {
1301         gen_exception_insn(s, 0, EXCP_UDEF,
1302                            syn_smetrap(SME_ET_InactiveZA, false));
1303         return false;
1304     }
1305     return true;
1306 }
1307 
1308 /*
1309  * This utility function is for doing register extension with an
1310  * optional shift. You will likely want to pass a temporary for the
1311  * destination register. See DecodeRegExtend() in the ARM ARM.
1312  */
1313 static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
1314                               int option, unsigned int shift)
1315 {
1316     int extsize = extract32(option, 0, 2);
1317     bool is_signed = extract32(option, 2, 1);
1318 
1319     if (is_signed) {
1320         switch (extsize) {
1321         case 0:
1322             tcg_gen_ext8s_i64(tcg_out, tcg_in);
1323             break;
1324         case 1:
1325             tcg_gen_ext16s_i64(tcg_out, tcg_in);
1326             break;
1327         case 2:
1328             tcg_gen_ext32s_i64(tcg_out, tcg_in);
1329             break;
1330         case 3:
1331             tcg_gen_mov_i64(tcg_out, tcg_in);
1332             break;
1333         }
1334     } else {
1335         switch (extsize) {
1336         case 0:
1337             tcg_gen_ext8u_i64(tcg_out, tcg_in);
1338             break;
1339         case 1:
1340             tcg_gen_ext16u_i64(tcg_out, tcg_in);
1341             break;
1342         case 2:
1343             tcg_gen_ext32u_i64(tcg_out, tcg_in);
1344             break;
1345         case 3:
1346             tcg_gen_mov_i64(tcg_out, tcg_in);
1347             break;
1348         }
1349     }
1350 
1351     if (shift) {
1352         tcg_gen_shli_i64(tcg_out, tcg_out, shift);
1353     }
1354 }
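
/*
 * For example, the operand "W1, UXTW #2" decodes as option = 0b010 and
 * shift = 2: the low 32 bits of X1 are zero-extended to 64 bits and then
 * shifted left by two, as used for extended-register add/sub operands and
 * scaled register-offset addressing.
 */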
1355 
1356 static inline void gen_check_sp_alignment(DisasContext *s)
1357 {
1358     /* The AArch64 architecture mandates that (if enabled via PSTATE
1359      * or SCTLR bits) there is a check that SP is 16-aligned on every
1360      * SP-relative load or store (with an exception generated if it is not).
1361      * In line with general QEMU practice regarding misaligned accesses,
1362      * we omit these checks for the sake of guest program performance.
1363      * This function is provided as a hook so we can more easily add these
1364      * checks in future (possibly as a "favour catching guest program bugs
1365      * over speed" user selectable option).
1366      */
1367 }
1368 
1369 /*
1370  * This provides a simple table based table lookup decoder. It is
1371  * intended to be used when the relevant bits for decode are too
1372  * awkwardly placed and switch/if based logic would be confusing and
1373  * deeply nested. Since it's a linear search through the table, tables
1374  * should be kept small.
1375  *
1376  * It returns the first handler where insn & mask == pattern, or
1377  * NULL if there is no match.
1378  * The table is terminated by an empty mask (i.e. 0)
1379  */
1380 static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
1381                                                uint32_t insn)
1382 {
1383     const AArch64DecodeTable *tptr = table;
1384 
1385     while (tptr->mask) {
1386         if ((insn & tptr->mask) == tptr->pattern) {
1387             return tptr->disas_fn;
1388         }
1389         tptr++;
1390     }
1391     return NULL;
1392 }
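
/*
 * Sketch of how such a table is declared and consulted (disas_foo and
 * disas_bar are hypothetical handlers, shown only for shape):
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_foo },
 *         { 0x0e300400, 0x9f300400, disas_bar },
 *         { 0x00000000, 0x00000000, NULL }   <- terminator: mask == 0
 *     };
 *
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 */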
1393 
1394 /*
1395  * The instruction disassembly implemented here matches
1396  * the instruction encoding classifications in chapter C4
1397  * of the ARM Architecture Reference Manual (DDI0487B_a);
1398  * classification names and decode diagrams here should generally
1399  * match up with those in the manual.
1400  */
1401 
1402 static bool trans_B(DisasContext *s, arg_i *a)
1403 {
1404     reset_btype(s);
1405     gen_goto_tb(s, 0, a->imm);
1406     return true;
1407 }
1408 
1409 static bool trans_BL(DisasContext *s, arg_i *a)
1410 {
1411     gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s));
1412     reset_btype(s);
1413     gen_goto_tb(s, 0, a->imm);
1414     return true;
1415 }
1416 
1417 
1418 static bool trans_CBZ(DisasContext *s, arg_cbz *a)
1419 {
1420     DisasLabel match;
1421     TCGv_i64 tcg_cmp;
1422 
1423     tcg_cmp = read_cpu_reg(s, a->rt, a->sf);
1424     reset_btype(s);
1425 
1426     match = gen_disas_label(s);
1427     tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1428                         tcg_cmp, 0, match.label);
1429     gen_goto_tb(s, 0, 4);
1430     set_disas_label(s, match);
1431     gen_goto_tb(s, 1, a->imm);
1432     return true;
1433 }
1434 
1435 static bool trans_TBZ(DisasContext *s, arg_tbz *a)
1436 {
1437     DisasLabel match;
1438     TCGv_i64 tcg_cmp;
1439 
1440     tcg_cmp = tcg_temp_new_i64();
1441     tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, a->rt), 1ULL << a->bitpos);
1442 
1443     reset_btype(s);
1444 
1445     match = gen_disas_label(s);
1446     tcg_gen_brcondi_i64(a->nz ? TCG_COND_NE : TCG_COND_EQ,
1447                         tcg_cmp, 0, match.label);
1448     gen_goto_tb(s, 0, 4);
1449     set_disas_label(s, match);
1450     gen_goto_tb(s, 1, a->imm);
1451     return true;
1452 }
1453 
1454 static bool trans_B_cond(DisasContext *s, arg_B_cond *a)
1455 {
1456     reset_btype(s);
1457     if (a->cond < 0x0e) {
1458         /* genuinely conditional branches */
1459         DisasLabel match = gen_disas_label(s);
1460         arm_gen_test_cc(a->cond, match.label);
1461         gen_goto_tb(s, 0, 4);
1462         set_disas_label(s, match);
1463         gen_goto_tb(s, 1, a->imm);
1464     } else {
1465         /* 0xe and 0xf are both "always" conditions */
1466         gen_goto_tb(s, 0, a->imm);
1467     }
1468     return true;
1469 }
1470 
1471 static void set_btype_for_br(DisasContext *s, int rn)
1472 {
1473     if (dc_isar_feature(aa64_bti, s)) {
1474         /* BR to {x16,x17} or !guard -> 1, else 3.  */
1475         set_btype(s, rn == 16 || rn == 17 || !s->guarded_page ? 1 : 3);
1476     }
1477 }
1478 
1479 static void set_btype_for_blr(DisasContext *s)
1480 {
1481     if (dc_isar_feature(aa64_bti, s)) {
1482         /* BLR sets BTYPE to 2, regardless of source guarded page.  */
1483         set_btype(s, 2);
1484     }
1485 }
1486 
1487 static bool trans_BR(DisasContext *s, arg_r *a)
1488 {
1489     gen_a64_set_pc(s, cpu_reg(s, a->rn));
1490     set_btype_for_br(s, a->rn);
1491     s->base.is_jmp = DISAS_JUMP;
1492     return true;
1493 }
1494 
1495 static bool trans_BLR(DisasContext *s, arg_r *a)
1496 {
1497     TCGv_i64 dst = cpu_reg(s, a->rn);
1498     TCGv_i64 lr = cpu_reg(s, 30);
1499     if (dst == lr) {
1500         TCGv_i64 tmp = tcg_temp_new_i64();
1501         tcg_gen_mov_i64(tmp, dst);
1502         dst = tmp;
1503     }
1504     gen_pc_plus_diff(s, lr, curr_insn_len(s));
1505     gen_a64_set_pc(s, dst);
1506     set_btype_for_blr(s);
1507     s->base.is_jmp = DISAS_JUMP;
1508     return true;
1509 }
1510 
1511 static bool trans_RET(DisasContext *s, arg_r *a)
1512 {
1513     gen_a64_set_pc(s, cpu_reg(s, a->rn));
1514     s->base.is_jmp = DISAS_JUMP;
1515     return true;
1516 }
1517 
1518 static TCGv_i64 auth_branch_target(DisasContext *s, TCGv_i64 dst,
1519                                    TCGv_i64 modifier, bool use_key_a)
1520 {
1521     TCGv_i64 truedst;
1522     /*
1523      * Return the branch target for a BRAA/RETA/etc, which is either
1524      * just the destination dst, or that value with the pauth check
1525      * done and the code removed from the high bits.
1526      */
1527     if (!s->pauth_active) {
1528         return dst;
1529     }
1530 
1531     truedst = tcg_temp_new_i64();
1532     if (use_key_a) {
1533         gen_helper_autia(truedst, cpu_env, dst, modifier);
1534     } else {
1535         gen_helper_autib(truedst, cpu_env, dst, modifier);
1536     }
1537     return truedst;
1538 }
1539 
1540 static bool trans_BRAZ(DisasContext *s, arg_braz *a)
1541 {
1542     TCGv_i64 dst;
1543 
1544     if (!dc_isar_feature(aa64_pauth, s)) {
1545         return false;
1546     }
1547 
1548     dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1549     gen_a64_set_pc(s, dst);
1550     set_btype_for_br(s, a->rn);
1551     s->base.is_jmp = DISAS_JUMP;
1552     return true;
1553 }
1554 
1555 static bool trans_BLRAZ(DisasContext *s, arg_braz *a)
1556 {
1557     TCGv_i64 dst, lr;
1558 
1559     if (!dc_isar_feature(aa64_pauth, s)) {
1560         return false;
1561     }
1562 
1563     dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m);
1564     lr = cpu_reg(s, 30);
1565     if (dst == lr) {
1566         TCGv_i64 tmp = tcg_temp_new_i64();
1567         tcg_gen_mov_i64(tmp, dst);
1568         dst = tmp;
1569     }
1570     gen_pc_plus_diff(s, lr, curr_insn_len(s));
1571     gen_a64_set_pc(s, dst);
1572     set_btype_for_blr(s);
1573     s->base.is_jmp = DISAS_JUMP;
1574     return true;
1575 }
1576 
1577 static bool trans_RETA(DisasContext *s, arg_reta *a)
1578 {
1579     TCGv_i64 dst;
1580 
1581     dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
1582     gen_a64_set_pc(s, dst);
1583     s->base.is_jmp = DISAS_JUMP;
1584     return true;
1585 }
1586 
1587 static bool trans_BRA(DisasContext *s, arg_bra *a)
1588 {
1589     TCGv_i64 dst;
1590 
1591     if (!dc_isar_feature(aa64_pauth, s)) {
1592         return false;
1593     }
1594     dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1595     gen_a64_set_pc(s, dst);
1596     set_btype_for_br(s, a->rn);
1597     s->base.is_jmp = DISAS_JUMP;
1598     return true;
1599 }
1600 
1601 static bool trans_BLRA(DisasContext *s, arg_bra *a)
1602 {
1603     TCGv_i64 dst, lr;
1604 
1605     if (!dc_isar_feature(aa64_pauth, s)) {
1606         return false;
1607     }
1608     dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m);
1609     lr = cpu_reg(s, 30);
1610     if (dst == lr) {
1611         TCGv_i64 tmp = tcg_temp_new_i64();
1612         tcg_gen_mov_i64(tmp, dst);
1613         dst = tmp;
1614     }
1615     gen_pc_plus_diff(s, lr, curr_insn_len(s));
1616     gen_a64_set_pc(s, dst);
1617     set_btype_for_blr(s);
1618     s->base.is_jmp = DISAS_JUMP;
1619     return true;
1620 }
1621 
1622 static bool trans_ERET(DisasContext *s, arg_ERET *a)
1623 {
1624     TCGv_i64 dst;
1625 
1626     if (s->current_el == 0) {
1627         return false;
1628     }
1629     if (s->fgt_eret) {
1630         gen_exception_insn_el(s, 0, EXCP_UDEF, 0, 2);
1631         return true;
1632     }
1633     dst = tcg_temp_new_i64();
1634     tcg_gen_ld_i64(dst, cpu_env,
1635                    offsetof(CPUARMState, elr_el[s->current_el]));
1636 
1637     translator_io_start(&s->base);
1638 
1639     gen_helper_exception_return(cpu_env, dst);
1640     /* Must exit loop to check unmasked IRQs */
1641     s->base.is_jmp = DISAS_EXIT;
1642     return true;
1643 }
1644 
1645 static bool trans_ERETA(DisasContext *s, arg_reta *a)
1646 {
1647     TCGv_i64 dst;
1648 
1649     if (!dc_isar_feature(aa64_pauth, s)) {
1650         return false;
1651     }
1652     if (s->current_el == 0) {
1653         return false;
1654     }
1655     /* The FGT trap takes precedence over an auth trap. */
1656     if (s->fgt_eret) {
1657         gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 3 : 2), 2);
1658         return true;
1659     }
1660     dst = tcg_temp_new_i64();
1661     tcg_gen_ld_i64(dst, cpu_env,
1662                    offsetof(CPUARMState, elr_el[s->current_el]));
1663 
1664     dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
1665 
1666     translator_io_start(&s->base);
1667 
1668     gen_helper_exception_return(cpu_env, dst);
1669     /* Must exit loop to check unmasked IRQs */
1670     s->base.is_jmp = DISAS_EXIT;
1671     return true;
1672 }
1673 
1674 static bool trans_NOP(DisasContext *s, arg_NOP *a)
1675 {
1676     return true;
1677 }
1678 
1679 static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
1680 {
1681     /*
1682      * When running in MTTCG we don't generate jumps to the yield and
1683      * WFE helpers as it won't affect the scheduling of other vCPUs.
1684      * If we wanted to more completely model WFE/SEV so we don't busy
1685      * spin unnecessarily we would need to do something more involved.
1686      */
1687     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1688         s->base.is_jmp = DISAS_YIELD;
1689     }
1690     return true;
1691 }
1692 
1693 static bool trans_WFI(DisasContext *s, arg_WFI *a)
1694 {
1695     s->base.is_jmp = DISAS_WFI;
1696     return true;
1697 }
1698 
1699 static bool trans_WFE(DisasContext *s, arg_WFI *a)
1700 {
1701     /*
1702      * When running in MTTCG we don't generate jumps to the yield and
1703      * WFE helpers as it won't affect the scheduling of other vCPUs.
1704      * If we wanted to more completely model WFE/SEV so we don't busy
1705      * spin unnecessarily we would need to do something more involved.
1706      */
1707     if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
1708         s->base.is_jmp = DISAS_WFE;
1709     }
1710     return true;
1711 }
1712 
1713 static bool trans_XPACLRI(DisasContext *s, arg_XPACLRI *a)
1714 {
1715     if (s->pauth_active) {
1716         gen_helper_xpaci(cpu_X[30], cpu_env, cpu_X[30]);
1717     }
1718     return true;
1719 }
1720 
1721 static bool trans_PACIA1716(DisasContext *s, arg_PACIA1716 *a)
1722 {
1723     if (s->pauth_active) {
1724         gen_helper_pacia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1725     }
1726     return true;
1727 }
1728 
1729 static bool trans_PACIB1716(DisasContext *s, arg_PACIB1716 *a)
1730 {
1731     if (s->pauth_active) {
1732         gen_helper_pacib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1733     }
1734     return true;
1735 }
1736 
1737 static bool trans_AUTIA1716(DisasContext *s, arg_AUTIA1716 *a)
1738 {
1739     if (s->pauth_active) {
1740         gen_helper_autia(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1741     }
1742     return true;
1743 }
1744 
1745 static bool trans_AUTIB1716(DisasContext *s, arg_AUTIB1716 *a)
1746 {
1747     if (s->pauth_active) {
1748         gen_helper_autib(cpu_X[17], cpu_env, cpu_X[17], cpu_X[16]);
1749     }
1750     return true;
1751 }
1752 
1753 static bool trans_ESB(DisasContext *s, arg_ESB *a)
1754 {
1755     /* Without RAS, we must implement this as a NOP. */
1756     if (dc_isar_feature(aa64_ras, s)) {
1757         /*
1758          * QEMU does not have a source of physical SErrors,
1759          * so we are only concerned with virtual SErrors.
1760          * The pseudocode in the ARM for this case is
1761          *   if PSTATE.EL IN {EL0, EL1} && EL2Enabled() then
1762          *      AArch64.vESBOperation();
1763          * Most of the condition can be evaluated at translation time.
1764          * Test for EL2 present, and defer test for SEL2 to runtime.
1765          */
1766         if (s->current_el <= 1 && arm_dc_feature(s, ARM_FEATURE_EL2)) {
1767             gen_helper_vesb(cpu_env);
1768         }
1769     }
1770     return true;
1771 }
1772 
1773 static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a)
1774 {
1775     if (s->pauth_active) {
1776         gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
1777     }
1778     return true;
1779 }
1780 
1781 static bool trans_PACIASP(DisasContext *s, arg_PACIASP *a)
1782 {
1783     if (s->pauth_active) {
1784         gen_helper_pacia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1785     }
1786     return true;
1787 }
1788 
1789 static bool trans_PACIBZ(DisasContext *s, arg_PACIBZ *a)
1790 {
1791     if (s->pauth_active) {
1792         gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
1793     }
1794     return true;
1795 }
1796 
1797 static bool trans_PACIBSP(DisasContext *s, arg_PACIBSP *a)
1798 {
1799     if (s->pauth_active) {
1800         gen_helper_pacib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1801     }
1802     return true;
1803 }
1804 
1805 static bool trans_AUTIAZ(DisasContext *s, arg_AUTIAZ *a)
1806 {
1807     if (s->pauth_active) {
1808         gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
1809     }
1810     return true;
1811 }
1812 
1813 static bool trans_AUTIASP(DisasContext *s, arg_AUTIASP *a)
1814 {
1815     if (s->pauth_active) {
1816         gen_helper_autia(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1817     }
1818     return true;
1819 }
1820 
1821 static bool trans_AUTIBZ(DisasContext *s, arg_AUTIBZ *a)
1822 {
1823     if (s->pauth_active) {
1824         gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], tcg_constant_i64(0));
1825     }
1826     return true;
1827 }
1828 
1829 static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a)
1830 {
1831     if (s->pauth_active) {
1832         gen_helper_autib(cpu_X[30], cpu_env, cpu_X[30], cpu_X[31]);
1833     }
1834     return true;
1835 }
1836 
1837 static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
1838 {
1839     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
1840     return true;
1841 }
1842 
1843 static bool trans_DSB_DMB(DisasContext *s, arg_DSB_DMB *a)
1844 {
1845     /* We handle DSB and DMB the same way */
1846     TCGBar bar;
1847 
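    /*
     * a->types comes from the barrier option field: 1 orders only prior
     * loads (against later loads and stores, hence LD_LD | LD_ST),
     * 2 orders only prior stores against later stores, and anything
     * else is treated as a full barrier.
     */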
1848     switch (a->types) {
1849     case 1: /* MBReqTypes_Reads */
1850         bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
1851         break;
1852     case 2: /* MBReqTypes_Writes */
1853         bar = TCG_BAR_SC | TCG_MO_ST_ST;
1854         break;
1855     default: /* MBReqTypes_All */
1856         bar = TCG_BAR_SC | TCG_MO_ALL;
1857         break;
1858     }
1859     tcg_gen_mb(bar);
1860     return true;
1861 }
1862 
1863 static bool trans_ISB(DisasContext *s, arg_ISB *a)
1864 {
1865     /*
1866      * We need to break the TB after this insn to execute
1867      * self-modifying code correctly and also to take
1868      * any pending interrupts immediately.
1869      */
1870     reset_btype(s);
1871     gen_goto_tb(s, 0, 4);
1872     return true;
1873 }
1874 
1875 static bool trans_SB(DisasContext *s, arg_SB *a)
1876 {
1877     if (!dc_isar_feature(aa64_sb, s)) {
1878         return false;
1879     }
1880     /*
1881      * TODO: There is no speculation barrier opcode for TCG;
1882      * MB and end the TB instead.
1883      */
1884     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1885     gen_goto_tb(s, 0, 4);
1886     return true;
1887 }
1888 
1889 static bool trans_CFINV(DisasContext *s, arg_CFINV *a)
1890 {
1891     if (!dc_isar_feature(aa64_condm_4, s)) {
1892         return false;
1893     }
1894     tcg_gen_xori_i32(cpu_CF, cpu_CF, 1);
1895     return true;
1896 }
1897 
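/*
 * FEAT_FlagM2: XAFLAG converts the flags from the "external" format, in
 * which only C and Z carry information, back into the Arm NZCV format;
 * AXFLAG below performs the opposite conversion.  Only bit 31 of NF/VF
 * and the zero-ness of ZF are significant in QEMU's flag representation,
 * which is what makes the branch-free bit manipulation below work.
 */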
1898 static bool trans_XAFLAG(DisasContext *s, arg_XAFLAG *a)
1899 {
1900     TCGv_i32 z;
1901 
1902     if (!dc_isar_feature(aa64_condm_5, s)) {
1903         return false;
1904     }
1905 
1906     z = tcg_temp_new_i32();
1907 
1908     tcg_gen_setcondi_i32(TCG_COND_EQ, z, cpu_ZF, 0);
1909 
1910     /*
1911      * (!C & !Z) << 31
1912      * (!(C | Z)) << 31
1913      * ~((C | Z) << 31)
1914      * ~-(C | Z)
1915      * (C | Z) - 1
1916      */
1917     tcg_gen_or_i32(cpu_NF, cpu_CF, z);
1918     tcg_gen_subi_i32(cpu_NF, cpu_NF, 1);
1919 
1920     /* !(Z & C) */
1921     tcg_gen_and_i32(cpu_ZF, z, cpu_CF);
1922     tcg_gen_xori_i32(cpu_ZF, cpu_ZF, 1);
1923 
1924     /* (!C & Z) << 31 -> -(Z & ~C) */
1925     tcg_gen_andc_i32(cpu_VF, z, cpu_CF);
1926     tcg_gen_neg_i32(cpu_VF, cpu_VF);
1927 
1928     /* C | Z */
1929     tcg_gen_or_i32(cpu_CF, cpu_CF, z);
1930 
1931     return true;
1932 }
1933 
1934 static bool trans_AXFLAG(DisasContext *s, arg_AXFLAG *a)
1935 {
1936     if (!dc_isar_feature(aa64_condm_5, s)) {
1937         return false;
1938     }
1939 
1940     tcg_gen_sari_i32(cpu_VF, cpu_VF, 31);         /* V ? -1 : 0 */
1941     tcg_gen_andc_i32(cpu_CF, cpu_CF, cpu_VF);     /* C & !V */
1942 
1943     /* !(Z | V) -> !(!ZF | V) -> ZF & !V -> ZF & ~VF */
1944     tcg_gen_andc_i32(cpu_ZF, cpu_ZF, cpu_VF);
1945 
1946     tcg_gen_movi_i32(cpu_NF, 0);
1947     tcg_gen_movi_i32(cpu_VF, 0);
1948 
1949     return true;
1950 }
1951 
1952 static bool trans_MSR_i_UAO(DisasContext *s, arg_i *a)
1953 {
1954     if (!dc_isar_feature(aa64_uao, s) || s->current_el == 0) {
1955         return false;
1956     }
1957     if (a->imm & 1) {
1958         set_pstate_bits(PSTATE_UAO);
1959     } else {
1960         clear_pstate_bits(PSTATE_UAO);
1961     }
1962     gen_rebuild_hflags(s);
1963     s->base.is_jmp = DISAS_TOO_MANY;
1964     return true;
1965 }
1966 
1967 static bool trans_MSR_i_PAN(DisasContext *s, arg_i *a)
1968 {
1969     if (!dc_isar_feature(aa64_pan, s) || s->current_el == 0) {
1970         return false;
1971     }
1972     if (a->imm & 1) {
1973         set_pstate_bits(PSTATE_PAN);
1974     } else {
1975         clear_pstate_bits(PSTATE_PAN);
1976     }
1977     gen_rebuild_hflags(s);
1978     s->base.is_jmp = DISAS_TOO_MANY;
1979     return true;
1980 }
1981 
1982 static bool trans_MSR_i_SPSEL(DisasContext *s, arg_i *a)
1983 {
1984     if (s->current_el == 0) {
1985         return false;
1986     }
1987     gen_helper_msr_i_spsel(cpu_env, tcg_constant_i32(a->imm & PSTATE_SP));
1988     s->base.is_jmp = DISAS_TOO_MANY;
1989     return true;
1990 }
1991 
1992 static bool trans_MSR_i_SBSS(DisasContext *s, arg_i *a)
1993 {
1994     if (!dc_isar_feature(aa64_ssbs, s)) {
1995         return false;
1996     }
1997     if (a->imm & 1) {
1998         set_pstate_bits(PSTATE_SSBS);
1999     } else {
2000         clear_pstate_bits(PSTATE_SSBS);
2001     }
2002     /* Don't need to rebuild hflags since SSBS is a nop */
2003     s->base.is_jmp = DISAS_TOO_MANY;
2004     return true;
2005 }
2006 
2007 static bool trans_MSR_i_DIT(DisasContext *s, arg_i *a)
2008 {
2009     if (!dc_isar_feature(aa64_dit, s)) {
2010         return false;
2011     }
2012     if (a->imm & 1) {
2013         set_pstate_bits(PSTATE_DIT);
2014     } else {
2015         clear_pstate_bits(PSTATE_DIT);
2016     }
2017     /* There's no need to rebuild hflags because DIT is a nop */
2018     s->base.is_jmp = DISAS_TOO_MANY;
2019     return true;
2020 }
2021 
2022 static bool trans_MSR_i_TCO(DisasContext *s, arg_i *a)
2023 {
2024     if (dc_isar_feature(aa64_mte, s)) {
2025         /* Full MTE is enabled -- set the TCO bit as directed. */
2026         if (a->imm & 1) {
2027             set_pstate_bits(PSTATE_TCO);
2028         } else {
2029             clear_pstate_bits(PSTATE_TCO);
2030         }
2031         gen_rebuild_hflags(s);
2032         /* Many factors, including TCO, go into MTE_ACTIVE. */
2033         s->base.is_jmp = DISAS_UPDATE_NOCHAIN;
2034         return true;
2035     } else if (dc_isar_feature(aa64_mte_insn_reg, s)) {
2036         /* Only "instructions accessible at EL0" -- PSTATE.TCO is WI.  */
2037         return true;
2038     } else {
2039         /* Insn not present */
2040         return false;
2041     }
2042 }
2043 
2044 static bool trans_MSR_i_DAIFSET(DisasContext *s, arg_i *a)
2045 {
2046     gen_helper_msr_i_daifset(cpu_env, tcg_constant_i32(a->imm));
2047     s->base.is_jmp = DISAS_TOO_MANY;
2048     return true;
2049 }
2050 
2051 static bool trans_MSR_i_DAIFCLEAR(DisasContext *s, arg_i *a)
2052 {
2053     gen_helper_msr_i_daifclear(cpu_env, tcg_constant_i32(a->imm));
2054     /* Exit the cpu loop to re-evaluate pending IRQs. */
2055     s->base.is_jmp = DISAS_UPDATE_EXIT;
2056     return true;
2057 }
2058 
2059 static bool trans_MSR_i_SVCR(DisasContext *s, arg_MSR_i_SVCR *a)
2060 {
2061     if (!dc_isar_feature(aa64_sme, s) || a->mask == 0) {
2062         return false;
2063     }
2064     if (sme_access_check(s)) {
2065         int old = s->pstate_sm | (s->pstate_za << 1);
2066         int new = a->imm * 3;
2067 
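        /*
         * a->imm is a single 0/1 value; multiplying by 3 replicates it
         * into both the SM and ZA bit positions, while a->mask selects
         * which of those bits this particular encoding actually writes.
         */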
2068         if ((old ^ new) & a->mask) {
2069             /* At least one bit changes. */
2070             gen_helper_set_svcr(cpu_env, tcg_constant_i32(new),
2071                                 tcg_constant_i32(a->mask));
2072             s->base.is_jmp = DISAS_TOO_MANY;
2073         }
2074     }
2075     return true;
2076 }
2077 
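/*
 * Assemble the architectural NZCV value (bits 31..28 of the result of
 * MRS NZCV) from QEMU's split NF/ZF/CF/VF representation; gen_set_nzcv
 * below scatters a written value back out into the individual fields.
 */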
2078 static void gen_get_nzcv(TCGv_i64 tcg_rt)
2079 {
2080     TCGv_i32 tmp = tcg_temp_new_i32();
2081     TCGv_i32 nzcv = tcg_temp_new_i32();
2082 
2083     /* build bit 31, N */
2084     tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
2085     /* build bit 30, Z */
2086     tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
2087     tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
2088     /* build bit 29, C */
2089     tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
2090     /* build bit 28, V */
2091     tcg_gen_shri_i32(tmp, cpu_VF, 31);
2092     tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
2093     /* generate result */
2094     tcg_gen_extu_i32_i64(tcg_rt, nzcv);
2095 }
2096 
2097 static void gen_set_nzcv(TCGv_i64 tcg_rt)
2098 {
2099     TCGv_i32 nzcv = tcg_temp_new_i32();
2100 
2101     /* take NZCV from R[t] */
2102     tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
2103 
2104     /* bit 31, N */
2105     tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
2106     /* bit 30, Z */
2107     tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
2108     tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
2109     /* bit 29, C */
2110     tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
2111     tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
2112     /* bit 28, V */
2113     tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
2114     tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
2115 }
2116 
2117 static void gen_sysreg_undef(DisasContext *s, bool isread,
2118                              uint8_t op0, uint8_t op1, uint8_t op2,
2119                              uint8_t crn, uint8_t crm, uint8_t rt)
2120 {
2121     /*
2122      * Generate code to emit an UNDEF with correct syndrome
2123      * information for a failed system register access.
2124      * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
2125      * but if FEAT_IDST is implemented then read accesses to registers
2126      * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
2127      * syndrome.
2128      */
2129     uint32_t syndrome;
2130 
2131     if (isread && dc_isar_feature(aa64_ids, s) &&
2132         arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
2133         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2134     } else {
2135         syndrome = syn_uncategorized();
2136     }
2137     gen_exception_insn(s, 0, EXCP_UDEF, syndrome);
2138 }
2139 
2140 /* MRS - move from system register
2141  * MSR (register) - move to system register
2142  * SYS
2143  * SYSL
2144  * These are all essentially the same insn in 'read' and 'write'
2145  * versions, with varying op0 fields.
2146  */
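/*
 * The flow is: look the register up by its encoding, apply the static
 * and (where needed) runtime access checks, handle the
 * ARM_CP_SPECIAL_MASK cases inline, and otherwise move the value
 * between Xt and the register via fieldoffset or the read/write
 * helpers, exiting the TB when the access may have changed state that
 * translation depends on.
 */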
2147 static void handle_sys(DisasContext *s, bool isread,
2148                        unsigned int op0, unsigned int op1, unsigned int op2,
2149                        unsigned int crn, unsigned int crm, unsigned int rt)
2150 {
2151     uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
2152                                       crn, crm, op0, op1, op2);
2153     const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
2154     bool need_exit_tb = false;
2155     TCGv_ptr tcg_ri = NULL;
2156     TCGv_i64 tcg_rt;
2157 
2158     if (!ri) {
2159         /* Unknown register; this might be a guest error or a QEMU
2160          * unimplemented feature.
2161          */
2162         qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
2163                       "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
2164                       isread ? "read" : "write", op0, op1, crn, crm, op2);
2165         gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2166         return;
2167     }
2168 
2169     /* Check access permissions */
2170     if (!cp_access_ok(s->current_el, ri, isread)) {
2171         gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
2172         return;
2173     }
2174 
2175     if (ri->accessfn || (ri->fgt && s->fgt_active)) {
2176         /* Emit code to perform further access permissions checks at
2177          * runtime; this may result in an exception.
2178          */
2179         uint32_t syndrome;
2180 
2181         syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
2182         gen_a64_update_pc(s, 0);
2183         tcg_ri = tcg_temp_new_ptr();
2184         gen_helper_access_check_cp_reg(tcg_ri, cpu_env,
2185                                        tcg_constant_i32(key),
2186                                        tcg_constant_i32(syndrome),
2187                                        tcg_constant_i32(isread));
2188     } else if (ri->type & ARM_CP_RAISES_EXC) {
2189         /*
2190          * The readfn or writefn might raise an exception;
2191          * synchronize the CPU state in case it does.
2192          */
2193         gen_a64_update_pc(s, 0);
2194     }
2195 
2196     /* Handle special cases first */
2197     switch (ri->type & ARM_CP_SPECIAL_MASK) {
2198     case 0:
2199         break;
2200     case ARM_CP_NOP:
2201         return;
2202     case ARM_CP_NZCV:
2203         tcg_rt = cpu_reg(s, rt);
2204         if (isread) {
2205             gen_get_nzcv(tcg_rt);
2206         } else {
2207             gen_set_nzcv(tcg_rt);
2208         }
2209         return;
2210     case ARM_CP_CURRENTEL:
2211         /* Reads as current EL value from pstate, which is
2212          * guaranteed to be constant by the tb flags.
2213          */
2214         tcg_rt = cpu_reg(s, rt);
2215         tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
2216         return;
2217     case ARM_CP_DC_ZVA:
2218         /* Writes clear the aligned block of memory which rt points into. */
2219         if (s->mte_active[0]) {
2220             int desc = 0;
2221 
2222             desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
2223             desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
2224             desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
2225 
2226             tcg_rt = tcg_temp_new_i64();
2227             gen_helper_mte_check_zva(tcg_rt, cpu_env,
2228                                      tcg_constant_i32(desc), cpu_reg(s, rt));
2229         } else {
2230             tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
2231         }
2232         gen_helper_dc_zva(cpu_env, tcg_rt);
2233         return;
2234     case ARM_CP_DC_GVA:
2235         {
2236             TCGv_i64 clean_addr, tag;
2237 
2238             /*
2239              * DC_GVA, like DC_ZVA, requires that we supply the original
2240              * pointer for an invalid page.  Probe that address first.
2241              */
2242             tcg_rt = cpu_reg(s, rt);
2243             clean_addr = clean_data_tbi(s, tcg_rt);
2244             gen_probe_access(s, clean_addr, MMU_DATA_STORE, MO_8);
2245 
2246             if (s->ata) {
2247                 /* Extract the tag from the register to match STZGM.  */
2248                 tag = tcg_temp_new_i64();
2249                 tcg_gen_shri_i64(tag, tcg_rt, 56);
2250                 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2251             }
2252         }
2253         return;
2254     case ARM_CP_DC_GZVA:
2255         {
2256             TCGv_i64 clean_addr, tag;
2257 
2258             /* For DC_GZVA, we can rely on DC_ZVA for the proper fault. */
2259             tcg_rt = cpu_reg(s, rt);
2260             clean_addr = clean_data_tbi(s, tcg_rt);
2261             gen_helper_dc_zva(cpu_env, clean_addr);
2262 
2263             if (s->ata) {
2264                 /* Extract the tag from the register to match STZGM.  */
2265                 tag = tcg_temp_new_i64();
2266                 tcg_gen_shri_i64(tag, tcg_rt, 56);
2267                 gen_helper_stzgm_tags(cpu_env, clean_addr, tag);
2268             }
2269         }
2270         return;
2271     default:
2272         g_assert_not_reached();
2273     }
2274     if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) {
2275         return;
2276     } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
2277         return;
2278     } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) {
2279         return;
2280     }
2281 
2282     if (ri->type & ARM_CP_IO) {
2283         /* I/O operations must end the TB here (whether read or write) */
2284         need_exit_tb = translator_io_start(&s->base);
2285     }
2286 
2287     tcg_rt = cpu_reg(s, rt);
2288 
2289     if (isread) {
2290         if (ri->type & ARM_CP_CONST) {
2291             tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
2292         } else if (ri->readfn) {
2293             if (!tcg_ri) {
2294                 tcg_ri = gen_lookup_cp_reg(key);
2295             }
2296             gen_helper_get_cp_reg64(tcg_rt, cpu_env, tcg_ri);
2297         } else {
2298             tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
2299         }
2300     } else {
2301         if (ri->type & ARM_CP_CONST) {
2302             /* If not forbidden by access permissions, treat as WI */
2303             return;
2304         } else if (ri->writefn) {
2305             if (!tcg_ri) {
2306                 tcg_ri = gen_lookup_cp_reg(key);
2307             }
2308             gen_helper_set_cp_reg64(cpu_env, tcg_ri, tcg_rt);
2309         } else {
2310             tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
2311         }
2312     }
2313 
2314     if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
2315         /*
2316      * A write to any coprocessor register that ends a TB
2317          * must rebuild the hflags for the next TB.
2318          */
2319         gen_rebuild_hflags(s);
2320         /*
2321          * We default to ending the TB on a coprocessor register write,
2322          * but allow this to be suppressed by the register definition
2323          * (usually only necessary to work around guest bugs).
2324          */
2325         need_exit_tb = true;
2326     }
2327     if (need_exit_tb) {
2328         s->base.is_jmp = DISAS_UPDATE_EXIT;
2329     }
2330 }
2331 
2332 static bool trans_SYS(DisasContext *s, arg_SYS *a)
2333 {
2334     handle_sys(s, a->l, a->op0, a->op1, a->op2, a->crn, a->crm, a->rt);
2335     return true;
2336 }
2337 
2338 static bool trans_SVC(DisasContext *s, arg_i *a)
2339 {
2340     /*
2341      * For SVC, HVC and SMC we advance the single-step state
2342      * machine before taking the exception. This is architecturally
2343      * mandated, to ensure that single-stepping a system call
2344      * instruction works properly.
2345      */
2346     uint32_t syndrome = syn_aa64_svc(a->imm);
2347     if (s->fgt_svc) {
2348         gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2);
2349         return true;
2350     }
2351     gen_ss_advance(s);
2352     gen_exception_insn(s, 4, EXCP_SWI, syndrome);
2353     return true;
2354 }
2355 
2356 static bool trans_HVC(DisasContext *s, arg_i *a)
2357 {
2358     if (s->current_el == 0) {
2359         unallocated_encoding(s);
2360         return true;
2361     }
2362     /*
2363      * The pre HVC helper handles cases when HVC gets trapped
2364      * as an undefined insn by runtime configuration.
2365      */
2366     gen_a64_update_pc(s, 0);
2367     gen_helper_pre_hvc(cpu_env);
2368     /* Architecture requires ss advance before we do the actual work */
2369     gen_ss_advance(s);
2370     gen_exception_insn_el(s, 4, EXCP_HVC, syn_aa64_hvc(a->imm), 2);
2371     return true;
2372 }
2373 
2374 static bool trans_SMC(DisasContext *s, arg_i *a)
2375 {
2376     if (s->current_el == 0) {
2377         unallocated_encoding(s);
2378         return true;
2379     }
2380     gen_a64_update_pc(s, 0);
2381     gen_helper_pre_smc(cpu_env, tcg_constant_i32(syn_aa64_smc(a->imm)));
2382     /* Architecture requires ss advance before we do the actual work */
2383     gen_ss_advance(s);
2384     gen_exception_insn_el(s, 4, EXCP_SMC, syn_aa64_smc(a->imm), 3);
2385     return true;
2386 }
2387 
2388 static bool trans_BRK(DisasContext *s, arg_i *a)
2389 {
2390     gen_exception_bkpt_insn(s, syn_aa64_bkpt(a->imm));
2391     return true;
2392 }
2393 
2394 static bool trans_HLT(DisasContext *s, arg_i *a)
2395 {
2396     /*
2397      * HLT. This has two purposes.
2398      * Architecturally, it is an external halting debug instruction.
2399      * Since QEMU doesn't implement external debug, we treat this as
2400      * required when halting debug is disabled: it will UNDEF.
2401      * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
2402      */
2403     if (semihosting_enabled(s->current_el == 0) && a->imm == 0xf000) {
2404         gen_exception_internal_insn(s, EXCP_SEMIHOST);
2405     } else {
2406         unallocated_encoding(s);
2407     }
2408     return true;
2409 }
2410 
2411 /*
2412  * Load/Store exclusive instructions are implemented by remembering
2413  * the value/address loaded, and seeing if these are the same
2414  * when the store is performed. This is not actually the architecturally
2415  * mandated semantics, but it works for typical guest code sequences
2416  * and avoids having to monitor regular stores.
2417  *
2418  * The store exclusive uses the atomic cmpxchg primitives to avoid
2419  * races in multi-threaded linux-user and when MTTCG softmmu is
2420  * enabled.
2421  */
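/*
 * An illustrative guest sequence that this scheme supports (not taken
 * from the architecture manual):
 *
 *   retry:
 *     ldxr    x0, [x1]        // load and record address/value
 *     add     x0, x0, #1
 *     stxr    w2, x0, [x1]    // succeeds only if the monitor still matches
 *     cbnz    w2, retry
 */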
2422 static void gen_load_exclusive(DisasContext *s, int rt, int rt2, int rn,
2423                                int size, bool is_pair)
2424 {
2425     int idx = get_mem_index(s);
2426     TCGv_i64 dirty_addr, clean_addr;
2427     MemOp memop = check_atomic_align(s, rn, size + is_pair);
2428 
2429     s->is_ldex = true;
2430     dirty_addr = cpu_reg_sp(s, rn);
2431     clean_addr = gen_mte_check1(s, dirty_addr, false, rn != 31, memop);
2432 
2433     g_assert(size <= 3);
2434     if (is_pair) {
2435         g_assert(size >= 2);
2436         if (size == 2) {
2437             tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2438             if (s->be_data == MO_LE) {
2439                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
2440                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
2441             } else {
2442                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
2443                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
2444             }
2445         } else {
2446             TCGv_i128 t16 = tcg_temp_new_i128();
2447 
2448             tcg_gen_qemu_ld_i128(t16, clean_addr, idx, memop);
2449 
2450             if (s->be_data == MO_LE) {
2451                 tcg_gen_extr_i128_i64(cpu_exclusive_val,
2452                                       cpu_exclusive_high, t16);
2453             } else {
2454                 tcg_gen_extr_i128_i64(cpu_exclusive_high,
2455                                       cpu_exclusive_val, t16);
2456             }
2457             tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2458             tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
2459         }
2460     } else {
2461         tcg_gen_qemu_ld_i64(cpu_exclusive_val, clean_addr, idx, memop);
2462         tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
2463     }
2464     tcg_gen_mov_i64(cpu_exclusive_addr, clean_addr);
2465 }
2466 
2467 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
2468                                 int rn, int size, int is_pair)
2469 {
2470     /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
2471      *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
2472      *     [addr] = {Rt};
2473      *     if (is_pair) {
2474      *         [addr + datasize] = {Rt2};
2475      *     }
2476      *     {Rd} = 0;
2477      * } else {
2478      *     {Rd} = 1;
2479      * }
2480      * env->exclusive_addr = -1;
2481      */
2482     TCGLabel *fail_label = gen_new_label();
2483     TCGLabel *done_label = gen_new_label();
2484     TCGv_i64 tmp, clean_addr;
2485     MemOp memop;
2486 
2487     /*
2488      * FIXME: We are out of spec here.  We have recorded only the address
2489      * from load_exclusive, not the entire range, and we assume that the
2490      * size of the access on both sides match.  The architecture allows the
2491      * store to be smaller than the load, so long as the stored bytes are
2492      * within the range recorded by the load.
2493      */
2494 
2495     /* See AArch64.ExclusiveMonitorsPass() and AArch64.IsExclusiveVA(). */
2496     clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
2497     tcg_gen_brcond_i64(TCG_COND_NE, clean_addr, cpu_exclusive_addr, fail_label);
2498 
2499     /*
2500      * The write, and any associated faults, only happen if the virtual
2501      * and physical addresses pass the exclusive monitor check.  These
2502      * faults are exceedingly unlikely, because normally the guest uses
2503      * the exact same address register for the load_exclusive, and we
2504      * would have recognized these faults there.
2505      *
2506      * It is possible to trigger an alignment fault pre-LSE2, e.g. with an
2507      * unaligned 4-byte write within the range of an aligned 8-byte load.
2508      * With LSE2, the store would need to cross a 16-byte boundary when the
2509      * load did not, which would mean the store is outside the range
2510      * recorded for the monitor, which would have failed a corrected monitor
2511      * check above.  For now, we assume no size change and retain the
2512      * MO_ALIGN to let tcg know what we checked in the load_exclusive.
2513      *
2514      * It is possible to trigger an MTE fault, by performing the load with
2515      * a virtual address with a valid tag and performing the store with the
2516      * same virtual address and a different invalid tag.
2517      */
2518     memop = size + is_pair;
2519     if (memop == MO_128 || !dc_isar_feature(aa64_lse2, s)) {
2520         memop |= MO_ALIGN;
2521     }
2522     memop = finalize_memop(s, memop);
2523     gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2524 
2525     tmp = tcg_temp_new_i64();
2526     if (is_pair) {
2527         if (size == 2) {
2528             if (s->be_data == MO_LE) {
2529                 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
2530             } else {
2531                 tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
2532             }
2533             tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
2534                                        cpu_exclusive_val, tmp,
2535                                        get_mem_index(s), memop);
2536             tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2537         } else {
2538             TCGv_i128 t16 = tcg_temp_new_i128();
2539             TCGv_i128 c16 = tcg_temp_new_i128();
2540             TCGv_i64 a, b;
2541 
2542             if (s->be_data == MO_LE) {
2543                 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt), cpu_reg(s, rt2));
2544                 tcg_gen_concat_i64_i128(c16, cpu_exclusive_val,
2545                                         cpu_exclusive_high);
2546             } else {
2547                 tcg_gen_concat_i64_i128(t16, cpu_reg(s, rt2), cpu_reg(s, rt));
2548                 tcg_gen_concat_i64_i128(c16, cpu_exclusive_high,
2549                                         cpu_exclusive_val);
2550             }
2551 
2552             tcg_gen_atomic_cmpxchg_i128(t16, cpu_exclusive_addr, c16, t16,
2553                                         get_mem_index(s), memop);
2554 
2555             a = tcg_temp_new_i64();
2556             b = tcg_temp_new_i64();
2557             if (s->be_data == MO_LE) {
2558                 tcg_gen_extr_i128_i64(a, b, t16);
2559             } else {
2560                 tcg_gen_extr_i128_i64(b, a, t16);
2561             }
2562 
2563             tcg_gen_xor_i64(a, a, cpu_exclusive_val);
2564             tcg_gen_xor_i64(b, b, cpu_exclusive_high);
2565             tcg_gen_or_i64(tmp, a, b);
2566 
2567             tcg_gen_setcondi_i64(TCG_COND_NE, tmp, tmp, 0);
2568         }
2569     } else {
2570         tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
2571                                    cpu_reg(s, rt), get_mem_index(s), memop);
2572         tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
2573     }
2574     tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
2575     tcg_gen_br(done_label);
2576 
2577     gen_set_label(fail_label);
2578     tcg_gen_movi_i64(cpu_reg(s, rd), 1);
2579     gen_set_label(done_label);
2580     tcg_gen_movi_i64(cpu_exclusive_addr, -1);
2581 }
2582 
2583 static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
2584                                  int rn, int size)
2585 {
2586     TCGv_i64 tcg_rs = cpu_reg(s, rs);
2587     TCGv_i64 tcg_rt = cpu_reg(s, rt);
2588     int memidx = get_mem_index(s);
2589     TCGv_i64 clean_addr;
2590     MemOp memop;
2591 
2592     if (rn == 31) {
2593         gen_check_sp_alignment(s);
2594     }
2595     memop = check_atomic_align(s, rn, size);
2596     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2597     tcg_gen_atomic_cmpxchg_i64(tcg_rs, clean_addr, tcg_rs, tcg_rt,
2598                                memidx, memop);
2599 }
2600 
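/*
 * CASP compares the even/odd register pair {Rs, Rs+1} against the
 * double-width value in memory and, if it matches, stores {Rt, Rt+1};
 * either way the old memory value is returned in {Rs, Rs+1}.  We
 * implement this as one cmpxchg on the combined value.
 */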
2601 static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
2602                                       int rn, int size)
2603 {
2604     TCGv_i64 s1 = cpu_reg(s, rs);
2605     TCGv_i64 s2 = cpu_reg(s, rs + 1);
2606     TCGv_i64 t1 = cpu_reg(s, rt);
2607     TCGv_i64 t2 = cpu_reg(s, rt + 1);
2608     TCGv_i64 clean_addr;
2609     int memidx = get_mem_index(s);
2610     MemOp memop;
2611 
2612     if (rn == 31) {
2613         gen_check_sp_alignment(s);
2614     }
2615 
2616     /* This is a single atomic access, despite the "pair". */
2617     memop = check_atomic_align(s, rn, size + 1);
2618     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, rn), true, rn != 31, memop);
2619 
2620     if (size == 2) {
2621         TCGv_i64 cmp = tcg_temp_new_i64();
2622         TCGv_i64 val = tcg_temp_new_i64();
2623 
2624         if (s->be_data == MO_LE) {
2625             tcg_gen_concat32_i64(val, t1, t2);
2626             tcg_gen_concat32_i64(cmp, s1, s2);
2627         } else {
2628             tcg_gen_concat32_i64(val, t2, t1);
2629             tcg_gen_concat32_i64(cmp, s2, s1);
2630         }
2631 
2632         tcg_gen_atomic_cmpxchg_i64(cmp, clean_addr, cmp, val, memidx, memop);
2633 
2634         if (s->be_data == MO_LE) {
2635             tcg_gen_extr32_i64(s1, s2, cmp);
2636         } else {
2637             tcg_gen_extr32_i64(s2, s1, cmp);
2638         }
2639     } else {
2640         TCGv_i128 cmp = tcg_temp_new_i128();
2641         TCGv_i128 val = tcg_temp_new_i128();
2642 
2643         if (s->be_data == MO_LE) {
2644             tcg_gen_concat_i64_i128(val, t1, t2);
2645             tcg_gen_concat_i64_i128(cmp, s1, s2);
2646         } else {
2647             tcg_gen_concat_i64_i128(val, t2, t1);
2648             tcg_gen_concat_i64_i128(cmp, s2, s1);
2649         }
2650 
2651         tcg_gen_atomic_cmpxchg_i128(cmp, clean_addr, cmp, val, memidx, memop);
2652 
2653         if (s->be_data == MO_LE) {
2654             tcg_gen_extr_i128_i64(s1, s2, cmp);
2655         } else {
2656             tcg_gen_extr_i128_i64(s2, s1, cmp);
2657         }
2658     }
2659 }
2660 
2661 /*
2662  * Compute the ISS.SF bit for syndrome information if an exception
2663  * is taken on a load or store. This indicates whether the instruction
2664  * is accessing a 32-bit or 64-bit register. This logic is derived
2665  * from the ARMv8 specs for LDR (Shared decode for all encodings).
2666  */
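/*
 * For example: LDRSW Xt is sign && !ext, so SF=1; LDRSH Wt is
 * sign && ext, so SF=0; plain LDR/STR report SF=1 only for the
 * 64-bit (Xt) forms.
 */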
2667 static bool ldst_iss_sf(int size, bool sign, bool ext)
2668 {
2669 
2670     if (sign) {
2671         /*
2672          * Signed loads are 64 bit results if we are not going to
2673          * do a zero-extend from 32 to 64 after the load.
2674          * (For a store, sign and ext are always false.)
2675          */
2676         return !ext;
2677     } else {
2678         /* Unsigned loads/stores work at the specified size */
2679         return size == MO_64;
2680     }
2681 }
2682 
2683 static bool trans_STXR(DisasContext *s, arg_stxr *a)
2684 {
2685     if (a->rn == 31) {
2686         gen_check_sp_alignment(s);
2687     }
2688     if (a->lasr) {
2689         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2690     }
2691     gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, false);
2692     return true;
2693 }
2694 
2695 static bool trans_LDXR(DisasContext *s, arg_stxr *a)
2696 {
2697     if (a->rn == 31) {
2698         gen_check_sp_alignment(s);
2699     }
2700     gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, false);
2701     if (a->lasr) {
2702         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2703     }
2704     return true;
2705 }
2706 
2707 static bool trans_STLR(DisasContext *s, arg_stlr *a)
2708 {
2709     TCGv_i64 clean_addr;
2710     MemOp memop;
2711     bool iss_sf = ldst_iss_sf(a->sz, false, false);
2712 
2713     /*
2714      * StoreLORelease is the same as Store-Release for QEMU, but
2715      * needs the feature-test.
2716      */
2717     if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
2718         return false;
2719     }
2720     /* Generate ISS for non-exclusive accesses including LASR.  */
2721     if (a->rn == 31) {
2722         gen_check_sp_alignment(s);
2723     }
2724     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2725     memop = check_ordered_align(s, a->rn, 0, true, a->sz);
2726     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
2727                                 true, a->rn != 31, memop);
2728     do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, memop, true, a->rt,
2729               iss_sf, a->lasr);
2730     return true;
2731 }
2732 
2733 static bool trans_LDAR(DisasContext *s, arg_stlr *a)
2734 {
2735     TCGv_i64 clean_addr;
2736     MemOp memop;
2737     bool iss_sf = ldst_iss_sf(a->sz, false, false);
2738 
2739     /* LoadLOAcquire is the same as Load-Acquire for QEMU.  */
2740     if (!a->lasr && !dc_isar_feature(aa64_lor, s)) {
2741         return false;
2742     }
2743     /* Generate ISS for non-exclusive accesses including LASR.  */
2744     if (a->rn == 31) {
2745         gen_check_sp_alignment(s);
2746     }
2747     memop = check_ordered_align(s, a->rn, 0, false, a->sz);
2748     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn),
2749                                 false, a->rn != 31, memop);
2750     do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, memop, false, true,
2751               a->rt, iss_sf, a->lasr);
2752     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2753     return true;
2754 }
2755 
2756 static bool trans_STXP(DisasContext *s, arg_stxr *a)
2757 {
2758     if (a->rn == 31) {
2759         gen_check_sp_alignment(s);
2760     }
2761     if (a->lasr) {
2762         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2763     }
2764     gen_store_exclusive(s, a->rs, a->rt, a->rt2, a->rn, a->sz, true);
2765     return true;
2766 }
2767 
2768 static bool trans_LDXP(DisasContext *s, arg_stxr *a)
2769 {
2770     if (a->rn == 31) {
2771         gen_check_sp_alignment(s);
2772     }
2773     gen_load_exclusive(s, a->rt, a->rt2, a->rn, a->sz, true);
2774     if (a->lasr) {
2775         tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2776     }
2777     return true;
2778 }
2779 
2780 static bool trans_CASP(DisasContext *s, arg_CASP *a)
2781 {
2782     if (!dc_isar_feature(aa64_atomics, s)) {
2783         return false;
2784     }
2785     if (((a->rt | a->rs) & 1) != 0) {
2786         return false;
2787     }
2788 
2789     gen_compare_and_swap_pair(s, a->rs, a->rt, a->rn, a->sz);
2790     return true;
2791 }
2792 
2793 static bool trans_CAS(DisasContext *s, arg_CAS *a)
2794 {
2795     if (!dc_isar_feature(aa64_atomics, s)) {
2796         return false;
2797     }
2798     gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz);
2799     return true;
2800 }
2801 
2802 static bool trans_LD_lit(DisasContext *s, arg_ldlit *a)
2803 {
2804     bool iss_sf = ldst_iss_sf(a->sz, a->sign, false);
2805     TCGv_i64 tcg_rt = cpu_reg(s, a->rt);
2806     TCGv_i64 clean_addr = tcg_temp_new_i64();
2807     MemOp memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
2808 
2809     gen_pc_plus_diff(s, clean_addr, a->imm);
2810     do_gpr_ld(s, tcg_rt, clean_addr, memop,
2811               false, true, a->rt, iss_sf, false);
2812     return true;
2813 }
2814 
2815 static bool trans_LD_lit_v(DisasContext *s, arg_ldlit *a)
2816 {
2817     /* Load register (literal), vector version */
2818     TCGv_i64 clean_addr;
2819     MemOp memop;
2820 
2821     if (!fp_access_check(s)) {
2822         return true;
2823     }
2824     memop = finalize_memop_asimd(s, a->sz);
2825     clean_addr = tcg_temp_new_i64();
2826     gen_pc_plus_diff(s, clean_addr, a->imm);
2827     do_fp_ld(s, a->rt, clean_addr, memop);
2828     return true;
2829 }
2830 
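/*
 * Common address setup for the LDP/STP family: apply any pre-index
 * offset to Rn and run the MTE check over the whole 2 << sz byte
 * range.  op_addr_ldstpair_post performs the post-index addition and
 * the writeback to Rn.
 */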
2831 static void op_addr_ldstpair_pre(DisasContext *s, arg_ldstpair *a,
2832                                  TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
2833                                  uint64_t offset, bool is_store, MemOp mop)
2834 {
2835     if (a->rn == 31) {
2836         gen_check_sp_alignment(s);
2837     }
2838 
2839     *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
2840     if (!a->p) {
2841         tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
2842     }
2843 
2844     *clean_addr = gen_mte_checkN(s, *dirty_addr, is_store,
2845                                  (a->w || a->rn != 31), 2 << a->sz, mop);
2846 }
2847 
2848 static void op_addr_ldstpair_post(DisasContext *s, arg_ldstpair *a,
2849                                   TCGv_i64 dirty_addr, uint64_t offset)
2850 {
2851     if (a->w) {
2852         if (a->p) {
2853             tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
2854         }
2855         tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
2856     }
2857 }
2858 
2859 static bool trans_STP(DisasContext *s, arg_ldstpair *a)
2860 {
2861     uint64_t offset = a->imm << a->sz;
2862     TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
2863     MemOp mop = finalize_memop(s, a->sz);
2864 
2865     op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
2866     tcg_rt = cpu_reg(s, a->rt);
2867     tcg_rt2 = cpu_reg(s, a->rt2);
2868     /*
2869      * We built mop above for the single logical access -- rebuild it
2870      * now for the paired operation.
2871      *
2872      * With LSE2, non-sign-extending pairs are treated atomically if
2873      * aligned, and if unaligned one of the pair will be completely
2874      * within a 16-byte block and that element will be atomic.
2875      * Otherwise each element is separately atomic.
2876      * In all cases, issue one operation with the correct atomicity.
2877      */
2878     mop = a->sz + 1;
2879     if (s->align_mem) {
2880         mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
2881     }
2882     mop = finalize_memop_pair(s, mop);
2883     if (a->sz == 2) {
2884         TCGv_i64 tmp = tcg_temp_new_i64();
2885 
2886         if (s->be_data == MO_LE) {
2887             tcg_gen_concat32_i64(tmp, tcg_rt, tcg_rt2);
2888         } else {
2889             tcg_gen_concat32_i64(tmp, tcg_rt2, tcg_rt);
2890         }
2891         tcg_gen_qemu_st_i64(tmp, clean_addr, get_mem_index(s), mop);
2892     } else {
2893         TCGv_i128 tmp = tcg_temp_new_i128();
2894 
2895         if (s->be_data == MO_LE) {
2896             tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
2897         } else {
2898             tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
2899         }
2900         tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
2901     }
2902     op_addr_ldstpair_post(s, a, dirty_addr, offset);
2903     return true;
2904 }
2905 
2906 static bool trans_LDP(DisasContext *s, arg_ldstpair *a)
2907 {
2908     uint64_t offset = a->imm << a->sz;
2909     TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
2910     MemOp mop = finalize_memop(s, a->sz);
2911 
2912     op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
2913     tcg_rt = cpu_reg(s, a->rt);
2914     tcg_rt2 = cpu_reg(s, a->rt2);
2915 
2916     /*
2917      * We built mop above for the single logical access -- rebuild it
2918      * now for the paired operation.
2919      *
2920      * With LSE2, non-sign-extending pairs are treated atomically if
2921      * aligned, and if unaligned one of the pair will be completely
2922      * within a 16-byte block and that element will be atomic.
2923      * Otherwise each element is separately atomic.
2924      * In all cases, issue one operation with the correct atomicity.
2925      *
2926      * This treats sign-extending loads like zero-extending loads,
2927      * since that reuses the most code below.
2928      */
2929     mop = a->sz + 1;
2930     if (s->align_mem) {
2931         mop |= (a->sz == 2 ? MO_ALIGN_4 : MO_ALIGN_8);
2932     }
2933     mop = finalize_memop_pair(s, mop);
2934     if (a->sz == 2) {
2935         int o2 = s->be_data == MO_LE ? 32 : 0;
2936         int o1 = o2 ^ 32;
2937 
2938         tcg_gen_qemu_ld_i64(tcg_rt, clean_addr, get_mem_index(s), mop);
2939         if (a->sign) {
2940             tcg_gen_sextract_i64(tcg_rt2, tcg_rt, o2, 32);
2941             tcg_gen_sextract_i64(tcg_rt, tcg_rt, o1, 32);
2942         } else {
2943             tcg_gen_extract_i64(tcg_rt2, tcg_rt, o2, 32);
2944             tcg_gen_extract_i64(tcg_rt, tcg_rt, o1, 32);
2945         }
2946     } else {
2947         TCGv_i128 tmp = tcg_temp_new_i128();
2948 
2949         tcg_gen_qemu_ld_i128(tmp, clean_addr, get_mem_index(s), mop);
2950         if (s->be_data == MO_LE) {
2951             tcg_gen_extr_i128_i64(tcg_rt, tcg_rt2, tmp);
2952         } else {
2953             tcg_gen_extr_i128_i64(tcg_rt2, tcg_rt, tmp);
2954         }
2955     }
2956     op_addr_ldstpair_post(s, a, dirty_addr, offset);
2957     return true;
2958 }
2959 
2960 static bool trans_STP_v(DisasContext *s, arg_ldstpair *a)
2961 {
2962     uint64_t offset = a->imm << a->sz;
2963     TCGv_i64 clean_addr, dirty_addr;
2964     MemOp mop;
2965 
2966     if (!fp_access_check(s)) {
2967         return true;
2968     }
2969 
2970     /* LSE2 does not merge FP pairs; leave these as separate operations. */
2971     mop = finalize_memop_asimd(s, a->sz);
2972     op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, true, mop);
2973     do_fp_st(s, a->rt, clean_addr, mop);
2974     tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
2975     do_fp_st(s, a->rt2, clean_addr, mop);
2976     op_addr_ldstpair_post(s, a, dirty_addr, offset);
2977     return true;
2978 }
2979 
2980 static bool trans_LDP_v(DisasContext *s, arg_ldstpair *a)
2981 {
2982     uint64_t offset = a->imm << a->sz;
2983     TCGv_i64 clean_addr, dirty_addr;
2984     MemOp mop;
2985 
2986     if (!fp_access_check(s)) {
2987         return true;
2988     }
2989 
2990     /* LSE2 does not merge FP pairs; leave these as separate operations. */
2991     mop = finalize_memop_asimd(s, a->sz);
2992     op_addr_ldstpair_pre(s, a, &clean_addr, &dirty_addr, offset, false, mop);
2993     do_fp_ld(s, a->rt, clean_addr, mop);
2994     tcg_gen_addi_i64(clean_addr, clean_addr, 1 << a->sz);
2995     do_fp_ld(s, a->rt2, clean_addr, mop);
2996     op_addr_ldstpair_post(s, a, dirty_addr, offset);
2997     return true;
2998 }
2999 
3000 static bool trans_STGP(DisasContext *s, arg_ldstpair *a)
3001 {
3002     TCGv_i64 clean_addr, dirty_addr, tcg_rt, tcg_rt2;
3003     uint64_t offset = a->imm << LOG2_TAG_GRANULE;
3004     MemOp mop;
3005     TCGv_i128 tmp;
3006 
3007     if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
3008         return false;
3009     }
3010 
3011     if (a->rn == 31) {
3012         gen_check_sp_alignment(s);
3013     }
3014 
3015     dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3016     if (!a->p) {
3017         tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3018     }
3019 
3020     if (!s->ata) {
3021         /*
3022          * TODO: We could rely on the stores below, at least for
3023          * system mode, if we arrange to add MO_ALIGN_16.
3024          */
3025         gen_helper_stg_stub(cpu_env, dirty_addr);
3026     } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3027         gen_helper_stg_parallel(cpu_env, dirty_addr, dirty_addr);
3028     } else {
3029         gen_helper_stg(cpu_env, dirty_addr, dirty_addr);
3030     }
3031 
3032     mop = finalize_memop(s, a->sz);
3033     clean_addr = gen_mte_checkN(s, dirty_addr, true, false, 2 << a->sz, mop);
3034 
3035     tcg_rt = cpu_reg(s, a->rt);
3036     tcg_rt2 = cpu_reg(s, a->rt2);
3037 
3038     assert(a->sz == 3);
3039 
3040     tmp = tcg_temp_new_i128();
3041     if (s->be_data == MO_LE) {
3042         tcg_gen_concat_i64_i128(tmp, tcg_rt, tcg_rt2);
3043     } else {
3044         tcg_gen_concat_i64_i128(tmp, tcg_rt2, tcg_rt);
3045     }
3046     tcg_gen_qemu_st_i128(tmp, clean_addr, get_mem_index(s), mop);
3047 
3048     op_addr_ldstpair_post(s, a, dirty_addr, offset);
3049     return true;
3050 }
3051 
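/*
 * As above, but for the single-register immediate forms; a->unpriv
 * selects the "unprivileged load/store" (LDTR/STTR) mmu index.
 */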
3052 static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a,
3053                                  TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
3054                                  uint64_t offset, bool is_store, MemOp mop)
3055 {
3056     int memidx;
3057 
3058     if (a->rn == 31) {
3059         gen_check_sp_alignment(s);
3060     }
3061 
3062     *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3063     if (!a->p) {
3064         tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset);
3065     }
3066     memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3067     *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store,
3068                                         a->w || a->rn != 31,
3069                                         mop, a->unpriv, memidx);
3070 }
3071 
3072 static void op_addr_ldst_imm_post(DisasContext *s, arg_ldst_imm *a,
3073                                   TCGv_i64 dirty_addr, uint64_t offset)
3074 {
3075     if (a->w) {
3076         if (a->p) {
3077             tcg_gen_addi_i64(dirty_addr, dirty_addr, offset);
3078         }
3079         tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
3080     }
3081 }
3082 
3083 static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a)
3084 {
3085     bool iss_sf, iss_valid = !a->w;
3086     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3087     int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3088     MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3089 
3090     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
3091 
3092     tcg_rt = cpu_reg(s, a->rt);
3093     iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3094 
3095     do_gpr_st_memidx(s, tcg_rt, clean_addr, mop, memidx,
3096                      iss_valid, a->rt, iss_sf, false);
3097     op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3098     return true;
3099 }
3100 
3101 static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a)
3102 {
3103     bool iss_sf, iss_valid = !a->w;
3104     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3105     int memidx = a->unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
3106     MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3107 
3108     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
3109 
3110     tcg_rt = cpu_reg(s, a->rt);
3111     iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3112 
3113     do_gpr_ld_memidx(s, tcg_rt, clean_addr, mop,
3114                      a->ext, memidx, iss_valid, a->rt, iss_sf, false);
3115     op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3116     return true;
3117 }
3118 
3119 static bool trans_STR_v_i(DisasContext *s, arg_ldst_imm *a)
3120 {
3121     TCGv_i64 clean_addr, dirty_addr;
3122     MemOp mop;
3123 
3124     if (!fp_access_check(s)) {
3125         return true;
3126     }
3127     mop = finalize_memop_asimd(s, a->sz);
3128     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop);
3129     do_fp_st(s, a->rt, clean_addr, mop);
3130     op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3131     return true;
3132 }
3133 
3134 static bool trans_LDR_v_i(DisasContext *s, arg_ldst_imm *a)
3135 {
3136     TCGv_i64 clean_addr, dirty_addr;
3137     MemOp mop;
3138 
3139     if (!fp_access_check(s)) {
3140         return true;
3141     }
3142     mop = finalize_memop_asimd(s, a->sz);
3143     op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop);
3144     do_fp_ld(s, a->rt, clean_addr, mop);
3145     op_addr_ldst_imm_post(s, a, dirty_addr, a->imm);
3146     return true;
3147 }
3148 
3149 static void op_addr_ldst_pre(DisasContext *s, arg_ldst *a,
3150                              TCGv_i64 *clean_addr, TCGv_i64 *dirty_addr,
3151                              bool is_store, MemOp memop)
3152 {
3153     TCGv_i64 tcg_rm;
3154 
3155     if (a->rn == 31) {
3156         gen_check_sp_alignment(s);
3157     }
3158     *dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3159 
3160     tcg_rm = read_cpu_reg(s, a->rm, 1);
3161     ext_and_shift_reg(tcg_rm, tcg_rm, a->opt, a->s ? a->sz : 0);
3162 
3163     tcg_gen_add_i64(*dirty_addr, *dirty_addr, tcg_rm);
3164     *clean_addr = gen_mte_check1(s, *dirty_addr, is_store, true, memop);
3165 }
3166 
3167 static bool trans_LDR(DisasContext *s, arg_ldst *a)
3168 {
3169     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3170     bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3171     MemOp memop;
3172 
3173     if (extract32(a->opt, 1, 1) == 0) {
3174         return false;
3175     }
3176 
3177     memop = finalize_memop(s, a->sz + a->sign * MO_SIGN);
3178     op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
3179     tcg_rt = cpu_reg(s, a->rt);
3180     do_gpr_ld(s, tcg_rt, clean_addr, memop,
3181               a->ext, true, a->rt, iss_sf, false);
3182     return true;
3183 }
3184 
3185 static bool trans_STR(DisasContext *s, arg_ldst *a)
3186 {
3187     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3188     bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3189     MemOp memop;
3190 
3191     if (extract32(a->opt, 1, 1) == 0) {
3192         return false;
3193     }
3194 
3195     memop = finalize_memop(s, a->sz);
3196     op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
3197     tcg_rt = cpu_reg(s, a->rt);
3198     do_gpr_st(s, tcg_rt, clean_addr, memop, true, a->rt, iss_sf, false);
3199     return true;
3200 }
3201 
3202 static bool trans_LDR_v(DisasContext *s, arg_ldst *a)
3203 {
3204     TCGv_i64 clean_addr, dirty_addr;
3205     MemOp memop;
3206 
3207     if (extract32(a->opt, 1, 1) == 0) {
3208         return false;
3209     }
3210 
3211     if (!fp_access_check(s)) {
3212         return true;
3213     }
3214 
3215     memop = finalize_memop_asimd(s, a->sz);
3216     op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, false, memop);
3217     do_fp_ld(s, a->rt, clean_addr, memop);
3218     return true;
3219 }
3220 
3221 static bool trans_STR_v(DisasContext *s, arg_ldst *a)
3222 {
3223     TCGv_i64 clean_addr, dirty_addr;
3224     MemOp memop;
3225 
3226     if (extract32(a->opt, 1, 1) == 0) {
3227         return false;
3228     }
3229 
3230     if (!fp_access_check(s)) {
3231         return true;
3232     }
3233 
3234     memop = finalize_memop_asimd(s, a->sz);
3235     op_addr_ldst_pre(s, a, &clean_addr, &dirty_addr, true, memop);
3236     do_fp_st(s, a->rt, clean_addr, memop);
3237     return true;
3238 }
3239 
3240 
3241 static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn,
3242                          int sign, bool invert)
3243 {
3244     MemOp mop = a->sz | sign;
3245     TCGv_i64 clean_addr, tcg_rs, tcg_rt;
3246 
3247     if (a->rn == 31) {
3248         gen_check_sp_alignment(s);
3249     }
3250     mop = check_atomic_align(s, a->rn, mop);
3251     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
3252                                 a->rn != 31, mop);
3253     tcg_rs = read_cpu_reg(s, a->rs, true);
3254     tcg_rt = cpu_reg(s, a->rt);
3255     if (invert) {
3256         tcg_gen_not_i64(tcg_rs, tcg_rs);
3257     }
3258     /*
3259      * The tcg atomic primitives are all full barriers.  Therefore we
3260      * can ignore the Acquire and Release bits of this instruction.
3261      */
3262     fn(tcg_rt, clean_addr, tcg_rs, get_mem_index(s), mop);
3263 
3264     if (mop & MO_SIGN) {
3265         switch (a->sz) {
3266         case MO_8:
3267             tcg_gen_ext8u_i64(tcg_rt, tcg_rt);
3268             break;
3269         case MO_16:
3270             tcg_gen_ext16u_i64(tcg_rt, tcg_rt);
3271             break;
3272         case MO_32:
3273             tcg_gen_ext32u_i64(tcg_rt, tcg_rt);
3274             break;
3275         case MO_64:
3276             break;
3277         default:
3278             g_assert_not_reached();
3279         }
3280     }
3281     return true;
3282 }
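
/*
 * Illustrative example of the sign handling above: for
 * LDSMINB <Ws>, <Wt>, [<Xn|SP>] the min comparison must be signed, so
 * mop carries MO_SIGN and the atomic helper returns the old byte
 * sign-extended; the switch above then re-zero-extends it, because the
 * value architecturally written back to Wt is the zero-extended
 * memory value.
 */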
3283 
3284 TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false)
3285 TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true)
3286 TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false)
3287 TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false)
3288 TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false)
3289 TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false)
3290 TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false)
3291 TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false)
3292 TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false)
3293 
3294 static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a)
3295 {
3296     bool iss_sf = ldst_iss_sf(a->sz, false, false);
3297     TCGv_i64 clean_addr;
3298     MemOp mop;
3299 
3300     if (!dc_isar_feature(aa64_atomics, s) ||
3301         !dc_isar_feature(aa64_rcpc_8_3, s)) {
3302         return false;
3303     }
3304     if (a->rn == 31) {
3305         gen_check_sp_alignment(s);
3306     }
3307     mop = check_atomic_align(s, a->rn, a->sz);
3308     clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false,
3309                                 a->rn != 31, mop);
3310     /*
3311      * LDAPR* are a special case because they are a simple load, not a
3312      * fetch-and-do-something op.
3313      * The architectural consistency requirements here are weaker than
3314      * full load-acquire (we only need "load-acquire processor consistent"),
3315      * but we choose to implement them as full LDAQ.
3316      */
3317     do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, false,
3318               true, a->rt, iss_sf, true);
3319     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3320     return true;
3321 }
3322 
3323 static bool trans_LDRA(DisasContext *s, arg_LDRA *a)
3324 {
3325     TCGv_i64 clean_addr, dirty_addr, tcg_rt;
3326     MemOp memop;
3327 
3328     /* Load with pointer authentication */
3329     if (!dc_isar_feature(aa64_pauth, s)) {
3330         return false;
3331     }
3332 
3333     if (a->rn == 31) {
3334         gen_check_sp_alignment(s);
3335     }
3336     dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3337 
3338     if (s->pauth_active) {
3339         if (!a->m) {
3340             gen_helper_autda(dirty_addr, cpu_env, dirty_addr,
3341                              tcg_constant_i64(0));
3342         } else {
3343             gen_helper_autdb(dirty_addr, cpu_env, dirty_addr,
3344                              tcg_constant_i64(0));
3345         }
3346     }
3347 
3348     tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3349 
3350     memop = finalize_memop(s, MO_64);
3351 
3352     /* Note that "clean" and "dirty" here refer to TBI not PAC.  */
3353     clean_addr = gen_mte_check1(s, dirty_addr, false,
3354                                 a->w || a->rn != 31, memop);
3355 
3356     tcg_rt = cpu_reg(s, a->rt);
3357     do_gpr_ld(s, tcg_rt, clean_addr, memop,
3358               /* extend */ false, /* iss_valid */ !a->w,
3359               /* iss_srt */ a->rt, /* iss_sf */ true, /* iss_ar */ false);
3360 
3361     if (a->w) {
3362         tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), dirty_addr);
3363     }
3364     return true;
3365 }
3366 
3367 static bool trans_LDAPR_i(DisasContext *s, arg_ldapr_stlr_i *a)
3368 {
3369     TCGv_i64 clean_addr, dirty_addr;
3370     MemOp mop = a->sz | (a->sign ? MO_SIGN : 0);
3371     bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3372 
3373     if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3374         return false;
3375     }
3376 
3377     if (a->rn == 31) {
3378         gen_check_sp_alignment(s);
3379     }
3380 
3381     mop = check_ordered_align(s, a->rn, a->imm, false, mop);
3382     dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3383     tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3384     clean_addr = clean_data_tbi(s, dirty_addr);
3385 
3386     /*
3387      * Load-AcquirePC semantics; we implement as the slightly more
3388      * restrictive Load-Acquire.
3389      */
3390     do_gpr_ld(s, cpu_reg(s, a->rt), clean_addr, mop, a->ext, true,
3391               a->rt, iss_sf, true);
3392     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3393     return true;
3394 }
3395 
3396 static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a)
3397 {
3398     TCGv_i64 clean_addr, dirty_addr;
3399     MemOp mop = a->sz;
3400     bool iss_sf = ldst_iss_sf(a->sz, a->sign, a->ext);
3401 
3402     if (!dc_isar_feature(aa64_rcpc_8_4, s)) {
3403         return false;
3404     }
3405 
3406     /* TODO: ARMv8.4-LSE SCTLR.nAA */
3407 
3408     if (a->rn == 31) {
3409         gen_check_sp_alignment(s);
3410     }
3411 
3412     mop = check_ordered_align(s, a->rn, a->imm, true, mop);
3413     dirty_addr = read_cpu_reg_sp(s, a->rn, 1);
3414     tcg_gen_addi_i64(dirty_addr, dirty_addr, a->imm);
3415     clean_addr = clean_data_tbi(s, dirty_addr);
3416 
3417     /* Store-Release semantics */
3418     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
3419     do_gpr_st(s, cpu_reg(s, a->rt), clean_addr, mop, true, a->rt, iss_sf, true);
3420     return true;
3421 }
3422 
3423 static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a)
3424 {
3425     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3426     MemOp endian, align, mop;
3427 
3428     int total;    /* total bytes */
3429     int elements; /* elements per vector */
3430     int r;
3431     int size = a->sz;
3432 
3433     if (!a->p && a->rm != 0) {
3434         /* For non-postindexed accesses the Rm field must be 0 */
3435         return false;
3436     }
3437     if (size == 3 && !a->q && a->selem != 1) {
3438         return false;
3439     }
3440     if (!fp_access_check(s)) {
3441         return true;
3442     }
3443 
3444     if (a->rn == 31) {
3445         gen_check_sp_alignment(s);
3446     }
3447 
3448     /* For our purposes, bytes are always little-endian.  */
3449     endian = s->be_data;
3450     if (size == 0) {
3451         endian = MO_LE;
3452     }
3453 
3454     total = a->rpt * a->selem * (a->q ? 16 : 8);
3455     tcg_rn = cpu_reg_sp(s, a->rn);
3456 
3457     /*
3458      * Issue the MTE check vs the logical repeat count, before we
3459      * promote consecutive little-endian elements below.
3460      */
3461     clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31, total,
3462                                 finalize_memop_asimd(s, size));
3463 
3464     /*
3465      * Consecutive little-endian elements from a single register
3466      * can be promoted to a larger little-endian operation.
3467      */
3468     align = MO_ALIGN;
3469     if (a->selem == 1 && endian == MO_LE) {
3470         align = pow2_align(size);
3471         size = 3;
3472     }
3473     if (!s->align_mem) {
3474         align = 0;
3475     }
3476     mop = endian | size | align;
3477 
3478     elements = (a->q ? 16 : 8) >> size;
3479     tcg_ebytes = tcg_constant_i64(1 << size);
3480     for (r = 0; r < a->rpt; r++) {
3481         int e;
3482         for (e = 0; e < elements; e++) {
3483             int xs;
3484             for (xs = 0; xs < a->selem; xs++) {
3485                 int tt = (a->rt + r + xs) % 32;
3486                 do_vec_ld(s, tt, e, clean_addr, mop);
3487                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3488             }
3489         }
3490     }
3491 
3492     /*
3493      * For non-quad operations, setting a slice of the low 64 bits of
3494      * the register clears the high 64 bits (in the ARM ARM pseudocode
3495      * this is implicit in the fact that 'rval' is a 64 bit wide
3496      * variable).  For quad operations, we might still need to zero
3497      * the high bits of SVE.
3498      * the high bits of the SVE register.
3499     for (r = 0; r < a->rpt * a->selem; r++) {
3500         int tt = (a->rt + r) % 32;
3501         clear_vec_high(s, a->q, tt);
3502     }
3503 
3504     if (a->p) {
3505         if (a->rm == 31) {
3506             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3507         } else {
3508             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3509         }
3510     }
3511     return true;
3512 }
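
/*
 * Worked example of the element promotion above: LD1 {v0.16b}, [x0]
 * has selem == 1 and size == 0, so every byte is nominally its own
 * little-endian element; size is raised to 3 and the loop issues two
 * 8-byte little-endian loads instead of sixteen byte loads, which is
 * equivalent because consecutive bytes land in consecutive element
 * positions of the same register.  trans_ST_mult below applies the
 * same promotion to stores.
 */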
3513 
3514 static bool trans_ST_mult(DisasContext *s, arg_ldst_mult *a)
3515 {
3516     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3517     MemOp endian, align, mop;
3518 
3519     int total;    /* total bytes */
3520     int elements; /* elements per vector */
3521     int r;
3522     int size = a->sz;
3523 
3524     if (!a->p && a->rm != 0) {
3525         /* For non-postindexed accesses the Rm field must be 0 */
3526         return false;
3527     }
3528     if (size == 3 && !a->q && a->selem != 1) {
3529         return false;
3530     }
3531     if (!fp_access_check(s)) {
3532         return true;
3533     }
3534 
3535     if (a->rn == 31) {
3536         gen_check_sp_alignment(s);
3537     }
3538 
3539     /* For our purposes, bytes are always little-endian.  */
3540     endian = s->be_data;
3541     if (size == 0) {
3542         endian = MO_LE;
3543     }
3544 
3545     total = a->rpt * a->selem * (a->q ? 16 : 8);
3546     tcg_rn = cpu_reg_sp(s, a->rn);
3547 
3548     /*
3549      * Issue the MTE check vs the logical repeat count, before we
3550      * promote consecutive little-endian elements below.
3551      */
3552     clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31, total,
3553                                 finalize_memop_asimd(s, size));
3554 
3555     /*
3556      * Consecutive little-endian elements from a single register
3557      * can be promoted to a larger little-endian operation.
3558      */
3559     align = MO_ALIGN;
3560     if (a->selem == 1 && endian == MO_LE) {
3561         align = pow2_align(size);
3562         size = 3;
3563     }
3564     if (!s->align_mem) {
3565         align = 0;
3566     }
3567     mop = endian | size | align;
3568 
3569     elements = (a->q ? 16 : 8) >> size;
3570     tcg_ebytes = tcg_constant_i64(1 << size);
3571     for (r = 0; r < a->rpt; r++) {
3572         int e;
3573         for (e = 0; e < elements; e++) {
3574             int xs;
3575             for (xs = 0; xs < a->selem; xs++) {
3576                 int tt = (a->rt + r + xs) % 32;
3577                 do_vec_st(s, tt, e, clean_addr, mop);
3578                 tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3579             }
3580         }
3581     }
3582 
3583     if (a->p) {
3584         if (a->rm == 31) {
3585             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3586         } else {
3587             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3588         }
3589     }
3590     return true;
3591 }
3592 
3593 static bool trans_ST_single(DisasContext *s, arg_ldst_single *a)
3594 {
3595     int xs, total, rt;
3596     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3597     MemOp mop;
3598 
3599     if (!a->p && a->rm != 0) {
3600         return false;
3601     }
3602     if (!fp_access_check(s)) {
3603         return true;
3604     }
3605 
3606     if (a->rn == 31) {
3607         gen_check_sp_alignment(s);
3608     }
3609 
3610     total = a->selem << a->scale;
3611     tcg_rn = cpu_reg_sp(s, a->rn);
3612 
3613     mop = finalize_memop_asimd(s, a->scale);
3614     clean_addr = gen_mte_checkN(s, tcg_rn, true, a->p || a->rn != 31,
3615                                 total, mop);
3616 
3617     tcg_ebytes = tcg_constant_i64(1 << a->scale);
3618     for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3619         do_vec_st(s, rt, a->index, clean_addr, mop);
3620         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3621     }
3622 
3623     if (a->p) {
3624         if (a->rm == 31) {
3625             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3626         } else {
3627             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3628         }
3629     }
3630     return true;
3631 }
3632 
3633 static bool trans_LD_single(DisasContext *s, arg_ldst_single *a)
3634 {
3635     int xs, total, rt;
3636     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3637     MemOp mop;
3638 
3639     if (!a->p && a->rm != 0) {
3640         return false;
3641     }
3642     if (!fp_access_check(s)) {
3643         return true;
3644     }
3645 
3646     if (a->rn == 31) {
3647         gen_check_sp_alignment(s);
3648     }
3649 
3650     total = a->selem << a->scale;
3651     tcg_rn = cpu_reg_sp(s, a->rn);
3652 
3653     mop = finalize_memop_asimd(s, a->scale);
3654     clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
3655                                 total, mop);
3656 
3657     tcg_ebytes = tcg_constant_i64(1 << a->scale);
3658     for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3659         do_vec_ld(s, rt, a->index, clean_addr, mop);
3660         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3661     }
3662 
3663     if (a->p) {
3664         if (a->rm == 31) {
3665             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3666         } else {
3667             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3668         }
3669     }
3670     return true;
3671 }
3672 
3673 static bool trans_LD_single_repl(DisasContext *s, arg_LD_single_repl *a)
3674 {
3675     int xs, total, rt;
3676     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
3677     MemOp mop;
3678 
3679     if (!a->p && a->rm != 0) {
3680         return false;
3681     }
3682     if (!fp_access_check(s)) {
3683         return true;
3684     }
3685 
3686     if (a->rn == 31) {
3687         gen_check_sp_alignment(s);
3688     }
3689 
3690     total = a->selem << a->scale;
3691     tcg_rn = cpu_reg_sp(s, a->rn);
3692 
3693     mop = finalize_memop_asimd(s, a->scale);
3694     clean_addr = gen_mte_checkN(s, tcg_rn, false, a->p || a->rn != 31,
3695                                 total, mop);
3696 
3697     tcg_ebytes = tcg_constant_i64(1 << a->scale);
3698     for (xs = 0, rt = a->rt; xs < a->selem; xs++, rt = (rt + 1) % 32) {
3699         /* Load and replicate to all elements */
3700         TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3701 
3702         tcg_gen_qemu_ld_i64(tcg_tmp, clean_addr, get_mem_index(s), mop);
3703         tcg_gen_gvec_dup_i64(a->scale, vec_full_reg_offset(s, rt),
3704                              (a->q + 1) * 8, vec_full_reg_size(s), tcg_tmp);
3705         tcg_gen_add_i64(clean_addr, clean_addr, tcg_ebytes);
3706     }
3707 
3708     if (a->p) {
3709         if (a->rm == 31) {
3710             tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
3711         } else {
3712             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, a->rm));
3713         }
3714     }
3715     return true;
3716 }
3717 
3718 static bool trans_STZGM(DisasContext *s, arg_ldst_tag *a)
3719 {
3720     TCGv_i64 addr, clean_addr, tcg_rt;
3721     int size = 4 << s->dcz_blocksize;
3722 
3723     if (!dc_isar_feature(aa64_mte, s)) {
3724         return false;
3725     }
3726     if (s->current_el == 0) {
3727         return false;
3728     }
3729 
3730     if (a->rn == 31) {
3731         gen_check_sp_alignment(s);
3732     }
3733 
3734     addr = read_cpu_reg_sp(s, a->rn, true);
3735     tcg_gen_addi_i64(addr, addr, a->imm);
3736     tcg_rt = cpu_reg(s, a->rt);
3737 
3738     if (s->ata) {
3739         gen_helper_stzgm_tags(cpu_env, addr, tcg_rt);
3740     }
3741     /*
3742      * The non-tags portion of STZGM is mostly like DC_ZVA,
3743      * except the alignment happens before the access.
3744      */
3745     clean_addr = clean_data_tbi(s, addr);
3746     tcg_gen_andi_i64(clean_addr, clean_addr, -size);
3747     gen_helper_dc_zva(cpu_env, clean_addr);
3748     return true;
3749 }
3750 
3751 static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
3752 {
3753     TCGv_i64 addr, clean_addr, tcg_rt;
3754 
3755     if (!dc_isar_feature(aa64_mte, s)) {
3756         return false;
3757     }
3758     if (s->current_el == 0) {
3759         return false;
3760     }
3761 
3762     if (a->rn == 31) {
3763         gen_check_sp_alignment(s);
3764     }
3765 
3766     addr = read_cpu_reg_sp(s, a->rn, true);
3767     tcg_gen_addi_i64(addr, addr, a->imm);
3768     tcg_rt = cpu_reg(s, a->rt);
3769 
3770     if (s->ata) {
3771         gen_helper_stgm(cpu_env, addr, tcg_rt);
3772     } else {
3773         MMUAccessType acc = MMU_DATA_STORE;
3774         int size = 4 << GMID_EL1_BS;
3775 
3776         clean_addr = clean_data_tbi(s, addr);
3777         tcg_gen_andi_i64(clean_addr, clean_addr, -size);
3778         gen_probe_access(s, clean_addr, acc, size);
3779     }
3780     return true;
3781 }
3782 
3783 static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
3784 {
3785     TCGv_i64 addr, clean_addr, tcg_rt;
3786 
3787     if (!dc_isar_feature(aa64_mte, s)) {
3788         return false;
3789     }
3790     if (s->current_el == 0) {
3791         return false;
3792     }
3793 
3794     if (a->rn == 31) {
3795         gen_check_sp_alignment(s);
3796     }
3797 
3798     addr = read_cpu_reg_sp(s, a->rn, true);
3799     tcg_gen_addi_i64(addr, addr, a->imm);
3800     tcg_rt = cpu_reg(s, a->rt);
3801 
3802     if (s->ata) {
3803         gen_helper_ldgm(tcg_rt, cpu_env, addr);
3804     } else {
3805         MMUAccessType acc = MMU_DATA_LOAD;
3806         int size = 4 << GMID_EL1_BS;
3807 
3808         clean_addr = clean_data_tbi(s, addr);
3809         tcg_gen_andi_i64(clean_addr, clean_addr, -size);
3810         gen_probe_access(s, clean_addr, acc, size);
3811         /* The result tags are zeros.  */
3812         tcg_gen_movi_i64(tcg_rt, 0);
3813     }
3814     return true;
3815 }
3816 
3817 static bool trans_LDG(DisasContext *s, arg_ldst_tag *a)
3818 {
3819     TCGv_i64 addr, clean_addr, tcg_rt;
3820 
3821     if (!dc_isar_feature(aa64_mte_insn_reg, s)) {
3822         return false;
3823     }
3824 
3825     if (a->rn == 31) {
3826         gen_check_sp_alignment(s);
3827     }
3828 
3829     addr = read_cpu_reg_sp(s, a->rn, true);
3830     if (!a->p) {
3831         /* pre-index or signed offset */
3832         tcg_gen_addi_i64(addr, addr, a->imm);
3833     }
3834 
3835     tcg_gen_andi_i64(addr, addr, -TAG_GRANULE);
3836     tcg_rt = cpu_reg(s, a->rt);
3837     if (s->ata) {
3838         gen_helper_ldg(tcg_rt, cpu_env, addr, tcg_rt);
3839     } else {
3840         /*
3841          * Tag access disabled: we must check for aborts on the load
3842          * Tag access disabled: we must check for aborts on the load
3843          * from [rn+offset], and then insert a 0 tag into rt.
3844         clean_addr = clean_data_tbi(s, addr);
3845         gen_probe_access(s, clean_addr, MMU_DATA_LOAD, MO_8);
3846         gen_address_with_allocation_tag0(tcg_rt, tcg_rt);
3847     }
3848 
3849     if (a->w) {
3850         /* pre-index or post-index */
3851         if (a->p) {
3852             /* post-index */
3853             tcg_gen_addi_i64(addr, addr, a->imm);
3854         }
3855         tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
3856     }
3857     return true;
3858 }
3859 
3860 static bool do_STG(DisasContext *s, arg_ldst_tag *a, bool is_zero, bool is_pair)
3861 {
3862     TCGv_i64 addr, tcg_rt;
3863 
3864     if (a->rn == 31) {
3865         gen_check_sp_alignment(s);
3866     }
3867 
3868     addr = read_cpu_reg_sp(s, a->rn, true);
3869     if (!a->p) {
3870         /* pre-index or signed offset */
3871         tcg_gen_addi_i64(addr, addr, a->imm);
3872     }
3873     tcg_rt = cpu_reg_sp(s, a->rt);
3874     if (!s->ata) {
3875         /*
3876          * For STG and ST2G, we need to check alignment and probe memory.
3877          * TODO: For STZG and STZ2G, we could rely on the stores below,
3878          * at least for system mode; user-only won't enforce alignment.
3879          */
3880         if (is_pair) {
3881             gen_helper_st2g_stub(cpu_env, addr);
3882         } else {
3883             gen_helper_stg_stub(cpu_env, addr);
3884         }
3885     } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
3886         if (is_pair) {
3887             gen_helper_st2g_parallel(cpu_env, addr, tcg_rt);
3888         } else {
3889             gen_helper_stg_parallel(cpu_env, addr, tcg_rt);
3890         }
3891     } else {
3892         if (is_pair) {
3893             gen_helper_st2g(cpu_env, addr, tcg_rt);
3894         } else {
3895             gen_helper_stg(cpu_env, addr, tcg_rt);
3896         }
3897     }
3898 
3899     if (is_zero) {
3900         TCGv_i64 clean_addr = clean_data_tbi(s, addr);
3901         TCGv_i64 zero64 = tcg_constant_i64(0);
3902         TCGv_i128 zero128 = tcg_temp_new_i128();
3903         int mem_index = get_mem_index(s);
3904         MemOp mop = finalize_memop(s, MO_128 | MO_ALIGN);
3905 
3906         tcg_gen_concat_i64_i128(zero128, zero64, zero64);
3907 
3908         /* This is 1 or 2 atomic 16-byte operations. */
3909         tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
3910         if (is_pair) {
3911             tcg_gen_addi_i64(clean_addr, clean_addr, 16);
3912             tcg_gen_qemu_st_i128(zero128, clean_addr, mem_index, mop);
3913         }
3914     }
3915 
3916     if (a->w) {
3917         /* pre-index or post-index */
3918         if (a->p) {
3919             /* post-index */
3920             tcg_gen_addi_i64(addr, addr, a->imm);
3921         }
3922         tcg_gen_mov_i64(cpu_reg_sp(s, a->rn), addr);
3923     }
3924     return true;
3925 }
3926 
3927 TRANS_FEAT(STG, aa64_mte_insn_reg, do_STG, a, false, false)
3928 TRANS_FEAT(STZG, aa64_mte_insn_reg, do_STG, a, true, false)
3929 TRANS_FEAT(ST2G, aa64_mte_insn_reg, do_STG, a, false, true)
3930 TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
3931 
3932 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);
3933 
3934 static bool gen_rri(DisasContext *s, arg_rri_sf *a,
3935                     bool rd_sp, bool rn_sp, ArithTwoOp *fn)
3936 {
3937     TCGv_i64 tcg_rn = rn_sp ? cpu_reg_sp(s, a->rn) : cpu_reg(s, a->rn);
3938     TCGv_i64 tcg_rd = rd_sp ? cpu_reg_sp(s, a->rd) : cpu_reg(s, a->rd);
3939     TCGv_i64 tcg_imm = tcg_constant_i64(a->imm);
3940 
3941     fn(tcg_rd, tcg_rn, tcg_imm);
3942     if (!a->sf) {
3943         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3944     }
3945     return true;
3946 }
3947 
3948 /*
3949  * PC-rel. addressing
3950  */
3951 
3952 static bool trans_ADR(DisasContext *s, arg_ri *a)
3953 {
3954     gen_pc_plus_diff(s, cpu_reg(s, a->rd), a->imm);
3955     return true;
3956 }
3957 
3958 static bool trans_ADRP(DisasContext *s, arg_ri *a)
3959 {
3960     int64_t offset = (int64_t)a->imm << 12;
3961 
3962     /* The page offset is ok for CF_PCREL. */
3963     offset -= s->pc_curr & 0xfff;
3964     gen_pc_plus_diff(s, cpu_reg(s, a->rd), offset);
3965     return true;
3966 }
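
/*
 * Worked example: for ADRP x0, <label> at pc_curr == 0x40001234 with
 * a->imm == 3, the architectural result is 0x40001000 + (3 << 12).
 * The code computes offset == (3 << 12) - 0x234 and adds it to the
 * full pc, which yields the same value while remaining expressible as
 * a pc-relative difference for CF_PCREL.
 */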
3967 
3968 /*
3969  * Add/subtract (immediate)
3970  */
3971 TRANS(ADD_i, gen_rri, a, 1, 1, tcg_gen_add_i64)
3972 TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64)
3973 TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC)
3974 TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? gen_sub64_CC : gen_sub32_CC)
3975 
3976 /*
3977  * Add/subtract (immediate, with tags)
3978  */
3979 
3980 static bool gen_add_sub_imm_with_tags(DisasContext *s, arg_rri_tag *a,
3981                                       bool sub_op)
3982 {
3983     TCGv_i64 tcg_rn, tcg_rd;
3984     int imm;
3985 
3986     imm = a->uimm6 << LOG2_TAG_GRANULE;
3987     if (sub_op) {
3988         imm = -imm;
3989     }
3990 
3991     tcg_rn = cpu_reg_sp(s, a->rn);
3992     tcg_rd = cpu_reg_sp(s, a->rd);
3993 
3994     if (s->ata) {
3995         gen_helper_addsubg(tcg_rd, cpu_env, tcg_rn,
3996                            tcg_constant_i32(imm),
3997                            tcg_constant_i32(a->uimm4));
3998     } else {
3999         tcg_gen_addi_i64(tcg_rd, tcg_rn, imm);
4000         gen_address_with_allocation_tag0(tcg_rd, tcg_rd);
4001     }
4002     return true;
4003 }
4004 
4005 TRANS_FEAT(ADDG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, false)
4006 TRANS_FEAT(SUBG_i, aa64_mte_insn_reg, gen_add_sub_imm_with_tags, a, true)
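
/*
 * Worked example: with a 16-byte tag granule (LOG2_TAG_GRANULE == 4),
 * ADDG x0, x1, #32, #2 is encoded with uimm6 == 2, so imm in
 * gen_add_sub_imm_with_tags() becomes 2 << 4 == 32; uimm4 == 2 is
 * passed on to the helper as the tag offset.
 */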
4007 
4008 /* Given a value in the bottom e bits (with higher bits zero),
4009  * return that value replicated into every element of size e
4010  * in a 64 bit integer.
4011  */
4012 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
4013 {
4014     assert(e != 0);
4015     while (e < 64) {
4016         mask |= mask << e;
4017         e *= 2;
4018     }
4019     return mask;
4020 }
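
/*
 * Worked example: bitfield_replicate(0x03, 8) doubles the pattern
 * until it fills 64 bits: 0x0303, then 0x03030303, then
 * 0x0303030303030303.
 */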
4021 
4022 /*
4023  * Logical (immediate)
4024  */
4025 
4026 /*
4027  * Simplified variant of pseudocode DecodeBitMasks() for the case where we
4028  * only require the wmask. Returns false if the imms/immr/immn are a reserved
4029  * value (ie should cause a guest UNDEF exception), and true if they are
4030  * valid, in which case the decoded bit pattern is written to result.
4031  */
4032 bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
4033                             unsigned int imms, unsigned int immr)
4034 {
4035     uint64_t mask;
4036     unsigned e, levels, s, r;
4037     int len;
4038 
4039     assert(immn < 2 && imms < 64 && immr < 64);
4040 
4041     /* The bit patterns we create here are 64 bit patterns which
4042      * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
4043      * 64 bits each. Each element contains the same value: a run
4044      * of between 1 and e-1 non-zero bits, rotated within the
4045      * element by between 0 and e-1 bits.
4046      *
4047      * The element size and run length are encoded into immn (1 bit)
4048      * and imms (6 bits) as follows:
4049      * 64 bit elements: immn = 1, imms = <length of run - 1>
4050      * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
4051      * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
4052      *  8 bit elements: immn = 0, imms = 110 : <length of run - 1>
4053      *  4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
4054      *  2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
4055      * Notice that immn = 0, imms = 11111x is the only combination
4056      * not covered by one of the above options; this is reserved.
4057      * Further, <length of run - 1> all-ones is a reserved pattern.
4058      *
4059      * In all cases the rotation is by immr % e (and immr is 6 bits).
4060      */
4061 
4062     /* First determine the element size */
4063     len = 31 - clz32((immn << 6) | (~imms & 0x3f));
4064     if (len < 1) {
4065         /* This is the immn == 0, imms == 0b11111x case */
4066         return false;
4067     }
4068     e = 1 << len;
4069 
4070     levels = e - 1;
4071     s = imms & levels;
4072     r = immr & levels;
4073 
4074     if (s == levels) {
4075         /* <length of run - 1> mustn't be all-ones. */
4076         return false;
4077     }
4078 
4079     /* Create the value of one element: s+1 set bits rotated
4080      * by r within the element (which is e bits wide)...
4081      */
4082     mask = MAKE_64BIT_MASK(0, s + 1);
4083     if (r) {
4084         mask = (mask >> r) | (mask << (e - r));
4085         mask &= MAKE_64BIT_MASK(0, e);
4086     }
4087     /* ...then replicate the element over the whole 64 bit value */
4088     mask = bitfield_replicate(mask, e);
4089     *result = mask;
4090     return true;
4091 }
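
/*
 * Worked example: immn == 0, imms == 0b100111, immr == 0 encodes
 * 16-bit elements containing a run of 8 set bits.  Above,
 * (immn << 6) | (~imms & 0x3f) is 0b011000, so len == 4 and e == 16;
 * s == 7 and r == 0 give a per-element mask of 0xff, which replicates
 * to 0x00ff00ff00ff00ff.
 */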
4092 
4093 static bool gen_rri_log(DisasContext *s, arg_rri_log *a, bool set_cc,
4094                         void (*fn)(TCGv_i64, TCGv_i64, int64_t))
4095 {
4096     TCGv_i64 tcg_rd, tcg_rn;
4097     uint64_t imm;
4098 
4099     /* Some immediate field values are reserved. */
4100     if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
4101                                 extract32(a->dbm, 0, 6),
4102                                 extract32(a->dbm, 6, 6))) {
4103         return false;
4104     }
4105     if (!a->sf) {
4106         imm &= 0xffffffffull;
4107     }
4108 
4109     tcg_rd = set_cc ? cpu_reg(s, a->rd) : cpu_reg_sp(s, a->rd);
4110     tcg_rn = cpu_reg(s, a->rn);
4111 
4112     fn(tcg_rd, tcg_rn, imm);
4113     if (set_cc) {
4114         gen_logic_CC(a->sf, tcg_rd);
4115     }
4116     if (!a->sf) {
4117         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4118     }
4119     return true;
4120 }
4121 
4122 TRANS(AND_i, gen_rri_log, a, false, tcg_gen_andi_i64)
4123 TRANS(ORR_i, gen_rri_log, a, false, tcg_gen_ori_i64)
4124 TRANS(EOR_i, gen_rri_log, a, false, tcg_gen_xori_i64)
4125 TRANS(ANDS_i, gen_rri_log, a, true, tcg_gen_andi_i64)
4126 
4127 /*
4128  * Move wide (immediate)
4129  */
4130 
4131 static bool trans_MOVZ(DisasContext *s, arg_movw *a)
4132 {
4133     int pos = a->hw << 4;
4134     tcg_gen_movi_i64(cpu_reg(s, a->rd), (uint64_t)a->imm << pos);
4135     return true;
4136 }
4137 
4138 static bool trans_MOVN(DisasContext *s, arg_movw *a)
4139 {
4140     int pos = a->hw << 4;
4141     uint64_t imm = a->imm;
4142 
4143     imm = ~(imm << pos);
4144     if (!a->sf) {
4145         imm = (uint32_t)imm;
4146     }
4147     tcg_gen_movi_i64(cpu_reg(s, a->rd), imm);
4148     return true;
4149 }
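
/*
 * Worked example: MOVN w0, #0x1234, LSL #16 has a->hw == 1, so
 * pos == 16 and imm becomes ~(0x1234 << 16) == 0xffffffffedcbffff;
 * the !sf truncation then leaves 0xedcbffff in w0.
 */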
4150 
4151 static bool trans_MOVK(DisasContext *s, arg_movw *a)
4152 {
4153     int pos = a->hw << 4;
4154     TCGv_i64 tcg_rd, tcg_im;
4155 
4156     tcg_rd = cpu_reg(s, a->rd);
4157     tcg_im = tcg_constant_i64(a->imm);
4158     tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_im, pos, 16);
4159     if (!a->sf) {
4160         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4161     }
4162     return true;
4163 }
4164 
4165 /*
4166  * Bitfield
4167  */
4168 
4169 static bool trans_SBFM(DisasContext *s, arg_SBFM *a)
4170 {
4171     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4172     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4173     unsigned int bitsize = a->sf ? 64 : 32;
4174     unsigned int ri = a->immr;
4175     unsigned int si = a->imms;
4176     unsigned int pos, len;
4177 
4178     if (si >= ri) {
4179         /* Wd<s-r:0> = Wn<s:r> */
4180         len = (si - ri) + 1;
4181         tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
4182         if (!a->sf) {
4183             tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4184         }
4185     } else {
4186         /* Wd<32+s-r,32-r> = Wn<s:0> */
4187         len = si + 1;
4188         pos = (bitsize - ri) & (bitsize - 1);
4189 
4190         if (len < ri) {
4191             /*
4192              * Sign extend the destination field from len to fill the
4193              * balance of the word.  Let the deposit below insert all
4194              * of those sign bits.
4195              */
4196             tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
4197             len = ri;
4198         }
4199 
4200         /*
4201          * We start with zero, and we haven't modified any bits outside
4202          * bitsize, therefore no final zero-extension is needed for !sf.
4203          */
4204         tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4205     }
4206     return true;
4207 }
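
/*
 * Illustrative mapping to common aliases: ASR w0, w1, #3 is SBFM with
 * immr == 3, imms == 31, so the si >= ri branch sign-extracts bits
 * [31:3]; SBFIZ w0, w1, #8, #4 is SBFM with immr == 24, imms == 3, so
 * the other branch deposits the (sign-extended) 4-bit field at
 * pos == (32 - 24) & 31 == 8.
 */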
4208 
4209 static bool trans_UBFM(DisasContext *s, arg_UBFM *a)
4210 {
4211     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4212     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4213     unsigned int bitsize = a->sf ? 64 : 32;
4214     unsigned int ri = a->immr;
4215     unsigned int si = a->imms;
4216     unsigned int pos, len;
4217 
4221     if (si >= ri) {
4222         /* Wd<s-r:0> = Wn<s:r> */
4223         len = (si - ri) + 1;
4224         tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
4225     } else {
4226         /* Wd<32+s-r,32-r> = Wn<s:0> */
4227         len = si + 1;
4228         pos = (bitsize - ri) & (bitsize - 1);
4229         tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
4230     }
4231     return true;
4232 }
4233 
4234 static bool trans_BFM(DisasContext *s, arg_BFM *a)
4235 {
4236     TCGv_i64 tcg_rd = cpu_reg(s, a->rd);
4237     TCGv_i64 tcg_tmp = read_cpu_reg(s, a->rn, 1);
4238     unsigned int bitsize = a->sf ? 64 : 32;
4239     unsigned int ri = a->immr;
4240     unsigned int si = a->imms;
4241     unsigned int pos, len;
4242 
4246     if (si >= ri) {
4247         /* Wd<s-r:0> = Wn<s:r> */
4248         tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
4249         len = (si - ri) + 1;
4250         pos = 0;
4251     } else {
4252         /* Wd<32+s-r,32-r> = Wn<s:0> */
4253         len = si + 1;
4254         pos = (bitsize - ri) & (bitsize - 1);
4255     }
4256 
4257     tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
4258     if (!a->sf) {
4259         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4260     }
4261     return true;
4262 }
4263 
4264 static bool trans_EXTR(DisasContext *s, arg_extract *a)
4265 {
4266     TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
4267 
4268     tcg_rd = cpu_reg(s, a->rd);
4269 
4270     if (unlikely(a->imm == 0)) {
4271         /*
4272          * tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
4273          * so an extract from bit 0 is a special case.
4274          */
4275         if (a->sf) {
4276             tcg_gen_mov_i64(tcg_rd, cpu_reg(s, a->rm));
4277         } else {
4278             tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, a->rm));
4279         }
4280     } else {
4281         tcg_rm = cpu_reg(s, a->rm);
4282         tcg_rn = cpu_reg(s, a->rn);
4283 
4284         if (a->sf) {
4285             /* Specialization to ROR happens in EXTRACT2.  */
4286             tcg_gen_extract2_i64(tcg_rd, tcg_rm, tcg_rn, a->imm);
4287         } else {
4288             TCGv_i32 t0 = tcg_temp_new_i32();
4289 
4290             tcg_gen_extrl_i64_i32(t0, tcg_rm);
4291             if (a->rm == a->rn) {
4292                 tcg_gen_rotri_i32(t0, t0, a->imm);
4293             } else {
4294                 TCGv_i32 t1 = tcg_temp_new_i32();
4295                 tcg_gen_extrl_i64_i32(t1, tcg_rn);
4296                 tcg_gen_extract2_i32(t0, t0, t1, a->imm);
4297             }
4298             tcg_gen_extu_i32_i64(tcg_rd, t0);
4299         }
4300     }
4301     return true;
4302 }
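
/*
 * Worked example: EXTR x0, x1, x2, #8 extracts bits [71:8] of the
 * concatenation x1:x2, i.e. x0 = (x2 >> 8) | (x1 << 56), which is
 * exactly what tcg_gen_extract2_i64(rd, rm, rn, 8) computes.  With
 * Rn == Rm the operation is a rotate; that case is only special-cased
 * explicitly for the 32-bit path.
 */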
4303 
4304 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
4305  * Note that it is the caller's responsibility to ensure that the
4306  * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
4307  * mandated semantics for out of range shifts.
4308  */
4309 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
4310                       enum a64_shift_type shift_type, TCGv_i64 shift_amount)
4311 {
4312     switch (shift_type) {
4313     case A64_SHIFT_TYPE_LSL:
4314         tcg_gen_shl_i64(dst, src, shift_amount);
4315         break;
4316     case A64_SHIFT_TYPE_LSR:
4317         tcg_gen_shr_i64(dst, src, shift_amount);
4318         break;
4319     case A64_SHIFT_TYPE_ASR:
4320         if (!sf) {
4321             tcg_gen_ext32s_i64(dst, src);
4322         }
4323         tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
4324         break;
4325     case A64_SHIFT_TYPE_ROR:
4326         if (sf) {
4327             tcg_gen_rotr_i64(dst, src, shift_amount);
4328         } else {
4329             TCGv_i32 t0, t1;
4330             t0 = tcg_temp_new_i32();
4331             t1 = tcg_temp_new_i32();
4332             tcg_gen_extrl_i64_i32(t0, src);
4333             tcg_gen_extrl_i64_i32(t1, shift_amount);
4334             tcg_gen_rotr_i32(t0, t0, t1);
4335             tcg_gen_extu_i32_i64(dst, t0);
4336         }
4337         break;
4338     default:
4339         g_assert_not_reached(); /* all shift types should be handled */
4340         break;
4341     }
4342 
4343     if (!sf) { /* zero extend final result */
4344         tcg_gen_ext32u_i64(dst, dst);
4345     }
4346 }
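
/*
 * Note on the ROR case above: a 32-bit rotate cannot reuse rotr_i64 on
 * the zero-extended value, because bits shifted out of bit 0 must
 * reappear at bit 31 rather than bit 63; e.g. ROR of 0x00000001 by 1
 * must yield 0x80000000, hence the narrowing to i32.
 */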
4347 
4348 /* Shift a TCGv src by immediate, put result in dst.
4349  * The shift amount must be in range (this should always be true as the
4350  * relevant instructions will UNDEF on bad shift immediates).
4351  */
4352 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
4353                           enum a64_shift_type shift_type, unsigned int shift_i)
4354 {
4355     assert(shift_i < (sf ? 64 : 32));
4356 
4357     if (shift_i == 0) {
4358         tcg_gen_mov_i64(dst, src);
4359     } else {
4360         shift_reg(dst, src, sf, shift_type, tcg_constant_i64(shift_i));
4361     }
4362 }
4363 
4364 /* Logical (shifted register)
4365  *   31  30 29 28       24 23   22 21  20  16 15    10 9    5 4    0
4366  * +----+-----+-----------+-------+---+------+--------+------+------+
4367  * | sf | opc | 0 1 0 1 0 | shift | N |  Rm  |  imm6  |  Rn  |  Rd  |
4368  * +----+-----+-----------+-------+---+------+--------+------+------+
4369  */
4370 static void disas_logic_reg(DisasContext *s, uint32_t insn)
4371 {
4372     TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
4373     unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
4374 
4375     sf = extract32(insn, 31, 1);
4376     opc = extract32(insn, 29, 2);
4377     shift_type = extract32(insn, 22, 2);
4378     invert = extract32(insn, 21, 1);
4379     rm = extract32(insn, 16, 5);
4380     shift_amount = extract32(insn, 10, 6);
4381     rn = extract32(insn, 5, 5);
4382     rd = extract32(insn, 0, 5);
4383 
4384     if (!sf && (shift_amount & (1 << 5))) {
4385         unallocated_encoding(s);
4386         return;
4387     }
4388 
4389     tcg_rd = cpu_reg(s, rd);
4390 
4391     if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
4392         /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
4393          * register-register MOV and MVN, so it is worth special casing.
4394          */
4395         tcg_rm = cpu_reg(s, rm);
4396         if (invert) {
4397             tcg_gen_not_i64(tcg_rd, tcg_rm);
4398             if (!sf) {
4399                 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4400             }
4401         } else {
4402             if (sf) {
4403                 tcg_gen_mov_i64(tcg_rd, tcg_rm);
4404             } else {
4405                 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
4406             }
4407         }
4408         return;
4409     }
4410 
4411     tcg_rm = read_cpu_reg(s, rm, sf);
4412 
4413     if (shift_amount) {
4414         shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
4415     }
4416 
4417     tcg_rn = cpu_reg(s, rn);
4418 
4419     switch (opc | (invert << 2)) {
4420     case 0: /* AND */
4421     case 3: /* ANDS */
4422         tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
4423         break;
4424     case 1: /* ORR */
4425         tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
4426         break;
4427     case 2: /* EOR */
4428         tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
4429         break;
4430     case 4: /* BIC */
4431     case 7: /* BICS */
4432         tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
4433         break;
4434     case 5: /* ORN */
4435         tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
4436         break;
4437     case 6: /* EON */
4438         tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
4439         break;
4440     default:
4441         g_assert_not_reached();
4442         break;
4443     }
4444 
4445     if (!sf) {
4446         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4447     }
4448 
4449     if (opc == 3) {
4450         gen_logic_CC(sf, tcg_rd);
4451     }
4452 }
4453 
4454 /*
4455  * Add/subtract (extended register)
4456  *
4457  *  31|30|29|28       24|23 22|21|20   16|15  13|12  10|9  5|4  0|
4458  * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4459  * |sf|op| S| 0 1 0 1 1 | opt | 1|  Rm   |option| imm3 | Rn | Rd |
4460  * +--+--+--+-----------+-----+--+-------+------+------+----+----+
4461  *
4462  *  sf: 0 -> 32bit, 1 -> 64bit
4463  *  op: 0 -> add  , 1 -> sub
4464  *   S: 1 -> set flags
4465  * opt: 00
4466  * option: extension type (see DecodeRegExtend)
4467  * imm3: optional shift to Rm
4468  *
4469  * Rd = Rn + LSL(extend(Rm), amount)
4470  */
4471 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
4472 {
4473     int rd = extract32(insn, 0, 5);
4474     int rn = extract32(insn, 5, 5);
4475     int imm3 = extract32(insn, 10, 3);
4476     int option = extract32(insn, 13, 3);
4477     int rm = extract32(insn, 16, 5);
4478     int opt = extract32(insn, 22, 2);
4479     bool setflags = extract32(insn, 29, 1);
4480     bool sub_op = extract32(insn, 30, 1);
4481     bool sf = extract32(insn, 31, 1);
4482 
4483     TCGv_i64 tcg_rm, tcg_rn; /* temps */
4484     TCGv_i64 tcg_rd;
4485     TCGv_i64 tcg_result;
4486 
4487     if (imm3 > 4 || opt != 0) {
4488         unallocated_encoding(s);
4489         return;
4490     }
4491 
4492     /* non-flag setting ops may use SP */
4493     if (!setflags) {
4494         tcg_rd = cpu_reg_sp(s, rd);
4495     } else {
4496         tcg_rd = cpu_reg(s, rd);
4497     }
4498     tcg_rn = read_cpu_reg_sp(s, rn, sf);
4499 
4500     tcg_rm = read_cpu_reg(s, rm, sf);
4501     ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
4502 
4503     tcg_result = tcg_temp_new_i64();
4504 
4505     if (!setflags) {
4506         if (sub_op) {
4507             tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4508         } else {
4509             tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4510         }
4511     } else {
4512         if (sub_op) {
4513             gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4514         } else {
4515             gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4516         }
4517     }
4518 
4519     if (sf) {
4520         tcg_gen_mov_i64(tcg_rd, tcg_result);
4521     } else {
4522         tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4523     }
4524 }
4525 
4526 /*
4527  * Add/subtract (shifted register)
4528  *
4529  *  31 30 29 28       24 23 22 21 20   16 15     10 9    5 4    0
4530  * +--+--+--+-----------+-----+--+-------+---------+------+------+
4531  * |sf|op| S| 0 1 0 1 1 |shift| 0|  Rm   |  imm6   |  Rn  |  Rd  |
4532  * +--+--+--+-----------+-----+--+-------+---------+------+------+
4533  *
4534  *    sf: 0 -> 32bit, 1 -> 64bit
4535  *    op: 0 -> add  , 1 -> sub
4536  *     S: 1 -> set flags
4537  * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4538  *  imm6: Shift amount to apply to Rm before the add/sub
4539  */
4540 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4541 {
4542     int rd = extract32(insn, 0, 5);
4543     int rn = extract32(insn, 5, 5);
4544     int imm6 = extract32(insn, 10, 6);
4545     int rm = extract32(insn, 16, 5);
4546     int shift_type = extract32(insn, 22, 2);
4547     bool setflags = extract32(insn, 29, 1);
4548     bool sub_op = extract32(insn, 30, 1);
4549     bool sf = extract32(insn, 31, 1);
4550 
4551     TCGv_i64 tcg_rd = cpu_reg(s, rd);
4552     TCGv_i64 tcg_rn, tcg_rm;
4553     TCGv_i64 tcg_result;
4554 
4555     if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4556         unallocated_encoding(s);
4557         return;
4558     }
4559 
4560     tcg_rn = read_cpu_reg(s, rn, sf);
4561     tcg_rm = read_cpu_reg(s, rm, sf);
4562 
4563     shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4564 
4565     tcg_result = tcg_temp_new_i64();
4566 
4567     if (!setflags) {
4568         if (sub_op) {
4569             tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4570         } else {
4571             tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4572         }
4573     } else {
4574         if (sub_op) {
4575             gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4576         } else {
4577             gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4578         }
4579     }
4580 
4581     if (sf) {
4582         tcg_gen_mov_i64(tcg_rd, tcg_result);
4583     } else {
4584         tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4585     }
4586 }
4587 
4588 /* Data-processing (3 source)
4589  *
4590  *    31 30  29 28       24 23 21  20  16  15  14  10 9    5 4    0
4591  *  +--+------+-----------+------+------+----+------+------+------+
4592  *  |sf| op54 | 1 1 0 1 1 | op31 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
4593  *  +--+------+-----------+------+------+----+------+------+------+
4594  */
4595 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4596 {
4597     int rd = extract32(insn, 0, 5);
4598     int rn = extract32(insn, 5, 5);
4599     int ra = extract32(insn, 10, 5);
4600     int rm = extract32(insn, 16, 5);
4601     int op_id = (extract32(insn, 29, 3) << 4) |
4602         (extract32(insn, 21, 3) << 1) |
4603         extract32(insn, 15, 1);
4604     bool sf = extract32(insn, 31, 1);
4605     bool is_sub = extract32(op_id, 0, 1);
4606     bool is_high = extract32(op_id, 2, 1);
4607     bool is_signed = false;
4608     TCGv_i64 tcg_op1;
4609     TCGv_i64 tcg_op2;
4610     TCGv_i64 tcg_tmp;
4611 
4612     /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4613     switch (op_id) {
4614     case 0x42: /* SMADDL */
4615     case 0x43: /* SMSUBL */
4616     case 0x44: /* SMULH */
4617         is_signed = true;
4618         break;
4619     case 0x0: /* MADD (32bit) */
4620     case 0x1: /* MSUB (32bit) */
4621     case 0x40: /* MADD (64bit) */
4622     case 0x41: /* MSUB (64bit) */
4623     case 0x4a: /* UMADDL */
4624     case 0x4b: /* UMSUBL */
4625     case 0x4c: /* UMULH */
4626         break;
4627     default:
4628         unallocated_encoding(s);
4629         return;
4630     }
4631 
4632     if (is_high) {
4633         TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4634         TCGv_i64 tcg_rd = cpu_reg(s, rd);
4635         TCGv_i64 tcg_rn = cpu_reg(s, rn);
4636         TCGv_i64 tcg_rm = cpu_reg(s, rm);
4637 
4638         if (is_signed) {
4639             tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4640         } else {
4641             tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4642         }
4643         return;
4644     }
4645 
4646     tcg_op1 = tcg_temp_new_i64();
4647     tcg_op2 = tcg_temp_new_i64();
4648     tcg_tmp = tcg_temp_new_i64();
4649 
4650     if (op_id < 0x42) {
4651         tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4652         tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4653     } else {
4654         if (is_signed) {
4655             tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4656             tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4657         } else {
4658             tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4659             tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4660         }
4661     }
4662 
4663     if (ra == 31 && !is_sub) {
4664         /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4665         tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4666     } else {
4667         tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4668         if (is_sub) {
4669             tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4670         } else {
4671             tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4672         }
4673     }
4674 
4675     if (!sf) {
4676         tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4677     }
4678 }
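
/*
 * Illustrative decode of op_id (sf:op54:op31:o0): SMULH has sf == 1,
 * op54 == 00, op31 == 010, o0 == 0, which packs to 0x44, so is_high
 * and is_signed are set and the muls2 path is taken; MADD (32-bit)
 * packs to 0x0 and takes the plain multiply-accumulate path with a
 * final zero-extension.
 */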
4679 
4680 /* Add/subtract (with carry)
4681  *  31 30 29 28 27 26 25 24 23 22 21  20  16  15       10  9    5 4   0
4682  * +--+--+--+------------------------+------+-------------+------+-----+
4683  * |sf|op| S| 1  1  0  1  0  0  0  0 |  rm  | 0 0 0 0 0 0 |  Rn  |  Rd |
4684  * +--+--+--+------------------------+------+-------------+------+-----+
4685  */
4686 
4687 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4688 {
4689     unsigned int sf, op, setflags, rm, rn, rd;
4690     TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4691 
4692     sf = extract32(insn, 31, 1);
4693     op = extract32(insn, 30, 1);
4694     setflags = extract32(insn, 29, 1);
4695     rm = extract32(insn, 16, 5);
4696     rn = extract32(insn, 5, 5);
4697     rd = extract32(insn, 0, 5);
4698 
4699     tcg_rd = cpu_reg(s, rd);
4700     tcg_rn = cpu_reg(s, rn);
4701 
4702     if (op) {
4703         tcg_y = tcg_temp_new_i64();
4704         tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4705     } else {
4706         tcg_y = cpu_reg(s, rm);
4707     }
4708 
4709     if (setflags) {
4710         gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4711     } else {
4712         gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4713     }
4714 }
4715 
4716 /*
4717  * Rotate right into flags
4718  *  31 30 29                21       15          10      5  4      0
4719  * +--+--+--+-----------------+--------+-----------+------+--+------+
4720  * |sf|op| S| 1 1 0 1 0 0 0 0 |  imm6  | 0 0 0 0 1 |  Rn  |o2| mask |
4721  * +--+--+--+-----------------+--------+-----------+------+--+------+
4722  */
4723 static void disas_rotate_right_into_flags(DisasContext *s, uint32_t insn)
4724 {
4725     int mask = extract32(insn, 0, 4);
4726     int o2 = extract32(insn, 4, 1);
4727     int rn = extract32(insn, 5, 5);
4728     int imm6 = extract32(insn, 15, 6);
4729     int sf_op_s = extract32(insn, 29, 3);
4730     TCGv_i64 tcg_rn;
4731     TCGv_i32 nzcv;
4732 
4733     if (sf_op_s != 5 || o2 != 0 || !dc_isar_feature(aa64_condm_4, s)) {
4734         unallocated_encoding(s);
4735         return;
4736     }
4737 
4738     tcg_rn = read_cpu_reg(s, rn, 1);
4739     tcg_gen_rotri_i64(tcg_rn, tcg_rn, imm6);
4740 
4741     nzcv = tcg_temp_new_i32();
4742     tcg_gen_extrl_i64_i32(nzcv, tcg_rn);
4743 
4744     if (mask & 8) { /* N */
4745         tcg_gen_shli_i32(cpu_NF, nzcv, 31 - 3);
4746     }
4747     if (mask & 4) { /* Z */
4748         tcg_gen_not_i32(cpu_ZF, nzcv);
4749         tcg_gen_andi_i32(cpu_ZF, cpu_ZF, 4);
4750     }
4751     if (mask & 2) { /* C */
4752         tcg_gen_extract_i32(cpu_CF, nzcv, 1, 1);
4753     }
4754     if (mask & 1) { /* V */
4755         tcg_gen_shli_i32(cpu_VF, nzcv, 31 - 0);
4756     }
4757 }
4758 
4759 /*
4760  * Evaluate into flags
4761  *  31 30 29                21        15   14        10      5  4      0
4762  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4763  * |sf|op| S| 1 1 0 1 0 0 0 0 | opcode2 | sz | 0 0 1 0 |  Rn  |o3| mask |
4764  * +--+--+--+-----------------+---------+----+---------+------+--+------+
4765  */
4766 static void disas_evaluate_into_flags(DisasContext *s, uint32_t insn)
4767 {
4768     int o3_mask = extract32(insn, 0, 5);
4769     int rn = extract32(insn, 5, 5);
4770     int o2 = extract32(insn, 15, 6);
4771     int sz = extract32(insn, 14, 1);
4772     int sf_op_s = extract32(insn, 29, 3);
4773     TCGv_i32 tmp;
4774     int shift;
4775 
4776     if (sf_op_s != 1 || o2 != 0 || o3_mask != 0xd ||
4777         !dc_isar_feature(aa64_condm_4, s)) {
4778         unallocated_encoding(s);
4779         return;
4780     }
4781     shift = sz ? 16 : 24;  /* SETF16 or SETF8 */
4782 
4783     tmp = tcg_temp_new_i32();
4784     tcg_gen_extrl_i64_i32(tmp, cpu_reg(s, rn));
4785     tcg_gen_shli_i32(cpu_NF, tmp, shift);
4786     tcg_gen_shli_i32(cpu_VF, tmp, shift - 1);
4787     tcg_gen_mov_i32(cpu_ZF, cpu_NF);
4788     tcg_gen_xor_i32(cpu_VF, cpu_VF, cpu_NF);
4789 }
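
/*
 * Worked example for SETF8 (sz == 0, shift == 24): with w1 == 0x80,
 * NF == 0x80 << 24 has bit 31 set (N == 1), and ZF (a copy of NF) is
 * nonzero (Z == 0); VF == (0x80 << 23) ^ NF also has bit 31 set, so
 * V == 1, reflecting that bits 8 and 7 of the input differ.  C is
 * left unchanged.
 */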
4790 
4791 /* Conditional compare (immediate / register)
4792  *  31 30 29 28 27 26 25 24 23 22 21  20    16 15  12  11  10  9   5  4 3   0
4793  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4794  * |sf|op| S| 1  1  0  1  0  0  1  0 |imm5/rm | cond |i/r |o2|  Rn  |o3|nzcv |
4795  * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4796  *        [1]                             y                [0]       [0]
4797  */
4798 static void disas_cc(DisasContext *s, uint32_t insn)
4799 {
4800     unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4801     TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4802     TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4803     DisasCompare c;
4804 
4805     if (!extract32(insn, 29, 1)) {
4806         unallocated_encoding(s);
4807         return;
4808     }
4809     if (insn & (1 << 10 | 1 << 4)) {
4810         unallocated_encoding(s);
4811         return;
4812     }
4813     sf = extract32(insn, 31, 1);
4814     op = extract32(insn, 30, 1);
4815     is_imm = extract32(insn, 11, 1);
4816     y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
4817     cond = extract32(insn, 12, 4);
4818     rn = extract32(insn, 5, 5);
4819     nzcv = extract32(insn, 0, 4);
4820 
4821     /* Set T0 = !COND.  */
4822     tcg_t0 = tcg_temp_new_i32();
4823     arm_test_cc(&c, cond);
4824     tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
4825 
4826     /* Load the arguments for the new comparison.  */
4827     if (is_imm) {
4828         tcg_y = tcg_temp_new_i64();
4829         tcg_gen_movi_i64(tcg_y, y);
4830     } else {
4831         tcg_y = cpu_reg(s, y);
4832     }
4833     tcg_rn = cpu_reg(s, rn);
4834 
4835     /* Set the flags for the new comparison.  */
4836     tcg_tmp = tcg_temp_new_i64();
4837     if (op) {
4838         gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4839     } else {
4840         gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4841     }
4842 
4843     /* If COND was false, force the flags to #nzcv.  Compute two masks
4844      * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
4845      * For tcg hosts that support ANDC, we can make do with just T1.
4846      * In either case, allow the tcg optimizer to delete any unused mask.
4847      */
4848     tcg_t1 = tcg_temp_new_i32();
4849     tcg_t2 = tcg_temp_new_i32();
4850     tcg_gen_neg_i32(tcg_t1, tcg_t0);
4851     tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
4852 
4853     if (nzcv & 8) { /* N */
4854         tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
4855     } else {
4856         if (TCG_TARGET_HAS_andc_i32) {
4857             tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
4858         } else {
4859             tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
4860         }
4861     }
4862     if (nzcv & 4) { /* Z */
4863         if (TCG_TARGET_HAS_andc_i32) {
4864             tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
4865         } else {
4866             tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
4867         }
4868     } else {
4869         tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4870     }
4871     if (nzcv & 2) { /* C */
4872         tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4873     } else {
4874         if (TCG_TARGET_HAS_andc_i32) {
4875             tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4876         } else {
4877             tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4878         }
4879     }
4880     if (nzcv & 1) { /* V */
4881         tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4882     } else {
4883         if (TCG_TARGET_HAS_andc_i32) {
4884             tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4885         } else {
4886             tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4887         }
4888     }
4889 }
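
/*
 * Illustrative trace of the masking above: when COND is false, t0 == 1,
 * t1 == -1 and t2 == 0, so OR-ing with t1 forces a flag on and AND-ing
 * with t2 (or ANDC with t1) forces it off, i.e. the flags take the
 * static #nzcv value; when COND is true the masks are 0 and -1 and the
 * freshly computed comparison flags pass through unchanged.
 */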
4890 
4891 /* Conditional select
4892  *   31   30  29  28             21 20  16 15  12 11 10 9    5 4    0
4893  * +----+----+---+-----------------+------+------+-----+------+------+
4894  * | sf | op | S | 1 1 0 1 0 1 0 0 |  Rm  | cond | op2 |  Rn  |  Rd  |
4895  * +----+----+---+-----------------+------+------+-----+------+------+
4896  */
4897 static void disas_cond_select(DisasContext *s, uint32_t insn)
4898 {
4899     unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4900     TCGv_i64 tcg_rd, zero;
4901     DisasCompare64 c;
4902 
4903     if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4904         /* S == 1 or op2<1> == 1 */
4905         unallocated_encoding(s);
4906         return;
4907     }
4908     sf = extract32(insn, 31, 1);
4909     else_inv = extract32(insn, 30, 1);
4910     rm = extract32(insn, 16, 5);
4911     cond = extract32(insn, 12, 4);
4912     else_inc = extract32(insn, 10, 1);
4913     rn = extract32(insn, 5, 5);
4914     rd = extract32(insn, 0, 5);
4915 
4916     tcg_rd = cpu_reg(s, rd);
4917 
4918     a64_test_cc(&c, cond);
4919     zero = tcg_constant_i64(0);
4920 
4921     if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4922         /* CSET & CSETM.  */
4923         tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4924         if (else_inv) {
4925             tcg_gen_neg_i64(tcg_rd, tcg_rd);
4926         }
4927     } else {
4928         TCGv_i64 t_true = cpu_reg(s, rn);
4929         TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
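        /*
         * CSEL, CSINC, CSINV and CSNEG: apply the "else" transformation
         * (increment, invert or negate) to Rm before the conditional move.
         */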
4930         if (else_inv && else_inc) {
4931             tcg_gen_neg_i64(t_false, t_false);
4932         } else if (else_inv) {
4933             tcg_gen_not_i64(t_false, t_false);
4934         } else if (else_inc) {
4935             tcg_gen_addi_i64(t_false, t_false, 1);
4936         }
4937         tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4938     }
4939 
4940     if (!sf) {
4941         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4942     }
4943 }
4944 
4945 static void handle_clz(DisasContext *s, unsigned int sf,
4946                        unsigned int rn, unsigned int rd)
4947 {
4948     TCGv_i64 tcg_rd, tcg_rn;
4949     tcg_rd = cpu_reg(s, rd);
4950     tcg_rn = cpu_reg(s, rn);
4951 
4952     if (sf) {
4953         tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4954     } else {
4955         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4956         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4957         tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4958         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4959     }
4960 }
4961 
4962 static void handle_cls(DisasContext *s, unsigned int sf,
4963                        unsigned int rn, unsigned int rd)
4964 {
4965     TCGv_i64 tcg_rd, tcg_rn;
4966     tcg_rd = cpu_reg(s, rd);
4967     tcg_rn = cpu_reg(s, rn);
4968 
4969     if (sf) {
4970         tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4971     } else {
4972         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4973         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4974         tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4975         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4976     }
4977 }
4978 
4979 static void handle_rbit(DisasContext *s, unsigned int sf,
4980                         unsigned int rn, unsigned int rd)
4981 {
4982     TCGv_i64 tcg_rd, tcg_rn;
4983     tcg_rd = cpu_reg(s, rd);
4984     tcg_rn = cpu_reg(s, rn);
4985 
4986     if (sf) {
4987         gen_helper_rbit64(tcg_rd, tcg_rn);
4988     } else {
4989         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4990         tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4991         gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4992         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4993     }
4994 }
4995 
4996 /* REV with sf==1, opcode==3 ("REV64") */
4997 static void handle_rev64(DisasContext *s, unsigned int sf,
4998                          unsigned int rn, unsigned int rd)
4999 {
5000     if (!sf) {
5001         unallocated_encoding(s);
5002         return;
5003     }
5004     tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
5005 }
5006 
5007 /* REV (sf==0, opcode==2)
5008  * REV32 (sf==1, opcode==2)
5009  */
5010 static void handle_rev32(DisasContext *s, unsigned int sf,
5011                          unsigned int rn, unsigned int rd)
5012 {
5013     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5014     TCGv_i64 tcg_rn = cpu_reg(s, rn);
5015 
5016     if (sf) {
5017         tcg_gen_bswap64_i64(tcg_rd, tcg_rn);
5018         tcg_gen_rotri_i64(tcg_rd, tcg_rd, 32);
5019     } else {
5020         tcg_gen_bswap32_i64(tcg_rd, tcg_rn, TCG_BSWAP_OZ);
5021     }
5022 }
5023 
5024 /* REV16 (opcode==1) */
5025 static void handle_rev16(DisasContext *s, unsigned int sf,
5026                          unsigned int rn, unsigned int rd)
5027 {
5028     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5029     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5030     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5031     TCGv_i64 mask = tcg_constant_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
5032 
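    /*
     * Byte-swap within each 16-bit element: the even bytes (selected by
     * mask) move up by 8 bits, the odd bytes move down by 8 bits, and the
     * two halves are then ORed back together.
     */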
5033     tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
5034     tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
5035     tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
5036     tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
5037     tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
5038 }
5039 
5040 /* Data-processing (1 source)
5041  *   31  30  29  28             21 20     16 15    10 9    5 4    0
5042  * +----+---+---+-----------------+---------+--------+------+------+
5043  * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode |  Rn  |  Rd  |
5044  * +----+---+---+-----------------+---------+--------+------+------+
5045  */
5046 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
5047 {
5048     unsigned int sf, opcode, opcode2, rn, rd;
5049     TCGv_i64 tcg_rd;
5050 
5051     if (extract32(insn, 29, 1)) {
5052         unallocated_encoding(s);
5053         return;
5054     }
5055 
5056     sf = extract32(insn, 31, 1);
5057     opcode = extract32(insn, 10, 6);
5058     opcode2 = extract32(insn, 16, 5);
5059     rn = extract32(insn, 5, 5);
5060     rd = extract32(insn, 0, 5);
5061 
5062 #define MAP(SF, O2, O1) ((SF) | (O1 << 1) | (O2 << 7))
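/* MAP packs sf into bit 0, opcode into bits [6:1] and opcode2 into bits [11:7]. */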
5063 
5064     switch (MAP(sf, opcode2, opcode)) {
5065     case MAP(0, 0x00, 0x00): /* RBIT */
5066     case MAP(1, 0x00, 0x00):
5067         handle_rbit(s, sf, rn, rd);
5068         break;
5069     case MAP(0, 0x00, 0x01): /* REV16 */
5070     case MAP(1, 0x00, 0x01):
5071         handle_rev16(s, sf, rn, rd);
5072         break;
5073     case MAP(0, 0x00, 0x02): /* REV/REV32 */
5074     case MAP(1, 0x00, 0x02):
5075         handle_rev32(s, sf, rn, rd);
5076         break;
5077     case MAP(1, 0x00, 0x03): /* REV64 */
5078         handle_rev64(s, sf, rn, rd);
5079         break;
5080     case MAP(0, 0x00, 0x04): /* CLZ */
5081     case MAP(1, 0x00, 0x04):
5082         handle_clz(s, sf, rn, rd);
5083         break;
5084     case MAP(0, 0x00, 0x05): /* CLS */
5085     case MAP(1, 0x00, 0x05):
5086         handle_cls(s, sf, rn, rd);
5087         break;
5088     case MAP(1, 0x01, 0x00): /* PACIA */
5089         if (s->pauth_active) {
5090             tcg_rd = cpu_reg(s, rd);
5091             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5092         } else if (!dc_isar_feature(aa64_pauth, s)) {
5093             goto do_unallocated;
5094         }
5095         break;
5096     case MAP(1, 0x01, 0x01): /* PACIB */
5097         if (s->pauth_active) {
5098             tcg_rd = cpu_reg(s, rd);
5099             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5100         } else if (!dc_isar_feature(aa64_pauth, s)) {
5101             goto do_unallocated;
5102         }
5103         break;
5104     case MAP(1, 0x01, 0x02): /* PACDA */
5105         if (s->pauth_active) {
5106             tcg_rd = cpu_reg(s, rd);
5107             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5108         } else if (!dc_isar_feature(aa64_pauth, s)) {
5109             goto do_unallocated;
5110         }
5111         break;
5112     case MAP(1, 0x01, 0x03): /* PACDB */
5113         if (s->pauth_active) {
5114             tcg_rd = cpu_reg(s, rd);
5115             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5116         } else if (!dc_isar_feature(aa64_pauth, s)) {
5117             goto do_unallocated;
5118         }
5119         break;
5120     case MAP(1, 0x01, 0x04): /* AUTIA */
5121         if (s->pauth_active) {
5122             tcg_rd = cpu_reg(s, rd);
5123             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5124         } else if (!dc_isar_feature(aa64_pauth, s)) {
5125             goto do_unallocated;
5126         }
5127         break;
5128     case MAP(1, 0x01, 0x05): /* AUTIB */
5129         if (s->pauth_active) {
5130             tcg_rd = cpu_reg(s, rd);
5131             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5132         } else if (!dc_isar_feature(aa64_pauth, s)) {
5133             goto do_unallocated;
5134         }
5135         break;
5136     case MAP(1, 0x01, 0x06): /* AUTDA */
5137         if (s->pauth_active) {
5138             tcg_rd = cpu_reg(s, rd);
5139             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5140         } else if (!dc_isar_feature(aa64_pauth, s)) {
5141             goto do_unallocated;
5142         }
5143         break;
5144     case MAP(1, 0x01, 0x07): /* AUTDB */
5145         if (s->pauth_active) {
5146             tcg_rd = cpu_reg(s, rd);
5147             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, cpu_reg_sp(s, rn));
5148         } else if (!dc_isar_feature(aa64_pauth, s)) {
5149             goto do_unallocated;
5150         }
5151         break;
5152     case MAP(1, 0x01, 0x08): /* PACIZA */
5153         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5154             goto do_unallocated;
5155         } else if (s->pauth_active) {
5156             tcg_rd = cpu_reg(s, rd);
5157             gen_helper_pacia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5158         }
5159         break;
5160     case MAP(1, 0x01, 0x09): /* PACIZB */
5161         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5162             goto do_unallocated;
5163         } else if (s->pauth_active) {
5164             tcg_rd = cpu_reg(s, rd);
5165             gen_helper_pacib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5166         }
5167         break;
5168     case MAP(1, 0x01, 0x0a): /* PACDZA */
5169         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5170             goto do_unallocated;
5171         } else if (s->pauth_active) {
5172             tcg_rd = cpu_reg(s, rd);
5173             gen_helper_pacda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5174         }
5175         break;
5176     case MAP(1, 0x01, 0x0b): /* PACDZB */
5177         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5178             goto do_unallocated;
5179         } else if (s->pauth_active) {
5180             tcg_rd = cpu_reg(s, rd);
5181             gen_helper_pacdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5182         }
5183         break;
5184     case MAP(1, 0x01, 0x0c): /* AUTIZA */
5185         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5186             goto do_unallocated;
5187         } else if (s->pauth_active) {
5188             tcg_rd = cpu_reg(s, rd);
5189             gen_helper_autia(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5190         }
5191         break;
5192     case MAP(1, 0x01, 0x0d): /* AUTIZB */
5193         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5194             goto do_unallocated;
5195         } else if (s->pauth_active) {
5196             tcg_rd = cpu_reg(s, rd);
5197             gen_helper_autib(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5198         }
5199         break;
5200     case MAP(1, 0x01, 0x0e): /* AUTDZA */
5201         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5202             goto do_unallocated;
5203         } else if (s->pauth_active) {
5204             tcg_rd = cpu_reg(s, rd);
5205             gen_helper_autda(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5206         }
5207         break;
5208     case MAP(1, 0x01, 0x0f): /* AUTDZB */
5209         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5210             goto do_unallocated;
5211         } else if (s->pauth_active) {
5212             tcg_rd = cpu_reg(s, rd);
5213             gen_helper_autdb(tcg_rd, cpu_env, tcg_rd, tcg_constant_i64(0));
5214         }
5215         break;
5216     case MAP(1, 0x01, 0x10): /* XPACI */
5217         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5218             goto do_unallocated;
5219         } else if (s->pauth_active) {
5220             tcg_rd = cpu_reg(s, rd);
5221             gen_helper_xpaci(tcg_rd, cpu_env, tcg_rd);
5222         }
5223         break;
5224     case MAP(1, 0x01, 0x11): /* XPACD */
5225         if (!dc_isar_feature(aa64_pauth, s) || rn != 31) {
5226             goto do_unallocated;
5227         } else if (s->pauth_active) {
5228             tcg_rd = cpu_reg(s, rd);
5229             gen_helper_xpacd(tcg_rd, cpu_env, tcg_rd);
5230         }
5231         break;
5232     default:
5233     do_unallocated:
5234         unallocated_encoding(s);
5235         break;
5236     }
5237 
5238 #undef MAP
5239 }
5240 
5241 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
5242                        unsigned int rm, unsigned int rn, unsigned int rd)
5243 {
5244     TCGv_i64 tcg_n, tcg_m, tcg_rd;
5245     tcg_rd = cpu_reg(s, rd);
5246 
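    /*
     * The division is always done by the 64-bit helpers; for the 32-bit
     * signed form the operands must be sign-extended first so that the
     * 64-bit quotient matches the architectural 32-bit result.
     */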
5247     if (!sf && is_signed) {
5248         tcg_n = tcg_temp_new_i64();
5249         tcg_m = tcg_temp_new_i64();
5250         tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
5251         tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
5252     } else {
5253         tcg_n = read_cpu_reg(s, rn, sf);
5254         tcg_m = read_cpu_reg(s, rm, sf);
5255     }
5256 
5257     if (is_signed) {
5258         gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
5259     } else {
5260         gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
5261     }
5262 
5263     if (!sf) { /* zero extend final result */
5264         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5265     }
5266 }
5267 
5268 /* LSLV, LSRV, ASRV, RORV */
5269 static void handle_shift_reg(DisasContext *s,
5270                              enum a64_shift_type shift_type, unsigned int sf,
5271                              unsigned int rm, unsigned int rn, unsigned int rd)
5272 {
5273     TCGv_i64 tcg_shift = tcg_temp_new_i64();
5274     TCGv_i64 tcg_rd = cpu_reg(s, rd);
5275     TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
5276 
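    /* The shift amount is Rm taken modulo the register width (32 or 64). */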
5277     tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
5278     shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
5279 }
5280 
5281 /* CRC32[BHWX], CRC32C[BHWX] */
5282 static void handle_crc32(DisasContext *s,
5283                          unsigned int sf, unsigned int sz, bool crc32c,
5284                          unsigned int rm, unsigned int rn, unsigned int rd)
5285 {
5286     TCGv_i64 tcg_acc, tcg_val;
5287     TCGv_i32 tcg_bytes;
5288 
5289     if (!dc_isar_feature(aa64_crc32, s)
5290         || (sf == 1 && sz != 3)
5291         || (sf == 0 && sz == 3)) {
5292         unallocated_encoding(s);
5293         return;
5294     }
5295 
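    /*
     * Only the low (1 << sz) bytes of Rm contribute to the checksum; mask
     * the value for the byte, halfword and word variants.
     */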
5296     if (sz == 3) {
5297         tcg_val = cpu_reg(s, rm);
5298     } else {
5299         uint64_t mask;
5300         switch (sz) {
5301         case 0:
5302             mask = 0xFF;
5303             break;
5304         case 1:
5305             mask = 0xFFFF;
5306             break;
5307         case 2:
5308             mask = 0xFFFFFFFF;
5309             break;
5310         default:
5311             g_assert_not_reached();
5312         }
5313         tcg_val = tcg_temp_new_i64();
5314         tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
5315     }
5316 
5317     tcg_acc = cpu_reg(s, rn);
5318     tcg_bytes = tcg_constant_i32(1 << sz);
5319 
5320     if (crc32c) {
5321         gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5322     } else {
5323         gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
5324     }
5325 }
5326 
5327 /* Data-processing (2 source)
5328  *   31   30  29 28             21 20  16 15    10 9    5 4    0
5329  * +----+---+---+-----------------+------+--------+------+------+
5330  * | sf | 0 | S | 1 1 0 1 0 1 1 0 |  Rm  | opcode |  Rn  |  Rd  |
5331  * +----+---+---+-----------------+------+--------+------+------+
5332  */
5333 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
5334 {
5335     unsigned int sf, rm, opcode, rn, rd, setflag;
5336     sf = extract32(insn, 31, 1);
5337     setflag = extract32(insn, 29, 1);
5338     rm = extract32(insn, 16, 5);
5339     opcode = extract32(insn, 10, 6);
5340     rn = extract32(insn, 5, 5);
5341     rd = extract32(insn, 0, 5);
5342 
5343     if (setflag && opcode != 0) {
5344         unallocated_encoding(s);
5345         return;
5346     }
5347 
5348     switch (opcode) {
5349     case 0: /* SUBP(S) */
5350         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5351             goto do_unallocated;
5352         } else {
5353             TCGv_i64 tcg_n, tcg_m, tcg_d;
5354 
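            /*
             * SUBP subtracts 56-bit pointers: sign-extend from bit 55 so
             * that the tag byte in bits [63:56] is ignored.
             */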
5355             tcg_n = read_cpu_reg_sp(s, rn, true);
5356             tcg_m = read_cpu_reg_sp(s, rm, true);
5357             tcg_gen_sextract_i64(tcg_n, tcg_n, 0, 56);
5358             tcg_gen_sextract_i64(tcg_m, tcg_m, 0, 56);
5359             tcg_d = cpu_reg(s, rd);
5360 
5361             if (setflag) {
5362                 gen_sub_CC(true, tcg_d, tcg_n, tcg_m);
5363             } else {
5364                 tcg_gen_sub_i64(tcg_d, tcg_n, tcg_m);
5365             }
5366         }
5367         break;
5368     case 2: /* UDIV */
5369         handle_div(s, false, sf, rm, rn, rd);
5370         break;
5371     case 3: /* SDIV */
5372         handle_div(s, true, sf, rm, rn, rd);
5373         break;
5374     case 4: /* IRG */
5375         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5376             goto do_unallocated;
5377         }
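        /*
         * If allocation tags are not enabled (!s->ata), IRG simply returns
         * the address with the allocation tag field cleared.
         */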
5378         if (s->ata) {
5379             gen_helper_irg(cpu_reg_sp(s, rd), cpu_env,
5380                            cpu_reg_sp(s, rn), cpu_reg(s, rm));
5381         } else {
5382             gen_address_with_allocation_tag0(cpu_reg_sp(s, rd),
5383                                              cpu_reg_sp(s, rn));
5384         }
5385         break;
5386     case 5: /* GMI */
5387         if (sf == 0 || !dc_isar_feature(aa64_mte_insn_reg, s)) {
5388             goto do_unallocated;
5389         } else {
5390             TCGv_i64 t = tcg_temp_new_i64();
5391 
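            /*
             * GMI: extract the allocation tag from Xn and set the
             * corresponding bit in the exclusion mask taken from Xm.
             */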
5392             tcg_gen_extract_i64(t, cpu_reg_sp(s, rn), 56, 4);
5393             tcg_gen_shl_i64(t, tcg_constant_i64(1), t);
5394             tcg_gen_or_i64(cpu_reg(s, rd), cpu_reg(s, rm), t);
5395         }
5396         break;
5397     case 8: /* LSLV */
5398         handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
5399         break;
5400     case 9: /* LSRV */
5401         handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
5402         break;
5403     case 10: /* ASRV */
5404         handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
5405         break;
5406     case 11: /* RORV */
5407         handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
5408         break;
5409     case 12: /* PACGA */
5410         if (sf == 0 || !dc_isar_feature(aa64_pauth, s)) {
5411             goto do_unallocated;
5412         }
5413         gen_helper_pacga(cpu_reg(s, rd), cpu_env,
5414                          cpu_reg(s, rn), cpu_reg_sp(s, rm));
5415         break;
5416     case 16:
5417     case 17:
5418     case 18:
5419     case 19:
5420     case 20:
5421     case 21:
5422     case 22:
5423     case 23: /* CRC32 */
5424     {
5425         int sz = extract32(opcode, 0, 2);
5426         bool crc32c = extract32(opcode, 2, 1);
5427         handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
5428         break;
5429     }
5430     default:
5431     do_unallocated:
5432         unallocated_encoding(s);
5433         break;
5434     }
5435 }
5436 
5437 /*
5438  * Data processing - register
5439  *  31  30 29  28      25    21  20  16      10         0
5440  * +--+---+--+---+-------+-----+-------+-------+---------+
5441  * |  |op0|  |op1| 1 0 1 | op2 |       |  op3  |         |
5442  * +--+---+--+---+-------+-----+-------+-------+---------+
5443  */
5444 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
5445 {
5446     int op0 = extract32(insn, 30, 1);
5447     int op1 = extract32(insn, 28, 1);
5448     int op2 = extract32(insn, 21, 4);
5449     int op3 = extract32(insn, 10, 6);
5450 
5451     if (!op1) {
5452         if (op2 & 8) {
5453             if (op2 & 1) {
5454                 /* Add/sub (extended register) */
5455                 disas_add_sub_ext_reg(s, insn);
5456             } else {
5457                 /* Add/sub (shifted register) */
5458                 disas_add_sub_reg(s, insn);
5459             }
5460         } else {
5461             /* Logical (shifted register) */
5462             disas_logic_reg(s, insn);
5463         }
5464         return;
5465     }
5466 
5467     switch (op2) {
5468     case 0x0:
5469         switch (op3) {
5470         case 0x00: /* Add/subtract (with carry) */
5471             disas_adc_sbc(s, insn);
5472             break;
5473 
5474         case 0x01: /* Rotate right into flags */
5475         case 0x21:
5476             disas_rotate_right_into_flags(s, insn);
5477             break;
5478 
5479         case 0x02: /* Evaluate into flags */
5480         case 0x12:
5481         case 0x22:
5482         case 0x32:
5483             disas_evaluate_into_flags(s, insn);
5484             break;
5485 
5486         default:
5487             goto do_unallocated;
5488         }
5489         break;
5490 
5491     case 0x2: /* Conditional compare */
5492         disas_cc(s, insn); /* both imm and reg forms */
5493         break;
5494 
5495     case 0x4: /* Conditional select */
5496         disas_cond_select(s, insn);
5497         break;
5498 
5499     case 0x6: /* Data-processing */
5500         if (op0) {    /* (1 source) */
5501             disas_data_proc_1src(s, insn);
5502         } else {      /* (2 source) */
5503             disas_data_proc_2src(s, insn);
5504         }
5505         break;
5506     case 0x8 ... 0xf: /* (3 source) */
5507         disas_data_proc_3src(s, insn);
5508         break;
5509 
5510     default:
5511     do_unallocated:
5512         unallocated_encoding(s);
5513         break;
5514     }
5515 }
5516 
5517 static void handle_fp_compare(DisasContext *s, int size,
5518                               unsigned int rn, unsigned int rm,
5519                               bool cmp_with_zero, bool signal_all_nans)
5520 {
5521     TCGv_i64 tcg_flags = tcg_temp_new_i64();
5522     TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
5523 
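    /*
     * The VFP compare helpers return the resulting NZCV flags value in
     * tcg_flags; gen_set_nzcv() below copies it into the PSTATE flags.
     */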
5524     if (size == MO_64) {
5525         TCGv_i64 tcg_vn, tcg_vm;
5526 
5527         tcg_vn = read_fp_dreg(s, rn);
5528         if (cmp_with_zero) {
5529             tcg_vm = tcg_constant_i64(0);
5530         } else {
5531             tcg_vm = read_fp_dreg(s, rm);
5532         }
5533         if (signal_all_nans) {
5534             gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5535         } else {
5536             gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5537         }
5538     } else {
5539         TCGv_i32 tcg_vn = tcg_temp_new_i32();
5540         TCGv_i32 tcg_vm = tcg_temp_new_i32();
5541 
5542         read_vec_element_i32(s, tcg_vn, rn, 0, size);
5543         if (cmp_with_zero) {
5544             tcg_gen_movi_i32(tcg_vm, 0);
5545         } else {
5546             read_vec_element_i32(s, tcg_vm, rm, 0, size);
5547         }
5548 
5549         switch (size) {
5550         case MO_32:
5551             if (signal_all_nans) {
5552                 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5553             } else {
5554                 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5555             }
5556             break;
5557         case MO_16:
5558             if (signal_all_nans) {
5559                 gen_helper_vfp_cmpeh_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5560             } else {
5561                 gen_helper_vfp_cmph_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
5562             }
5563             break;
5564         default:
5565             g_assert_not_reached();
5566         }
5567     }
5568 
5569     gen_set_nzcv(tcg_flags);
5570 }
5571 
5572 /* Floating point compare
5573  *   31  30  29 28       24 23  22  21 20  16 15 14 13  10    9    5 4     0
5574  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5575  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | op  | 1 0 0 0 |  Rn  |  op2  |
5576  * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
5577  */
5578 static void disas_fp_compare(DisasContext *s, uint32_t insn)
5579 {
5580     unsigned int mos, type, rm, op, rn, opc, op2r;
5581     int size;
5582 
5583     mos = extract32(insn, 29, 3);
5584     type = extract32(insn, 22, 2);
5585     rm = extract32(insn, 16, 5);
5586     op = extract32(insn, 14, 2);
5587     rn = extract32(insn, 5, 5);
5588     opc = extract32(insn, 3, 2);
5589     op2r = extract32(insn, 0, 3);
5590 
5591     if (mos || op || op2r) {
5592         unallocated_encoding(s);
5593         return;
5594     }
5595 
5596     switch (type) {
5597     case 0:
5598         size = MO_32;
5599         break;
5600     case 1:
5601         size = MO_64;
5602         break;
5603     case 3:
5604         size = MO_16;
5605         if (dc_isar_feature(aa64_fp16, s)) {
5606             break;
5607         }
5608         /* fallthru */
5609     default:
5610         unallocated_encoding(s);
5611         return;
5612     }
5613 
5614     if (!fp_access_check(s)) {
5615         return;
5616     }
5617 
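    /* opc<0> selects compare-with-zero, opc<1> the signalling (FCMPE) form. */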
5618     handle_fp_compare(s, size, rn, rm, opc & 1, opc & 2);
5619 }
5620 
5621 /* Floating point conditional compare
5622  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5  4   3    0
5623  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5624  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 0 1 |  Rn  | op | nzcv |
5625  * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
5626  */
5627 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
5628 {
5629     unsigned int mos, type, rm, cond, rn, op, nzcv;
5630     TCGLabel *label_continue = NULL;
5631     int size;
5632 
5633     mos = extract32(insn, 29, 3);
5634     type = extract32(insn, 22, 2);
5635     rm = extract32(insn, 16, 5);
5636     cond = extract32(insn, 12, 4);
5637     rn = extract32(insn, 5, 5);
5638     op = extract32(insn, 4, 1);
5639     nzcv = extract32(insn, 0, 4);
5640 
5641     if (mos) {
5642         unallocated_encoding(s);
5643         return;
5644     }
5645 
5646     switch (type) {
5647     case 0:
5648         size = MO_32;
5649         break;
5650     case 1:
5651         size = MO_64;
5652         break;
5653     case 3:
5654         size = MO_16;
5655         if (dc_isar_feature(aa64_fp16, s)) {
5656             break;
5657         }
5658         /* fallthru */
5659     default:
5660         unallocated_encoding(s);
5661         return;
5662     }
5663 
5664     if (!fp_access_check(s)) {
5665         return;
5666     }
5667 
5668     if (cond < 0x0e) { /* not always */
5669         TCGLabel *label_match = gen_new_label();
5670         label_continue = gen_new_label();
5671         arm_gen_test_cc(cond, label_match);
5672         /* nomatch: */
5673         gen_set_nzcv(tcg_constant_i64(nzcv << 28));
5674         tcg_gen_br(label_continue);
5675         gen_set_label(label_match);
5676     }
5677 
5678     handle_fp_compare(s, size, rn, rm, false, op);
5679 
5680     if (cond < 0x0e) {
5681         gen_set_label(label_continue);
5682     }
5683 }
5684 
5685 /* Floating point conditional select
5686  *   31  30  29 28       24 23  22  21 20  16 15  12 11 10 9    5 4    0
5687  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5688  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | cond | 1 1 |  Rn  |  Rd  |
5689  * +---+---+---+-----------+------+---+------+------+-----+------+------+
5690  */
5691 static void disas_fp_csel(DisasContext *s, uint32_t insn)
5692 {
5693     unsigned int mos, type, rm, cond, rn, rd;
5694     TCGv_i64 t_true, t_false;
5695     DisasCompare64 c;
5696     MemOp sz;
5697 
5698     mos = extract32(insn, 29, 3);
5699     type = extract32(insn, 22, 2);
5700     rm = extract32(insn, 16, 5);
5701     cond = extract32(insn, 12, 4);
5702     rn = extract32(insn, 5, 5);
5703     rd = extract32(insn, 0, 5);
5704 
5705     if (mos) {
5706         unallocated_encoding(s);
5707         return;
5708     }
5709 
5710     switch (type) {
5711     case 0:
5712         sz = MO_32;
5713         break;
5714     case 1:
5715         sz = MO_64;
5716         break;
5717     case 3:
5718         sz = MO_16;
5719         if (dc_isar_feature(aa64_fp16, s)) {
5720             break;
5721         }
5722         /* fallthru */
5723     default:
5724         unallocated_encoding(s);
5725         return;
5726     }
5727 
5728     if (!fp_access_check(s)) {
5729         return;
5730     }
5731 
5732     /* Zero extend sreg & hreg inputs to 64 bits now.  */
5733     t_true = tcg_temp_new_i64();
5734     t_false = tcg_temp_new_i64();
5735     read_vec_element(s, t_true, rn, 0, sz);
5736     read_vec_element(s, t_false, rm, 0, sz);
5737 
5738     a64_test_cc(&c, cond);
5739     tcg_gen_movcond_i64(c.cond, t_true, c.value, tcg_constant_i64(0),
5740                         t_true, t_false);
5741 
5742     /* Note that sregs & hregs write back zeros to the high bits,
5743      * and we've already done the zero-extension.  */
5744     write_fp_dreg(s, rd, t_true);
5745 }
5746 
5747 /* Floating-point data-processing (1 source) - half precision */
5748 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
5749 {
5750     TCGv_ptr fpst = NULL;
5751     TCGv_i32 tcg_op = read_fp_hreg(s, rn);
5752     TCGv_i32 tcg_res = tcg_temp_new_i32();
5753 
5754     switch (opcode) {
5755     case 0x0: /* FMOV */
5756         tcg_gen_mov_i32(tcg_res, tcg_op);
5757         break;
5758     case 0x1: /* FABS */
5759         tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
5760         break;
5761     case 0x2: /* FNEG */
5762         tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
5763         break;
5764     case 0x3: /* FSQRT */
5765         fpst = fpstatus_ptr(FPST_FPCR_F16);
5766         gen_helper_sqrt_f16(tcg_res, tcg_op, fpst);
5767         break;
5768     case 0x8: /* FRINTN */
5769     case 0x9: /* FRINTP */
5770     case 0xa: /* FRINTM */
5771     case 0xb: /* FRINTZ */
5772     case 0xc: /* FRINTA */
5773     {
5774         TCGv_i32 tcg_rmode;
5775 
5776         fpst = fpstatus_ptr(FPST_FPCR_F16);
5777         tcg_rmode = gen_set_rmode(opcode & 7, fpst);
5778         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5779         gen_restore_rmode(tcg_rmode, fpst);
5780         break;
5781     }
5782     case 0xe: /* FRINTX */
5783         fpst = fpstatus_ptr(FPST_FPCR_F16);
5784         gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
5785         break;
5786     case 0xf: /* FRINTI */
5787         fpst = fpstatus_ptr(FPST_FPCR_F16);
5788         gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
5789         break;
5790     default:
5791         g_assert_not_reached();
5792     }
5793 
5794     write_fp_sreg(s, rd, tcg_res);
5795 }
5796 
5797 /* Floating-point data-processing (1 source) - single precision */
5798 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
5799 {
5800     void (*gen_fpst)(TCGv_i32, TCGv_i32, TCGv_ptr);
5801     TCGv_i32 tcg_op, tcg_res;
5802     TCGv_ptr fpst;
5803     int rmode = -1;
5804 
5805     tcg_op = read_fp_sreg(s, rn);
5806     tcg_res = tcg_temp_new_i32();
5807 
5808     switch (opcode) {
5809     case 0x0: /* FMOV */
5810         tcg_gen_mov_i32(tcg_res, tcg_op);
5811         goto done;
5812     case 0x1: /* FABS */
5813         gen_helper_vfp_abss(tcg_res, tcg_op);
5814         goto done;
5815     case 0x2: /* FNEG */
5816         gen_helper_vfp_negs(tcg_res, tcg_op);
5817         goto done;
5818     case 0x3: /* FSQRT */
5819         gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
5820         goto done;
5821     case 0x6: /* BFCVT */
5822         gen_fpst = gen_helper_bfcvt;
5823         break;
5824     case 0x8: /* FRINTN */
5825     case 0x9: /* FRINTP */
5826     case 0xa: /* FRINTM */
5827     case 0xb: /* FRINTZ */
5828     case 0xc: /* FRINTA */
5829         rmode = opcode & 7;
5830         gen_fpst = gen_helper_rints;
5831         break;
5832     case 0xe: /* FRINTX */
5833         gen_fpst = gen_helper_rints_exact;
5834         break;
5835     case 0xf: /* FRINTI */
5836         gen_fpst = gen_helper_rints;
5837         break;
5838     case 0x10: /* FRINT32Z */
5839         rmode = FPROUNDING_ZERO;
5840         gen_fpst = gen_helper_frint32_s;
5841         break;
5842     case 0x11: /* FRINT32X */
5843         gen_fpst = gen_helper_frint32_s;
5844         break;
5845     case 0x12: /* FRINT64Z */
5846         rmode = FPROUNDING_ZERO;
5847         gen_fpst = gen_helper_frint64_s;
5848         break;
5849     case 0x13: /* FRINT64X */
5850         gen_fpst = gen_helper_frint64_s;
5851         break;
5852     default:
5853         g_assert_not_reached();
5854     }
5855 
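    /*
     * For the directed-rounding FRINT variants, temporarily switch the
     * rounding mode around the helper call and then restore it.
     */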
5856     fpst = fpstatus_ptr(FPST_FPCR);
5857     if (rmode >= 0) {
5858         TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
5859         gen_fpst(tcg_res, tcg_op, fpst);
5860         gen_restore_rmode(tcg_rmode, fpst);
5861     } else {
5862         gen_fpst(tcg_res, tcg_op, fpst);
5863     }
5864 
5865  done:
5866     write_fp_sreg(s, rd, tcg_res);
5867 }
5868 
5869 /* Floating-point data-processing (1 source) - double precision */
5870 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
5871 {
5872     void (*gen_fpst)(TCGv_i64, TCGv_i64, TCGv_ptr);
5873     TCGv_i64 tcg_op, tcg_res;
5874     TCGv_ptr fpst;
5875     int rmode = -1;
5876 
5877     switch (opcode) {
5878     case 0x0: /* FMOV */
5879         gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
5880         return;
5881     }
5882 
5883     tcg_op = read_fp_dreg(s, rn);
5884     tcg_res = tcg_temp_new_i64();
5885 
5886     switch (opcode) {
5887     case 0x1: /* FABS */
5888         gen_helper_vfp_absd(tcg_res, tcg_op);
5889         goto done;
5890     case 0x2: /* FNEG */
5891         gen_helper_vfp_negd(tcg_res, tcg_op);
5892         goto done;
5893     case 0x3: /* FSQRT */
5894         gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
5895         goto done;
5896     case 0x8: /* FRINTN */
5897     case 0x9: /* FRINTP */
5898     case 0xa: /* FRINTM */
5899     case 0xb: /* FRINTZ */
5900     case 0xc: /* FRINTA */
5901         rmode = opcode & 7;
5902         gen_fpst = gen_helper_rintd;
5903         break;
5904     case 0xe: /* FRINTX */
5905         gen_fpst = gen_helper_rintd_exact;
5906         break;
5907     case 0xf: /* FRINTI */
5908         gen_fpst = gen_helper_rintd;
5909         break;
5910     case 0x10: /* FRINT32Z */
5911         rmode = FPROUNDING_ZERO;
5912         gen_fpst = gen_helper_frint32_d;
5913         break;
5914     case 0x11: /* FRINT32X */
5915         gen_fpst = gen_helper_frint32_d;
5916         break;
5917     case 0x12: /* FRINT64Z */
5918         rmode = FPROUNDING_ZERO;
5919         gen_fpst = gen_helper_frint64_d;
5920         break;
5921     case 0x13: /* FRINT64X */
5922         gen_fpst = gen_helper_frint64_d;
5923         break;
5924     default:
5925         g_assert_not_reached();
5926     }
5927 
5928     fpst = fpstatus_ptr(FPST_FPCR);
5929     if (rmode >= 0) {
5930         TCGv_i32 tcg_rmode = gen_set_rmode(rmode, fpst);
5931         gen_fpst(tcg_res, tcg_op, fpst);
5932         gen_restore_rmode(tcg_rmode, fpst);
5933     } else {
5934         gen_fpst(tcg_res, tcg_op, fpst);
5935     }
5936 
5937  done:
5938     write_fp_dreg(s, rd, tcg_res);
5939 }
5940 
5941 static void handle_fp_fcvt(DisasContext *s, int opcode,
5942                            int rd, int rn, int dtype, int ntype)
5943 {
5944     switch (ntype) {
5945     case 0x0:
5946     {
5947         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5948         if (dtype == 1) {
5949             /* Single to double */
5950             TCGv_i64 tcg_rd = tcg_temp_new_i64();
5951             gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
5952             write_fp_dreg(s, rd, tcg_rd);
5953         } else {
5954             /* Single to half */
5955             TCGv_i32 tcg_rd = tcg_temp_new_i32();
5956             TCGv_i32 ahp = get_ahp_flag();
5957             TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
5958 
5959             gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5960             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5961             write_fp_sreg(s, rd, tcg_rd);
5962         }
5963         break;
5964     }
5965     case 0x1:
5966     {
5967         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
5968         TCGv_i32 tcg_rd = tcg_temp_new_i32();
5969         if (dtype == 0) {
5970             /* Double to single */
5971             gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
5972         } else {
5973             TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
5974             TCGv_i32 ahp = get_ahp_flag();
5975             /* Double to half */
5976             gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, fpst, ahp);
5977             /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5978         }
5979         write_fp_sreg(s, rd, tcg_rd);
5980         break;
5981     }
5982     case 0x3:
5983     {
5984         TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5985         TCGv_ptr tcg_fpst = fpstatus_ptr(FPST_FPCR);
5986         TCGv_i32 tcg_ahp = get_ahp_flag();
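        /* Only the low 16 bits of the source hold the half-precision value. */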
5987         tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
5988         if (dtype == 0) {
5989             /* Half to single */
5990             TCGv_i32 tcg_rd = tcg_temp_new_i32();
5991             gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5992             write_fp_sreg(s, rd, tcg_rd);
5993         } else {
5994             /* Half to double */
5995             TCGv_i64 tcg_rd = tcg_temp_new_i64();
5996             gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, tcg_fpst, tcg_ahp);
5997             write_fp_dreg(s, rd, tcg_rd);
5998         }
5999         break;
6000     }
6001     default:
6002         g_assert_not_reached();
6003     }
6004 }
6005 
6006 /* Floating point data-processing (1 source)
6007  *   31  30  29 28       24 23  22  21 20    15 14       10 9    5 4    0
6008  * +---+---+---+-----------+------+---+--------+-----------+------+------+
6009  * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 |  Rn  |  Rd  |
6010  * +---+---+---+-----------+------+---+--------+-----------+------+------+
6011  */
6012 static void disas_fp_1src(DisasContext *s, uint32_t insn)
6013 {
6014     int mos = extract32(insn, 29, 3);
6015     int type = extract32(insn, 22, 2);
6016     int opcode = extract32(insn, 15, 6);
6017     int rn = extract32(insn, 5, 5);
6018     int rd = extract32(insn, 0, 5);
6019 
6020     if (mos) {
6021         goto do_unallocated;
6022     }
6023 
6024     switch (opcode) {
6025     case 0x4: case 0x5: case 0x7:
6026     {
6027         /* FCVT between half, single and double precision */
6028         int dtype = extract32(opcode, 0, 2);
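        /* Converting to the same precision, or using reserved type 2, is unallocated. */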
6029         if (type == 2 || dtype == type) {
6030             goto do_unallocated;
6031         }
6032         if (!fp_access_check(s)) {
6033             return;
6034         }
6035 
6036         handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
6037         break;
6038     }
6039 
6040     case 0x10 ... 0x13: /* FRINT{32,64}{X,Z} */
6041         if (type > 1 || !dc_isar_feature(aa64_frint, s)) {
6042             goto do_unallocated;
6043         }
6044         /* fall through */
6045     case 0x0 ... 0x3:
6046     case 0x8 ... 0xc:
6047     case 0xe ... 0xf:
6048         /* 32-to-32 and 64-to-64 ops */
6049         switch (type) {
6050         case 0:
6051             if (!fp_access_check(s)) {
6052                 return;
6053             }
6054             handle_fp_1src_single(s, opcode, rd, rn);
6055             break;
6056         case 1:
6057             if (!fp_access_check(s)) {
6058                 return;
6059             }
6060             handle_fp_1src_double(s, opcode, rd, rn);
6061             break;
6062         case 3:
6063             if (!dc_isar_feature(aa64_fp16, s)) {
6064                 goto do_unallocated;
6065             }
6066 
6067             if (!fp_access_check(s)) {
6068                 return;
6069             }
6070             handle_fp_1src_half(s, opcode, rd, rn);
6071             break;
6072         default:
6073             goto do_unallocated;
6074         }
6075         break;
6076 
6077     case 0x6:
6078         switch (type) {
6079         case 1: /* BFCVT */
6080             if (!dc_isar_feature(aa64_bf16, s)) {
6081                 goto do_unallocated;
6082             }
6083             if (!fp_access_check(s)) {
6084                 return;
6085             }
6086             handle_fp_1src_single(s, opcode, rd, rn);
6087             break;
6088         default:
6089             goto do_unallocated;
6090         }
6091         break;
6092 
6093     default:
6094     do_unallocated:
6095         unallocated_encoding(s);
6096         break;
6097     }
6098 }
6099 
6100 /* Floating-point data-processing (2 source) - single precision */
6101 static void handle_fp_2src_single(DisasContext *s, int opcode,
6102                                   int rd, int rn, int rm)
6103 {
6104     TCGv_i32 tcg_op1;
6105     TCGv_i32 tcg_op2;
6106     TCGv_i32 tcg_res;
6107     TCGv_ptr fpst;
6108 
6109     tcg_res = tcg_temp_new_i32();
6110     fpst = fpstatus_ptr(FPST_FPCR);
6111     tcg_op1 = read_fp_sreg(s, rn);
6112     tcg_op2 = read_fp_sreg(s, rm);
6113 
6114     switch (opcode) {
6115     case 0x0: /* FMUL */
6116         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6117         break;
6118     case 0x1: /* FDIV */
6119         gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
6120         break;
6121     case 0x2: /* FADD */
6122         gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6123         break;
6124     case 0x3: /* FSUB */
6125         gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
6126         break;
6127     case 0x4: /* FMAX */
6128         gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6129         break;
6130     case 0x5: /* FMIN */
6131         gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6132         break;
6133     case 0x6: /* FMAXNM */
6134         gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6135         break;
6136     case 0x7: /* FMINNM */
6137         gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6138         break;
6139     case 0x8: /* FNMUL */
6140         gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
6141         gen_helper_vfp_negs(tcg_res, tcg_res);
6142         break;
6143     }
6144 
6145     write_fp_sreg(s, rd, tcg_res);
6146 }
6147 
6148 /* Floating-point data-processing (2 source) - double precision */
6149 static void handle_fp_2src_double(DisasContext *s, int opcode,
6150                                   int rd, int rn, int rm)
6151 {
6152     TCGv_i64 tcg_op1;
6153     TCGv_i64 tcg_op2;
6154     TCGv_i64 tcg_res;
6155     TCGv_ptr fpst;
6156 
6157     tcg_res = tcg_temp_new_i64();
6158     fpst = fpstatus_ptr(FPST_FPCR);
6159     tcg_op1 = read_fp_dreg(s, rn);
6160     tcg_op2 = read_fp_dreg(s, rm);
6161 
6162     switch (opcode) {
6163     case 0x0: /* FMUL */
6164         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6165         break;
6166     case 0x1: /* FDIV */
6167         gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
6168         break;
6169     case 0x2: /* FADD */
6170         gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6171         break;
6172     case 0x3: /* FSUB */
6173         gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
6174         break;
6175     case 0x4: /* FMAX */
6176         gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6177         break;
6178     case 0x5: /* FMIN */
6179         gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6180         break;
6181     case 0x6: /* FMAXNM */
6182         gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6183         break;
6184     case 0x7: /* FMINNM */
6185         gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6186         break;
6187     case 0x8: /* FNMUL */
6188         gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
6189         gen_helper_vfp_negd(tcg_res, tcg_res);
6190         break;
6191     }
6192 
6193     write_fp_dreg(s, rd, tcg_res);
6194 }
6195 
6196 /* Floating-point data-processing (2 source) - half precision */
6197 static void handle_fp_2src_half(DisasContext *s, int opcode,
6198                                 int rd, int rn, int rm)
6199 {
6200     TCGv_i32 tcg_op1;
6201     TCGv_i32 tcg_op2;
6202     TCGv_i32 tcg_res;
6203     TCGv_ptr fpst;
6204 
6205     tcg_res = tcg_temp_new_i32();
6206     fpst = fpstatus_ptr(FPST_FPCR_F16);
6207     tcg_op1 = read_fp_hreg(s, rn);
6208     tcg_op2 = read_fp_hreg(s, rm);
6209 
6210     switch (opcode) {
6211     case 0x0: /* FMUL */
6212         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6213         break;
6214     case 0x1: /* FDIV */
6215         gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
6216         break;
6217     case 0x2: /* FADD */
6218         gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6219         break;
6220     case 0x3: /* FSUB */
6221         gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
6222         break;
6223     case 0x4: /* FMAX */
6224         gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6225         break;
6226     case 0x5: /* FMIN */
6227         gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6228         break;
6229     case 0x6: /* FMAXNM */
6230         gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6231         break;
6232     case 0x7: /* FMINNM */
6233         gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6234         break;
6235     case 0x8: /* FNMUL */
6236         gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
6237         tcg_gen_xori_i32(tcg_res, tcg_res, 0x8000);
6238         break;
6239     default:
6240         g_assert_not_reached();
6241     }
6242 
6243     write_fp_sreg(s, rd, tcg_res);
6244 }
6245 
6246 /* Floating point data-processing (2 source)
6247  *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
6248  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6249  * | M | 0 | S | 1 1 1 1 0 | type | 1 |  Rm  | opcode | 1 0 |  Rn  |  Rd  |
6250  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6251  */
6252 static void disas_fp_2src(DisasContext *s, uint32_t insn)
6253 {
6254     int mos = extract32(insn, 29, 3);
6255     int type = extract32(insn, 22, 2);
6256     int rd = extract32(insn, 0, 5);
6257     int rn = extract32(insn, 5, 5);
6258     int rm = extract32(insn, 16, 5);
6259     int opcode = extract32(insn, 12, 4);
6260 
6261     if (opcode > 8 || mos) {
6262         unallocated_encoding(s);
6263         return;
6264     }
6265 
6266     switch (type) {
6267     case 0:
6268         if (!fp_access_check(s)) {
6269             return;
6270         }
6271         handle_fp_2src_single(s, opcode, rd, rn, rm);
6272         break;
6273     case 1:
6274         if (!fp_access_check(s)) {
6275             return;
6276         }
6277         handle_fp_2src_double(s, opcode, rd, rn, rm);
6278         break;
6279     case 3:
6280         if (!dc_isar_feature(aa64_fp16, s)) {
6281             unallocated_encoding(s);
6282             return;
6283         }
6284         if (!fp_access_check(s)) {
6285             return;
6286         }
6287         handle_fp_2src_half(s, opcode, rd, rn, rm);
6288         break;
6289     default:
6290         unallocated_encoding(s);
6291     }
6292 }
6293 
6294 /* Floating-point data-processing (3 source) - single precision */
6295 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
6296                                   int rd, int rn, int rm, int ra)
6297 {
6298     TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6299     TCGv_i32 tcg_res = tcg_temp_new_i32();
6300     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6301 
6302     tcg_op1 = read_fp_sreg(s, rn);
6303     tcg_op2 = read_fp_sreg(s, rm);
6304     tcg_op3 = read_fp_sreg(s, ra);
6305 
6306     /* These are fused multiply-add, and must be done as one
6307      * floating point operation with no rounding between the
6308      * multiplication and addition steps.
6309      * NB that doing the negations here as separate steps is
6310      * correct: an input NaN should come out with its sign bit
6311      * flipped if it is a negated input.
6312      */
6313     if (o1 == true) {
6314         gen_helper_vfp_negs(tcg_op3, tcg_op3);
6315     }
6316 
6317     if (o0 != o1) {
6318         gen_helper_vfp_negs(tcg_op1, tcg_op1);
6319     }
6320 
6321     gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6322 
6323     write_fp_sreg(s, rd, tcg_res);
6324 }
6325 
6326 /* Floating-point data-processing (3 source) - double precision */
6327 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
6328                                   int rd, int rn, int rm, int ra)
6329 {
6330     TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
6331     TCGv_i64 tcg_res = tcg_temp_new_i64();
6332     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
6333 
6334     tcg_op1 = read_fp_dreg(s, rn);
6335     tcg_op2 = read_fp_dreg(s, rm);
6336     tcg_op3 = read_fp_dreg(s, ra);
6337 
6338     /* These are fused multiply-add, and must be done as one
6339      * floating point operation with no rounding between the
6340      * multiplication and addition steps.
6341      * NB that doing the negations here as separate steps is
6342      * correct: an input NaN should come out with its sign bit
6343      * flipped if it is a negated input.
6344      */
6345     if (o1 == true) {
6346         gen_helper_vfp_negd(tcg_op3, tcg_op3);
6347     }
6348 
6349     if (o0 != o1) {
6350         gen_helper_vfp_negd(tcg_op1, tcg_op1);
6351     }
6352 
6353     gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6354 
6355     write_fp_dreg(s, rd, tcg_res);
6356 }
6357 
6358 /* Floating-point data-processing (3 source) - half precision */
6359 static void handle_fp_3src_half(DisasContext *s, bool o0, bool o1,
6360                                 int rd, int rn, int rm, int ra)
6361 {
6362     TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
6363     TCGv_i32 tcg_res = tcg_temp_new_i32();
6364     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR_F16);
6365 
6366     tcg_op1 = read_fp_hreg(s, rn);
6367     tcg_op2 = read_fp_hreg(s, rm);
6368     tcg_op3 = read_fp_hreg(s, ra);
6369 
6370     /* These are fused multiply-add, and must be done as one
6371      * floating point operation with no rounding between the
6372      * multiplication and addition steps.
6373      * NB that doing the negations here as separate steps is
6374      * correct: an input NaN should come out with its sign bit
6375      * flipped if it is a negated input.
6376      */
6377     if (o1 == true) {
6378         tcg_gen_xori_i32(tcg_op3, tcg_op3, 0x8000);
6379     }
6380 
6381     if (o0 != o1) {
6382         tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
6383     }
6384 
6385     gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
6386 
6387     write_fp_sreg(s, rd, tcg_res);
6388 }
6389 
6390 /* Floating point data-processing (3 source)
6391  *   31  30  29 28       24 23  22  21  20  16  15  14  10 9    5 4    0
6392  * +---+---+---+-----------+------+----+------+----+------+------+------+
6393  * | M | 0 | S | 1 1 1 1 1 | type | o1 |  Rm  | o0 |  Ra  |  Rn  |  Rd  |
6394  * +---+---+---+-----------+------+----+------+----+------+------+------+
6395  */
6396 static void disas_fp_3src(DisasContext *s, uint32_t insn)
6397 {
6398     int mos = extract32(insn, 29, 3);
6399     int type = extract32(insn, 22, 2);
6400     int rd = extract32(insn, 0, 5);
6401     int rn = extract32(insn, 5, 5);
6402     int ra = extract32(insn, 10, 5);
6403     int rm = extract32(insn, 16, 5);
6404     bool o0 = extract32(insn, 15, 1);
6405     bool o1 = extract32(insn, 21, 1);
6406 
6407     if (mos) {
6408         unallocated_encoding(s);
6409         return;
6410     }
6411 
6412     switch (type) {
6413     case 0:
6414         if (!fp_access_check(s)) {
6415             return;
6416         }
6417         handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
6418         break;
6419     case 1:
6420         if (!fp_access_check(s)) {
6421             return;
6422         }
6423         handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
6424         break;
6425     case 3:
6426         if (!dc_isar_feature(aa64_fp16, s)) {
6427             unallocated_encoding(s);
6428             return;
6429         }
6430         if (!fp_access_check(s)) {
6431             return;
6432         }
6433         handle_fp_3src_half(s, o0, o1, rd, rn, rm, ra);
6434         break;
6435     default:
6436         unallocated_encoding(s);
6437     }
6438 }
6439 
6440 /* Floating point immediate
6441  *   31  30  29 28       24 23  22  21 20        13 12   10 9    5 4    0
6442  * +---+---+---+-----------+------+---+------------+-------+------+------+
6443  * | M | 0 | S | 1 1 1 1 0 | type | 1 |    imm8    | 1 0 0 | imm5 |  Rd  |
6444  * +---+---+---+-----------+------+---+------------+-------+------+------+
6445  */
6446 static void disas_fp_imm(DisasContext *s, uint32_t insn)
6447 {
6448     int rd = extract32(insn, 0, 5);
6449     int imm5 = extract32(insn, 5, 5);
6450     int imm8 = extract32(insn, 13, 8);
6451     int type = extract32(insn, 22, 2);
6452     int mos = extract32(insn, 29, 3);
6453     uint64_t imm;
6454     MemOp sz;
6455 
6456     if (mos || imm5) {
6457         unallocated_encoding(s);
6458         return;
6459     }
6460 
6461     switch (type) {
6462     case 0:
6463         sz = MO_32;
6464         break;
6465     case 1:
6466         sz = MO_64;
6467         break;
6468     case 3:
6469         sz = MO_16;
6470         if (dc_isar_feature(aa64_fp16, s)) {
6471             break;
6472         }
6473         /* fallthru */
6474     default:
6475         unallocated_encoding(s);
6476         return;
6477     }
6478 
6479     if (!fp_access_check(s)) {
6480         return;
6481     }
6482 
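    /*
     * imm8 is an 8-bit "minifloat"; vfp_expand_imm() expands it to the
     * selected precision, and write_fp_dreg() zeroes the remainder of the
     * vector register.
     */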
6483     imm = vfp_expand_imm(sz, imm8);
6484     write_fp_dreg(s, rd, tcg_constant_i64(imm));
6485 }
6486 
6487 /* Handle floating point <=> fixed point conversions. Note that we can
6488  * also deal with fp <=> integer conversions as a special case (scale == 64).
6489  * OPTME: consider handling that special case specially or at least skipping
6490  * the call to scalbn in the helpers for zero shifts.
6491  */
6492 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
6493                            bool itof, int rmode, int scale, int sf, int type)
6494 {
6495     bool is_signed = !(opcode & 1);
6496     TCGv_ptr tcg_fpstatus;
6497     TCGv_i32 tcg_shift, tcg_single;
6498     TCGv_i64 tcg_double;
6499 
6500     tcg_fpstatus = fpstatus_ptr(type == 3 ? FPST_FPCR_F16 : FPST_FPCR);
6501 
6502     tcg_shift = tcg_constant_i32(64 - scale);
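    /*
     * The helpers take the number of fractional bits, i.e. 64 - scale;
     * scale == 64 therefore degenerates into a plain fp <-> int conversion.
     */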
6503 
6504     if (itof) {
6505         TCGv_i64 tcg_int = cpu_reg(s, rn);
6506         if (!sf) {
6507             TCGv_i64 tcg_extend = tcg_temp_new_i64();
6508 
6509             if (is_signed) {
6510                 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
6511             } else {
6512                 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
6513             }
6514 
6515             tcg_int = tcg_extend;
6516         }
6517 
6518         switch (type) {
6519         case 1: /* float64 */
6520             tcg_double = tcg_temp_new_i64();
6521             if (is_signed) {
6522                 gen_helper_vfp_sqtod(tcg_double, tcg_int,
6523                                      tcg_shift, tcg_fpstatus);
6524             } else {
6525                 gen_helper_vfp_uqtod(tcg_double, tcg_int,
6526                                      tcg_shift, tcg_fpstatus);
6527             }
6528             write_fp_dreg(s, rd, tcg_double);
6529             break;
6530 
6531         case 0: /* float32 */
6532             tcg_single = tcg_temp_new_i32();
6533             if (is_signed) {
6534                 gen_helper_vfp_sqtos(tcg_single, tcg_int,
6535                                      tcg_shift, tcg_fpstatus);
6536             } else {
6537                 gen_helper_vfp_uqtos(tcg_single, tcg_int,
6538                                      tcg_shift, tcg_fpstatus);
6539             }
6540             write_fp_sreg(s, rd, tcg_single);
6541             break;
6542 
6543         case 3: /* float16 */
6544             tcg_single = tcg_temp_new_i32();
6545             if (is_signed) {
6546                 gen_helper_vfp_sqtoh(tcg_single, tcg_int,
6547                                      tcg_shift, tcg_fpstatus);
6548             } else {
6549                 gen_helper_vfp_uqtoh(tcg_single, tcg_int,
6550                                      tcg_shift, tcg_fpstatus);
6551             }
6552             write_fp_sreg(s, rd, tcg_single);
6553             break;
6554 
6555         default:
6556             g_assert_not_reached();
6557         }
6558     } else {
6559         TCGv_i64 tcg_int = cpu_reg(s, rd);
6560         TCGv_i32 tcg_rmode;
6561 
6562         if (extract32(opcode, 2, 1)) {
6563             /* There are too many rounding modes to all fit into rmode,
6564              * so FCVTA[US] is a special case.
6565              */
6566             rmode = FPROUNDING_TIEAWAY;
6567         }
6568 
6569         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
6570 
6571         switch (type) {
6572         case 1: /* float64 */
6573             tcg_double = read_fp_dreg(s, rn);
6574             if (is_signed) {
6575                 if (!sf) {
6576                     gen_helper_vfp_tosld(tcg_int, tcg_double,
6577                                          tcg_shift, tcg_fpstatus);
6578                 } else {
6579                     gen_helper_vfp_tosqd(tcg_int, tcg_double,
6580                                          tcg_shift, tcg_fpstatus);
6581                 }
6582             } else {
6583                 if (!sf) {
6584                     gen_helper_vfp_tould(tcg_int, tcg_double,
6585                                          tcg_shift, tcg_fpstatus);
6586                 } else {
6587                     gen_helper_vfp_touqd(tcg_int, tcg_double,
6588                                          tcg_shift, tcg_fpstatus);
6589                 }
6590             }
6591             if (!sf) {
6592                 tcg_gen_ext32u_i64(tcg_int, tcg_int);
6593             }
6594             break;
6595 
6596         case 0: /* float32 */
6597             tcg_single = read_fp_sreg(s, rn);
6598             if (sf) {
6599                 if (is_signed) {
6600                     gen_helper_vfp_tosqs(tcg_int, tcg_single,
6601                                          tcg_shift, tcg_fpstatus);
6602                 } else {
6603                     gen_helper_vfp_touqs(tcg_int, tcg_single,
6604                                          tcg_shift, tcg_fpstatus);
6605                 }
6606             } else {
6607                 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6608                 if (is_signed) {
6609                     gen_helper_vfp_tosls(tcg_dest, tcg_single,
6610                                          tcg_shift, tcg_fpstatus);
6611                 } else {
6612                     gen_helper_vfp_touls(tcg_dest, tcg_single,
6613                                          tcg_shift, tcg_fpstatus);
6614                 }
6615                 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6616             }
6617             break;
6618 
6619         case 3: /* float16 */
6620             tcg_single = read_fp_sreg(s, rn);
6621             if (sf) {
6622                 if (is_signed) {
6623                     gen_helper_vfp_tosqh(tcg_int, tcg_single,
6624                                          tcg_shift, tcg_fpstatus);
6625                 } else {
6626                     gen_helper_vfp_touqh(tcg_int, tcg_single,
6627                                          tcg_shift, tcg_fpstatus);
6628                 }
6629             } else {
6630                 TCGv_i32 tcg_dest = tcg_temp_new_i32();
6631                 if (is_signed) {
6632                     gen_helper_vfp_toslh(tcg_dest, tcg_single,
6633                                          tcg_shift, tcg_fpstatus);
6634                 } else {
6635                     gen_helper_vfp_toulh(tcg_dest, tcg_single,
6636                                          tcg_shift, tcg_fpstatus);
6637                 }
6638                 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
6639             }
6640             break;
6641 
6642         default:
6643             g_assert_not_reached();
6644         }
6645 
6646         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
6647     }
6648 }
6649 
6650 /* Floating point <-> fixed point conversions
6651  *   31   30  29 28       24 23  22  21 20   19 18    16 15   10 9    5 4    0
6652  * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6653  * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale |  Rn  |  Rd  |
6654  * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
6655  */
6656 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
6657 {
6658     int rd = extract32(insn, 0, 5);
6659     int rn = extract32(insn, 5, 5);
6660     int scale = extract32(insn, 10, 6);
6661     int opcode = extract32(insn, 16, 3);
6662     int rmode = extract32(insn, 19, 2);
6663     int type = extract32(insn, 22, 2);
6664     bool sbit = extract32(insn, 29, 1);
6665     bool sf = extract32(insn, 31, 1);
6666     bool itof;
6667 
6668     if (sbit || (!sf && scale < 32)) {
6669         unallocated_encoding(s);
6670         return;
6671     }
6672 
6673     switch (type) {
6674     case 0: /* float32 */
6675     case 1: /* float64 */
6676         break;
6677     case 3: /* float16 */
6678         if (dc_isar_feature(aa64_fp16, s)) {
6679             break;
6680         }
6681         /* fallthru */
6682     default:
6683         unallocated_encoding(s);
6684         return;
6685     }
6686 
6687     switch ((rmode << 3) | opcode) {
6688     case 0x2: /* SCVTF */
6689     case 0x3: /* UCVTF */
6690         itof = true;
6691         break;
6692     case 0x18: /* FCVTZS */
6693     case 0x19: /* FCVTZU */
6694         itof = false;
6695         break;
6696     default:
6697         unallocated_encoding(s);
6698         return;
6699     }
6700 
6701     if (!fp_access_check(s)) {
6702         return;
6703     }
6704 
6705     handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
6706 }
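
/*
 * Worked example for the scale field decoded above (editor's sketch with
 * arbitrarily chosen values, not text from the ARM ARM): a 64-bit FCVTZS
 * (fixed-point) with scale field 0b111010 (58) has 64 - 58 = 6 fractional
 * bits, so handle_fpfpcvt() is given scale = 58, computes tcg_shift = 6,
 * and the helper scales the input by 2^6 before converting to an integer.
 * The plain fp <-> integer conversions reuse the same path by passing
 * scale == 64, i.e. a shift of zero.
 */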
6707 
6708 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
6709 {
6710     /* FMOV: gpr to or from float, double, or top half of quad fp reg,
6711      * without conversion.
6712      */
6713 
6714     if (itof) {
6715         TCGv_i64 tcg_rn = cpu_reg(s, rn);
6716         TCGv_i64 tmp;
6717 
6718         switch (type) {
6719         case 0:
6720             /* 32 bit */
6721             tmp = tcg_temp_new_i64();
6722             tcg_gen_ext32u_i64(tmp, tcg_rn);
6723             write_fp_dreg(s, rd, tmp);
6724             break;
6725         case 1:
6726             /* 64 bit */
6727             write_fp_dreg(s, rd, tcg_rn);
6728             break;
6729         case 2:
6730             /* 64 bit to top half. */
6731             tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
6732             clear_vec_high(s, true, rd);
6733             break;
6734         case 3:
6735             /* 16 bit */
6736             tmp = tcg_temp_new_i64();
6737             tcg_gen_ext16u_i64(tmp, tcg_rn);
6738             write_fp_dreg(s, rd, tmp);
6739             break;
6740         default:
6741             g_assert_not_reached();
6742         }
6743     } else {
6744         TCGv_i64 tcg_rd = cpu_reg(s, rd);
6745 
6746         switch (type) {
6747         case 0:
6748             /* 32 bit */
6749             tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
6750             break;
6751         case 1:
6752             /* 64 bit */
6753             tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
6754             break;
6755         case 2:
6756             /* 64 bits from top half */
6757             tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
6758             break;
6759         case 3:
6760             /* 16 bit */
6761             tcg_gen_ld16u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_16));
6762             break;
6763         default:
6764             g_assert_not_reached();
6765         }
6766     }
6767 }
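
/*
 * Mapping of the type field above (editor's summary): type 0 moves 32 bits
 * (Wn <-> Sd), type 1 moves 64 bits (Xn <-> Dd), type 3 moves 16 bits
 * (Wn <-> Hd, only with FP16 support), and type 2 moves 64 bits to or from
 * the upper half of the 128-bit vector register, e.g. FMOV Vd.D[1], Xn.
 */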
6768 
6769 static void handle_fjcvtzs(DisasContext *s, int rd, int rn)
6770 {
6771     TCGv_i64 t = read_fp_dreg(s, rn);
6772     TCGv_ptr fpstatus = fpstatus_ptr(FPST_FPCR);
6773 
6774     gen_helper_fjcvtzs(t, t, fpstatus);
6775 
6776     tcg_gen_ext32u_i64(cpu_reg(s, rd), t);
6777     tcg_gen_extrh_i64_i32(cpu_ZF, t);
6778     tcg_gen_movi_i32(cpu_CF, 0);
6779     tcg_gen_movi_i32(cpu_NF, 0);
6780     tcg_gen_movi_i32(cpu_VF, 0);
6781 }
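
/*
 * Note (editor's gloss on the code above): the fjcvtzs helper returns the
 * 32-bit result in the low half of its return value and the value to load
 * into cpu_ZF in the high half.  Architecturally FJCVTZS sets Z only when
 * the conversion was exact and clears N, C and V unconditionally, which is
 * why the remaining flag variables are simply zeroed here.
 */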
6782 
6783 /* Floating point <-> integer conversions
6784  *   31   30  29 28       24 23  22  21 20   19 18 16 15         10 9  5 4  0
6785  * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6786  * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
6787  * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
6788  */
6789 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
6790 {
6791     int rd = extract32(insn, 0, 5);
6792     int rn = extract32(insn, 5, 5);
6793     int opcode = extract32(insn, 16, 3);
6794     int rmode = extract32(insn, 19, 2);
6795     int type = extract32(insn, 22, 2);
6796     bool sbit = extract32(insn, 29, 1);
6797     bool sf = extract32(insn, 31, 1);
6798     bool itof = false;
6799 
6800     if (sbit) {
6801         goto do_unallocated;
6802     }
6803 
6804     switch (opcode) {
6805     case 2: /* SCVTF */
6806     case 3: /* UCVTF */
6807         itof = true;
6808         /* fallthru */
6809     case 4: /* FCVTAS */
6810     case 5: /* FCVTAU */
6811         if (rmode != 0) {
6812             goto do_unallocated;
6813         }
6814         /* fallthru */
6815     case 0: /* FCVT[NPMZ]S */
6816     case 1: /* FCVT[NPMZ]U */
6817         switch (type) {
6818         case 0: /* float32 */
6819         case 1: /* float64 */
6820             break;
6821         case 3: /* float16 */
6822             if (!dc_isar_feature(aa64_fp16, s)) {
6823                 goto do_unallocated;
6824             }
6825             break;
6826         default:
6827             goto do_unallocated;
6828         }
6829         if (!fp_access_check(s)) {
6830             return;
6831         }
6832         handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
6833         break;
6834 
6835     default:
6836         switch (sf << 7 | type << 5 | rmode << 3 | opcode) {
6837         case 0b01100110: /* FMOV half <-> 32-bit int */
6838         case 0b01100111:
6839         case 0b11100110: /* FMOV half <-> 64-bit int */
6840         case 0b11100111:
6841             if (!dc_isar_feature(aa64_fp16, s)) {
6842                 goto do_unallocated;
6843             }
6844             /* fallthru */
6845         case 0b00000110: /* FMOV 32-bit */
6846         case 0b00000111:
6847         case 0b10100110: /* FMOV 64-bit */
6848         case 0b10100111:
6849         case 0b11001110: /* FMOV top half of 128-bit */
6850         case 0b11001111:
6851             if (!fp_access_check(s)) {
6852                 return;
6853             }
6854             itof = opcode & 1;
6855             handle_fmov(s, rd, rn, type, itof);
6856             break;
6857 
6858         case 0b00111110: /* FJCVTZS */
6859             if (!dc_isar_feature(aa64_jscvt, s)) {
6860                 goto do_unallocated;
6861             } else if (fp_access_check(s)) {
6862                 handle_fjcvtzs(s, rd, rn);
6863             }
6864             break;
6865 
6866         default:
6867         do_unallocated:
6868             unallocated_encoding(s);
6869             return;
6870         }
6871         break;
6872     }
6873 }
6874 
6875 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
6876  *   31  30  29 28     25 24                          0
6877  * +---+---+---+---------+-----------------------------+
6878  * |   | 0 |   | 1 1 1 1 |                             |
6879  * +---+---+---+---------+-----------------------------+
6880  */
6881 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
6882 {
6883     if (extract32(insn, 24, 1)) {
6884         /* Floating point data-processing (3 source) */
6885         disas_fp_3src(s, insn);
6886     } else if (extract32(insn, 21, 1) == 0) {
6887         /* Floating point to fixed point conversions */
6888         disas_fp_fixed_conv(s, insn);
6889     } else {
6890         switch (extract32(insn, 10, 2)) {
6891         case 1:
6892             /* Floating point conditional compare */
6893             disas_fp_ccomp(s, insn);
6894             break;
6895         case 2:
6896             /* Floating point data-processing (2 source) */
6897             disas_fp_2src(s, insn);
6898             break;
6899         case 3:
6900             /* Floating point conditional select */
6901             disas_fp_csel(s, insn);
6902             break;
6903         case 0:
6904             switch (ctz32(extract32(insn, 12, 4))) {
6905             case 0: /* [15:12] == xxx1 */
6906                 /* Floating point immediate */
6907                 disas_fp_imm(s, insn);
6908                 break;
6909             case 1: /* [15:12] == xx10 */
6910                 /* Floating point compare */
6911                 disas_fp_compare(s, insn);
6912                 break;
6913             case 2: /* [15:12] == x100 */
6914                 /* Floating point data-processing (1 source) */
6915                 disas_fp_1src(s, insn);
6916                 break;
6917             case 3: /* [15:12] == 1000 */
6918                 unallocated_encoding(s);
6919                 break;
6920             default: /* [15:12] == 0000 */
6921                 /* Floating point <-> integer conversions */
6922                 disas_fp_int_conv(s, insn);
6923                 break;
6924             }
6925             break;
6926         }
6927     }
6928 }
6929 
6930 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
6931                      int pos)
6932 {
6933     /* Extract 64 bits from the middle of two concatenated 64 bit
6934      * vector register slices left:right. The extracted bits start
6935      * at 'pos' bits into the right (least significant) side.
6936      * We return the result in tcg_right, and guarantee not to
6937      * trash tcg_left.
6938      */
6939     TCGv_i64 tcg_tmp = tcg_temp_new_i64();
6940     assert(pos > 0 && pos < 64);
6941 
6942     tcg_gen_shri_i64(tcg_right, tcg_right, pos);
6943     tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
6944     tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
6945 }
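
/*
 * Worked example for do_ext64() (illustrative only): with pos == 24 the
 * function computes
 *     tcg_right = (right >> 24) | (left << 40)
 * i.e. the 64 bits starting 24 bits into the right-hand element of the
 * left:right pair.
 */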
6946 
6947 /* EXT
6948  *   31  30 29         24 23 22  21 20  16 15  14  11 10  9    5 4    0
6949  * +---+---+-------------+-----+---+------+---+------+---+------+------+
6950  * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | imm4 | 0 |  Rn  |  Rd  |
6951  * +---+---+-------------+-----+---+------+---+------+---+------+------+
6952  */
6953 static void disas_simd_ext(DisasContext *s, uint32_t insn)
6954 {
6955     int is_q = extract32(insn, 30, 1);
6956     int op2 = extract32(insn, 22, 2);
6957     int imm4 = extract32(insn, 11, 4);
6958     int rm = extract32(insn, 16, 5);
6959     int rn = extract32(insn, 5, 5);
6960     int rd = extract32(insn, 0, 5);
6961     int pos = imm4 << 3;
6962     TCGv_i64 tcg_resl, tcg_resh;
6963 
6964     if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
6965         unallocated_encoding(s);
6966         return;
6967     }
6968 
6969     if (!fp_access_check(s)) {
6970         return;
6971     }
6972 
6973     tcg_resh = tcg_temp_new_i64();
6974     tcg_resl = tcg_temp_new_i64();
6975 
6976     /* Vd gets bits starting at pos bits into Vm:Vn. This is
6977      * either extracting 128 bits from a 128:128 concatenation, or
6978      * extracting 64 bits from a 64:64 concatenation.
6979      */
6980     if (!is_q) {
6981         read_vec_element(s, tcg_resl, rn, 0, MO_64);
6982         if (pos != 0) {
6983             read_vec_element(s, tcg_resh, rm, 0, MO_64);
6984             do_ext64(s, tcg_resh, tcg_resl, pos);
6985         }
6986     } else {
6987         TCGv_i64 tcg_hh;
6988         typedef struct {
6989             int reg;
6990             int elt;
6991         } EltPosns;
6992         EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
6993         EltPosns *elt = eltposns;
6994 
6995         if (pos >= 64) {
6996             elt++;
6997             pos -= 64;
6998         }
6999 
7000         read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
7001         elt++;
7002         read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
7003         elt++;
7004         if (pos != 0) {
7005             do_ext64(s, tcg_resh, tcg_resl, pos);
7006             tcg_hh = tcg_temp_new_i64();
7007             read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
7008             do_ext64(s, tcg_hh, tcg_resh, pos);
7009         }
7010     }
7011 
7012     write_vec_element(s, tcg_resl, rd, 0, MO_64);
7013     if (is_q) {
7014         write_vec_element(s, tcg_resh, rd, 1, MO_64);
7015     }
7016     clear_vec_high(s, is_q, rd);
7017 }
7018 
7019 /* TBL/TBX
7020  *   31  30 29         24 23 22  21 20  16 15  14 13  12  11 10 9    5 4    0
7021  * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7022  * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 |  Rm  | 0 | len | op | 0 0 |  Rn  |  Rd  |
7023  * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
7024  */
7025 static void disas_simd_tb(DisasContext *s, uint32_t insn)
7026 {
7027     int op2 = extract32(insn, 22, 2);
7028     int is_q = extract32(insn, 30, 1);
7029     int rm = extract32(insn, 16, 5);
7030     int rn = extract32(insn, 5, 5);
7031     int rd = extract32(insn, 0, 5);
7032     int is_tbx = extract32(insn, 12, 1);
7033     int len = (extract32(insn, 13, 2) + 1) * 16;
7034 
7035     if (op2 != 0) {
7036         unallocated_encoding(s);
7037         return;
7038     }
7039 
7040     if (!fp_access_check(s)) {
7041         return;
7042     }
7043 
7044     tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
7045                        vec_full_reg_offset(s, rm), cpu_env,
7046                        is_q ? 16 : 8, vec_full_reg_size(s),
7047                        (len << 6) | (is_tbx << 5) | rn,
7048                        gen_helper_simd_tblx);
7049 }
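
/*
 * Note (editor's gloss on the gvec call above): the simd_data value packs
 * the table length in bytes, the TBX flag and the table base register Rn
 * as (len << 6) | (is_tbx << 5) | rn.  Architecturally, out-of-range
 * indices produce zero bytes for TBL and leave the destination byte
 * unchanged for TBX.
 */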
7050 
7051 /* ZIP/UZP/TRN
7052  *   31  30 29         24 23  22  21 20   16 15 14 12 11 10 9    5 4    0
7053  * +---+---+-------------+------+---+------+---+-----+-----+------+------+
7054  * | 0 | Q | 0 0 1 1 1 0 | size | 0 |  Rm  | 0 | opc | 1 0 |  Rn  |  Rd  |
7055  * +---+---+-------------+------+---+------+---+-----+-----+------+------+
7056  */
7057 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
7058 {
7059     int rd = extract32(insn, 0, 5);
7060     int rn = extract32(insn, 5, 5);
7061     int rm = extract32(insn, 16, 5);
7062     int size = extract32(insn, 22, 2);
7063     /* opc field bits [1:0] indicate ZIP/UZP/TRN;
7064      * bit 2 indicates 1 vs 2 variant of the insn.
7065      */
7066     int opcode = extract32(insn, 12, 2);
7067     bool part = extract32(insn, 14, 1);
7068     bool is_q = extract32(insn, 30, 1);
7069     int esize = 8 << size;
7070     int i;
7071     int datasize = is_q ? 128 : 64;
7072     int elements = datasize / esize;
7073     TCGv_i64 tcg_res[2], tcg_ele;
7074 
7075     if (opcode == 0 || (size == 3 && !is_q)) {
7076         unallocated_encoding(s);
7077         return;
7078     }
7079 
7080     if (!fp_access_check(s)) {
7081         return;
7082     }
7083 
7084     tcg_res[0] = tcg_temp_new_i64();
7085     tcg_res[1] = is_q ? tcg_temp_new_i64() : NULL;
7086     tcg_ele = tcg_temp_new_i64();
7087 
7088     for (i = 0; i < elements; i++) {
7089         int o, w;
7090 
7091         switch (opcode) {
7092         case 1: /* UZP1/2 */
7093         {
7094             int midpoint = elements / 2;
7095             if (i < midpoint) {
7096                 read_vec_element(s, tcg_ele, rn, 2 * i + part, size);
7097             } else {
7098                 read_vec_element(s, tcg_ele, rm,
7099                                  2 * (i - midpoint) + part, size);
7100             }
7101             break;
7102         }
7103         case 2: /* TRN1/2 */
7104             if (i & 1) {
7105                 read_vec_element(s, tcg_ele, rm, (i & ~1) + part, size);
7106             } else {
7107                 read_vec_element(s, tcg_ele, rn, (i & ~1) + part, size);
7108             }
7109             break;
7110         case 3: /* ZIP1/2 */
7111         {
7112             int base = part * elements / 2;
7113             if (i & 1) {
7114                 read_vec_element(s, tcg_ele, rm, base + (i >> 1), size);
7115             } else {
7116                 read_vec_element(s, tcg_ele, rn, base + (i >> 1), size);
7117             }
7118             break;
7119         }
7120         default:
7121             g_assert_not_reached();
7122         }
7123 
7124         w = (i * esize) / 64;
7125         o = (i * esize) % 64;
7126         if (o == 0) {
7127             tcg_gen_mov_i64(tcg_res[w], tcg_ele);
7128         } else {
7129             tcg_gen_shli_i64(tcg_ele, tcg_ele, o);
7130             tcg_gen_or_i64(tcg_res[w], tcg_res[w], tcg_ele);
7131         }
7132     }
7133 
7134     for (i = 0; i <= is_q; ++i) {
7135         write_vec_element(s, tcg_res[i], rd, i, MO_64);
7136     }
7137     clear_vec_high(s, is_q, rd);
7138 }
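
/*
 * Worked example for the loop above (illustrative only): ZIP1 with eight
 * elements (opcode == 3, part == 0) has base == 0 and produces
 *     Rn[0], Rm[0], Rn[1], Rm[1], Rn[2], Rm[2], Rn[3], Rm[3]
 * while ZIP2 (part == 1) starts from the upper halves of Rn and Rm.
 */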
7139 
7140 /*
7141  * do_reduction_op helper
7142  *
7143  * This mirrors the Reduce() pseudocode in the ARM ARM. It is
7144  * important for correct NaN propagation that we do these
7145  * operations in exactly the order specified by the pseudocode.
7146  *
7147  * This is a recursive function; TCG temps should be freed by the
7148  * calling function once it is done with the values.
7149  */
7150 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
7151                                 int esize, int size, int vmap, TCGv_ptr fpst)
7152 {
7153     if (esize == size) {
7154         int element;
7155         MemOp msize = esize == 16 ? MO_16 : MO_32;
7156         TCGv_i32 tcg_elem;
7157 
7158         /* We should have one register left here */
7159         assert(ctpop8(vmap) == 1);
7160         element = ctz32(vmap);
7161         assert(element < 8);
7162 
7163         tcg_elem = tcg_temp_new_i32();
7164         read_vec_element_i32(s, tcg_elem, rn, element, msize);
7165         return tcg_elem;
7166     } else {
7167         int bits = size / 2;
7168         int shift = ctpop8(vmap) / 2;
7169         int vmap_lo = (vmap >> shift) & vmap;
7170         int vmap_hi = (vmap & ~vmap_lo);
7171         TCGv_i32 tcg_hi, tcg_lo, tcg_res;
7172 
7173         tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
7174         tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
7175         tcg_res = tcg_temp_new_i32();
7176 
7177         switch (fpopcode) {
7178         case 0x0c: /* fmaxnmv half-precision */
7179             gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7180             break;
7181         case 0x0f: /* fmaxv half-precision */
7182             gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
7183             break;
7184         case 0x1c: /* fminnmv half-precision */
7185             gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
7186             break;
7187         case 0x1f: /* fminv half-precision */
7188             gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
7189             break;
7190         case 0x2c: /* fmaxnmv */
7191             gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
7192             break;
7193         case 0x2f: /* fmaxv */
7194             gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
7195             break;
7196         case 0x3c: /* fminnmv */
7197             gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
7198             break;
7199         case 0x3f: /* fminv */
7200             gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
7201             break;
7202         default:
7203             g_assert_not_reached();
7204         }
7205         return tcg_res;
7206     }
7207 }
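
/*
 * Worked example for do_reduction_op() (illustrative only): FMAXV over
 * four single-precision lanes starts with vmap == 0b1111, which splits
 * into vmap_lo == 0b0011 and vmap_hi == 0b1100, so the result is
 *     fmax(fmax(e0, e1), fmax(e2, e3))
 * matching the pairwise tree of the Reduce() pseudocode and hence its
 * NaN propagation order.
 */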
7208 
7209 /* AdvSIMD across lanes
7210  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7211  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7212  * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7213  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
7214  */
7215 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
7216 {
7217     int rd = extract32(insn, 0, 5);
7218     int rn = extract32(insn, 5, 5);
7219     int size = extract32(insn, 22, 2);
7220     int opcode = extract32(insn, 12, 5);
7221     bool is_q = extract32(insn, 30, 1);
7222     bool is_u = extract32(insn, 29, 1);
7223     bool is_fp = false;
7224     bool is_min = false;
7225     int esize;
7226     int elements;
7227     int i;
7228     TCGv_i64 tcg_res, tcg_elt;
7229 
7230     switch (opcode) {
7231     case 0x1b: /* ADDV */
7232         if (is_u) {
7233             unallocated_encoding(s);
7234             return;
7235         }
7236         /* fall through */
7237     case 0x3: /* SADDLV, UADDLV */
7238     case 0xa: /* SMAXV, UMAXV */
7239     case 0x1a: /* SMINV, UMINV */
7240         if (size == 3 || (size == 2 && !is_q)) {
7241             unallocated_encoding(s);
7242             return;
7243         }
7244         break;
7245     case 0xc: /* FMAXNMV, FMINNMV */
7246     case 0xf: /* FMAXV, FMINV */
7247         /* Bit 1 of size field encodes min vs max and the actual size
7248          * depends on the encoding of the U bit. If the U bit is not set
7249          * (and FP16 is enabled) then we do half-precision float instead of single
7250          * precision.
7251          */
7252         is_min = extract32(size, 1, 1);
7253         is_fp = true;
7254         if (!is_u && dc_isar_feature(aa64_fp16, s)) {
7255             size = 1;
7256         } else if (!is_u || !is_q || extract32(size, 0, 1)) {
7257             unallocated_encoding(s);
7258             return;
7259         } else {
7260             size = 2;
7261         }
7262         break;
7263     default:
7264         unallocated_encoding(s);
7265         return;
7266     }
7267 
7268     if (!fp_access_check(s)) {
7269         return;
7270     }
7271 
7272     esize = 8 << size;
7273     elements = (is_q ? 128 : 64) / esize;
7274 
7275     tcg_res = tcg_temp_new_i64();
7276     tcg_elt = tcg_temp_new_i64();
7277 
7278     /* These instructions operate across all lanes of a vector
7279      * to produce a single result. We can guarantee that a 64
7280      * bit intermediate is sufficient:
7281      *  + for [US]ADDLV the maximum element size is 32 bits, and
7282      *    the result type is 64 bits
7283      *  + for FMAX*V, FMIN*V, ADDV the intermediate type is the
7284      *    same as the element size, which is 32 bits at most
7285      * For the integer operations we can choose to work at 64
7286      * or 32 bits and truncate at the end; for simplicity
7287      * we use 64 bits always. The floating point
7288      * ops do require 32 bit intermediates, though.
7289      */
7290     if (!is_fp) {
7291         read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
7292 
7293         for (i = 1; i < elements; i++) {
7294             read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
7295 
7296             switch (opcode) {
7297             case 0x03: /* SADDLV / UADDLV */
7298             case 0x1b: /* ADDV */
7299                 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
7300                 break;
7301             case 0x0a: /* SMAXV / UMAXV */
7302                 if (is_u) {
7303                     tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
7304                 } else {
7305                     tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
7306                 }
7307                 break;
7308             case 0x1a: /* SMINV / UMINV */
7309                 if (is_u) {
7310                     tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
7311                 } else {
7312                     tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
7313                 }
7314                 break;
7315             default:
7316                 g_assert_not_reached();
7317             }
7318 
7319         }
7320     } else {
7321         /* Floating point vector reduction ops which work across 32
7322          * bit (single) or 16 bit (half-precision) intermediates.
7323          * Note that correct NaN propagation requires that we do these
7324          * operations in exactly the order specified by the pseudocode.
7325          */
7326         TCGv_ptr fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7327         int fpopcode = opcode | is_min << 4 | is_u << 5;
7328         int vmap = (1 << elements) - 1;
7329         TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
7330                                              (is_q ? 128 : 64), vmap, fpst);
7331         tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
7332     }
7333 
7334     /* Now truncate the result to the width required for the final output */
7335     if (opcode == 0x03) {
7336         /* SADDLV, UADDLV: result is 2*esize */
7337         size++;
7338     }
7339 
7340     switch (size) {
7341     case 0:
7342         tcg_gen_ext8u_i64(tcg_res, tcg_res);
7343         break;
7344     case 1:
7345         tcg_gen_ext16u_i64(tcg_res, tcg_res);
7346         break;
7347     case 2:
7348         tcg_gen_ext32u_i64(tcg_res, tcg_res);
7349         break;
7350     case 3:
7351         break;
7352     default:
7353         g_assert_not_reached();
7354     }
7355 
7356     write_fp_dreg(s, rd, tcg_res);
7357 }
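
/*
 * Worked example for the final truncation above (illustrative only):
 * SADDLV of a .8B vector sums eight signed bytes into a 16-bit total, so
 * size is bumped from 0 to 1 before the switch and the result is truncated
 * with ext16u; ADDV of the same vector keeps size == 0 and is truncated
 * back to 8 bits.
 */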
7358 
7359 /* DUP (Element, Vector)
7360  *
7361  *  31  30   29              21 20    16 15        10  9    5 4    0
7362  * +---+---+-------------------+--------+-------------+------+------+
7363  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7364  * +---+---+-------------------+--------+-------------+------+------+
7365  *
7366  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7367  */
7368 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
7369                              int imm5)
7370 {
7371     int size = ctz32(imm5);
7372     int index;
7373 
7374     if (size > 3 || (size == 3 && !is_q)) {
7375         unallocated_encoding(s);
7376         return;
7377     }
7378 
7379     if (!fp_access_check(s)) {
7380         return;
7381     }
7382 
7383     index = imm5 >> (size + 1);
7384     tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
7385                          vec_reg_offset(s, rn, index, size),
7386                          is_q ? 16 : 8, vec_full_reg_size(s));
7387 }
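
/*
 * Example of the imm5 encoding used by the DUP/INS/UMOV family (editor's
 * sketch with arbitrary values): imm5 == 0b10100 has its lowest set bit at
 * position 2, so size == 2 (32-bit elements) and the source index is
 * imm5 >> (size + 1) == 2, i.e. DUP Vd.4S, Vn.S[2] for the is_q case.
 */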
7388 
7389 /* DUP (element, scalar)
7390  *  31                   21 20    16 15        10  9    5 4    0
7391  * +-----------------------+--------+-------------+------+------+
7392  * | 0 1 0 1 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 0 1 |  Rn  |  Rd  |
7393  * +-----------------------+--------+-------------+------+------+
7394  */
7395 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
7396                               int imm5)
7397 {
7398     int size = ctz32(imm5);
7399     int index;
7400     TCGv_i64 tmp;
7401 
7402     if (size > 3) {
7403         unallocated_encoding(s);
7404         return;
7405     }
7406 
7407     if (!fp_access_check(s)) {
7408         return;
7409     }
7410 
7411     index = imm5 >> (size + 1);
7412 
7413     /* This instruction just extracts the specified element and
7414      * zero-extends it into the bottom of the destination register.
7415      */
7416     tmp = tcg_temp_new_i64();
7417     read_vec_element(s, tmp, rn, index, size);
7418     write_fp_dreg(s, rd, tmp);
7419 }
7420 
7421 /* DUP (General)
7422  *
7423  *  31  30   29              21 20    16 15        10  9    5 4    0
7424  * +---+---+-------------------+--------+-------------+------+------+
7425  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 0 1 1 |  Rn  |  Rd  |
7426  * +---+---+-------------------+--------+-------------+------+------+
7427  *
7428  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7429  */
7430 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
7431                              int imm5)
7432 {
7433     int size = ctz32(imm5);
7434     uint32_t dofs, oprsz, maxsz;
7435 
7436     if (size > 3 || ((size == 3) && !is_q)) {
7437         unallocated_encoding(s);
7438         return;
7439     }
7440 
7441     if (!fp_access_check(s)) {
7442         return;
7443     }
7444 
7445     dofs = vec_full_reg_offset(s, rd);
7446     oprsz = is_q ? 16 : 8;
7447     maxsz = vec_full_reg_size(s);
7448 
7449     tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
7450 }
7451 
7452 /* INS (Element)
7453  *
7454  *  31                   21 20    16 15  14    11  10 9    5 4    0
7455  * +-----------------------+--------+------------+---+------+------+
7456  * | 0 1 1 0 1 1 1 0 0 0 0 |  imm5  | 0 |  imm4  | 1 |  Rn  |  Rd  |
7457  * +-----------------------+--------+------------+---+------+------+
7458  *
7459  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7460  * index: encoded in imm5<4:size+1>
7461  */
7462 static void handle_simd_inse(DisasContext *s, int rd, int rn,
7463                              int imm4, int imm5)
7464 {
7465     int size = ctz32(imm5);
7466     int src_index, dst_index;
7467     TCGv_i64 tmp;
7468 
7469     if (size > 3) {
7470         unallocated_encoding(s);
7471         return;
7472     }
7473 
7474     if (!fp_access_check(s)) {
7475         return;
7476     }
7477 
7478     dst_index = extract32(imm5, 1+size, 5);
7479     src_index = extract32(imm4, size, 4);
7480 
7481     tmp = tcg_temp_new_i64();
7482 
7483     read_vec_element(s, tmp, rn, src_index, size);
7484     write_vec_element(s, tmp, rd, dst_index, size);
7485 
7486     /* INS is considered a 128-bit write for SVE. */
7487     clear_vec_high(s, true, rd);
7488 }
7489 
7490 
7491 /* INS (General)
7492  *
7493  *  31                   21 20    16 15        10  9    5 4    0
7494  * +-----------------------+--------+-------------+------+------+
7495  * | 0 1 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 0 1 1 1 |  Rn  |  Rd  |
7496  * +-----------------------+--------+-------------+------+------+
7497  *
7498  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7499  * index: encoded in imm5<4:size+1>
7500  */
7501 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
7502 {
7503     int size = ctz32(imm5);
7504     int idx;
7505 
7506     if (size > 3) {
7507         unallocated_encoding(s);
7508         return;
7509     }
7510 
7511     if (!fp_access_check(s)) {
7512         return;
7513     }
7514 
7515     idx = extract32(imm5, 1 + size, 4 - size);
7516     write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
7517 
7518     /* INS is considered a 128-bit write for SVE. */
7519     clear_vec_high(s, true, rd);
7520 }
7521 
7522 /*
7523  * UMOV (General)
7524  * SMOV (General)
7525  *
7526  *  31  30   29              21 20    16 15    12   10 9    5 4    0
7527  * +---+---+-------------------+--------+-------------+------+------+
7528  * | 0 | Q | 0 0 1 1 1 0 0 0 0 |  imm5  | 0 0 1 U 1 1 |  Rn  |  Rd  |
7529  * +---+---+-------------------+--------+-------------+------+------+
7530  *
7531  * U: unsigned when set
7532  * size: encoded in imm5 (see ARM ARM LowestSetBit())
7533  */
7534 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
7535                                   int rn, int rd, int imm5)
7536 {
7537     int size = ctz32(imm5);
7538     int element;
7539     TCGv_i64 tcg_rd;
7540 
7541     /* Check for UnallocatedEncodings */
7542     if (is_signed) {
7543         if (size > 2 || (size == 2 && !is_q)) {
7544             unallocated_encoding(s);
7545             return;
7546         }
7547     } else {
7548         if (size > 3
7549             || (size < 3 && is_q)
7550             || (size == 3 && !is_q)) {
7551             unallocated_encoding(s);
7552             return;
7553         }
7554     }
7555 
7556     if (!fp_access_check(s)) {
7557         return;
7558     }
7559 
7560     element = extract32(imm5, 1+size, 4);
7561 
7562     tcg_rd = cpu_reg(s, rd);
7563     read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
7564     if (is_signed && !is_q) {
7565         tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
7566     }
7567 }
7568 
7569 /* AdvSIMD copy
7570  *   31  30  29  28             21 20  16 15  14  11 10  9    5 4    0
7571  * +---+---+----+-----------------+------+---+------+---+------+------+
7572  * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7573  * +---+---+----+-----------------+------+---+------+---+------+------+
7574  */
7575 static void disas_simd_copy(DisasContext *s, uint32_t insn)
7576 {
7577     int rd = extract32(insn, 0, 5);
7578     int rn = extract32(insn, 5, 5);
7579     int imm4 = extract32(insn, 11, 4);
7580     int op = extract32(insn, 29, 1);
7581     int is_q = extract32(insn, 30, 1);
7582     int imm5 = extract32(insn, 16, 5);
7583 
7584     if (op) {
7585         if (is_q) {
7586             /* INS (element) */
7587             handle_simd_inse(s, rd, rn, imm4, imm5);
7588         } else {
7589             unallocated_encoding(s);
7590         }
7591     } else {
7592         switch (imm4) {
7593         case 0:
7594             /* DUP (element - vector) */
7595             handle_simd_dupe(s, is_q, rd, rn, imm5);
7596             break;
7597         case 1:
7598             /* DUP (general) */
7599             handle_simd_dupg(s, is_q, rd, rn, imm5);
7600             break;
7601         case 3:
7602             if (is_q) {
7603                 /* INS (general) */
7604                 handle_simd_insg(s, rd, rn, imm5);
7605             } else {
7606                 unallocated_encoding(s);
7607             }
7608             break;
7609         case 5:
7610         case 7:
7611             /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
7612             handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
7613             break;
7614         default:
7615             unallocated_encoding(s);
7616             break;
7617         }
7618     }
7619 }
7620 
7621 /* AdvSIMD modified immediate
7622  *  31  30   29  28                 19 18 16 15   12  11  10  9     5 4    0
7623  * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7624  * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh |  Rd  |
7625  * +---+---+----+---------------------+-----+-------+----+---+-------+------+
7626  *
7627  * There are a number of operations that can be carried out here:
7628  *   MOVI - move (shifted) imm into register
7629  *   MVNI - move inverted (shifted) imm into register
7630  *   ORR  - bitwise OR of (shifted) imm with register
7631  *   BIC  - bitwise clear of (shifted) imm with register
7632  * With ARMv8.2 we also have:
7633  *   FMOV half-precision
7634  */
7635 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
7636 {
7637     int rd = extract32(insn, 0, 5);
7638     int cmode = extract32(insn, 12, 4);
7639     int o2 = extract32(insn, 11, 1);
7640     uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
7641     bool is_neg = extract32(insn, 29, 1);
7642     bool is_q = extract32(insn, 30, 1);
7643     uint64_t imm = 0;
7644 
7645     if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
7646         /* Check for FMOV (vector, immediate) - half-precision */
7647         if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
7648             unallocated_encoding(s);
7649             return;
7650         }
7651     }
7652 
7653     if (!fp_access_check(s)) {
7654         return;
7655     }
7656 
7657     if (cmode == 15 && o2 && !is_neg) {
7658         /* FMOV (vector, immediate) - half-precision */
7659         imm = vfp_expand_imm(MO_16, abcdefgh);
7660         /* now duplicate across the lanes */
7661         imm = dup_const(MO_16, imm);
7662     } else {
7663         imm = asimd_imm_const(abcdefgh, cmode, is_neg);
7664     }
7665 
7666     if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
7667         /* MOVI or MVNI, with MVNI negation handled above.  */
7668         tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
7669                              vec_full_reg_size(s), imm);
7670     } else {
7671         /* ORR or BIC, with BIC negation to AND handled above.  */
7672         if (is_neg) {
7673             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
7674         } else {
7675             gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
7676         }
7677     }
7678 }
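
/*
 * Note (editor's gloss on the cmode test above): cmode values of the form
 * 0xx1 (32-bit shifted) and 10x1 (16-bit shifted) are the ORR/BIC
 * encodings, which is what (cmode & 0x9) == 0x1 and (cmode & 0xd) == 0x9
 * match; every other cmode is a plain MOVI/MVNI/FMOV and simply broadcasts
 * the expanded immediate.
 */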
7679 
7680 /* AdvSIMD scalar copy
7681  *  31 30  29  28             21 20  16 15  14  11 10  9    5 4    0
7682  * +-----+----+-----------------+------+---+------+---+------+------+
7683  * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 |  Rn  |  Rd  |
7684  * +-----+----+-----------------+------+---+------+---+------+------+
7685  */
7686 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
7687 {
7688     int rd = extract32(insn, 0, 5);
7689     int rn = extract32(insn, 5, 5);
7690     int imm4 = extract32(insn, 11, 4);
7691     int imm5 = extract32(insn, 16, 5);
7692     int op = extract32(insn, 29, 1);
7693 
7694     if (op != 0 || imm4 != 0) {
7695         unallocated_encoding(s);
7696         return;
7697     }
7698 
7699     /* DUP (element, scalar) */
7700     handle_simd_dupes(s, rd, rn, imm5);
7701 }
7702 
7703 /* AdvSIMD scalar pairwise
7704  *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
7705  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7706  * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
7707  * +-----+---+-----------+------+-----------+--------+-----+------+------+
7708  */
7709 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
7710 {
7711     int u = extract32(insn, 29, 1);
7712     int size = extract32(insn, 22, 2);
7713     int opcode = extract32(insn, 12, 5);
7714     int rn = extract32(insn, 5, 5);
7715     int rd = extract32(insn, 0, 5);
7716     TCGv_ptr fpst;
7717 
7718     /* For some ops (the FP ones), size[1] is part of the encoding.
7719      * For ADDP strictly it is not but size[1] is always 1 for valid
7720      * encodings.
7721      */
7722     opcode |= (extract32(size, 1, 1) << 5);
7723 
7724     switch (opcode) {
7725     case 0x3b: /* ADDP */
7726         if (u || size != 3) {
7727             unallocated_encoding(s);
7728             return;
7729         }
7730         if (!fp_access_check(s)) {
7731             return;
7732         }
7733 
7734         fpst = NULL;
7735         break;
7736     case 0xc: /* FMAXNMP */
7737     case 0xd: /* FADDP */
7738     case 0xf: /* FMAXP */
7739     case 0x2c: /* FMINNMP */
7740     case 0x2f: /* FMINP */
7741         /* FP op, size[0] is 32 or 64 bit */
7742         if (!u) {
7743             if (!dc_isar_feature(aa64_fp16, s)) {
7744                 unallocated_encoding(s);
7745                 return;
7746             } else {
7747                 size = MO_16;
7748             }
7749         } else {
7750             size = extract32(size, 0, 1) ? MO_64 : MO_32;
7751         }
7752 
7753         if (!fp_access_check(s)) {
7754             return;
7755         }
7756 
7757         fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7758         break;
7759     default:
7760         unallocated_encoding(s);
7761         return;
7762     }
7763 
7764     if (size == MO_64) {
7765         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7766         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7767         TCGv_i64 tcg_res = tcg_temp_new_i64();
7768 
7769         read_vec_element(s, tcg_op1, rn, 0, MO_64);
7770         read_vec_element(s, tcg_op2, rn, 1, MO_64);
7771 
7772         switch (opcode) {
7773         case 0x3b: /* ADDP */
7774             tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
7775             break;
7776         case 0xc: /* FMAXNMP */
7777             gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7778             break;
7779         case 0xd: /* FADDP */
7780             gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7781             break;
7782         case 0xf: /* FMAXP */
7783             gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7784             break;
7785         case 0x2c: /* FMINNMP */
7786             gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7787             break;
7788         case 0x2f: /* FMINP */
7789             gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7790             break;
7791         default:
7792             g_assert_not_reached();
7793         }
7794 
7795         write_fp_dreg(s, rd, tcg_res);
7796     } else {
7797         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7798         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7799         TCGv_i32 tcg_res = tcg_temp_new_i32();
7800 
7801         read_vec_element_i32(s, tcg_op1, rn, 0, size);
7802         read_vec_element_i32(s, tcg_op2, rn, 1, size);
7803 
7804         if (size == MO_16) {
7805             switch (opcode) {
7806             case 0xc: /* FMAXNMP */
7807                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7808                 break;
7809             case 0xd: /* FADDP */
7810                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
7811                 break;
7812             case 0xf: /* FMAXP */
7813                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
7814                 break;
7815             case 0x2c: /* FMINNMP */
7816                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
7817                 break;
7818             case 0x2f: /* FMINP */
7819                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
7820                 break;
7821             default:
7822                 g_assert_not_reached();
7823             }
7824         } else {
7825             switch (opcode) {
7826             case 0xc: /* FMAXNMP */
7827                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7828                 break;
7829             case 0xd: /* FADDP */
7830                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7831                 break;
7832             case 0xf: /* FMAXP */
7833                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7834                 break;
7835             case 0x2c: /* FMINNMP */
7836                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7837                 break;
7838             case 0x2f: /* FMINP */
7839                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7840                 break;
7841             default:
7842                 g_assert_not_reached();
7843             }
7844         }
7845 
7846         write_fp_sreg(s, rd, tcg_res);
7847     }
7848 }
7849 
7850 /*
7851  * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
7852  *
7853  * This code handles the common shifting logic and is used by both
7854  * the vector and scalar code.
7855  */
7856 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
7857                                     TCGv_i64 tcg_rnd, bool accumulate,
7858                                     bool is_u, int size, int shift)
7859 {
7860     bool extended_result = false;
7861     bool round = tcg_rnd != NULL;
7862     int ext_lshift = 0;
7863     TCGv_i64 tcg_src_hi;
7864 
7865     if (round && size == 3) {
7866         extended_result = true;
7867         ext_lshift = 64 - shift;
7868         tcg_src_hi = tcg_temp_new_i64();
7869     } else if (shift == 64) {
7870         if (!accumulate && is_u) {
7871             /* result is zero */
7872             tcg_gen_movi_i64(tcg_res, 0);
7873             return;
7874         }
7875     }
7876 
7877     /* Deal with the rounding step */
7878     if (round) {
7879         if (extended_result) {
7880             TCGv_i64 tcg_zero = tcg_constant_i64(0);
7881             if (!is_u) {
7882                 /* take care of sign extending tcg_res */
7883                 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
7884                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
7885                                  tcg_src, tcg_src_hi,
7886                                  tcg_rnd, tcg_zero);
7887             } else {
7888                 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
7889                                  tcg_src, tcg_zero,
7890                                  tcg_rnd, tcg_zero);
7891             }
7892         } else {
7893             tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
7894         }
7895     }
7896 
7897     /* Now do the shift right */
7898     if (round && extended_result) {
7899         /* extended case, >64 bit precision required */
7900         if (ext_lshift == 0) {
7901             /* special case, only high bits matter */
7902             tcg_gen_mov_i64(tcg_src, tcg_src_hi);
7903         } else {
7904             tcg_gen_shri_i64(tcg_src, tcg_src, shift);
7905             tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
7906             tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
7907         }
7908     } else {
7909         if (is_u) {
7910             if (shift == 64) {
7911                 /* essentially shifting in 64 zeros */
7912                 tcg_gen_movi_i64(tcg_src, 0);
7913             } else {
7914                 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
7915             }
7916         } else {
7917             if (shift == 64) {
7918                 /* effectively extending the sign-bit */
7919                 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
7920             } else {
7921                 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
7922             }
7923         }
7924     }
7925 
7926     if (accumulate) {
7927         tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
7928     } else {
7929         tcg_gen_mov_i64(tcg_res, tcg_src);
7930     }
7931 }
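
/*
 * Sketch of the rounding step above (illustrative only): for a rounding
 * right shift by 'shift', tcg_rnd holds 1 << (shift - 1); adding it before
 * the shift rounds to nearest with ties going upwards.  For 64-bit
 * elements the addition can carry out of 64 bits, which is why the
 * size == 3 case widens to a 128-bit intermediate (tcg_src:tcg_src_hi)
 * before shifting back down.
 */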
7932 
7933 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
7934 static void handle_scalar_simd_shri(DisasContext *s,
7935                                     bool is_u, int immh, int immb,
7936                                     int opcode, int rn, int rd)
7937 {
7938     const int size = 3;
7939     int immhb = immh << 3 | immb;
7940     int shift = 2 * (8 << size) - immhb;
7941     bool accumulate = false;
7942     bool round = false;
7943     bool insert = false;
7944     TCGv_i64 tcg_rn;
7945     TCGv_i64 tcg_rd;
7946     TCGv_i64 tcg_round;
7947 
7948     if (!extract32(immh, 3, 1)) {
7949         unallocated_encoding(s);
7950         return;
7951     }
7952 
7953     if (!fp_access_check(s)) {
7954         return;
7955     }
7956 
7957     switch (opcode) {
7958     case 0x02: /* SSRA / USRA (accumulate) */
7959         accumulate = true;
7960         break;
7961     case 0x04: /* SRSHR / URSHR (rounding) */
7962         round = true;
7963         break;
7964     case 0x06: /* SRSRA / URSRA (accum + rounding) */
7965         accumulate = round = true;
7966         break;
7967     case 0x08: /* SRI */
7968         insert = true;
7969         break;
7970     }
7971 
7972     if (round) {
7973         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
7974     } else {
7975         tcg_round = NULL;
7976     }
7977 
7978     tcg_rn = read_fp_dreg(s, rn);
7979     tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7980 
7981     if (insert) {
7982         /* shift count same as element size is valid but does nothing;
7983          * special case to avoid potential shift by 64.
7984          */
7985         int esize = 8 << size;
7986         if (shift != esize) {
7987             tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
7988             tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
7989         }
7990     } else {
7991         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
7992                                 accumulate, is_u, size, shift);
7993     }
7994 
7995     write_fp_dreg(s, rd, tcg_rd);
7996 }
7997 
7998 /* SHL/SLI - Scalar shift left */
7999 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
8000                                     int immh, int immb, int opcode,
8001                                     int rn, int rd)
8002 {
8003     int size = 32 - clz32(immh) - 1;
8004     int immhb = immh << 3 | immb;
8005     int shift = immhb - (8 << size);
8006     TCGv_i64 tcg_rn;
8007     TCGv_i64 tcg_rd;
8008 
8009     if (!extract32(immh, 3, 1)) {
8010         unallocated_encoding(s);
8011         return;
8012     }
8013 
8014     if (!fp_access_check(s)) {
8015         return;
8016     }
8017 
8018     tcg_rn = read_fp_dreg(s, rn);
8019     tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
8020 
8021     if (insert) {
8022         tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
8023     } else {
8024         tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
8025     }
8026 
8027     write_fp_dreg(s, rd, tcg_rd);
8028 }
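
/*
 * Summary of the immh:immb shift encodings used by the two handlers above
 * (editor's note): right shifts decode as
 *     shift = (2 * esize) - immhb
 * and left shifts as
 *     shift = immhb - esize
 * e.g. for 64-bit elements immh:immb == 0b1000:001 (65) means a right
 * shift of 63 or a left shift of 1.
 */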
8029 
8030 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
8031  * (signed/unsigned) narrowing */
8032 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
8033                                    bool is_u_shift, bool is_u_narrow,
8034                                    int immh, int immb, int opcode,
8035                                    int rn, int rd)
8036 {
8037     int immhb = immh << 3 | immb;
8038     int size = 32 - clz32(immh) - 1;
8039     int esize = 8 << size;
8040     int shift = (2 * esize) - immhb;
8041     int elements = is_scalar ? 1 : (64 / esize);
8042     bool round = extract32(opcode, 0, 1);
8043     MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
8044     TCGv_i64 tcg_rn, tcg_rd, tcg_round;
8045     TCGv_i32 tcg_rd_narrowed;
8046     TCGv_i64 tcg_final;
8047 
8048     static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
8049         { gen_helper_neon_narrow_sat_s8,
8050           gen_helper_neon_unarrow_sat8 },
8051         { gen_helper_neon_narrow_sat_s16,
8052           gen_helper_neon_unarrow_sat16 },
8053         { gen_helper_neon_narrow_sat_s32,
8054           gen_helper_neon_unarrow_sat32 },
8055         { NULL, NULL },
8056     };
8057     static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
8058         gen_helper_neon_narrow_sat_u8,
8059         gen_helper_neon_narrow_sat_u16,
8060         gen_helper_neon_narrow_sat_u32,
8061         NULL
8062     };
8063     NeonGenNarrowEnvFn *narrowfn;
8064 
8065     int i;
8066 
8067     assert(size < 4);
8068 
8069     if (extract32(immh, 3, 1)) {
8070         unallocated_encoding(s);
8071         return;
8072     }
8073 
8074     if (!fp_access_check(s)) {
8075         return;
8076     }
8077 
8078     if (is_u_shift) {
8079         narrowfn = unsigned_narrow_fns[size];
8080     } else {
8081         narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
8082     }
8083 
8084     tcg_rn = tcg_temp_new_i64();
8085     tcg_rd = tcg_temp_new_i64();
8086     tcg_rd_narrowed = tcg_temp_new_i32();
8087     tcg_final = tcg_temp_new_i64();
8088 
8089     if (round) {
8090         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
8091     } else {
8092         tcg_round = NULL;
8093     }
8094 
8095     for (i = 0; i < elements; i++) {
8096         read_vec_element(s, tcg_rn, rn, i, ldop);
8097         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
8098                                 false, is_u_shift, size+1, shift);
8099         narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
8100         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
8101         if (i == 0) {
8102             tcg_gen_mov_i64(tcg_final, tcg_rd);
8103         } else {
8104             tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
8105         }
8106     }
8107 
8108     if (!is_q) {
8109         write_vec_element(s, tcg_final, rd, 0, MO_64);
8110     } else {
8111         write_vec_element(s, tcg_final, rd, 1, MO_64);
8112     }
8113     clear_vec_high(s, is_q, rd);
8114 }
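
/*
 * Note (editor's gloss on the write-back above): for the narrowing shifts,
 * is_q selects between the base form, which writes the 64-bit narrowed
 * result to the low half of Vd, and the "2" form (e.g. SQSHRN2), which
 * writes it to the high half and leaves the low half intact;
 * clear_vec_high() then zeroes the untouched upper bits (above bit 63 for
 * the base form, above bit 127 for the "2" form).
 */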
8115 
8116 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
8117 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
8118                              bool src_unsigned, bool dst_unsigned,
8119                              int immh, int immb, int rn, int rd)
8120 {
8121     int immhb = immh << 3 | immb;
8122     int size = 32 - clz32(immh) - 1;
8123     int shift = immhb - (8 << size);
8124     int pass;
8125 
8126     assert(immh != 0);
8127     assert(!(scalar && is_q));
8128 
8129     if (!scalar) {
8130         if (!is_q && extract32(immh, 3, 1)) {
8131             unallocated_encoding(s);
8132             return;
8133         }
8134 
8135         /* Since we use the variable-shift helpers we must
8136          * replicate the shift count into each element of
8137          * the tcg_shift value.
8138          */
8139         switch (size) {
8140         case 0:
8141             shift |= shift << 8;
8142             /* fall through */
8143         case 1:
8144             shift |= shift << 16;
8145             break;
8146         case 2:
8147         case 3:
8148             break;
8149         default:
8150             g_assert_not_reached();
8151         }
8152     }
8153 
8154     if (!fp_access_check(s)) {
8155         return;
8156     }
8157 
8158     if (size == 3) {
8159         TCGv_i64 tcg_shift = tcg_constant_i64(shift);
8160         static NeonGenTwo64OpEnvFn * const fns[2][2] = {
8161             { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
8162             { NULL, gen_helper_neon_qshl_u64 },
8163         };
8164         NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
8165         int maxpass = is_q ? 2 : 1;
8166 
8167         for (pass = 0; pass < maxpass; pass++) {
8168             TCGv_i64 tcg_op = tcg_temp_new_i64();
8169 
8170             read_vec_element(s, tcg_op, rn, pass, MO_64);
8171             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8172             write_vec_element(s, tcg_op, rd, pass, MO_64);
8173         }
8174         clear_vec_high(s, is_q, rd);
8175     } else {
8176         TCGv_i32 tcg_shift = tcg_constant_i32(shift);
8177         static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
8178             {
8179                 { gen_helper_neon_qshl_s8,
8180                   gen_helper_neon_qshl_s16,
8181                   gen_helper_neon_qshl_s32 },
8182                 { gen_helper_neon_qshlu_s8,
8183                   gen_helper_neon_qshlu_s16,
8184                   gen_helper_neon_qshlu_s32 }
8185             }, {
8186                 { NULL, NULL, NULL },
8187                 { gen_helper_neon_qshl_u8,
8188                   gen_helper_neon_qshl_u16,
8189                   gen_helper_neon_qshl_u32 }
8190             }
8191         };
8192         NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
8193         MemOp memop = scalar ? size : MO_32;
8194         int maxpass = scalar ? 1 : is_q ? 4 : 2;
8195 
8196         for (pass = 0; pass < maxpass; pass++) {
8197             TCGv_i32 tcg_op = tcg_temp_new_i32();
8198 
8199             read_vec_element_i32(s, tcg_op, rn, pass, memop);
8200             genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
8201             if (scalar) {
8202                 switch (size) {
8203                 case 0:
8204                     tcg_gen_ext8u_i32(tcg_op, tcg_op);
8205                     break;
8206                 case 1:
8207                     tcg_gen_ext16u_i32(tcg_op, tcg_op);
8208                     break;
8209                 case 2:
8210                     break;
8211                 default:
8212                     g_assert_not_reached();
8213                 }
8214                 write_fp_sreg(s, rd, tcg_op);
8215             } else {
8216                 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
8217             }
8218         }
8219 
8220         if (!scalar) {
8221             clear_vec_high(s, is_q, rd);
8222         }
8223     }
8224 }
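
/*
 * Editorial sketch: the shift-count replication described in the comment in
 * handle_simd_qshl() above, as a standalone function.  The variable-shift
 * Neon helpers expect one shift count per element, so for byte elements a
 * count of 3 becomes 0x03030303 and for halfword elements 0x00030003.
 * Hypothetical helper name; the translator does this inline.
 */
static inline uint32_t example_replicate_shift(uint32_t shift, int size)
{
    if (size == 0) {
        shift |= shift << 8;    /* one copy per byte lane */
    }
    if (size <= 1) {
        shift |= shift << 16;   /* one copy per halfword lane */
    }
    return shift;
}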
8225 
8226 /* Common vector code for handling integer to FP conversion */
8227 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
8228                                    int elements, int is_signed,
8229                                    int fracbits, int size)
8230 {
8231     TCGv_ptr tcg_fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8232     TCGv_i32 tcg_shift = NULL;
8233 
8234     MemOp mop = size | (is_signed ? MO_SIGN : 0);
8235     int pass;
8236 
8237     if (fracbits || size == MO_64) {
8238         tcg_shift = tcg_constant_i32(fracbits);
8239     }
8240 
8241     if (size == MO_64) {
8242         TCGv_i64 tcg_int64 = tcg_temp_new_i64();
8243         TCGv_i64 tcg_double = tcg_temp_new_i64();
8244 
8245         for (pass = 0; pass < elements; pass++) {
8246             read_vec_element(s, tcg_int64, rn, pass, mop);
8247 
8248             if (is_signed) {
8249                 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
8250                                      tcg_shift, tcg_fpst);
8251             } else {
8252                 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
8253                                      tcg_shift, tcg_fpst);
8254             }
8255             if (elements == 1) {
8256                 write_fp_dreg(s, rd, tcg_double);
8257             } else {
8258                 write_vec_element(s, tcg_double, rd, pass, MO_64);
8259             }
8260         }
8261     } else {
8262         TCGv_i32 tcg_int32 = tcg_temp_new_i32();
8263         TCGv_i32 tcg_float = tcg_temp_new_i32();
8264 
8265         for (pass = 0; pass < elements; pass++) {
8266             read_vec_element_i32(s, tcg_int32, rn, pass, mop);
8267 
8268             switch (size) {
8269             case MO_32:
8270                 if (fracbits) {
8271                     if (is_signed) {
8272                         gen_helper_vfp_sltos(tcg_float, tcg_int32,
8273                                              tcg_shift, tcg_fpst);
8274                     } else {
8275                         gen_helper_vfp_ultos(tcg_float, tcg_int32,
8276                                              tcg_shift, tcg_fpst);
8277                     }
8278                 } else {
8279                     if (is_signed) {
8280                         gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
8281                     } else {
8282                         gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
8283                     }
8284                 }
8285                 break;
8286             case MO_16:
8287                 if (fracbits) {
8288                     if (is_signed) {
8289                         gen_helper_vfp_sltoh(tcg_float, tcg_int32,
8290                                              tcg_shift, tcg_fpst);
8291                     } else {
8292                         gen_helper_vfp_ultoh(tcg_float, tcg_int32,
8293                                              tcg_shift, tcg_fpst);
8294                     }
8295                 } else {
8296                     if (is_signed) {
8297                         gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
8298                     } else {
8299                         gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
8300                     }
8301                 }
8302                 break;
8303             default:
8304                 g_assert_not_reached();
8305             }
8306 
8307             if (elements == 1) {
8308                 write_fp_sreg(s, rd, tcg_float);
8309             } else {
8310                 write_vec_element_i32(s, tcg_float, rd, pass, size);
8311             }
8312         }
8313     }
8314 
8315     clear_vec_high(s, (elements << size) == 16, rd);
8316 }
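
/*
 * Editorial sketch: the per-element arithmetic done by the int-to-FP
 * conversion above, in plain C for the signed 32-bit case.  With
 * fracbits == 0 this is an ordinary SCVTF; with fracbits > 0 the integer is
 * a fixed-point value and is scaled down by 2^fracbits.  Illustrative only:
 * the vfp_*to* helpers honour the FPCR rounding mode and exception flags via
 * fp_status, which this sketch ignores.
 */
static inline float example_scvtf32(int32_t x, int fracbits)
{
    /*
     * x / 2^fracbits is exact in double (at most 31 significant bits), so
     * the only rounding is the final conversion to float.
     */
    return (float)(x / (double)(1ULL << fracbits));
}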
8317 
8318 /* UCVTF/SCVTF - Integer to FP conversion */
8319 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
8320                                          bool is_q, bool is_u,
8321                                          int immh, int immb, int opcode,
8322                                          int rn, int rd)
8323 {
8324     int size, elements, fracbits;
8325     int immhb = immh << 3 | immb;
8326 
8327     if (immh & 8) {
8328         size = MO_64;
8329         if (!is_scalar && !is_q) {
8330             unallocated_encoding(s);
8331             return;
8332         }
8333     } else if (immh & 4) {
8334         size = MO_32;
8335     } else if (immh & 2) {
8336         size = MO_16;
8337         if (!dc_isar_feature(aa64_fp16, s)) {
8338             unallocated_encoding(s);
8339             return;
8340         }
8341     } else {
8342         /* immh == 0 would be a failure of the decode logic */
8343         g_assert(immh == 1);
8344         unallocated_encoding(s);
8345         return;
8346     }
8347 
8348     if (is_scalar) {
8349         elements = 1;
8350     } else {
8351         elements = (8 << is_q) >> size;
8352     }
8353     fracbits = (16 << size) - immhb;
8354 
8355     if (!fp_access_check(s)) {
8356         return;
8357     }
8358 
8359     handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
8360 }
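
/*
 * Editorial sketch: how immh:immb encodes element size and fraction bits for
 * the conversions above.  The highest set bit of immh selects the element
 * size, and fracbits = (2 * element_bits) - immhb.  For example immh = 0100,
 * immb = 101 gives immhb = 0x25 = 37, i.e. 32-bit elements with
 * 64 - 37 = 27 fraction bits.  Hypothetical helper; mirrors the code above.
 */
static inline int example_cvt_fracbits(int immh, int immb)
{
    int size = 31 - clz32(immh);        /* MO_8..MO_64 from the top set bit */

    return (16 << size) - (immh << 3 | immb);
}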
8361 
8362 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
8363 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
8364                                          bool is_q, bool is_u,
8365                                          int immh, int immb, int rn, int rd)
8366 {
8367     int immhb = immh << 3 | immb;
8368     int pass, size, fracbits;
8369     TCGv_ptr tcg_fpstatus;
8370     TCGv_i32 tcg_rmode, tcg_shift;
8371 
8372     if (immh & 0x8) {
8373         size = MO_64;
8374         if (!is_scalar && !is_q) {
8375             unallocated_encoding(s);
8376             return;
8377         }
8378     } else if (immh & 0x4) {
8379         size = MO_32;
8380     } else if (immh & 0x2) {
8381         size = MO_16;
8382         if (!dc_isar_feature(aa64_fp16, s)) {
8383             unallocated_encoding(s);
8384             return;
8385         }
8386     } else {
8387         /* Should have split out AdvSIMD modified immediate earlier.  */
8388         assert(immh == 1);
8389         unallocated_encoding(s);
8390         return;
8391     }
8392 
8393     if (!fp_access_check(s)) {
8394         return;
8395     }
8396 
8397     assert(!(is_scalar && is_q));
8398 
8399     tcg_fpstatus = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
8400     tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, tcg_fpstatus);
8401     fracbits = (16 << size) - immhb;
8402     tcg_shift = tcg_constant_i32(fracbits);
8403 
8404     if (size == MO_64) {
8405         int maxpass = is_scalar ? 1 : 2;
8406 
8407         for (pass = 0; pass < maxpass; pass++) {
8408             TCGv_i64 tcg_op = tcg_temp_new_i64();
8409 
8410             read_vec_element(s, tcg_op, rn, pass, MO_64);
8411             if (is_u) {
8412                 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8413             } else {
8414                 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8415             }
8416             write_vec_element(s, tcg_op, rd, pass, MO_64);
8417         }
8418         clear_vec_high(s, is_q, rd);
8419     } else {
8420         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
8421         int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
8422 
8423         switch (size) {
8424         case MO_16:
8425             if (is_u) {
8426                 fn = gen_helper_vfp_touhh;
8427             } else {
8428                 fn = gen_helper_vfp_toshh;
8429             }
8430             break;
8431         case MO_32:
8432             if (is_u) {
8433                 fn = gen_helper_vfp_touls;
8434             } else {
8435                 fn = gen_helper_vfp_tosls;
8436             }
8437             break;
8438         default:
8439             g_assert_not_reached();
8440         }
8441 
8442         for (pass = 0; pass < maxpass; pass++) {
8443             TCGv_i32 tcg_op = tcg_temp_new_i32();
8444 
8445             read_vec_element_i32(s, tcg_op, rn, pass, size);
8446             fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
8447             if (is_scalar) {
8448                 write_fp_sreg(s, rd, tcg_op);
8449             } else {
8450                 write_vec_element_i32(s, tcg_op, rd, pass, size);
8451             }
8452         }
8453         if (!is_scalar) {
8454             clear_vec_high(s, is_q, rd);
8455         }
8456     }
8457 
8458     gen_restore_rmode(tcg_rmode, tcg_fpstatus);
8459 }
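
/*
 * Editorial sketch: the per-element arithmetic of FCVTZS above for 32-bit
 * elements.  The value is scaled up by 2^fracbits and truncated, which is
 * why the rounding mode is forced to FPROUNDING_ZERO before the loop.
 * Illustrative only: NaN handling and the FPSR exception flags maintained by
 * the vfp_tosls/touls helpers are omitted here.
 */
static inline int32_t example_fcvtzs32(float x, int fracbits)
{
    float scaled = x * (float)(1ULL << fracbits);

    /* the helpers saturate out-of-range results; mirror that here */
    if (scaled >= 2147483648.0f) {
        return INT32_MAX;
    } else if (scaled < -2147483648.0f) {
        return INT32_MIN;
    }
    return (int32_t)scaled;     /* C truncation == round toward zero */
}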
8460 
8461 /* AdvSIMD scalar shift by immediate
8462  *  31 30  29 28         23 22  19 18  16 15    11  10 9    5 4    0
8463  * +-----+---+-------------+------+------+--------+---+------+------+
8464  * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
8465  * +-----+---+-------------+------+------+--------+---+------+------+
8466  *
8467  * This is the scalar version, so it works on fixed-size registers.
8468  */
8469 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
8470 {
8471     int rd = extract32(insn, 0, 5);
8472     int rn = extract32(insn, 5, 5);
8473     int opcode = extract32(insn, 11, 5);
8474     int immb = extract32(insn, 16, 3);
8475     int immh = extract32(insn, 19, 4);
8476     bool is_u = extract32(insn, 29, 1);
8477 
8478     if (immh == 0) {
8479         unallocated_encoding(s);
8480         return;
8481     }
8482 
8483     switch (opcode) {
8484     case 0x08: /* SRI */
8485         if (!is_u) {
8486             unallocated_encoding(s);
8487             return;
8488         }
8489         /* fall through */
8490     case 0x00: /* SSHR / USHR */
8491     case 0x02: /* SSRA / USRA */
8492     case 0x04: /* SRSHR / URSHR */
8493     case 0x06: /* SRSRA / URSRA */
8494         handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
8495         break;
8496     case 0x0a: /* SHL / SLI */
8497         handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
8498         break;
8499     case 0x1c: /* SCVTF, UCVTF */
8500         handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
8501                                      opcode, rn, rd);
8502         break;
8503     case 0x10: /* SQSHRUN, SQSHRUN2 */
8504     case 0x11: /* SQRSHRUN, SQRSHRUN2 */
8505         if (!is_u) {
8506             unallocated_encoding(s);
8507             return;
8508         }
8509         handle_vec_simd_sqshrn(s, true, false, false, true,
8510                                immh, immb, opcode, rn, rd);
8511         break;
8512     case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
8513     case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
8514         handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
8515                                immh, immb, opcode, rn, rd);
8516         break;
8517     case 0xc: /* SQSHLU */
8518         if (!is_u) {
8519             unallocated_encoding(s);
8520             return;
8521         }
8522         handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
8523         break;
8524     case 0xe: /* SQSHL, UQSHL */
8525         handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
8526         break;
8527     case 0x1f: /* FCVTZS, FCVTZU */
8528         handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
8529         break;
8530     default:
8531         unallocated_encoding(s);
8532         break;
8533     }
8534 }
8535 
8536 /* AdvSIMD scalar three different
8537  *  31 30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
8538  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8539  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
8540  * +-----+---+-----------+------+---+------+--------+-----+------+------+
8541  */
8542 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
8543 {
8544     bool is_u = extract32(insn, 29, 1);
8545     int size = extract32(insn, 22, 2);
8546     int opcode = extract32(insn, 12, 4);
8547     int rm = extract32(insn, 16, 5);
8548     int rn = extract32(insn, 5, 5);
8549     int rd = extract32(insn, 0, 5);
8550 
8551     if (is_u) {
8552         unallocated_encoding(s);
8553         return;
8554     }
8555 
8556     switch (opcode) {
8557     case 0x9: /* SQDMLAL, SQDMLAL2 */
8558     case 0xb: /* SQDMLSL, SQDMLSL2 */
8559     case 0xd: /* SQDMULL, SQDMULL2 */
8560         if (size == 0 || size == 3) {
8561             unallocated_encoding(s);
8562             return;
8563         }
8564         break;
8565     default:
8566         unallocated_encoding(s);
8567         return;
8568     }
8569 
8570     if (!fp_access_check(s)) {
8571         return;
8572     }
8573 
8574     if (size == 2) {
8575         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8576         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8577         TCGv_i64 tcg_res = tcg_temp_new_i64();
8578 
8579         read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
8580         read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
8581 
8582         tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
8583         gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
8584 
8585         switch (opcode) {
8586         case 0xd: /* SQDMULL, SQDMULL2 */
8587             break;
8588         case 0xb: /* SQDMLSL, SQDMLSL2 */
8589             tcg_gen_neg_i64(tcg_res, tcg_res);
8590             /* fall through */
8591         case 0x9: /* SQDMLAL, SQDMLAL2 */
8592             read_vec_element(s, tcg_op1, rd, 0, MO_64);
8593             gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
8594                                               tcg_res, tcg_op1);
8595             break;
8596         default:
8597             g_assert_not_reached();
8598         }
8599 
8600         write_fp_dreg(s, rd, tcg_res);
8601     } else {
8602         TCGv_i32 tcg_op1 = read_fp_hreg(s, rn);
8603         TCGv_i32 tcg_op2 = read_fp_hreg(s, rm);
8604         TCGv_i64 tcg_res = tcg_temp_new_i64();
8605 
8606         gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
8607         gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
8608 
8609         switch (opcode) {
8610         case 0xd: /* SQDMULL, SQDMULL2 */
8611             break;
8612         case 0xb: /* SQDMLSL, SQDMLSL2 */
8613             gen_helper_neon_negl_u32(tcg_res, tcg_res);
8614             /* fall through */
8615         case 0x9: /* SQDMLAL, SQDMLAL2 */
8616         {
8617             TCGv_i64 tcg_op3 = tcg_temp_new_i64();
8618             read_vec_element(s, tcg_op3, rd, 0, MO_32);
8619             gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
8620                                               tcg_res, tcg_op3);
8621             break;
8622         }
8623         default:
8624             g_assert_not_reached();
8625         }
8626 
8627         tcg_gen_ext32u_i64(tcg_res, tcg_res);
8628         write_fp_dreg(s, rd, tcg_res);
8629     }
8630 }
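
/*
 * Editorial sketch: SQDMULL on 32-bit elements in plain C.  The product is
 * doubled with saturation, which is why the code above feeds tcg_res into
 * gen_helper_neon_addl_saturate_s64 twice (res = sat(res + res)); the only
 * input pair that can saturate here is INT32_MIN * INT32_MIN, whose doubled
 * product would be 2^63.  Illustrative name and types; the helpers also set
 * the QC flag, which this sketch omits.
 */
static inline int64_t example_sqdmull_s32(int32_t a, int32_t b)
{
    int64_t product = (int64_t)a * b;

    if (product == (int64_t)1 << 62) {  /* INT32_MIN * INT32_MIN */
        return INT64_MAX;               /* doubling would overflow to 2^63 */
    }
    return product * 2;
}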
8631 
8632 static void handle_3same_64(DisasContext *s, int opcode, bool u,
8633                             TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
8634 {
8635     /* Handle 64x64->64 opcodes which are shared between the scalar
8636      * and vector 3-same groups. We cover every opcode where size == 3
8637      * is valid in either the three-reg-same (integer, not pairwise)
8638      * or scalar-three-reg-same groups.
8639      */
8640     TCGCond cond;
8641 
8642     switch (opcode) {
8643     case 0x1: /* SQADD */
8644         if (u) {
8645             gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8646         } else {
8647             gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8648         }
8649         break;
8650     case 0x5: /* SQSUB */
8651         if (u) {
8652             gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8653         } else {
8654             gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8655         }
8656         break;
8657     case 0x6: /* CMGT, CMHI */
8658         /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
8659          * We implement this using setcond (test) and then negating.
8660          */
8661         cond = u ? TCG_COND_GTU : TCG_COND_GT;
8662     do_cmop:
8663         tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
8664         tcg_gen_neg_i64(tcg_rd, tcg_rd);
8665         break;
8666     case 0x7: /* CMGE, CMHS */
8667         cond = u ? TCG_COND_GEU : TCG_COND_GE;
8668         goto do_cmop;
8669     case 0x11: /* CMTST, CMEQ */
8670         if (u) {
8671             cond = TCG_COND_EQ;
8672             goto do_cmop;
8673         }
8674         gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
8675         break;
8676     case 0x8: /* SSHL, USHL */
8677         if (u) {
8678             gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm);
8679         } else {
8680             gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm);
8681         }
8682         break;
8683     case 0x9: /* SQSHL, UQSHL */
8684         if (u) {
8685             gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8686         } else {
8687             gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8688         }
8689         break;
8690     case 0xa: /* SRSHL, URSHL */
8691         if (u) {
8692             gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
8693         } else {
8694             gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
8695         }
8696         break;
8697     case 0xb: /* SQRSHL, UQRSHL */
8698         if (u) {
8699             gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8700         } else {
8701             gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
8702         }
8703         break;
8704     case 0x10: /* ADD, SUB */
8705         if (u) {
8706             tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
8707         } else {
8708             tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
8709         }
8710         break;
8711     default:
8712         g_assert_not_reached();
8713     }
8714 }
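
/*
 * Editorial sketch: the setcond-and-negate idiom used by the 64-bit
 * comparisons above.  setcond yields 0 or 1; negating turns the 1 into an
 * all-ones mask, giving the "test ? (2^64 - 1) : 0" result described in the
 * CMGT/CMHI comment.  Hypothetical helper name, plain C instead of TCG ops.
 */
static inline uint64_t example_cmgt_mask(int64_t a, int64_t b)
{
    return -(uint64_t)(a > b);  /* 0 -> 0, 1 -> 0xffffffffffffffff */
}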
8715 
8716 /* Handle the 3-same-operands float operations; shared by the scalar
8717  * and vector encodings. The caller must filter out any encodings
8718  * not allocated for the variant it is dealing with.
8719  */
8720 static void handle_3same_float(DisasContext *s, int size, int elements,
8721                                int fpopcode, int rd, int rn, int rm)
8722 {
8723     int pass;
8724     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
8725 
8726     for (pass = 0; pass < elements; pass++) {
8727         if (size) {
8728             /* Double */
8729             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
8730             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
8731             TCGv_i64 tcg_res = tcg_temp_new_i64();
8732 
8733             read_vec_element(s, tcg_op1, rn, pass, MO_64);
8734             read_vec_element(s, tcg_op2, rm, pass, MO_64);
8735 
8736             switch (fpopcode) {
8737             case 0x39: /* FMLS */
8738                 /* As usual for ARM, separate negation for fused multiply-add */
8739                 gen_helper_vfp_negd(tcg_op1, tcg_op1);
8740                 /* fall through */
8741             case 0x19: /* FMLA */
8742                 read_vec_element(s, tcg_res, rd, pass, MO_64);
8743                 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
8744                                        tcg_res, fpst);
8745                 break;
8746             case 0x18: /* FMAXNM */
8747                 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8748                 break;
8749             case 0x1a: /* FADD */
8750                 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
8751                 break;
8752             case 0x1b: /* FMULX */
8753                 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
8754                 break;
8755             case 0x1c: /* FCMEQ */
8756                 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8757                 break;
8758             case 0x1e: /* FMAX */
8759                 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
8760                 break;
8761             case 0x1f: /* FRECPS */
8762                 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8763                 break;
8764             case 0x38: /* FMINNM */
8765                 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
8766                 break;
8767             case 0x3a: /* FSUB */
8768                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8769                 break;
8770             case 0x3e: /* FMIN */
8771                 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
8772                 break;
8773             case 0x3f: /* FRSQRTS */
8774                 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8775                 break;
8776             case 0x5b: /* FMUL */
8777                 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
8778                 break;
8779             case 0x5c: /* FCMGE */
8780                 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8781                 break;
8782             case 0x5d: /* FACGE */
8783                 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8784                 break;
8785             case 0x5f: /* FDIV */
8786                 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
8787                 break;
8788             case 0x7a: /* FABD */
8789                 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
8790                 gen_helper_vfp_absd(tcg_res, tcg_res);
8791                 break;
8792             case 0x7c: /* FCMGT */
8793                 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8794                 break;
8795             case 0x7d: /* FACGT */
8796                 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
8797                 break;
8798             default:
8799                 g_assert_not_reached();
8800             }
8801 
8802             write_vec_element(s, tcg_res, rd, pass, MO_64);
8803         } else {
8804             /* Single */
8805             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
8806             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
8807             TCGv_i32 tcg_res = tcg_temp_new_i32();
8808 
8809             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
8810             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
8811 
8812             switch (fpopcode) {
8813             case 0x39: /* FMLS */
8814                 /* As usual for ARM, separate negation for fused multiply-add */
8815                 gen_helper_vfp_negs(tcg_op1, tcg_op1);
8816                 /* fall through */
8817             case 0x19: /* FMLA */
8818                 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8819                 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
8820                                        tcg_res, fpst);
8821                 break;
8822             case 0x1a: /* FADD */
8823                 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
8824                 break;
8825             case 0x1b: /* FMULX */
8826                 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
8827                 break;
8828             case 0x1c: /* FCMEQ */
8829                 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8830                 break;
8831             case 0x1e: /* FMAX */
8832                 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
8833                 break;
8834             case 0x1f: /* FRECPS */
8835                 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8836                 break;
8837             case 0x18: /* FMAXNM */
8838                 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
8839                 break;
8840             case 0x38: /* FMINNM */
8841                 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
8842                 break;
8843             case 0x3a: /* FSUB */
8844                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
8845                 break;
8846             case 0x3e: /* FMIN */
8847                 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
8848                 break;
8849             case 0x3f: /* FRSQRTS */
8850                 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8851                 break;
8852             case 0x5b: /* FMUL */
8853                 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
8854                 break;
8855             case 0x5c: /* FCMGE */
8856                 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8857                 break;
8858             case 0x5d: /* FACGE */
8859                 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8860                 break;
8861             case 0x5f: /* FDIV */
8862                 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
8863                 break;
8864             case 0x7a: /* FABD */
8865                 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
8866                 gen_helper_vfp_abss(tcg_res, tcg_res);
8867                 break;
8868             case 0x7c: /* FCMGT */
8869                 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8870                 break;
8871             case 0x7d: /* FACGT */
8872                 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
8873                 break;
8874             default:
8875                 g_assert_not_reached();
8876             }
8877 
8878             if (elements == 1) {
8879                 /* scalar single so clear high part */
8880                 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
8881 
8882                 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
8883                 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
8884             } else {
8885                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8886             }
8887         }
8888     }
8889 
8890     clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
8891 }
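
/*
 * Editorial sketch: why FMLS above negates tcg_op1 before the fused
 * multiply-add instead of subtracting the product afterwards.  FMLS is
 * architecturally a single fused operation, rd = rd + (-rn * rm), with one
 * rounding at the end; an ordinary multiply followed by a subtract would
 * round twice.  Hypothetical helper; the translator uses the vfp_muladd
 * helpers, which round according to fp_status.
 */
static inline double example_fmls_d(double rd, double rn, double rm)
{
    /* __builtin_fma is used here only to avoid pulling in <math.h> */
    return __builtin_fma(-rn, rm, rd);  /* one rounding, like vfp_muladdd */
}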
8892 
8893 /* AdvSIMD scalar three same
8894  *  31 30  29 28       24 23  22  21 20  16 15    11  10 9    5 4    0
8895  * +-----+---+-----------+------+---+------+--------+---+------+------+
8896  * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
8897  * +-----+---+-----------+------+---+------+--------+---+------+------+
8898  */
8899 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
8900 {
8901     int rd = extract32(insn, 0, 5);
8902     int rn = extract32(insn, 5, 5);
8903     int opcode = extract32(insn, 11, 5);
8904     int rm = extract32(insn, 16, 5);
8905     int size = extract32(insn, 22, 2);
8906     bool u = extract32(insn, 29, 1);
8907     TCGv_i64 tcg_rd;
8908 
8909     if (opcode >= 0x18) {
8910         /* Floating point: U, size[1] and opcode indicate operation */
8911         int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
8912         switch (fpopcode) {
8913         case 0x1b: /* FMULX */
8914         case 0x1f: /* FRECPS */
8915         case 0x3f: /* FRSQRTS */
8916         case 0x5d: /* FACGE */
8917         case 0x7d: /* FACGT */
8918         case 0x1c: /* FCMEQ */
8919         case 0x5c: /* FCMGE */
8920         case 0x7c: /* FCMGT */
8921         case 0x7a: /* FABD */
8922             break;
8923         default:
8924             unallocated_encoding(s);
8925             return;
8926         }
8927 
8928         if (!fp_access_check(s)) {
8929             return;
8930         }
8931 
8932         handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
8933         return;
8934     }
8935 
8936     switch (opcode) {
8937     case 0x1: /* SQADD, UQADD */
8938     case 0x5: /* SQSUB, UQSUB */
8939     case 0x9: /* SQSHL, UQSHL */
8940     case 0xb: /* SQRSHL, UQRSHL */
8941         break;
8942     case 0x8: /* SSHL, USHL */
8943     case 0xa: /* SRSHL, URSHL */
8944     case 0x6: /* CMGT, CMHI */
8945     case 0x7: /* CMGE, CMHS */
8946     case 0x11: /* CMTST, CMEQ */
8947     case 0x10: /* ADD, SUB (vector) */
8948         if (size != 3) {
8949             unallocated_encoding(s);
8950             return;
8951         }
8952         break;
8953     case 0x16: /* SQDMULH, SQRDMULH (vector) */
8954         if (size != 1 && size != 2) {
8955             unallocated_encoding(s);
8956             return;
8957         }
8958         break;
8959     default:
8960         unallocated_encoding(s);
8961         return;
8962     }
8963 
8964     if (!fp_access_check(s)) {
8965         return;
8966     }
8967 
8968     tcg_rd = tcg_temp_new_i64();
8969 
8970     if (size == 3) {
8971         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
8972         TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
8973 
8974         handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
8975     } else {
8976         /* Do a single operation on the lowest element in the vector.
8977          * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
8978          * no side effects for all these operations.
8979          * OPTME: special-purpose helpers would avoid doing some
8980          * unnecessary work in the helper for the 8 and 16 bit cases.
8981          */
8982         NeonGenTwoOpEnvFn *genenvfn;
8983         TCGv_i32 tcg_rn = tcg_temp_new_i32();
8984         TCGv_i32 tcg_rm = tcg_temp_new_i32();
8985         TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
8986 
8987         read_vec_element_i32(s, tcg_rn, rn, 0, size);
8988         read_vec_element_i32(s, tcg_rm, rm, 0, size);
8989 
8990         switch (opcode) {
8991         case 0x1: /* SQADD, UQADD */
8992         {
8993             static NeonGenTwoOpEnvFn * const fns[3][2] = {
8994                 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
8995                 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
8996                 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
8997             };
8998             genenvfn = fns[size][u];
8999             break;
9000         }
9001         case 0x5: /* SQSUB, UQSUB */
9002         {
9003             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9004                 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
9005                 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
9006                 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
9007             };
9008             genenvfn = fns[size][u];
9009             break;
9010         }
9011         case 0x9: /* SQSHL, UQSHL */
9012         {
9013             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9014                 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
9015                 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
9016                 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
9017             };
9018             genenvfn = fns[size][u];
9019             break;
9020         }
9021         case 0xb: /* SQRSHL, UQRSHL */
9022         {
9023             static NeonGenTwoOpEnvFn * const fns[3][2] = {
9024                 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
9025                 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
9026                 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
9027             };
9028             genenvfn = fns[size][u];
9029             break;
9030         }
9031         case 0x16: /* SQDMULH, SQRDMULH */
9032         {
9033             static NeonGenTwoOpEnvFn * const fns[2][2] = {
9034                 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
9035                 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
9036             };
9037             assert(size == 1 || size == 2);
9038             genenvfn = fns[size - 1][u];
9039             break;
9040         }
9041         default:
9042             g_assert_not_reached();
9043         }
9044 
9045         genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
9046         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
9047     }
9048 
9049     write_fp_dreg(s, rd, tcg_rd);
9050 }
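
/*
 * Editorial sketch: how the 7-bit fpopcode used above is assembled from U,
 * size<1> and the 5-bit opcode field.  For example scalar FABD has U = 1,
 * size<1> = 1 and opcode = 0x1a, giving 0x40 | 0x20 | 0x1a = 0x7a, which is
 * the FABD case handled in handle_3same_float().  Hypothetical helper name.
 */
static inline int example_fp_3same_opcode(int opcode, int size, int u)
{
    return opcode | (extract32(size, 1, 1) << 5) | (u << 6);
}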
9051 
9052 /* AdvSIMD scalar three same FP16
9053  *  31 30  29 28       24 23  22 21 20  16 15 14 13    11 10  9  5 4  0
9054  * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9055  * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
9056  * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
9057  * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
9058  * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
9059  */
9060 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
9061                                                   uint32_t insn)
9062 {
9063     int rd = extract32(insn, 0, 5);
9064     int rn = extract32(insn, 5, 5);
9065     int opcode = extract32(insn, 11, 3);
9066     int rm = extract32(insn, 16, 5);
9067     bool u = extract32(insn, 29, 1);
9068     bool a = extract32(insn, 23, 1);
9069     int fpopcode = opcode | (a << 3) | (u << 4);
9070     TCGv_ptr fpst;
9071     TCGv_i32 tcg_op1;
9072     TCGv_i32 tcg_op2;
9073     TCGv_i32 tcg_res;
9074 
9075     switch (fpopcode) {
9076     case 0x03: /* FMULX */
9077     case 0x04: /* FCMEQ (reg) */
9078     case 0x07: /* FRECPS */
9079     case 0x0f: /* FRSQRTS */
9080     case 0x14: /* FCMGE (reg) */
9081     case 0x15: /* FACGE */
9082     case 0x1a: /* FABD */
9083     case 0x1c: /* FCMGT (reg) */
9084     case 0x1d: /* FACGT */
9085         break;
9086     default:
9087         unallocated_encoding(s);
9088         return;
9089     }
9090 
9091     if (!dc_isar_feature(aa64_fp16, s)) {
9092         unallocated_encoding(s);
             return;
9093     }
9094 
9095     if (!fp_access_check(s)) {
9096         return;
9097     }
9098 
9099     fpst = fpstatus_ptr(FPST_FPCR_F16);
9100 
9101     tcg_op1 = read_fp_hreg(s, rn);
9102     tcg_op2 = read_fp_hreg(s, rm);
9103     tcg_res = tcg_temp_new_i32();
9104 
9105     switch (fpopcode) {
9106     case 0x03: /* FMULX */
9107         gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
9108         break;
9109     case 0x04: /* FCMEQ (reg) */
9110         gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9111         break;
9112     case 0x07: /* FRECPS */
9113         gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9114         break;
9115     case 0x0f: /* FRSQRTS */
9116         gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9117         break;
9118     case 0x14: /* FCMGE (reg) */
9119         gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9120         break;
9121     case 0x15: /* FACGE */
9122         gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9123         break;
9124     case 0x1a: /* FABD */
9125         gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
9126         tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
9127         break;
9128     case 0x1c: /* FCMGT (reg) */
9129         gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9130         break;
9131     case 0x1d: /* FACGT */
9132         gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
9133         break;
9134     default:
9135         g_assert_not_reached();
9136     }
9137 
9138     write_fp_sreg(s, rd, tcg_res);
9139 }
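
/*
 * Editorial sketch: the half-precision FABD above computes the difference
 * and then simply clears bit 15, the IEEE binary16 sign bit; masking with
 * 0x7fff is all that "absolute value" needs to do on the raw 16-bit
 * representation.  Hypothetical helper name.
 */
static inline uint32_t example_fabs_f16_raw(uint32_t h)
{
    return h & 0x7fff;          /* clear the sign bit of a binary16 value */
}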
9140 
9141 /* AdvSIMD scalar three same extra
9142  *  31 30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
9143  * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9144  * | 0 1 | U | 1 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
9145  * +-----+---+-----------+------+---+------+---+--------+---+----+----+
9146  */
9147 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
9148                                                    uint32_t insn)
9149 {
9150     int rd = extract32(insn, 0, 5);
9151     int rn = extract32(insn, 5, 5);
9152     int opcode = extract32(insn, 11, 4);
9153     int rm = extract32(insn, 16, 5);
9154     int size = extract32(insn, 22, 2);
9155     bool u = extract32(insn, 29, 1);
9156     TCGv_i32 ele1, ele2, ele3;
9157     TCGv_i64 res;
9158     bool feature;
9159 
9160     switch (u * 16 + opcode) {
9161     case 0x10: /* SQRDMLAH (vector) */
9162     case 0x11: /* SQRDMLSH (vector) */
9163         if (size != 1 && size != 2) {
9164             unallocated_encoding(s);
9165             return;
9166         }
9167         feature = dc_isar_feature(aa64_rdm, s);
9168         break;
9169     default:
9170         unallocated_encoding(s);
9171         return;
9172     }
9173     if (!feature) {
9174         unallocated_encoding(s);
9175         return;
9176     }
9177     if (!fp_access_check(s)) {
9178         return;
9179     }
9180 
9181     /* Do a single operation on the lowest element in the vector.
9182      * We use the standard Neon helpers and rely on 0 OP 0 == 0
9183      * with no side effects for all these operations.
9184      * OPTME: special-purpose helpers would avoid doing some
9185      * unnecessary work in the helper for the 16 bit cases.
9186      */
9187     ele1 = tcg_temp_new_i32();
9188     ele2 = tcg_temp_new_i32();
9189     ele3 = tcg_temp_new_i32();
9190 
9191     read_vec_element_i32(s, ele1, rn, 0, size);
9192     read_vec_element_i32(s, ele2, rm, 0, size);
9193     read_vec_element_i32(s, ele3, rd, 0, size);
9194 
9195     switch (opcode) {
9196     case 0x0: /* SQRDMLAH */
9197         if (size == 1) {
9198             gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
9199         } else {
9200             gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
9201         }
9202         break;
9203     case 0x1: /* SQRDMLSH */
9204         if (size == 1) {
9205             gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
9206         } else {
9207             gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
9208         }
9209         break;
9210     default:
9211         g_assert_not_reached();
9212     }
9213 
9214     res = tcg_temp_new_i64();
9215     tcg_gen_extu_i32_i64(res, ele3);
9216     write_fp_dreg(s, rd, res);
9217 }
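
/*
 * Editorial sketch: SQRDMLAH on 16-bit elements in plain C, matching what
 * gen_helper_neon_qrdmlah_s16 computes for the scalar case above: the
 * accumulator and the doubled product are combined at full precision, a
 * rounding constant is added, and the result is shifted back down and
 * saturated.  Illustrative only; the QC (saturation) flag kept in cpu_env by
 * the helper is omitted.
 */
static inline int16_t example_sqrdmlah_s16(int16_t acc, int16_t n, int16_t m)
{
    int64_t r = ((int64_t)acc << 16) + 2 * (int64_t)n * m + (1 << 15);

    r >>= 16;                   /* scale back to 16-bit precision */
    if (r > INT16_MAX) {
        r = INT16_MAX;
    } else if (r < INT16_MIN) {
        r = INT16_MIN;
    }
    return (int16_t)r;
}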
9218 
9219 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
9220                             TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
9221                             TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
9222 {
9223     /* Handle 64->64 opcodes which are shared between the scalar and
9224      * vector 2-reg-misc groups. We cover every integer opcode where size == 3
9225      * is valid in either group and also the double-precision fp ops.
9226      * The caller only need provide tcg_rmode and tcg_fpstatus if the op
9227      * requires them.
9228      */
9229     TCGCond cond;
9230 
9231     switch (opcode) {
9232     case 0x4: /* CLS, CLZ */
9233         if (u) {
9234             tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
9235         } else {
9236             tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
9237         }
9238         break;
9239     case 0x5: /* NOT */
9240         /* This opcode is shared with CNT and RBIT but we have earlier
9241          * enforced that size == 3 if and only if this is the NOT insn.
9242          */
9243         tcg_gen_not_i64(tcg_rd, tcg_rn);
9244         break;
9245     case 0x7: /* SQABS, SQNEG */
9246         if (u) {
9247             gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
9248         } else {
9249             gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
9250         }
9251         break;
9252     case 0xa: /* CMLT */
9253         /* 64 bit integer comparison against zero, result is
9254          * test ? (2^64 - 1) : 0. We implement this using setcond (test)
9255          * and then negating, as for CMGT/CMHI above.
9256          */
9257         cond = TCG_COND_LT;
9258     do_cmop:
9259         tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
9260         tcg_gen_neg_i64(tcg_rd, tcg_rd);
9261         break;
9262     case 0x8: /* CMGT, CMGE */
9263         cond = u ? TCG_COND_GE : TCG_COND_GT;
9264         goto do_cmop;
9265     case 0x9: /* CMEQ, CMLE */
9266         cond = u ? TCG_COND_LE : TCG_COND_EQ;
9267         goto do_cmop;
9268     case 0xb: /* ABS, NEG */
9269         if (u) {
9270             tcg_gen_neg_i64(tcg_rd, tcg_rn);
9271         } else {
9272             tcg_gen_abs_i64(tcg_rd, tcg_rn);
9273         }
9274         break;
9275     case 0x2f: /* FABS */
9276         gen_helper_vfp_absd(tcg_rd, tcg_rn);
9277         break;
9278     case 0x6f: /* FNEG */
9279         gen_helper_vfp_negd(tcg_rd, tcg_rn);
9280         break;
9281     case 0x7f: /* FSQRT */
9282         gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
9283         break;
9284     case 0x1a: /* FCVTNS */
9285     case 0x1b: /* FCVTMS */
9286     case 0x1c: /* FCVTAS */
9287     case 0x3a: /* FCVTPS */
9288     case 0x3b: /* FCVTZS */
9289         gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9290         break;
9291     case 0x5a: /* FCVTNU */
9292     case 0x5b: /* FCVTMU */
9293     case 0x5c: /* FCVTAU */
9294     case 0x7a: /* FCVTPU */
9295     case 0x7b: /* FCVTZU */
9296         gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_constant_i32(0), tcg_fpstatus);
9297         break;
9298     case 0x18: /* FRINTN */
9299     case 0x19: /* FRINTM */
9300     case 0x38: /* FRINTP */
9301     case 0x39: /* FRINTZ */
9302     case 0x58: /* FRINTA */
9303     case 0x79: /* FRINTI */
9304         gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
9305         break;
9306     case 0x59: /* FRINTX */
9307         gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
9308         break;
9309     case 0x1e: /* FRINT32Z */
9310     case 0x5e: /* FRINT32X */
9311         gen_helper_frint32_d(tcg_rd, tcg_rn, tcg_fpstatus);
9312         break;
9313     case 0x1f: /* FRINT64Z */
9314     case 0x5f: /* FRINT64X */
9315         gen_helper_frint64_d(tcg_rd, tcg_rn, tcg_fpstatus);
9316         break;
9317     default:
9318         g_assert_not_reached();
9319     }
9320 }
9321 
9322 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
9323                                    bool is_scalar, bool is_u, bool is_q,
9324                                    int size, int rn, int rd)
9325 {
9326     bool is_double = (size == MO_64);
9327     TCGv_ptr fpst;
9328 
9329     if (!fp_access_check(s)) {
9330         return;
9331     }
9332 
9333     fpst = fpstatus_ptr(size == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
9334 
9335     if (is_double) {
9336         TCGv_i64 tcg_op = tcg_temp_new_i64();
9337         TCGv_i64 tcg_zero = tcg_constant_i64(0);
9338         TCGv_i64 tcg_res = tcg_temp_new_i64();
9339         NeonGenTwoDoubleOpFn *genfn;
9340         bool swap = false;
9341         int pass;
9342 
9343         switch (opcode) {
9344         case 0x2e: /* FCMLT (zero) */
9345             swap = true;
9346             /* fall through */
9347         case 0x2c: /* FCMGT (zero) */
9348             genfn = gen_helper_neon_cgt_f64;
9349             break;
9350         case 0x2d: /* FCMEQ (zero) */
9351             genfn = gen_helper_neon_ceq_f64;
9352             break;
9353         case 0x6d: /* FCMLE (zero) */
9354             swap = true;
9355             /* fall through */
9356         case 0x6c: /* FCMGE (zero) */
9357             genfn = gen_helper_neon_cge_f64;
9358             break;
9359         default:
9360             g_assert_not_reached();
9361         }
9362 
9363         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9364             read_vec_element(s, tcg_op, rn, pass, MO_64);
9365             if (swap) {
9366                 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9367             } else {
9368                 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9369             }
9370             write_vec_element(s, tcg_res, rd, pass, MO_64);
9371         }
9372 
9373         clear_vec_high(s, !is_scalar, rd);
9374     } else {
9375         TCGv_i32 tcg_op = tcg_temp_new_i32();
9376         TCGv_i32 tcg_zero = tcg_constant_i32(0);
9377         TCGv_i32 tcg_res = tcg_temp_new_i32();
9378         NeonGenTwoSingleOpFn *genfn;
9379         bool swap = false;
9380         int pass, maxpasses;
9381 
9382         if (size == MO_16) {
9383             switch (opcode) {
9384             case 0x2e: /* FCMLT (zero) */
9385                 swap = true;
9386                 /* fall through */
9387             case 0x2c: /* FCMGT (zero) */
9388                 genfn = gen_helper_advsimd_cgt_f16;
9389                 break;
9390             case 0x2d: /* FCMEQ (zero) */
9391                 genfn = gen_helper_advsimd_ceq_f16;
9392                 break;
9393             case 0x6d: /* FCMLE (zero) */
9394                 swap = true;
9395                 /* fall through */
9396             case 0x6c: /* FCMGE (zero) */
9397                 genfn = gen_helper_advsimd_cge_f16;
9398                 break;
9399             default:
9400                 g_assert_not_reached();
9401             }
9402         } else {
9403             switch (opcode) {
9404             case 0x2e: /* FCMLT (zero) */
9405                 swap = true;
9406                 /* fall through */
9407             case 0x2c: /* FCMGT (zero) */
9408                 genfn = gen_helper_neon_cgt_f32;
9409                 break;
9410             case 0x2d: /* FCMEQ (zero) */
9411                 genfn = gen_helper_neon_ceq_f32;
9412                 break;
9413             case 0x6d: /* FCMLE (zero) */
9414                 swap = true;
9415                 /* fall through */
9416             case 0x6c: /* FCMGE (zero) */
9417                 genfn = gen_helper_neon_cge_f32;
9418                 break;
9419             default:
9420                 g_assert_not_reached();
9421             }
9422         }
9423 
9424         if (is_scalar) {
9425             maxpasses = 1;
9426         } else {
9427             int vector_size = 8 << is_q;
9428             maxpasses = vector_size >> size;
9429         }
9430 
9431         for (pass = 0; pass < maxpasses; pass++) {
9432             read_vec_element_i32(s, tcg_op, rn, pass, size);
9433             if (swap) {
9434                 genfn(tcg_res, tcg_zero, tcg_op, fpst);
9435             } else {
9436                 genfn(tcg_res, tcg_op, tcg_zero, fpst);
9437             }
9438             if (is_scalar) {
9439                 write_fp_sreg(s, rd, tcg_res);
9440             } else {
9441                 write_vec_element_i32(s, tcg_res, rd, pass, size);
9442             }
9443         }
9444 
9445         if (!is_scalar) {
9446             clear_vec_high(s, is_q, rd);
9447         }
9448     }
9449 }
9450 
9451 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
9452                                     bool is_scalar, bool is_u, bool is_q,
9453                                     int size, int rn, int rd)
9454 {
9455     bool is_double = (size == 3);
9456     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9457 
9458     if (is_double) {
9459         TCGv_i64 tcg_op = tcg_temp_new_i64();
9460         TCGv_i64 tcg_res = tcg_temp_new_i64();
9461         int pass;
9462 
9463         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9464             read_vec_element(s, tcg_op, rn, pass, MO_64);
9465             switch (opcode) {
9466             case 0x3d: /* FRECPE */
9467                 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
9468                 break;
9469             case 0x3f: /* FRECPX */
9470                 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
9471                 break;
9472             case 0x7d: /* FRSQRTE */
9473                 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
9474                 break;
9475             default:
9476                 g_assert_not_reached();
9477             }
9478             write_vec_element(s, tcg_res, rd, pass, MO_64);
9479         }
9480         clear_vec_high(s, !is_scalar, rd);
9481     } else {
9482         TCGv_i32 tcg_op = tcg_temp_new_i32();
9483         TCGv_i32 tcg_res = tcg_temp_new_i32();
9484         int pass, maxpasses;
9485 
9486         if (is_scalar) {
9487             maxpasses = 1;
9488         } else {
9489             maxpasses = is_q ? 4 : 2;
9490         }
9491 
9492         for (pass = 0; pass < maxpasses; pass++) {
9493             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
9494 
9495             switch (opcode) {
9496             case 0x3c: /* URECPE */
9497                 gen_helper_recpe_u32(tcg_res, tcg_op);
9498                 break;
9499             case 0x3d: /* FRECPE */
9500                 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
9501                 break;
9502             case 0x3f: /* FRECPX */
9503                 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
9504                 break;
9505             case 0x7d: /* FRSQRTE */
9506                 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
9507                 break;
9508             default:
9509                 g_assert_not_reached();
9510             }
9511 
9512             if (is_scalar) {
9513                 write_fp_sreg(s, rd, tcg_res);
9514             } else {
9515                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
9516             }
9517         }
9518         if (!is_scalar) {
9519             clear_vec_high(s, is_q, rd);
9520         }
9521     }
9522 }
9523 
9524 static void handle_2misc_narrow(DisasContext *s, bool scalar,
9525                                 int opcode, bool u, bool is_q,
9526                                 int size, int rn, int rd)
9527 {
9528     /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
9529      * in the source becomes a size element in the destination).
9530      */
9531     int pass;
9532     TCGv_i32 tcg_res[2];
9533     int destelt = is_q ? 2 : 0;
9534     int passes = scalar ? 1 : 2;
9535 
9536     if (scalar) {
9537         tcg_res[1] = tcg_constant_i32(0);
9538     }
9539 
9540     for (pass = 0; pass < passes; pass++) {
9541         TCGv_i64 tcg_op = tcg_temp_new_i64();
9542         NeonGenNarrowFn *genfn = NULL;
9543         NeonGenNarrowEnvFn *genenvfn = NULL;
9544 
9545         if (scalar) {
9546             read_vec_element(s, tcg_op, rn, pass, size + 1);
9547         } else {
9548             read_vec_element(s, tcg_op, rn, pass, MO_64);
9549         }
9550         tcg_res[pass] = tcg_temp_new_i32();
9551 
9552         switch (opcode) {
9553         case 0x12: /* XTN, SQXTUN */
9554         {
9555             static NeonGenNarrowFn * const xtnfns[3] = {
9556                 gen_helper_neon_narrow_u8,
9557                 gen_helper_neon_narrow_u16,
9558                 tcg_gen_extrl_i64_i32,
9559             };
9560             static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
9561                 gen_helper_neon_unarrow_sat8,
9562                 gen_helper_neon_unarrow_sat16,
9563                 gen_helper_neon_unarrow_sat32,
9564             };
9565             if (u) {
9566                 genenvfn = sqxtunfns[size];
9567             } else {
9568                 genfn = xtnfns[size];
9569             }
9570             break;
9571         }
9572         case 0x14: /* SQXTN, UQXTN */
9573         {
9574             static NeonGenNarrowEnvFn * const fns[3][2] = {
9575                 { gen_helper_neon_narrow_sat_s8,
9576                   gen_helper_neon_narrow_sat_u8 },
9577                 { gen_helper_neon_narrow_sat_s16,
9578                   gen_helper_neon_narrow_sat_u16 },
9579                 { gen_helper_neon_narrow_sat_s32,
9580                   gen_helper_neon_narrow_sat_u32 },
9581             };
9582             genenvfn = fns[size][u];
9583             break;
9584         }
9585         case 0x16: /* FCVTN, FCVTN2 */
9586             /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
9587             if (size == 2) {
9588                 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
9589             } else {
9590                 TCGv_i32 tcg_lo = tcg_temp_new_i32();
9591                 TCGv_i32 tcg_hi = tcg_temp_new_i32();
9592                 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9593                 TCGv_i32 ahp = get_ahp_flag();
9594 
9595                 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
9596                 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, fpst, ahp);
9597                 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, fpst, ahp);
9598                 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
9599             }
9600             break;
9601         case 0x36: /* BFCVTN, BFCVTN2 */
9602             {
9603                 TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
9604                 gen_helper_bfcvt_pair(tcg_res[pass], tcg_op, fpst);
9605             }
9606             break;
9607         case 0x56:  /* FCVTXN, FCVTXN2 */
9608             /* 64 bit to 32 bit float conversion
9609              * with von Neumann rounding (round to odd)
9610              */
9611             assert(size == 2);
9612             gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
9613             break;
9614         default:
9615             g_assert_not_reached();
9616         }
9617 
9618         if (genfn) {
9619             genfn(tcg_res[pass], tcg_op);
9620         } else if (genenvfn) {
9621             genenvfn(tcg_res[pass], cpu_env, tcg_op);
9622         }
9623     }
9624 
9625     for (pass = 0; pass < 2; pass++) {
9626         write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
9627     }
9628     clear_vec_high(s, is_q, rd);
9629 }
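
/*
 * Editorial sketch: the "von Neumann rounding" (round to odd) used by FCVTXN
 * above, demonstrated on a simple right shift.  Any discarded bits are
 * "jammed" into the lowest kept bit, so a later rounding of the narrowed
 * value to an even narrower format cannot double-round.  The real conversion
 * operates on the float64 format inside helper fcvtx_f64_to_f32; this is a
 * hypothetical integer analogue for illustration (shift assumed 1..63).
 */
static inline uint64_t example_round_to_odd_shift(uint64_t x, int shift)
{
    uint64_t kept = x >> shift;
    uint64_t discarded = x & ((1ULL << shift) - 1);

    return kept | (discarded != 0);     /* force the lsb on if bits were lost */
}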
9630 
9631 /* Remaining saturating accumulating ops */
9632 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
9633                                 bool is_q, int size, int rn, int rd)
9634 {
9635     bool is_double = (size == 3);
9636 
9637     if (is_double) {
9638         TCGv_i64 tcg_rn = tcg_temp_new_i64();
9639         TCGv_i64 tcg_rd = tcg_temp_new_i64();
9640         int pass;
9641 
9642         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
9643             read_vec_element(s, tcg_rn, rn, pass, MO_64);
9644             read_vec_element(s, tcg_rd, rd, pass, MO_64);
9645 
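            /*
             * USQADD: the signed value in rn is accumulated into the
             * unsigned rd with unsigned saturation; SUQADD: the unsigned
             * value in rn is accumulated into the signed rd with signed
             * saturation.
             */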
9646             if (is_u) { /* USQADD */
9647                 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9648             } else { /* SUQADD */
9649                 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9650             }
9651             write_vec_element(s, tcg_rd, rd, pass, MO_64);
9652         }
9653         clear_vec_high(s, !is_scalar, rd);
9654     } else {
9655         TCGv_i32 tcg_rn = tcg_temp_new_i32();
9656         TCGv_i32 tcg_rd = tcg_temp_new_i32();
9657         int pass, maxpasses;
9658 
9659         if (is_scalar) {
9660             maxpasses = 1;
9661         } else {
9662             maxpasses = is_q ? 4 : 2;
9663         }
9664 
9665         for (pass = 0; pass < maxpasses; pass++) {
9666             if (is_scalar) {
9667                 read_vec_element_i32(s, tcg_rn, rn, pass, size);
9668                 read_vec_element_i32(s, tcg_rd, rd, pass, size);
9669             } else {
9670                 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
9671                 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9672             }
9673 
9674             if (is_u) { /* USQADD */
9675                 switch (size) {
9676                 case 0:
9677                     gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9678                     break;
9679                 case 1:
9680                     gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9681                     break;
9682                 case 2:
9683                     gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9684                     break;
9685                 default:
9686                     g_assert_not_reached();
9687                 }
9688             } else { /* SUQADD */
9689                 switch (size) {
9690                 case 0:
9691                     gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9692                     break;
9693                 case 1:
9694                     gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9695                     break;
9696                 case 2:
9697                     gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
9698                     break;
9699                 default:
9700                     g_assert_not_reached();
9701                 }
9702             }
9703 
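            /*
             * For the scalar form, zero the whole low 64 bits of Vd first
             * so that the 32-bit store below leaves the rest of the
             * register clear.
             */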
9704             if (is_scalar) {
9705                 write_vec_element(s, tcg_constant_i64(0), rd, 0, MO_64);
9706             }
9707             write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
9708         }
9709         clear_vec_high(s, is_q, rd);
9710     }
9711 }
9712 
9713 /* AdvSIMD scalar two reg misc
9714  *  31 30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
9715  * +-----+---+-----------+------+-----------+--------+-----+------+------+
9716  * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
9717  * +-----+---+-----------+------+-----------+--------+-----+------+------+
9718  */
9719 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
9720 {
9721     int rd = extract32(insn, 0, 5);
9722     int rn = extract32(insn, 5, 5);
9723     int opcode = extract32(insn, 12, 5);
9724     int size = extract32(insn, 22, 2);
9725     bool u = extract32(insn, 29, 1);
9726     bool is_fcvt = false;
9727     int rmode;
9728     TCGv_i32 tcg_rmode;
9729     TCGv_ptr tcg_fpstatus;
9730 
9731     switch (opcode) {
9732     case 0x3: /* USQADD / SUQADD */
9733         if (!fp_access_check(s)) {
9734             return;
9735         }
9736         handle_2misc_satacc(s, true, u, false, size, rn, rd);
9737         return;
9738     case 0x7: /* SQABS / SQNEG */
9739         break;
9740     case 0xa: /* CMLT */
9741         if (u) {
9742             unallocated_encoding(s);
9743             return;
9744         }
9745         /* fall through */
9746     case 0x8: /* CMGT, CMGE */
9747     case 0x9: /* CMEQ, CMLE */
9748     case 0xb: /* ABS, NEG */
9749         if (size != 3) {
9750             unallocated_encoding(s);
9751             return;
9752         }
9753         break;
9754     case 0x12: /* SQXTUN */
9755         if (!u) {
9756             unallocated_encoding(s);
9757             return;
9758         }
9759         /* fall through */
9760     case 0x14: /* SQXTN, UQXTN */
9761         if (size == 3) {
9762             unallocated_encoding(s);
9763             return;
9764         }
9765         if (!fp_access_check(s)) {
9766             return;
9767         }
9768         handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
9769         return;
9770     case 0xc ... 0xf:
9771     case 0x16 ... 0x1d:
9772     case 0x1f:
9773         /* Floating point: U, size[1] and opcode indicate operation;
9774          * size[0] indicates single or double precision.
9775          */
9776         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
9777         size = extract32(size, 0, 1) ? 3 : 2;
9778         switch (opcode) {
9779         case 0x2c: /* FCMGT (zero) */
9780         case 0x2d: /* FCMEQ (zero) */
9781         case 0x2e: /* FCMLT (zero) */
9782         case 0x6c: /* FCMGE (zero) */
9783         case 0x6d: /* FCMLE (zero) */
9784             handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
9785             return;
9786         case 0x1d: /* SCVTF */
9787         case 0x5d: /* UCVTF */
9788         {
9789             bool is_signed = (opcode == 0x1d);
9790             if (!fp_access_check(s)) {
9791                 return;
9792             }
9793             handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
9794             return;
9795         }
9796         case 0x3d: /* FRECPE */
9797         case 0x3f: /* FRECPX */
9798         case 0x7d: /* FRSQRTE */
9799             if (!fp_access_check(s)) {
9800                 return;
9801             }
9802             handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
9803             return;
9804         case 0x1a: /* FCVTNS */
9805         case 0x1b: /* FCVTMS */
9806         case 0x3a: /* FCVTPS */
9807         case 0x3b: /* FCVTZS */
9808         case 0x5a: /* FCVTNU */
9809         case 0x5b: /* FCVTMU */
9810         case 0x7a: /* FCVTPU */
9811         case 0x7b: /* FCVTZU */
9812             is_fcvt = true;
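            /*
             * This maps FCVTN* to FPROUNDING_TIEEVEN, FCVTM* to
             * FPROUNDING_NEGINF, FCVTP* to FPROUNDING_POSINF and
             * FCVTZ* to FPROUNDING_ZERO.
             */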
9813             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
9814             break;
9815         case 0x1c: /* FCVTAS */
9816         case 0x5c: /* FCVTAU */
9817             /* TIEAWAY doesn't fit in the usual rounding mode encoding */
9818             is_fcvt = true;
9819             rmode = FPROUNDING_TIEAWAY;
9820             break;
9821         case 0x56: /* FCVTXN, FCVTXN2 */
9822             if (size == 2) {
9823                 unallocated_encoding(s);
9824                 return;
9825             }
9826             if (!fp_access_check(s)) {
9827                 return;
9828             }
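            /*
             * handle_2misc_narrow() takes the element size of the narrowed
             * result, hence size - 1 (always MO_32 here, since size must
             * be 3 at this point).
             */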
9829             handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
9830             return;
9831         default:
9832             unallocated_encoding(s);
9833             return;
9834         }
9835         break;
9836     default:
9837         unallocated_encoding(s);
9838         return;
9839     }
9840 
9841     if (!fp_access_check(s)) {
9842         return;
9843     }
9844 
9845     if (is_fcvt) {
9846         tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
9847         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
9848     } else {
9849         tcg_fpstatus = NULL;
9850         tcg_rmode = NULL;
9851     }
9852 
9853     if (size == 3) {
9854         TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9855         TCGv_i64 tcg_rd = tcg_temp_new_i64();
9856 
9857         handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
9858         write_fp_dreg(s, rd, tcg_rd);
9859     } else {
9860         TCGv_i32 tcg_rn = tcg_temp_new_i32();
9861         TCGv_i32 tcg_rd = tcg_temp_new_i32();
9862 
9863         read_vec_element_i32(s, tcg_rn, rn, 0, size);
9864 
9865         switch (opcode) {
9866         case 0x7: /* SQABS, SQNEG */
9867         {
9868             NeonGenOneOpEnvFn *genfn;
9869             static NeonGenOneOpEnvFn * const fns[3][2] = {
9870                 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
9871                 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
9872                 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
9873             };
9874             genfn = fns[size][u];
9875             genfn(tcg_rd, cpu_env, tcg_rn);
9876             break;
9877         }
9878         case 0x1a: /* FCVTNS */
9879         case 0x1b: /* FCVTMS */
9880         case 0x1c: /* FCVTAS */
9881         case 0x3a: /* FCVTPS */
9882         case 0x3b: /* FCVTZS */
9883             gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_constant_i32(0),
9884                                  tcg_fpstatus);
9885             break;
9886         case 0x5a: /* FCVTNU */
9887         case 0x5b: /* FCVTMU */
9888         case 0x5c: /* FCVTAU */
9889         case 0x7a: /* FCVTPU */
9890         case 0x7b: /* FCVTZU */
9891             gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_constant_i32(0),
9892                                  tcg_fpstatus);
9893             break;
9894         default:
9895             g_assert_not_reached();
9896         }
9897 
9898         write_fp_sreg(s, rd, tcg_rd);
9899     }
9900 
9901     if (is_fcvt) {
9902         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
9903     }
9904 }
9905 
9906 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
9907 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
9908                                  int immh, int immb, int opcode, int rn, int rd)
9909 {
9910     int size = 32 - clz32(immh) - 1;
9911     int immhb = immh << 3 | immb;
9912     int shift = 2 * (8 << size) - immhb;
9913     GVecGen2iFn *gvec_fn;
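    /*
     * immh:immb encodes both the element size and the shift amount:
     * 'size' is the index of the most significant set bit of immh, and
     * the shift count is (2 * esize) - immhb, i.e. 1..esize.  For example
     * immh = 0b0001, immb = 0b101 gives 8-bit elements shifted right by
     * 16 - 13 = 3.
     */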
9914 
9915     if (extract32(immh, 3, 1) && !is_q) {
9916         unallocated_encoding(s);
9917         return;
9918     }
9919     tcg_debug_assert(size <= 3);
9920 
9921     if (!fp_access_check(s)) {
9922         return;
9923     }
9924 
9925     switch (opcode) {
9926     case 0x02: /* SSRA / USRA (accumulate) */
9927         gvec_fn = is_u ? gen_gvec_usra : gen_gvec_ssra;
9928         break;
9929 
9930     case 0x08: /* SRI */
9931         gvec_fn = gen_gvec_sri;
9932         break;
9933 
9934     case 0x00: /* SSHR / USHR */
9935         if (is_u) {
9936             if (shift == 8 << size) {
9937                 /* A shift count equal to the element size produces zero.  */
9938                 tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
9939                                      is_q ? 16 : 8, vec_full_reg_size(s), 0);
9940                 return;
9941             }
9942             gvec_fn = tcg_gen_gvec_shri;
9943         } else {
9944             /* A shift count equal to the element size produces all sign bits.  */
9945             if (shift == 8 << size) {
9946                 shift -= 1;
9947             }
9948             gvec_fn = tcg_gen_gvec_sari;
9949         }
9950         break;
9951 
9952     case 0x04: /* SRSHR / URSHR (rounding) */
9953         gvec_fn = is_u ? gen_gvec_urshr : gen_gvec_srshr;
9954         break;
9955 
9956     case 0x06: /* SRSRA / URSRA (accum + rounding) */
9957         gvec_fn = is_u ? gen_gvec_ursra : gen_gvec_srsra;
9958         break;
9959 
9960     default:
9961         g_assert_not_reached();
9962     }
9963 
9964     gen_gvec_fn2i(s, is_q, rd, rn, shift, gvec_fn, size);
9965 }
9966 
9967 /* SHL/SLI - Vector shift left */
9968 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
9969                                  int immh, int immb, int opcode, int rn, int rd)
9970 {
9971     int size = 32 - clz32(immh) - 1;
9972     int immhb = immh << 3 | immb;
9973     int shift = immhb - (8 << size);
9974 
9975     /* Range of size is limited by decode: immh is a non-zero 4 bit field */
9976     assert(size >= 0 && size <= 3);
9977 
9978     if (extract32(immh, 3, 1) && !is_q) {
9979         unallocated_encoding(s);
9980         return;
9981     }
9982 
9983     if (!fp_access_check(s)) {
9984         return;
9985     }
9986 
9987     if (insert) {
9988         gen_gvec_fn2i(s, is_q, rd, rn, shift, gen_gvec_sli, size);
9989     } else {
9990         gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
9991     }
9992 }
9993 
9994 /* SSHLL/USHLL - Vector shift left with widening */
9995 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
9996                                  int immh, int immb, int opcode, int rn, int rd)
9997 {
9998     int size = 32 - clz32(immh) - 1;
9999     int immhb = immh << 3 | immb;
10000     int shift = immhb - (8 << size);
10001     int dsize = 64;
10002     int esize = 8 << size;
10003     int elements = dsize/esize;
10004     TCGv_i64 tcg_rn = tcg_temp_new_i64();
10005     TCGv_i64 tcg_rd = tcg_temp_new_i64();
10006     int i;
10007 
10008     if (size >= 3) {
10009         unallocated_encoding(s);
10010         return;
10011     }
10012 
10013     if (!fp_access_check(s)) {
10014         return;
10015     }
10016 
10017     /* For the LL variants the store is larger than the load,
10018      * so if rd == rn we would overwrite parts of our input before
10019      * consuming it; load everything up front and use shifts in the main loop.
10020      */
10021     read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
10022 
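    /*
     * Each iteration moves element i down to bit 0, sign- or zero-extends
     * it to 64 bits via ext_and_shift_reg() (extend type 4 | size selects
     * the signed variants), applies the left shift and stores the result
     * as a double-width (size + 1) element.
     */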
10023     for (i = 0; i < elements; i++) {
10024         tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
10025         ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
10026         tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
10027         write_vec_element(s, tcg_rd, rd, i, size + 1);
10028     }
10029 }
10030 
10031 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
10032 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
10033                                  int immh, int immb, int opcode, int rn, int rd)
10034 {
10035     int immhb = immh << 3 | immb;
10036     int size = 32 - clz32(immh) - 1;
10037     int dsize = 64;
10038     int esize = 8 << size;
10039     int elements = dsize/esize;
10040     int shift = (2 * esize) - immhb;
10041     bool round = extract32(opcode, 0, 1);
10042     TCGv_i64 tcg_rn, tcg_rd, tcg_final;
10043     TCGv_i64 tcg_round;
10044     int i;
10045 
10046     if (extract32(immh, 3, 1)) {
10047         unallocated_encoding(s);
10048         return;
10049     }
10050 
10051     if (!fp_access_check(s)) {
10052         return;
10053     }
10054 
10055     tcg_rn = tcg_temp_new_i64();
10056     tcg_rd = tcg_temp_new_i64();
10057     tcg_final = tcg_temp_new_i64();
10058     read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
10059 
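    /*
     * For the rounding variant, adding 1 << (shift - 1) before the right
     * shift in handle_shri_with_rndacc() is the rounding constant used by
     * RSHRN.
     */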
10060     if (round) {
10061         tcg_round = tcg_constant_i64(1ULL << (shift - 1));
10062     } else {
10063         tcg_round = NULL;
10064     }
10065 
10066     for (i = 0; i < elements; i++) {
10067         read_vec_element(s, tcg_rn, rn, i, size+1);
10068         handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
10069                                 false, true, size+1, shift);
10070 
10071         tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
10072     }
10073 
10074     if (!is_q) {
10075         write_vec_element(s, tcg_final, rd, 0, MO_64);
10076     } else {
10077         write_vec_element(s, tcg_final, rd, 1, MO_64);
10078     }
10079 
10080     clear_vec_high(s, is_q, rd);
10081 }
10082 
10083 
10084 /* AdvSIMD shift by immediate
10085  *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
10086  * +---+---+---+-------------+------+------+--------+---+------+------+
10087  * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
10088  * +---+---+---+-------------+------+------+--------+---+------+------+
10089  */
10090 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
10091 {
10092     int rd = extract32(insn, 0, 5);
10093     int rn = extract32(insn, 5, 5);
10094     int opcode = extract32(insn, 11, 5);
10095     int immb = extract32(insn, 16, 3);
10096     int immh = extract32(insn, 19, 4);
10097     bool is_u = extract32(insn, 29, 1);
10098     bool is_q = extract32(insn, 30, 1);
10099 
10100     /* data_proc_simd[] has sent immh == 0 to disas_simd_mod_imm. */
10101     assert(immh != 0);
10102 
10103     switch (opcode) {
10104     case 0x08: /* SRI */
10105         if (!is_u) {
10106             unallocated_encoding(s);
10107             return;
10108         }
10109         /* fall through */
10110     case 0x00: /* SSHR / USHR */
10111     case 0x02: /* SSRA / USRA (accumulate) */
10112     case 0x04: /* SRSHR / URSHR (rounding) */
10113     case 0x06: /* SRSRA / URSRA (accum + rounding) */
10114         handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
10115         break;
10116     case 0x0a: /* SHL / SLI */
10117         handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10118         break;
10119     case 0x10: /* SHRN */
10120     case 0x11: /* RSHRN / SQRSHRUN */
10121         if (is_u) {
10122             handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
10123                                    opcode, rn, rd);
10124         } else {
10125             handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
10126         }
10127         break;
10128     case 0x12: /* SQSHRN / UQSHRN */
10129     case 0x13: /* SQRSHRN / UQRSHRN */
10130         handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
10131                                opcode, rn, rd);
10132         break;
10133     case 0x14: /* SSHLL / USHLL */
10134         handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
10135         break;
10136     case 0x1c: /* SCVTF / UCVTF */
10137         handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
10138                                      opcode, rn, rd);
10139         break;
10140     case 0xc: /* SQSHLU */
10141         if (!is_u) {
10142             unallocated_encoding(s);
10143             return;
10144         }
10145         handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
10146         break;
10147     case 0xe: /* SQSHL, UQSHL */
10148         handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
10149         break;
10150     case 0x1f: /* FCVTZS/ FCVTZU */
10151         handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
10152         return;
10153     default:
10154         unallocated_encoding(s);
10155         return;
10156     }
10157 }
10158 
10159 /* Generate code to do a "long" addition or subtraction, i.e. one done in
10160  * TCGv_i64 on vector lanes twice the width specified by size.
10161  */
10162 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
10163                           TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
10164 {
10165     static NeonGenTwo64OpFn * const fns[3][2] = {
10166         { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
10167         { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
10168         { tcg_gen_add_i64, tcg_gen_sub_i64 },
10169     };
10170     NeonGenTwo64OpFn *genfn;
10171     assert(size < 3);
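    /*
     * 'size' is the narrow element size: the u16/u32 helpers operate on
     * 64-bit values holding packed 16-bit or 32-bit lanes respectively,
     * while size 2 is a single 64-bit lane handled with a plain add/sub.
     */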
10172 
10173     genfn = fns[size][is_sub];
10174     genfn(tcg_res, tcg_op1, tcg_op2);
10175 }
10176 
10177 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
10178                                 int opcode, int rd, int rn, int rm)
10179 {
10180     /* 3-reg-different widening insns: 64 x 64 -> 128 */
10181     TCGv_i64 tcg_res[2];
10182     int pass, accop;
10183 
10184     tcg_res[0] = tcg_temp_new_i64();
10185     tcg_res[1] = tcg_temp_new_i64();
10186 
10187     /* Does this op do an adding accumulate, a subtracting accumulate,
10188      * or no accumulate at all?
10189      */
10190     switch (opcode) {
10191     case 5:
10192     case 8:
10193     case 9:
10194         accop = 1;
10195         break;
10196     case 10:
10197     case 11:
10198         accop = -1;
10199         break;
10200     default:
10201         accop = 0;
10202         break;
10203     }
10204 
10205     if (accop != 0) {
10206         read_vec_element(s, tcg_res[0], rd, 0, MO_64);
10207         read_vec_element(s, tcg_res[1], rd, 1, MO_64);
10208     }
10209 
10210     /* size == 2 means two 32x32->64 operations; this is worth special
10211      * casing because we can generally handle it inline.
10212      */
10213     if (size == 2) {
10214         for (pass = 0; pass < 2; pass++) {
10215             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10216             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10217             TCGv_i64 tcg_passres;
10218             MemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
10219 
10220             int elt = pass + is_q * 2;
10221 
10222             read_vec_element(s, tcg_op1, rn, elt, memop);
10223             read_vec_element(s, tcg_op2, rm, elt, memop);
10224 
10225             if (accop == 0) {
10226                 tcg_passres = tcg_res[pass];
10227             } else {
10228                 tcg_passres = tcg_temp_new_i64();
10229             }
10230 
10231             switch (opcode) {
10232             case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10233                 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
10234                 break;
10235             case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10236                 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
10237                 break;
10238             case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10239             case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10240             {
10241                 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
10242                 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
10243 
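                /*
                 * Absolute difference: compute both op1 - op2 and
                 * op2 - op1, then use a conditional move on op1 >= op2
                 * (unsigned or signed compare as appropriate) to pick
                 * the non-negative one.
                 */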
10244                 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
10245                 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
10246                 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
10247                                     tcg_passres,
10248                                     tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
10249                 break;
10250             }
10251             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10252             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10253             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10254                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10255                 break;
10256             case 9: /* SQDMLAL, SQDMLAL2 */
10257             case 11: /* SQDMLSL, SQDMLSL2 */
10258             case 13: /* SQDMULL, SQDMULL2 */
10259                 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
10260                 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
10261                                                   tcg_passres, tcg_passres);
10262                 break;
10263             default:
10264                 g_assert_not_reached();
10265             }
10266 
10267             if (opcode == 9 || opcode == 11) {
10268                 /* saturating accumulate ops */
10269                 if (accop < 0) {
10270                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
10271                 }
10272                 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
10273                                                   tcg_res[pass], tcg_passres);
10274             } else if (accop > 0) {
10275                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10276             } else if (accop < 0) {
10277                 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
10278             }
10279         }
10280     } else {
10281         /* size 0 or 1, generally helper functions */
10282         for (pass = 0; pass < 2; pass++) {
10283             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10284             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10285             TCGv_i64 tcg_passres;
10286             int elt = pass + is_q * 2;
10287 
10288             read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
10289             read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
10290 
10291             if (accop == 0) {
10292                 tcg_passres = tcg_res[pass];
10293             } else {
10294                 tcg_passres = tcg_temp_new_i64();
10295             }
10296 
10297             switch (opcode) {
10298             case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10299             case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10300             {
10301                 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
10302                 static NeonGenWidenFn * const widenfns[2][2] = {
10303                     { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10304                     { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10305                 };
10306                 NeonGenWidenFn *widenfn = widenfns[size][is_u];
10307 
10308                 widenfn(tcg_op2_64, tcg_op2);
10309                 widenfn(tcg_passres, tcg_op1);
10310                 gen_neon_addl(size, (opcode == 2), tcg_passres,
10311                               tcg_passres, tcg_op2_64);
10312                 break;
10313             }
10314             case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10315             case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10316                 if (size == 0) {
10317                     if (is_u) {
10318                         gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
10319                     } else {
10320                         gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
10321                     }
10322                 } else {
10323                     if (is_u) {
10324                         gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
10325                     } else {
10326                         gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
10327                     }
10328                 }
10329                 break;
10330             case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10331             case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10332             case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
10333                 if (size == 0) {
10334                     if (is_u) {
10335                         gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
10336                     } else {
10337                         gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
10338                     }
10339                 } else {
10340                     if (is_u) {
10341                         gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
10342                     } else {
10343                         gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10344                     }
10345                 }
10346                 break;
10347             case 9: /* SQDMLAL, SQDMLAL2 */
10348             case 11: /* SQDMLSL, SQDMLSL2 */
10349             case 13: /* SQDMULL, SQDMULL2 */
10350                 assert(size == 1);
10351                 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
10352                 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
10353                                                   tcg_passres, tcg_passres);
10354                 break;
10355             default:
10356                 g_assert_not_reached();
10357             }
10358 
10359             if (accop != 0) {
10360                 if (opcode == 9 || opcode == 11) {
10361                     /* saturating accumulate ops */
10362                     if (accop < 0) {
10363                         gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
10364                     }
10365                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
10366                                                       tcg_res[pass],
10367                                                       tcg_passres);
10368                 } else {
10369                     gen_neon_addl(size, (accop < 0), tcg_res[pass],
10370                                   tcg_res[pass], tcg_passres);
10371                 }
10372             }
10373         }
10374     }
10375 
10376     write_vec_element(s, tcg_res[0], rd, 0, MO_64);
10377     write_vec_element(s, tcg_res[1], rd, 1, MO_64);
10378 }
10379 
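/* 3-reg-different "wide" insns (SADDW/UADDW, SSUBW/USUBW): each 64-bit
 * element of the result is the already-wide Vn element plus or minus the
 * widened element from Vm.
 */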
10380 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
10381                             int opcode, int rd, int rn, int rm)
10382 {
10383     TCGv_i64 tcg_res[2];
10384     int part = is_q ? 2 : 0;
10385     int pass;
10386 
10387     for (pass = 0; pass < 2; pass++) {
10388         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10389         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10390         TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
10391         static NeonGenWidenFn * const widenfns[3][2] = {
10392             { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
10393             { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
10394             { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
10395         };
10396         NeonGenWidenFn *widenfn = widenfns[size][is_u];
10397 
10398         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10399         read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
10400         widenfn(tcg_op2_wide, tcg_op2);
10401         tcg_res[pass] = tcg_temp_new_i64();
10402         gen_neon_addl(size, (opcode == 3),
10403                       tcg_res[pass], tcg_op1, tcg_op2_wide);
10404     }
10405 
10406     for (pass = 0; pass < 2; pass++) {
10407         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10408     }
10409 }
10410 
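/* Rounding narrow of a 64-bit value to its high 32 bits: add the rounding
 * constant (1 << 31) and return bits [63:32].
 */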
10411 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
10412 {
10413     tcg_gen_addi_i64(in, in, 1U << 31);
10414     tcg_gen_extrh_i64_i32(res, in);
10415 }
10416 
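/* 3-reg-different narrowing insns (ADDHN/SUBHN and their rounding
 * variants): 128 op 128 -> 64, keeping the high half of each double-width
 * result.
 */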
10417 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
10418                                  int opcode, int rd, int rn, int rm)
10419 {
10420     TCGv_i32 tcg_res[2];
10421     int part = is_q ? 2 : 0;
10422     int pass;
10423 
10424     for (pass = 0; pass < 2; pass++) {
10425         TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10426         TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10427         TCGv_i64 tcg_wideres = tcg_temp_new_i64();
10428         static NeonGenNarrowFn * const narrowfns[3][2] = {
10429             { gen_helper_neon_narrow_high_u8,
10430               gen_helper_neon_narrow_round_high_u8 },
10431             { gen_helper_neon_narrow_high_u16,
10432               gen_helper_neon_narrow_round_high_u16 },
10433             { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
10434         };
10435         NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
10436 
10437         read_vec_element(s, tcg_op1, rn, pass, MO_64);
10438         read_vec_element(s, tcg_op2, rm, pass, MO_64);
10439 
10440         gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
10441 
10442         tcg_res[pass] = tcg_temp_new_i32();
10443         gennarrow(tcg_res[pass], tcg_wideres);
10444     }
10445 
10446     for (pass = 0; pass < 2; pass++) {
10447         write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
10448     }
10449     clear_vec_high(s, is_q, rd);
10450 }
10451 
10452 /* AdvSIMD three different
10453  *   31  30  29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
10454  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10455  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
10456  * +---+---+---+-----------+------+---+------+--------+-----+------+------+
10457  */
10458 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
10459 {
10460     /* Instructions in this group fall into three basic classes
10461      * (in each case with the operation working on each element in
10462      * the input vectors):
10463      * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
10464      *     128 bit input)
10465      * (2) wide 64 x 128 -> 128
10466      * (3) narrowing 128 x 128 -> 64
10467      * Here we do initial decode, catch unallocated cases and
10468      * dispatch to separate functions for each class.
10469      */
10470     int is_q = extract32(insn, 30, 1);
10471     int is_u = extract32(insn, 29, 1);
10472     int size = extract32(insn, 22, 2);
10473     int opcode = extract32(insn, 12, 4);
10474     int rm = extract32(insn, 16, 5);
10475     int rn = extract32(insn, 5, 5);
10476     int rd = extract32(insn, 0, 5);
10477 
10478     switch (opcode) {
10479     case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10480     case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10481         /* 64 x 128 -> 128 */
10482         if (size == 3) {
10483             unallocated_encoding(s);
10484             return;
10485         }
10486         if (!fp_access_check(s)) {
10487             return;
10488         }
10489         handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10490         break;
10491     case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10492     case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10493         /* 128 x 128 -> 64 */
10494         if (size == 3) {
10495             unallocated_encoding(s);
10496             return;
10497         }
10498         if (!fp_access_check(s)) {
10499             return;
10500         }
10501         handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10502         break;
10503     case 14: /* PMULL, PMULL2 */
10504         if (is_u) {
10505             unallocated_encoding(s);
10506             return;
10507         }
10508         switch (size) {
10509         case 0: /* PMULL.P8 */
10510             if (!fp_access_check(s)) {
10511                 return;
10512             }
10513             /* The Q field specifies lo/hi half input for this insn.  */
10514             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10515                              gen_helper_neon_pmull_h);
10516             break;
10517 
10518         case 3: /* PMULL.P64 */
10519             if (!dc_isar_feature(aa64_pmull, s)) {
10520                 unallocated_encoding(s);
10521                 return;
10522             }
10523             if (!fp_access_check(s)) {
10524                 return;
10525             }
10526             /* The Q field specifies lo/hi half input for this insn.  */
10527             gen_gvec_op3_ool(s, true, rd, rn, rm, is_q,
10528                              gen_helper_gvec_pmull_q);
10529             break;
10530 
10531         default:
10532             unallocated_encoding(s);
10533             break;
10534         }
10535         return;
10536     case 9: /* SQDMLAL, SQDMLAL2 */
10537     case 11: /* SQDMLSL, SQDMLSL2 */
10538     case 13: /* SQDMULL, SQDMULL2 */
10539         if (is_u || size == 0) {
10540             unallocated_encoding(s);
10541             return;
10542         }
10543         /* fall through */
10544     case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10545     case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10546     case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10547     case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10548     case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10549     case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10550     case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10551         /* 64 x 64 -> 128 */
10552         if (size == 3) {
10553             unallocated_encoding(s);
10554             return;
10555         }
10556         if (!fp_access_check(s)) {
10557             return;
10558         }
10559 
10560         handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10561         break;
10562     default:
10563         /* opcode 15 not allocated */
10564         unallocated_encoding(s);
10565         break;
10566     }
10567 }
10568 
10569 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10570 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10571 {
10572     int rd = extract32(insn, 0, 5);
10573     int rn = extract32(insn, 5, 5);
10574     int rm = extract32(insn, 16, 5);
10575     int size = extract32(insn, 22, 2);
10576     bool is_u = extract32(insn, 29, 1);
10577     bool is_q = extract32(insn, 30, 1);
10578 
10579     if (!fp_access_check(s)) {
10580         return;
10581     }
10582 
10583     switch (size + 4 * is_u) {
10584     case 0: /* AND */
10585         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10586         return;
10587     case 1: /* BIC */
10588         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10589         return;
10590     case 2: /* ORR */
10591         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10592         return;
10593     case 3: /* ORN */
10594         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10595         return;
10596     case 4: /* EOR */
10597         gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10598         return;
10599 
10600     case 5: /* BSL bitwise select */
10601         gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
10602         return;
10603     case 6: /* BIT, bitwise insert if true */
10604         gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
10605         return;
10606     case 7: /* BIF, bitwise insert if false */
10607         gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
10608         return;
10609 
10610     default:
10611         g_assert_not_reached();
10612     }
10613 }
10614 
10615 /* Pairwise op subgroup of C3.6.16.
10616  *
10617  * This is called directly or via disas_simd_3same_float for float pairwise
10618  * operations where the opcode and size are calculated differently.
10619  */
10620 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10621                                    int size, int rn, int rm, int rd)
10622 {
10623     TCGv_ptr fpst;
10624     int pass;
10625 
10626     /* Floating point operations need fpst */
10627     if (opcode >= 0x58) {
10628         fpst = fpstatus_ptr(FPST_FPCR);
10629     } else {
10630         fpst = NULL;
10631     }
10632 
10633     if (!fp_access_check(s)) {
10634         return;
10635     }
10636 
10637     /* These operations work on the concatenated rm:rn, with each pair of
10638      * adjacent elements being operated on to produce an element in the result.
10639      */
10640     if (size == 3) {
10641         TCGv_i64 tcg_res[2];
10642 
10643         for (pass = 0; pass < 2; pass++) {
10644             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10645             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10646             int passreg = (pass == 0) ? rn : rm;
10647 
10648             read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10649             read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10650             tcg_res[pass] = tcg_temp_new_i64();
10651 
10652             switch (opcode) {
10653             case 0x17: /* ADDP */
10654                 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10655                 break;
10656             case 0x58: /* FMAXNMP */
10657                 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10658                 break;
10659             case 0x5a: /* FADDP */
10660                 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10661                 break;
10662             case 0x5e: /* FMAXP */
10663                 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10664                 break;
10665             case 0x78: /* FMINNMP */
10666                 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10667                 break;
10668             case 0x7e: /* FMINP */
10669                 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10670                 break;
10671             default:
10672                 g_assert_not_reached();
10673             }
10674         }
10675 
10676         for (pass = 0; pass < 2; pass++) {
10677             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10678         }
10679     } else {
10680         int maxpass = is_q ? 4 : 2;
10681         TCGv_i32 tcg_res[4];
10682 
10683         for (pass = 0; pass < maxpass; pass++) {
10684             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10685             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10686             NeonGenTwoOpFn *genfn = NULL;
10687             int passreg = pass < (maxpass / 2) ? rn : rm;
10688             int passelt = (is_q && (pass & 1)) ? 2 : 0;
10689 
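            /*
             * The first maxpass / 2 passes read adjacent pairs from Vn and
             * the rest from Vm; for the 128-bit form the second pass of
             * each half starts at element 2.
             */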
10690             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10691             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10692             tcg_res[pass] = tcg_temp_new_i32();
10693 
10694             switch (opcode) {
10695             case 0x17: /* ADDP */
10696             {
10697                 static NeonGenTwoOpFn * const fns[3] = {
10698                     gen_helper_neon_padd_u8,
10699                     gen_helper_neon_padd_u16,
10700                     tcg_gen_add_i32,
10701                 };
10702                 genfn = fns[size];
10703                 break;
10704             }
10705             case 0x14: /* SMAXP, UMAXP */
10706             {
10707                 static NeonGenTwoOpFn * const fns[3][2] = {
10708                     { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10709                     { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10710                     { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10711                 };
10712                 genfn = fns[size][u];
10713                 break;
10714             }
10715             case 0x15: /* SMINP, UMINP */
10716             {
10717                 static NeonGenTwoOpFn * const fns[3][2] = {
10718                     { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10719                     { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10720                     { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10721                 };
10722                 genfn = fns[size][u];
10723                 break;
10724             }
10725             /* The FP operations are all on single floats (32 bit) */
10726             case 0x58: /* FMAXNMP */
10727                 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10728                 break;
10729             case 0x5a: /* FADDP */
10730                 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10731                 break;
10732             case 0x5e: /* FMAXP */
10733                 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10734                 break;
10735             case 0x78: /* FMINNMP */
10736                 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10737                 break;
10738             case 0x7e: /* FMINP */
10739                 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10740                 break;
10741             default:
10742                 g_assert_not_reached();
10743             }
10744 
10745             /* The FP ops were emitted inline above; integer ops call genfn here */
10746             if (genfn) {
10747                 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10748             }
10749         }
10750 
10751         for (pass = 0; pass < maxpass; pass++) {
10752             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10753         }
10754         clear_vec_high(s, is_q, rd);
10755     }
10756 }
10757 
10758 /* Floating point op subgroup of C3.6.16. */
10759 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
10760 {
10761     /* For floating point ops, the U, size[1] and opcode bits
10762      * together indicate the operation. size[0] indicates single
10763      * or double.
10764      */
10765     int fpopcode = extract32(insn, 11, 5)
10766         | (extract32(insn, 23, 1) << 5)
10767         | (extract32(insn, 29, 1) << 6);
10768     int is_q = extract32(insn, 30, 1);
10769     int size = extract32(insn, 22, 1);
10770     int rm = extract32(insn, 16, 5);
10771     int rn = extract32(insn, 5, 5);
10772     int rd = extract32(insn, 0, 5);
10773 
10774     int datasize = is_q ? 128 : 64;
10775     int esize = 32 << size;
10776     int elements = datasize / esize;
10777 
10778     if (size == 1 && !is_q) {
10779         unallocated_encoding(s);
10780         return;
10781     }
10782 
10783     switch (fpopcode) {
10784     case 0x58: /* FMAXNMP */
10785     case 0x5a: /* FADDP */
10786     case 0x5e: /* FMAXP */
10787     case 0x78: /* FMINNMP */
10788     case 0x7e: /* FMINP */
10789         if (size && !is_q) {
10790             unallocated_encoding(s);
10791             return;
10792         }
10793         handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
10794                                rn, rm, rd);
10795         return;
10796     case 0x1b: /* FMULX */
10797     case 0x1f: /* FRECPS */
10798     case 0x3f: /* FRSQRTS */
10799     case 0x5d: /* FACGE */
10800     case 0x7d: /* FACGT */
10801     case 0x19: /* FMLA */
10802     case 0x39: /* FMLS */
10803     case 0x18: /* FMAXNM */
10804     case 0x1a: /* FADD */
10805     case 0x1c: /* FCMEQ */
10806     case 0x1e: /* FMAX */
10807     case 0x38: /* FMINNM */
10808     case 0x3a: /* FSUB */
10809     case 0x3e: /* FMIN */
10810     case 0x5b: /* FMUL */
10811     case 0x5c: /* FCMGE */
10812     case 0x5f: /* FDIV */
10813     case 0x7a: /* FABD */
10814     case 0x7c: /* FCMGT */
10815         if (!fp_access_check(s)) {
10816             return;
10817         }
10818         handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
10819         return;
10820 
10821     case 0x1d: /* FMLAL  */
10822     case 0x3d: /* FMLSL  */
10823     case 0x59: /* FMLAL2 */
10824     case 0x79: /* FMLSL2 */
10825         if (size & 1 || !dc_isar_feature(aa64_fhm, s)) {
10826             unallocated_encoding(s);
10827             return;
10828         }
10829         if (fp_access_check(s)) {
10830             int is_s = extract32(insn, 23, 1);
10831             int is_2 = extract32(insn, 29, 1);
10832             int data = (is_2 << 1) | is_s;
10833             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
10834                                vec_full_reg_offset(s, rn),
10835                                vec_full_reg_offset(s, rm), cpu_env,
10836                                is_q ? 16 : 8, vec_full_reg_size(s),
10837                                data, gen_helper_gvec_fmlal_a64);
10838         }
10839         return;
10840 
10841     default:
10842         unallocated_encoding(s);
10843         return;
10844     }
10845 }
10846 
10847 /* Integer op subgroup of C3.6.16. */
10848 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
10849 {
10850     int is_q = extract32(insn, 30, 1);
10851     int u = extract32(insn, 29, 1);
10852     int size = extract32(insn, 22, 2);
10853     int opcode = extract32(insn, 11, 5);
10854     int rm = extract32(insn, 16, 5);
10855     int rn = extract32(insn, 5, 5);
10856     int rd = extract32(insn, 0, 5);
10857     int pass;
10858     TCGCond cond;
10859 
10860     switch (opcode) {
10861     case 0x13: /* MUL, PMUL */
10862         if (u && size != 0) {
10863             unallocated_encoding(s);
10864             return;
10865         }
10866         /* fall through */
10867     case 0x0: /* SHADD, UHADD */
10868     case 0x2: /* SRHADD, URHADD */
10869     case 0x4: /* SHSUB, UHSUB */
10870     case 0xc: /* SMAX, UMAX */
10871     case 0xd: /* SMIN, UMIN */
10872     case 0xe: /* SABD, UABD */
10873     case 0xf: /* SABA, UABA */
10874     case 0x12: /* MLA, MLS */
10875         if (size == 3) {
10876             unallocated_encoding(s);
10877             return;
10878         }
10879         break;
10880     case 0x16: /* SQDMULH, SQRDMULH */
10881         if (size == 0 || size == 3) {
10882             unallocated_encoding(s);
10883             return;
10884         }
10885         break;
10886     default:
10887         if (size == 3 && !is_q) {
10888             unallocated_encoding(s);
10889             return;
10890         }
10891         break;
10892     }
10893 
10894     if (!fp_access_check(s)) {
10895         return;
10896     }
10897 
10898     switch (opcode) {
10899     case 0x01: /* SQADD, UQADD */
10900         if (u) {
10901             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
10902         } else {
10903             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
10904         }
10905         return;
10906     case 0x05: /* SQSUB, UQSUB */
10907         if (u) {
10908             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
10909         } else {
10910             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
10911         }
10912         return;
10913     case 0x08: /* SSHL, USHL */
10914         if (u) {
10915             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_ushl, size);
10916         } else {
10917             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sshl, size);
10918         }
10919         return;
10920     case 0x0c: /* SMAX, UMAX */
10921         if (u) {
10922             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size);
10923         } else {
10924             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smax, size);
10925         }
10926         return;
10927     case 0x0d: /* SMIN, UMIN */
10928         if (u) {
10929             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umin, size);
10930         } else {
10931             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_smin, size);
10932         }
10933         return;
10934     case 0xe: /* SABD, UABD */
10935         if (u) {
10936             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uabd, size);
10937         } else {
10938             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sabd, size);
10939         }
10940         return;
10941     case 0xf: /* SABA, UABA */
10942         if (u) {
10943             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uaba, size);
10944         } else {
10945             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_saba, size);
10946         }
10947         return;
10948     case 0x10: /* ADD, SUB */
10949         if (u) {
10950             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
10951         } else {
10952             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
10953         }
10954         return;
10955     case 0x13: /* MUL, PMUL */
10956         if (!u) { /* MUL */
10957             gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
10958         } else {  /* PMUL */
10959             gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b);
10960         }
10961         return;
10962     case 0x12: /* MLA, MLS */
10963         if (u) {
10964             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mls, size);
10965         } else {
10966             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_mla, size);
10967         }
10968         return;
10969     case 0x16: /* SQDMULH, SQRDMULH */
10970         {
10971             static gen_helper_gvec_3_ptr * const fns[2][2] = {
10972                 { gen_helper_neon_sqdmulh_h, gen_helper_neon_sqrdmulh_h },
10973                 { gen_helper_neon_sqdmulh_s, gen_helper_neon_sqrdmulh_s },
10974             };
10975             gen_gvec_op3_qc(s, is_q, rd, rn, rm, fns[size - 1][u]);
10976         }
10977         return;
10978     case 0x11:
10979         if (!u) { /* CMTST */
10980             gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_cmtst, size);
10981             return;
10982         }
10983         /* else CMEQ */
10984         cond = TCG_COND_EQ;
10985         goto do_gvec_cmp;
10986     case 0x06: /* CMGT, CMHI */
10987         cond = u ? TCG_COND_GTU : TCG_COND_GT;
10988         goto do_gvec_cmp;
10989     case 0x07: /* CMGE, CMHS */
10990         cond = u ? TCG_COND_GEU : TCG_COND_GE;
10991     do_gvec_cmp:
10992         tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
10993                          vec_full_reg_offset(s, rn),
10994                          vec_full_reg_offset(s, rm),
10995                          is_q ? 16 : 8, vec_full_reg_size(s));
10996         return;
10997     }
10998 
10999     if (size == 3) {
11000         assert(is_q);
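        /*
         * size == 3 with !is_q was rejected as unallocated above, so only
         * the 2 x 64-bit form reaches this point; it is handled per pass
         * by handle_3same_64().
         */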
11001         for (pass = 0; pass < 2; pass++) {
11002             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11003             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11004             TCGv_i64 tcg_res = tcg_temp_new_i64();
11005 
11006             read_vec_element(s, tcg_op1, rn, pass, MO_64);
11007             read_vec_element(s, tcg_op2, rm, pass, MO_64);
11008 
11009             handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
11010 
11011             write_vec_element(s, tcg_res, rd, pass, MO_64);
11012         }
11013     } else {
11014         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11015             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11016             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11017             TCGv_i32 tcg_res = tcg_temp_new_i32();
11018             NeonGenTwoOpFn *genfn = NULL;
11019             NeonGenTwoOpEnvFn *genenvfn = NULL;
11020 
11021             read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
11022             read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
11023 
11024             switch (opcode) {
11025             case 0x0: /* SHADD, UHADD */
11026             {
11027                 static NeonGenTwoOpFn * const fns[3][2] = {
11028                     { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
11029                     { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
11030                     { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
11031                 };
11032                 genfn = fns[size][u];
11033                 break;
11034             }
11035             case 0x2: /* SRHADD, URHADD */
11036             {
11037                 static NeonGenTwoOpFn * const fns[3][2] = {
11038                     { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
11039                     { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
11040                     { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
11041                 };
11042                 genfn = fns[size][u];
11043                 break;
11044             }
11045             case 0x4: /* SHSUB, UHSUB */
11046             {
11047                 static NeonGenTwoOpFn * const fns[3][2] = {
11048                     { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
11049                     { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
11050                     { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
11051                 };
11052                 genfn = fns[size][u];
11053                 break;
11054             }
11055             case 0x9: /* SQSHL, UQSHL */
11056             {
11057                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11058                     { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
11059                     { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
11060                     { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
11061                 };
11062                 genenvfn = fns[size][u];
11063                 break;
11064             }
11065             case 0xa: /* SRSHL, URSHL */
11066             {
11067                 static NeonGenTwoOpFn * const fns[3][2] = {
11068                     { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
11069                     { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
11070                     { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
11071                 };
11072                 genfn = fns[size][u];
11073                 break;
11074             }
11075             case 0xb: /* SQRSHL, UQRSHL */
11076             {
11077                 static NeonGenTwoOpEnvFn * const fns[3][2] = {
11078                     { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
11079                     { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
11080                     { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
11081                 };
11082                 genenvfn = fns[size][u];
11083                 break;
11084             }
11085             default:
11086                 g_assert_not_reached();
11087             }
11088 
11089             if (genenvfn) {
11090                 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
11091             } else {
11092                 genfn(tcg_res, tcg_op1, tcg_op2);
11093             }
11094 
11095             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11096         }
11097     }
11098     clear_vec_high(s, is_q, rd);
11099 }
11100 
11101 /* AdvSIMD three same
11102  *  31  30  29  28       24 23  22  21 20  16 15    11  10 9    5 4    0
11103  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11104  * | 0 | Q | U | 0 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
11105  * +---+---+---+-----------+------+---+------+--------+---+------+------+
11106  */
11107 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
11108 {
11109     int opcode = extract32(insn, 11, 5);
11110 
11111     switch (opcode) {
11112     case 0x3: /* logic ops */
11113         disas_simd_3same_logic(s, insn);
11114         break;
11115     case 0x17: /* ADDP */
11116     case 0x14: /* SMAXP, UMAXP */
11117     case 0x15: /* SMINP, UMINP */
11118     {
11119         /* Pairwise operations */
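              /*
               * These operate on the concatenation Rn:Rm, combining each
               * pair of adjacent elements into one result element of the
               * same width.
               */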
11120         int is_q = extract32(insn, 30, 1);
11121         int u = extract32(insn, 29, 1);
11122         int size = extract32(insn, 22, 2);
11123         int rm = extract32(insn, 16, 5);
11124         int rn = extract32(insn, 5, 5);
11125         int rd = extract32(insn, 0, 5);
11126         if (opcode == 0x17) {
11127             if (u || (size == 3 && !is_q)) {
11128                 unallocated_encoding(s);
11129                 return;
11130             }
11131         } else {
11132             if (size == 3) {
11133                 unallocated_encoding(s);
11134                 return;
11135             }
11136         }
11137         handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
11138         break;
11139     }
11140     case 0x18 ... 0x31:
11141         /* floating point ops, sz[1] and U are part of opcode */
11142         disas_simd_3same_float(s, insn);
11143         break;
11144     default:
11145         disas_simd_3same_int(s, insn);
11146         break;
11147     }
11148 }
11149 
11150 /*
11151  * Advanced SIMD three same (ARMv8.2 FP16 variants)
11152  *
11153  *  31  30  29  28       24 23  22 21 20  16 15 14 13    11 10  9    5 4    0
11154  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11155  * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 |  Rn  |  Rd  |
11156  * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
11157  *
11158  * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
11159  * (register), FACGE, FABD, FCMGT (register) and FACGT.
11160  *
11161  */
11162 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
11163 {
11164     int opcode = extract32(insn, 11, 3);
11165     int u = extract32(insn, 29, 1);
11166     int a = extract32(insn, 23, 1);
11167     int is_q = extract32(insn, 30, 1);
11168     int rm = extract32(insn, 16, 5);
11169     int rn = extract32(insn, 5, 5);
11170     int rd = extract32(insn, 0, 5);
11171     /*
11172      * For these floating point ops, the U, a and opcode bits
11173      * together indicate the operation.
11174      */
11175     int fpopcode = opcode | (a << 3) | (u << 4);
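          /* e.g. FACGT is u=1, a=1, opcode=0b101, giving fpopcode 0x1d */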
11176     int datasize = is_q ? 128 : 64;
11177     int elements = datasize / 16;
11178     bool pairwise;
11179     TCGv_ptr fpst;
11180     int pass;
11181 
11182     switch (fpopcode) {
11183     case 0x0: /* FMAXNM */
11184     case 0x1: /* FMLA */
11185     case 0x2: /* FADD */
11186     case 0x3: /* FMULX */
11187     case 0x4: /* FCMEQ */
11188     case 0x6: /* FMAX */
11189     case 0x7: /* FRECPS */
11190     case 0x8: /* FMINNM */
11191     case 0x9: /* FMLS */
11192     case 0xa: /* FSUB */
11193     case 0xe: /* FMIN */
11194     case 0xf: /* FRSQRTS */
11195     case 0x13: /* FMUL */
11196     case 0x14: /* FCMGE */
11197     case 0x15: /* FACGE */
11198     case 0x17: /* FDIV */
11199     case 0x1a: /* FABD */
11200     case 0x1c: /* FCMGT */
11201     case 0x1d: /* FACGT */
11202         pairwise = false;
11203         break;
11204     case 0x10: /* FMAXNMP */
11205     case 0x12: /* FADDP */
11206     case 0x16: /* FMAXP */
11207     case 0x18: /* FMINNMP */
11208     case 0x1e: /* FMINP */
11209         pairwise = true;
11210         break;
11211     default:
11212         unallocated_encoding(s);
11213         return;
11214     }
11215 
11216     if (!dc_isar_feature(aa64_fp16, s)) {
11217         unallocated_encoding(s);
11218         return;
11219     }
11220 
11221     if (!fp_access_check(s)) {
11222         return;
11223     }
11224 
11225     fpst = fpstatus_ptr(FPST_FPCR_F16);
11226 
11227     if (pairwise) {
11228         int maxpass = is_q ? 8 : 4;
11229         TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11230         TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11231         TCGv_i32 tcg_res[8];
11232 
11233         for (pass = 0; pass < maxpass; pass++) {
11234             int passreg = pass < (maxpass / 2) ? rn : rm;
11235             int passelt = (pass << 1) & (maxpass - 1);
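                  /*
                   * The first half of the passes reads adjacent element
                   * pairs from Rn, the second half from Rm; the mask wraps
                   * passelt back to 0 when the source register changes.
                   */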
11236 
11237             read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
11238             read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
11239             tcg_res[pass] = tcg_temp_new_i32();
11240 
11241             switch (fpopcode) {
11242             case 0x10: /* FMAXNMP */
11243                 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
11244                                            fpst);
11245                 break;
11246             case 0x12: /* FADDP */
11247                 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11248                 break;
11249             case 0x16: /* FMAXP */
11250                 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11251                 break;
11252             case 0x18: /* FMINNMP */
11253                 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
11254                                            fpst);
11255                 break;
11256             case 0x1e: /* FMINP */
11257                 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
11258                 break;
11259             default:
11260                 g_assert_not_reached();
11261             }
11262         }
11263 
11264         for (pass = 0; pass < maxpass; pass++) {
11265             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
11266         }
11267     } else {
11268         for (pass = 0; pass < elements; pass++) {
11269             TCGv_i32 tcg_op1 = tcg_temp_new_i32();
11270             TCGv_i32 tcg_op2 = tcg_temp_new_i32();
11271             TCGv_i32 tcg_res = tcg_temp_new_i32();
11272 
11273             read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
11274             read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
11275 
11276             switch (fpopcode) {
11277             case 0x0: /* FMAXNM */
11278                 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11279                 break;
11280             case 0x1: /* FMLA */
11281                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11282                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11283                                            fpst);
11284                 break;
11285             case 0x2: /* FADD */
11286                 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11287                 break;
11288             case 0x3: /* FMULX */
11289                 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11290                 break;
11291             case 0x4: /* FCMEQ */
11292                 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11293                 break;
11294             case 0x6: /* FMAX */
11295                 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11296                 break;
11297             case 0x7: /* FRECPS */
11298                 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11299                 break;
11300             case 0x8: /* FMINNM */
11301                 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11302                 break;
11303             case 0x9: /* FMLS */
11304                 /* As usual for ARM, separate negation for fused multiply-add */
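                      /* Flipping bit 15 negates the fp16 operand op1 */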
11305                 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11306                 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11307                 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11308                                            fpst);
11309                 break;
11310             case 0xa: /* FSUB */
11311                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11312                 break;
11313             case 0xe: /* FMIN */
11314                 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11315                 break;
11316             case 0xf: /* FRSQRTS */
11317                 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11318                 break;
11319             case 0x13: /* FMUL */
11320                 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11321                 break;
11322             case 0x14: /* FCMGE */
11323                 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11324                 break;
11325             case 0x15: /* FACGE */
11326                 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11327                 break;
11328             case 0x17: /* FDIV */
11329                 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11330                 break;
11331             case 0x1a: /* FABD */
11332                 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11333                 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11334                 break;
11335             case 0x1c: /* FCMGT */
11336                 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11337                 break;
11338             case 0x1d: /* FACGT */
11339                 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11340                 break;
11341             default:
11342                 g_assert_not_reached();
11343             }
11344 
11345             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11346         }
11347     }
11348 
11349     clear_vec_high(s, is_q, rd);
11350 }
11351 
11352 /* AdvSIMD three same extra
11353  *  31   30  29 28       24 23  22  21 20  16  15 14    11  10 9  5 4  0
11354  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11355  * | 0 | Q | U | 0 1 1 1 0 | size | 0 |  Rm  | 1 | opcode | 1 | Rn | Rd |
11356  * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11357  */
11358 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11359 {
11360     int rd = extract32(insn, 0, 5);
11361     int rn = extract32(insn, 5, 5);
11362     int opcode = extract32(insn, 11, 4);
11363     int rm = extract32(insn, 16, 5);
11364     int size = extract32(insn, 22, 2);
11365     bool u = extract32(insn, 29, 1);
11366     bool is_q = extract32(insn, 30, 1);
11367     bool feature;
11368     int rot;
11369 
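          /*
           * The switch key places the U bit above the 4 opcode bits,
           * e.g. 0x12 is U=1, opcode=0b0010 (UDOT).
           */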
11370     switch (u * 16 + opcode) {
11371     case 0x10: /* SQRDMLAH (vector) */
11372     case 0x11: /* SQRDMLSH (vector) */
11373         if (size != 1 && size != 2) {
11374             unallocated_encoding(s);
11375             return;
11376         }
11377         feature = dc_isar_feature(aa64_rdm, s);
11378         break;
11379     case 0x02: /* SDOT (vector) */
11380     case 0x12: /* UDOT (vector) */
11381         if (size != MO_32) {
11382             unallocated_encoding(s);
11383             return;
11384         }
11385         feature = dc_isar_feature(aa64_dp, s);
11386         break;
11387     case 0x03: /* USDOT */
11388         if (size != MO_32) {
11389             unallocated_encoding(s);
11390             return;
11391         }
11392         feature = dc_isar_feature(aa64_i8mm, s);
11393         break;
11394     case 0x04: /* SMMLA */
11395     case 0x14: /* UMMLA */
11396     case 0x05: /* USMMLA */
11397         if (!is_q || size != MO_32) {
11398             unallocated_encoding(s);
11399             return;
11400         }
11401         feature = dc_isar_feature(aa64_i8mm, s);
11402         break;
11403     case 0x18: /* FCMLA, #0 */
11404     case 0x19: /* FCMLA, #90 */
11405     case 0x1a: /* FCMLA, #180 */
11406     case 0x1b: /* FCMLA, #270 */
11407     case 0x1c: /* FCADD, #90 */
11408     case 0x1e: /* FCADD, #270 */
11409         if (size == 0
11410             || (size == 1 && !dc_isar_feature(aa64_fp16, s))
11411             || (size == 3 && !is_q)) {
11412             unallocated_encoding(s);
11413             return;
11414         }
11415         feature = dc_isar_feature(aa64_fcma, s);
11416         break;
11417     case 0x1d: /* BFMMLA */
11418         if (size != MO_16 || !is_q) {
11419             unallocated_encoding(s);
11420             return;
11421         }
11422         feature = dc_isar_feature(aa64_bf16, s);
11423         break;
11424     case 0x1f:
11425         switch (size) {
11426         case 1: /* BFDOT */
11427         case 3: /* BFMLAL{B,T} */
11428             feature = dc_isar_feature(aa64_bf16, s);
11429             break;
11430         default:
11431             unallocated_encoding(s);
11432             return;
11433         }
11434         break;
11435     default:
11436         unallocated_encoding(s);
11437         return;
11438     }
11439     if (!feature) {
11440         unallocated_encoding(s);
11441         return;
11442     }
11443     if (!fp_access_check(s)) {
11444         return;
11445     }
11446 
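          /*
           * U has already been folded into the choice of helper below,
           * so from here we dispatch on the 4-bit opcode alone.
           */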
11447     switch (opcode) {
11448     case 0x0: /* SQRDMLAH (vector) */
11449         gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlah_qc, size);
11450         return;
11451 
11452     case 0x1: /* SQRDMLSH (vector) */
11453         gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqrdmlsh_qc, size);
11454         return;
11455 
11456     case 0x2: /* SDOT / UDOT */
11457         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0,
11458                          u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b);
11459         return;
11460 
11461     case 0x3: /* USDOT */
11462         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_usdot_b);
11463         return;
11464 
11465     case 0x04: /* SMMLA, UMMLA */
11466         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0,
11467                          u ? gen_helper_gvec_ummla_b
11468                          : gen_helper_gvec_smmla_b);
11469         return;
11470     case 0x05: /* USMMLA */
11471         gen_gvec_op4_ool(s, 1, rd, rn, rm, rd, 0, gen_helper_gvec_usmmla_b);
11472         return;
11473 
11474     case 0x8: /* FCMLA, #0 */
11475     case 0x9: /* FCMLA, #90 */
11476     case 0xa: /* FCMLA, #180 */
11477     case 0xb: /* FCMLA, #270 */
11478         rot = extract32(opcode, 0, 2);
11479         switch (size) {
11480         case 1:
11481             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, true, rot,
11482                               gen_helper_gvec_fcmlah);
11483             break;
11484         case 2:
11485             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11486                               gen_helper_gvec_fcmlas);
11487             break;
11488         case 3:
11489             gen_gvec_op4_fpst(s, is_q, rd, rn, rm, rd, false, rot,
11490                               gen_helper_gvec_fcmlad);
11491             break;
11492         default:
11493             g_assert_not_reached();
11494         }
11495         return;
11496 
11497     case 0xc: /* FCADD, #90 */
11498     case 0xe: /* FCADD, #270 */
11499         rot = extract32(opcode, 1, 1);
11500         switch (size) {
11501         case 1:
11502             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11503                               gen_helper_gvec_fcaddh);
11504             break;
11505         case 2:
11506             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11507                               gen_helper_gvec_fcadds);
11508             break;
11509         case 3:
11510             gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11511                               gen_helper_gvec_fcaddd);
11512             break;
11513         default:
11514             g_assert_not_reached();
11515         }
11516         return;
11517 
11518     case 0xd: /* BFMMLA */
11519         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla);
11520         return;
11521     case 0xf:
11522         switch (size) {
11523         case 1: /* BFDOT */
11524             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfdot);
11525             break;
11526         case 3: /* BFMLAL{B,T} */
11527             gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, false, is_q,
11528                               gen_helper_gvec_bfmlal);
11529             break;
11530         default:
11531             g_assert_not_reached();
11532         }
11533         return;
11534 
11535     default:
11536         g_assert_not_reached();
11537     }
11538 }
11539 
11540 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11541                                   int size, int rn, int rd)
11542 {
11543     /* Handle 2-reg-misc ops which are widening (so each size element
11544      * in the source becomes a 2*size element in the destination).
11545      * The only instruction like this is FCVTL.
11546      */
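          /*
           * e.g. FCVTL with size == 3 widens two single-precision values
           * (from the low half of Rn, or the high half for FCVTL2) to two
           * doubles.
           */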
11547     int pass;
11548 
11549     if (size == 3) {
11550         /* 32 -> 64 bit fp conversion */
11551         TCGv_i64 tcg_res[2];
11552         int srcelt = is_q ? 2 : 0;
11553 
11554         for (pass = 0; pass < 2; pass++) {
11555             TCGv_i32 tcg_op = tcg_temp_new_i32();
11556             tcg_res[pass] = tcg_temp_new_i64();
11557 
11558             read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11559             gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11560         }
11561         for (pass = 0; pass < 2; pass++) {
11562             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11563         }
11564     } else {
11565         /* 16 -> 32 bit fp conversion */
11566         int srcelt = is_q ? 4 : 0;
11567         TCGv_i32 tcg_res[4];
11568         TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
11569         TCGv_i32 ahp = get_ahp_flag();
11570 
11571         for (pass = 0; pass < 4; pass++) {
11572             tcg_res[pass] = tcg_temp_new_i32();
11573 
11574             read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11575             gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11576                                            fpst, ahp);
11577         }
11578         for (pass = 0; pass < 4; pass++) {
11579             write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11580         }
11581     }
11582 }
11583 
11584 static void handle_rev(DisasContext *s, int opcode, bool u,
11585                        bool is_q, int size, int rn, int rd)
11586 {
11587     int op = (opcode << 1) | u;
11588     int opsz = op + size;
11589     int grp_size = 3 - opsz;
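          /*
           * e.g. REV64 on bytes: op = 0, size = 0, so opsz = 0 and
           * grp_size = 3, i.e. reverse the bytes within each 64-bit group.
           */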
11590     int dsize = is_q ? 128 : 64;
11591     int i;
11592 
11593     if (opsz >= 3) {
11594         unallocated_encoding(s);
11595         return;
11596     }
11597 
11598     if (!fp_access_check(s)) {
11599         return;
11600     }
11601 
11602     if (size == 0) {
11603         /* Special case bytes, use bswap op on each group of elements */
11604         int groups = dsize / (8 << grp_size);
11605 
11606         for (i = 0; i < groups; i++) {
11607             TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11608 
11609             read_vec_element(s, tcg_tmp, rn, i, grp_size);
11610             switch (grp_size) {
11611             case MO_16:
11612                 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11613                 break;
11614             case MO_32:
11615                 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_IZ);
11616                 break;
11617             case MO_64:
11618                 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11619                 break;
11620             default:
11621                 g_assert_not_reached();
11622             }
11623             write_vec_element(s, tcg_tmp, rd, i, grp_size);
11624         }
11625         clear_vec_high(s, is_q, rd);
11626     } else {
11627         int revmask = (1 << grp_size) - 1;
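              /*
               * e.g. REV32 on halfwords: grp_size = 1, revmask = 1, so
               * adjacent 16-bit elements swap within each 32-bit group.
               */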
11628         int esize = 8 << size;
11629         int elements = dsize / esize;
11630         TCGv_i64 tcg_rn = tcg_temp_new_i64();
11631         TCGv_i64 tcg_rd[2];
11632 
11633         for (i = 0; i < 2; i++) {
11634             tcg_rd[i] = tcg_temp_new_i64();
11635             tcg_gen_movi_i64(tcg_rd[i], 0);
11636         }
11637 
11638         for (i = 0; i < elements; i++) {
11639             int e_rev = (i & 0xf) ^ revmask;
11640             int w = (e_rev * esize) / 64;
11641             int o = (e_rev * esize) % 64;
11642 
11643             read_vec_element(s, tcg_rn, rn, i, size);
11644             tcg_gen_deposit_i64(tcg_rd[w], tcg_rd[w], tcg_rn, o, esize);
11645         }
11646 
11647         for (i = 0; i < 2; i++) {
11648             write_vec_element(s, tcg_rd[i], rd, i, MO_64);
11649         }
11650         clear_vec_high(s, true, rd);
11651     }
11652 }
11653 
11654 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11655                                   bool is_q, int size, int rn, int rd)
11656 {
11657     /* Implement the pairwise operations from 2-misc:
11658      * SADDLP, UADDLP, SADALP, UADALP.
11659      * These all add pairs of elements in the input to produce a
11660      * double-width result element in the output (possibly accumulating).
11661      */
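          /*
           * e.g. SADDLP on 32-bit elements adds elements 0+1 and 2+3 of Rn
           * to produce two signed 64-bit results.
           */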
11662     bool accum = (opcode == 0x6);
11663     int maxpass = is_q ? 2 : 1;
11664     int pass;
11665     TCGv_i64 tcg_res[2];
11666 
11667     if (size == 2) {
11668         /* 32 + 32 -> 64 op */
11669         MemOp memop = size + (u ? 0 : MO_SIGN);
11670 
11671         for (pass = 0; pass < maxpass; pass++) {
11672             TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11673             TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11674 
11675             tcg_res[pass] = tcg_temp_new_i64();
11676 
11677             read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11678             read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11679             tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11680             if (accum) {
11681                 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11682                 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11683             }
11684         }
11685     } else {
11686         for (pass = 0; pass < maxpass; pass++) {
11687             TCGv_i64 tcg_op = tcg_temp_new_i64();
11688             NeonGenOne64OpFn *genfn;
11689             static NeonGenOne64OpFn * const fns[2][2] = {
11690                 { gen_helper_neon_addlp_s8,  gen_helper_neon_addlp_u8 },
11691                 { gen_helper_neon_addlp_s16,  gen_helper_neon_addlp_u16 },
11692             };
11693 
11694             genfn = fns[size][u];
11695 
11696             tcg_res[pass] = tcg_temp_new_i64();
11697 
11698             read_vec_element(s, tcg_op, rn, pass, MO_64);
11699             genfn(tcg_res[pass], tcg_op);
11700 
11701             if (accum) {
11702                 read_vec_element(s, tcg_op, rd, pass, MO_64);
11703                 if (size == 0) {
11704                     gen_helper_neon_addl_u16(tcg_res[pass],
11705                                              tcg_res[pass], tcg_op);
11706                 } else {
11707                     gen_helper_neon_addl_u32(tcg_res[pass],
11708                                              tcg_res[pass], tcg_op);
11709                 }
11710             }
11711         }
11712     }
11713     if (!is_q) {
11714         tcg_res[1] = tcg_constant_i64(0);
11715     }
11716     for (pass = 0; pass < 2; pass++) {
11717         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11718     }
11719 }
11720 
11721 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11722 {
11723     /* Implement SHLL and SHLL2 */
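          /*
           * Each element of the low half of Rn (high half for SHLL2) is
           * zero-extended to double width and shifted left by the element
           * size, e.g. an 8-bit element 0x12 gives the 16-bit result 0x1200.
           */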
11724     int pass;
11725     int part = is_q ? 2 : 0;
11726     TCGv_i64 tcg_res[2];
11727 
11728     for (pass = 0; pass < 2; pass++) {
11729         static NeonGenWidenFn * const widenfns[3] = {
11730             gen_helper_neon_widen_u8,
11731             gen_helper_neon_widen_u16,
11732             tcg_gen_extu_i32_i64,
11733         };
11734         NeonGenWidenFn *widenfn = widenfns[size];
11735         TCGv_i32 tcg_op = tcg_temp_new_i32();
11736 
11737         read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11738         tcg_res[pass] = tcg_temp_new_i64();
11739         widenfn(tcg_res[pass], tcg_op);
11740         tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11741     }
11742 
11743     for (pass = 0; pass < 2; pass++) {
11744         write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11745     }
11746 }
11747 
11748 /* AdvSIMD two reg misc
11749  *   31  30  29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
11750  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11751  * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
11752  * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11753  */
11754 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11755 {
11756     int size = extract32(insn, 22, 2);
11757     int opcode = extract32(insn, 12, 5);
11758     bool u = extract32(insn, 29, 1);
11759     bool is_q = extract32(insn, 30, 1);
11760     int rn = extract32(insn, 5, 5);
11761     int rd = extract32(insn, 0, 5);
11762     bool need_fpstatus = false;
11763     int rmode = -1;
11764     TCGv_i32 tcg_rmode;
11765     TCGv_ptr tcg_fpstatus;
11766 
11767     switch (opcode) {
11768     case 0x0: /* REV64, REV32 */
11769     case 0x1: /* REV16 */
11770         handle_rev(s, opcode, u, is_q, size, rn, rd);
11771         return;
11772     case 0x5: /* CNT, NOT, RBIT */
11773         if (u && size == 0) {
11774             /* NOT */
11775             break;
11776         } else if (u && size == 1) {
11777             /* RBIT */
11778             break;
11779         } else if (!u && size == 0) {
11780             /* CNT */
11781             break;
11782         }
11783         unallocated_encoding(s);
11784         return;
11785     case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11786     case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11787         if (size == 3) {
11788             unallocated_encoding(s);
11789             return;
11790         }
11791         if (!fp_access_check(s)) {
11792             return;
11793         }
11794 
11795         handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11796         return;
11797     case 0x4: /* CLS, CLZ */
11798         if (size == 3) {
11799             unallocated_encoding(s);
11800             return;
11801         }
11802         break;
11803     case 0x2: /* SADDLP, UADDLP */
11804     case 0x6: /* SADALP, UADALP */
11805         if (size == 3) {
11806             unallocated_encoding(s);
11807             return;
11808         }
11809         if (!fp_access_check(s)) {
11810             return;
11811         }
11812         handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
11813         return;
11814     case 0x13: /* SHLL, SHLL2 */
11815         if (u == 0 || size == 3) {
11816             unallocated_encoding(s);
11817             return;
11818         }
11819         if (!fp_access_check(s)) {
11820             return;
11821         }
11822         handle_shll(s, is_q, size, rn, rd);
11823         return;
11824     case 0xa: /* CMLT */
11825         if (u == 1) {
11826             unallocated_encoding(s);
11827             return;
11828         }
11829         /* fall through */
11830     case 0x8: /* CMGT, CMGE */
11831     case 0x9: /* CMEQ, CMLE */
11832     case 0xb: /* ABS, NEG */
11833         if (size == 3 && !is_q) {
11834             unallocated_encoding(s);
11835             return;
11836         }
11837         break;
11838     case 0x3: /* SUQADD, USQADD */
11839         if (size == 3 && !is_q) {
11840             unallocated_encoding(s);
11841             return;
11842         }
11843         if (!fp_access_check(s)) {
11844             return;
11845         }
11846         handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
11847         return;
11848     case 0x7: /* SQABS, SQNEG */
11849         if (size == 3 && !is_q) {
11850             unallocated_encoding(s);
11851             return;
11852         }
11853         break;
11854     case 0xc ... 0xf:
11855     case 0x16 ... 0x1f:
11856     {
11857         /* Floating point: U, size[1] and opcode indicate operation;
11858          * size[0] indicates single or double precision.
11859          */
11860         int is_double = extract32(size, 0, 1);
11861         opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
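              /* e.g. FNEG: U=1, size[1]=1, base opcode 0xf -> combined 0x6f */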
11862         size = is_double ? 3 : 2;
11863         switch (opcode) {
11864         case 0x2f: /* FABS */
11865         case 0x6f: /* FNEG */
11866             if (size == 3 && !is_q) {
11867                 unallocated_encoding(s);
11868                 return;
11869             }
11870             break;
11871         case 0x1d: /* SCVTF */
11872         case 0x5d: /* UCVTF */
11873         {
11874             bool is_signed = (opcode == 0x1d);
11875             int elements = is_double ? 2 : is_q ? 4 : 2;
11876             if (is_double && !is_q) {
11877                 unallocated_encoding(s);
11878                 return;
11879             }
11880             if (!fp_access_check(s)) {
11881                 return;
11882             }
11883             handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
11884             return;
11885         }
11886         case 0x2c: /* FCMGT (zero) */
11887         case 0x2d: /* FCMEQ (zero) */
11888         case 0x2e: /* FCMLT (zero) */
11889         case 0x6c: /* FCMGE (zero) */
11890         case 0x6d: /* FCMLE (zero) */
11891             if (size == 3 && !is_q) {
11892                 unallocated_encoding(s);
11893                 return;
11894             }
11895             handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
11896             return;
11897         case 0x7f: /* FSQRT */
11898             if (size == 3 && !is_q) {
11899                 unallocated_encoding(s);
11900                 return;
11901             }
11902             break;
11903         case 0x1a: /* FCVTNS */
11904         case 0x1b: /* FCVTMS */
11905         case 0x3a: /* FCVTPS */
11906         case 0x3b: /* FCVTZS */
11907         case 0x5a: /* FCVTNU */
11908         case 0x5b: /* FCVTMU */
11909         case 0x7a: /* FCVTPU */
11910         case 0x7b: /* FCVTZU */
11911             need_fpstatus = true;
11912             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
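                  /*
                   * opcode bits 5 and 0 select the rounding mode: FCVTN*
                   * round to nearest (ties to even), FCVTM* towards -inf,
                   * FCVTP* towards +inf and FCVTZ* towards zero.
                   */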
11913             if (size == 3 && !is_q) {
11914                 unallocated_encoding(s);
11915                 return;
11916             }
11917             break;
11918         case 0x5c: /* FCVTAU */
11919         case 0x1c: /* FCVTAS */
11920             need_fpstatus = true;
11921             rmode = FPROUNDING_TIEAWAY;
11922             if (size == 3 && !is_q) {
11923                 unallocated_encoding(s);
11924                 return;
11925             }
11926             break;
11927         case 0x3c: /* URECPE */
11928             if (size == 3) {
11929                 unallocated_encoding(s);
11930                 return;
11931             }
11932             /* fall through */
11933         case 0x3d: /* FRECPE */
11934         case 0x7d: /* FRSQRTE */
11935             if (size == 3 && !is_q) {
11936                 unallocated_encoding(s);
11937                 return;
11938             }
11939             if (!fp_access_check(s)) {
11940                 return;
11941             }
11942             handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
11943             return;
11944         case 0x56: /* FCVTXN, FCVTXN2 */
11945             if (size == 2) {
11946                 unallocated_encoding(s);
11947                 return;
11948             }
11949             /* fall through */
11950         case 0x16: /* FCVTN, FCVTN2 */
11951             /* handle_2misc_narrow does a 2*size -> size operation, but these
11952              * instructions encode the source size rather than dest size.
11953              */
11954             if (!fp_access_check(s)) {
11955                 return;
11956             }
11957             handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11958             return;
11959         case 0x36: /* BFCVTN, BFCVTN2 */
11960             if (!dc_isar_feature(aa64_bf16, s) || size != 2) {
11961                 unallocated_encoding(s);
11962                 return;
11963             }
11964             if (!fp_access_check(s)) {
11965                 return;
11966             }
11967             handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11968             return;
11969         case 0x17: /* FCVTL, FCVTL2 */
11970             if (!fp_access_check(s)) {
11971                 return;
11972             }
11973             handle_2misc_widening(s, opcode, is_q, size, rn, rd);
11974             return;
11975         case 0x18: /* FRINTN */
11976         case 0x19: /* FRINTM */
11977         case 0x38: /* FRINTP */
11978         case 0x39: /* FRINTZ */
11979             rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11980             /* fall through */
11981         case 0x59: /* FRINTX */
11982         case 0x79: /* FRINTI */
11983             need_fpstatus = true;
11984             if (size == 3 && !is_q) {
11985                 unallocated_encoding(s);
11986                 return;
11987             }
11988             break;
11989         case 0x58: /* FRINTA */
11990             rmode = FPROUNDING_TIEAWAY;
11991             need_fpstatus = true;
11992             if (size == 3 && !is_q) {
11993                 unallocated_encoding(s);
11994                 return;
11995             }
11996             break;
11997         case 0x7c: /* URSQRTE */
11998             if (size == 3) {
11999                 unallocated_encoding(s);
12000                 return;
12001             }
12002             break;
12003         case 0x1e: /* FRINT32Z */
12004         case 0x1f: /* FRINT64Z */
12005             rmode = FPROUNDING_ZERO;
12006             /* fall through */
12007         case 0x5e: /* FRINT32X */
12008         case 0x5f: /* FRINT64X */
12009             need_fpstatus = true;
12010             if ((size == 3 && !is_q) || !dc_isar_feature(aa64_frint, s)) {
12011                 unallocated_encoding(s);
12012                 return;
12013             }
12014             break;
12015         default:
12016             unallocated_encoding(s);
12017             return;
12018         }
12019         break;
12020     }
12021     default:
12022         unallocated_encoding(s);
12023         return;
12024     }
12025 
12026     if (!fp_access_check(s)) {
12027         return;
12028     }
12029 
12030     if (need_fpstatus || rmode >= 0) {
12031         tcg_fpstatus = fpstatus_ptr(FPST_FPCR);
12032     } else {
12033         tcg_fpstatus = NULL;
12034     }
12035     if (rmode >= 0) {
12036         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12037     } else {
12038         tcg_rmode = NULL;
12039     }
12040 
12041     switch (opcode) {
12042     case 0x5:
12043         if (u && size == 0) { /* NOT */
12044             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
12045             return;
12046         }
12047         break;
12048     case 0x8: /* CMGT, CMGE */
12049         if (u) {
12050             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cge0, size);
12051         } else {
12052             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cgt0, size);
12053         }
12054         return;
12055     case 0x9: /* CMEQ, CMLE */
12056         if (u) {
12057             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_cle0, size);
12058         } else {
12059             gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_ceq0, size);
12060         }
12061         return;
12062     case 0xa: /* CMLT */
12063         gen_gvec_fn2(s, is_q, rd, rn, gen_gvec_clt0, size);
12064         return;
12065     case 0xb:
12066         if (u) { /* ABS, NEG */
12067             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
12068         } else {
12069             gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_abs, size);
12070         }
12071         return;
12072     }
12073 
12074     if (size == 3) {
12075         /* All 64-bit element operations can be shared with scalar 2misc */
12076         int pass;
12077 
12078         /* Coverity claims (size == 3 && !is_q) has been eliminated
12079          * from all paths leading to here.
12080          */
12081         tcg_debug_assert(is_q);
12082         for (pass = 0; pass < 2; pass++) {
12083             TCGv_i64 tcg_op = tcg_temp_new_i64();
12084             TCGv_i64 tcg_res = tcg_temp_new_i64();
12085 
12086             read_vec_element(s, tcg_op, rn, pass, MO_64);
12087 
12088             handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
12089                             tcg_rmode, tcg_fpstatus);
12090 
12091             write_vec_element(s, tcg_res, rd, pass, MO_64);
12092         }
12093     } else {
12094         int pass;
12095 
12096         for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
12097             TCGv_i32 tcg_op = tcg_temp_new_i32();
12098             TCGv_i32 tcg_res = tcg_temp_new_i32();
12099 
12100             read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
12101 
12102             if (size == 2) {
12103                 /* Special cases for 32 bit elements */
12104                 switch (opcode) {
12105                 case 0x4: /* CLS */
12106                     if (u) {
12107                         tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
12108                     } else {
12109                         tcg_gen_clrsb_i32(tcg_res, tcg_op);
12110                     }
12111                     break;
12112                 case 0x7: /* SQABS, SQNEG */
12113                     if (u) {
12114                         gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
12115                     } else {
12116                         gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
12117                     }
12118                     break;
12119                 case 0x2f: /* FABS */
12120                     gen_helper_vfp_abss(tcg_res, tcg_op);
12121                     break;
12122                 case 0x6f: /* FNEG */
12123                     gen_helper_vfp_negs(tcg_res, tcg_op);
12124                     break;
12125                 case 0x7f: /* FSQRT */
12126                     gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
12127                     break;
12128                 case 0x1a: /* FCVTNS */
12129                 case 0x1b: /* FCVTMS */
12130                 case 0x1c: /* FCVTAS */
12131                 case 0x3a: /* FCVTPS */
12132                 case 0x3b: /* FCVTZS */
12133                     gen_helper_vfp_tosls(tcg_res, tcg_op,
12134                                          tcg_constant_i32(0), tcg_fpstatus);
12135                     break;
12136                 case 0x5a: /* FCVTNU */
12137                 case 0x5b: /* FCVTMU */
12138                 case 0x5c: /* FCVTAU */
12139                 case 0x7a: /* FCVTPU */
12140                 case 0x7b: /* FCVTZU */
12141                     gen_helper_vfp_touls(tcg_res, tcg_op,
12142                                          tcg_constant_i32(0), tcg_fpstatus);
12143                     break;
12144                 case 0x18: /* FRINTN */
12145                 case 0x19: /* FRINTM */
12146                 case 0x38: /* FRINTP */
12147                 case 0x39: /* FRINTZ */
12148                 case 0x58: /* FRINTA */
12149                 case 0x79: /* FRINTI */
12150                     gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
12151                     break;
12152                 case 0x59: /* FRINTX */
12153                     gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
12154                     break;
12155                 case 0x7c: /* URSQRTE */
12156                     gen_helper_rsqrte_u32(tcg_res, tcg_op);
12157                     break;
12158                 case 0x1e: /* FRINT32Z */
12159                 case 0x5e: /* FRINT32X */
12160                     gen_helper_frint32_s(tcg_res, tcg_op, tcg_fpstatus);
12161                     break;
12162                 case 0x1f: /* FRINT64Z */
12163                 case 0x5f: /* FRINT64X */
12164                     gen_helper_frint64_s(tcg_res, tcg_op, tcg_fpstatus);
12165                     break;
12166                 default:
12167                     g_assert_not_reached();
12168                 }
12169             } else {
12170                 /* Use helpers for 8 and 16 bit elements */
12171                 switch (opcode) {
12172                 case 0x5: /* CNT, RBIT */
12173                     /* For these two insns size is part of the opcode specifier
12174                      * (handled earlier); they always operate on byte elements.
12175                      */
12176                     if (u) {
12177                         gen_helper_neon_rbit_u8(tcg_res, tcg_op);
12178                     } else {
12179                         gen_helper_neon_cnt_u8(tcg_res, tcg_op);
12180                     }
12181                     break;
12182                 case 0x7: /* SQABS, SQNEG */
12183                 {
12184                     NeonGenOneOpEnvFn *genfn;
12185                     static NeonGenOneOpEnvFn * const fns[2][2] = {
12186                         { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
12187                         { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
12188                     };
12189                     genfn = fns[size][u];
12190                     genfn(tcg_res, cpu_env, tcg_op);
12191                     break;
12192                 }
12193                 case 0x4: /* CLS, CLZ */
12194                     if (u) {
12195                         if (size == 0) {
12196                             gen_helper_neon_clz_u8(tcg_res, tcg_op);
12197                         } else {
12198                             gen_helper_neon_clz_u16(tcg_res, tcg_op);
12199                         }
12200                     } else {
12201                         if (size == 0) {
12202                             gen_helper_neon_cls_s8(tcg_res, tcg_op);
12203                         } else {
12204                             gen_helper_neon_cls_s16(tcg_res, tcg_op);
12205                         }
12206                     }
12207                     break;
12208                 default:
12209                     g_assert_not_reached();
12210                 }
12211             }
12212 
12213             write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
12214         }
12215     }
12216     clear_vec_high(s, is_q, rd);
12217 
12218     if (tcg_rmode) {
12219         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12220     }
12221 }
12222 
12223 /* AdvSIMD [scalar] two register miscellaneous (FP16)
12224  *
12225  *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
12226  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12227  * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
12228  * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
12229  *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
12230  *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
12231  *
12232  * This actually covers two groups where scalar access is governed by
12233  * bit 28. Several of the instructions (float to integral) only exist
12234  * in the vector form and are unallocated for the scalar decode. Also,
12235  * in the scalar decode Q is always 1.
12236  */
12237 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
12238 {
12239     int fpop, opcode, a, u;
12240     int rn, rd;
12241     bool is_q;
12242     bool is_scalar;
12243     bool only_in_vector = false;
12244 
12245     int pass;
12246     TCGv_i32 tcg_rmode = NULL;
12247     TCGv_ptr tcg_fpstatus = NULL;
12248     bool need_fpst = true;
12249     int rmode = -1;
12250 
12251     if (!dc_isar_feature(aa64_fp16, s)) {
12252         unallocated_encoding(s);
12253         return;
12254     }
12255 
12256     rd = extract32(insn, 0, 5);
12257     rn = extract32(insn, 5, 5);
12258 
12259     a = extract32(insn, 23, 1);
12260     u = extract32(insn, 29, 1);
12261     is_scalar = extract32(insn, 28, 1);
12262     is_q = extract32(insn, 30, 1);
12263 
12264     opcode = extract32(insn, 12, 5);
12265     fpop = deposit32(opcode, 5, 1, a);
12266     fpop = deposit32(fpop, 6, 1, u);
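          /* e.g. FABS: a=1, u=0, opcode 0xf -> fpop 0x2f */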
12267 
12268     switch (fpop) {
12269     case 0x1d: /* SCVTF */
12270     case 0x5d: /* UCVTF */
12271     {
12272         int elements;
12273 
12274         if (is_scalar) {
12275             elements = 1;
12276         } else {
12277             elements = (is_q ? 8 : 4);
12278         }
12279 
12280         if (!fp_access_check(s)) {
12281             return;
12282         }
12283         handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
12284         return;
12285     }
12286     break;
12287     case 0x2c: /* FCMGT (zero) */
12288     case 0x2d: /* FCMEQ (zero) */
12289     case 0x2e: /* FCMLT (zero) */
12290     case 0x6c: /* FCMGE (zero) */
12291     case 0x6d: /* FCMLE (zero) */
12292         handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
12293         return;
12294     case 0x3d: /* FRECPE */
12295     case 0x3f: /* FRECPX */
12296         break;
12297     case 0x18: /* FRINTN */
12298         only_in_vector = true;
12299         rmode = FPROUNDING_TIEEVEN;
12300         break;
12301     case 0x19: /* FRINTM */
12302         only_in_vector = true;
12303         rmode = FPROUNDING_NEGINF;
12304         break;
12305     case 0x38: /* FRINTP */
12306         only_in_vector = true;
12307         rmode = FPROUNDING_POSINF;
12308         break;
12309     case 0x39: /* FRINTZ */
12310         only_in_vector = true;
12311         rmode = FPROUNDING_ZERO;
12312         break;
12313     case 0x58: /* FRINTA */
12314         only_in_vector = true;
12315         rmode = FPROUNDING_TIEAWAY;
12316         break;
12317     case 0x59: /* FRINTX */
12318     case 0x79: /* FRINTI */
12319         only_in_vector = true;
12320         /* current rounding mode */
12321         break;
12322     case 0x1a: /* FCVTNS */
12323         rmode = FPROUNDING_TIEEVEN;
12324         break;
12325     case 0x1b: /* FCVTMS */
12326         rmode = FPROUNDING_NEGINF;
12327         break;
12328     case 0x1c: /* FCVTAS */
12329         rmode = FPROUNDING_TIEAWAY;
12330         break;
12331     case 0x3a: /* FCVTPS */
12332         rmode = FPROUNDING_POSINF;
12333         break;
12334     case 0x3b: /* FCVTZS */
12335         rmode = FPROUNDING_ZERO;
12336         break;
12337     case 0x5a: /* FCVTNU */
12338         rmode = FPROUNDING_TIEEVEN;
12339         break;
12340     case 0x5b: /* FCVTMU */
12341         rmode = FPROUNDING_NEGINF;
12342         break;
12343     case 0x5c: /* FCVTAU */
12344         rmode = FPROUNDING_TIEAWAY;
12345         break;
12346     case 0x7a: /* FCVTPU */
12347         rmode = FPROUNDING_POSINF;
12348         break;
12349     case 0x7b: /* FCVTZU */
12350         rmode = FPROUNDING_ZERO;
12351         break;
12352     case 0x2f: /* FABS */
12353     case 0x6f: /* FNEG */
12354         need_fpst = false;
12355         break;
12356     case 0x7d: /* FRSQRTE */
12357     case 0x7f: /* FSQRT (vector) */
12358         break;
12359     default:
12360         unallocated_encoding(s);
12361         return;
12362     }
12363 
12364 
12365     /* Check additional constraints for the scalar encoding */
12366     if (is_scalar) {
12367         if (!is_q) {
12368             unallocated_encoding(s);
12369             return;
12370         }
12371         /* FRINTxx is only in the vector form */
12372         if (only_in_vector) {
12373             unallocated_encoding(s);
12374             return;
12375         }
12376     }
12377 
12378     if (!fp_access_check(s)) {
12379         return;
12380     }
12381 
12382     if (rmode >= 0 || need_fpst) {
12383         tcg_fpstatus = fpstatus_ptr(FPST_FPCR_F16);
12384     }
12385 
12386     if (rmode >= 0) {
12387         tcg_rmode = gen_set_rmode(rmode, tcg_fpstatus);
12388     }
12389 
12390     if (is_scalar) {
12391         TCGv_i32 tcg_op = read_fp_hreg(s, rn);
12392         TCGv_i32 tcg_res = tcg_temp_new_i32();
12393 
12394         switch (fpop) {
12395         case 0x1a: /* FCVTNS */
12396         case 0x1b: /* FCVTMS */
12397         case 0x1c: /* FCVTAS */
12398         case 0x3a: /* FCVTPS */
12399         case 0x3b: /* FCVTZS */
12400             gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12401             break;
12402         case 0x3d: /* FRECPE */
12403             gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12404             break;
12405         case 0x3f: /* FRECPX */
12406             gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
12407             break;
12408         case 0x5a: /* FCVTNU */
12409         case 0x5b: /* FCVTMU */
12410         case 0x5c: /* FCVTAU */
12411         case 0x7a: /* FCVTPU */
12412         case 0x7b: /* FCVTZU */
12413             gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12414             break;
12415         case 0x6f: /* FNEG */
12416             tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12417             break;
12418         case 0x7d: /* FRSQRTE */
12419             gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12420             break;
12421         default:
12422             g_assert_not_reached();
12423         }
12424 
12425         /* limit any sign extension going on */
12426         tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
12427         write_fp_sreg(s, rd, tcg_res);
12428     } else {
12429         for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
12430             TCGv_i32 tcg_op = tcg_temp_new_i32();
12431             TCGv_i32 tcg_res = tcg_temp_new_i32();
12432 
12433             read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
12434 
12435             switch (fpop) {
12436             case 0x1a: /* FCVTNS */
12437             case 0x1b: /* FCVTMS */
12438             case 0x1c: /* FCVTAS */
12439             case 0x3a: /* FCVTPS */
12440             case 0x3b: /* FCVTZS */
12441                 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
12442                 break;
12443             case 0x3d: /* FRECPE */
12444                 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
12445                 break;
12446             case 0x5a: /* FCVTNU */
12447             case 0x5b: /* FCVTMU */
12448             case 0x5c: /* FCVTAU */
12449             case 0x7a: /* FCVTPU */
12450             case 0x7b: /* FCVTZU */
12451                 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
12452                 break;
12453             case 0x18: /* FRINTN */
12454             case 0x19: /* FRINTM */
12455             case 0x38: /* FRINTP */
12456             case 0x39: /* FRINTZ */
12457             case 0x58: /* FRINTA */
12458             case 0x79: /* FRINTI */
12459                 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
12460                 break;
12461             case 0x59: /* FRINTX */
12462                 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
12463                 break;
12464             case 0x2f: /* FABS */
12465                 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
12466                 break;
12467             case 0x6f: /* FNEG */
12468                 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
12469                 break;
12470             case 0x7d: /* FRSQRTE */
12471                 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
12472                 break;
12473             case 0x7f: /* FSQRT */
12474                 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
12475                 break;
12476             default:
12477                 g_assert_not_reached();
12478             }
12479 
12480             write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
12481         }
12482 
12483         clear_vec_high(s, is_q, rd);
12484     }
12485 
12486     if (tcg_rmode) {
12487         gen_restore_rmode(tcg_rmode, tcg_fpstatus);
12488     }
12489 }
12490 
12491 /* AdvSIMD scalar x indexed element
12492  *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12493  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12494  * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12495  * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
12496  * AdvSIMD vector x indexed element
12497  *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
12498  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12499  * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
12500  * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
12501  */
12502 static void disas_simd_indexed(DisasContext *s, uint32_t insn)
12503 {
12504     /* This encoding has two kinds of instruction:
12505      *  normal, where we perform elt x idxelt => elt for each
12506      *     element in the vector
12507      *  long, where we perform elt x idxelt and generate a result of
12508      *     double the width of the input element
12509      * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
12510      */
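          /*
           * e.g. SMULL (by element) is a long op: each element of Rn is
           * multiplied by the indexed element of Rm to give a double-width
           * product; SMULL2 uses the high half of Rn.
           */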
12511     bool is_scalar = extract32(insn, 28, 1);
12512     bool is_q = extract32(insn, 30, 1);
12513     bool u = extract32(insn, 29, 1);
12514     int size = extract32(insn, 22, 2);
12515     int l = extract32(insn, 21, 1);
12516     int m = extract32(insn, 20, 1);
12517     /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
12518     int rm = extract32(insn, 16, 4);
12519     int opcode = extract32(insn, 12, 4);
12520     int h = extract32(insn, 11, 1);
12521     int rn = extract32(insn, 5, 5);
12522     int rd = extract32(insn, 0, 5);
12523     bool is_long = false;
12524     int is_fp = 0;
12525     bool is_fp16 = false;
12526     int index;
12527     TCGv_ptr fpst;
12528 
12529     switch (16 * u + opcode) {
12530     case 0x08: /* MUL */
12531     case 0x10: /* MLA */
12532     case 0x14: /* MLS */
12533         if (is_scalar) {
12534             unallocated_encoding(s);
12535             return;
12536         }
12537         break;
12538     case 0x02: /* SMLAL, SMLAL2 */
12539     case 0x12: /* UMLAL, UMLAL2 */
12540     case 0x06: /* SMLSL, SMLSL2 */
12541     case 0x16: /* UMLSL, UMLSL2 */
12542     case 0x0a: /* SMULL, SMULL2 */
12543     case 0x1a: /* UMULL, UMULL2 */
12544         if (is_scalar) {
12545             unallocated_encoding(s);
12546             return;
12547         }
12548         is_long = true;
12549         break;
12550     case 0x03: /* SQDMLAL, SQDMLAL2 */
12551     case 0x07: /* SQDMLSL, SQDMLSL2 */
12552     case 0x0b: /* SQDMULL, SQDMULL2 */
12553         is_long = true;
12554         break;
12555     case 0x0c: /* SQDMULH */
12556     case 0x0d: /* SQRDMULH */
12557         break;
12558     case 0x01: /* FMLA */
12559     case 0x05: /* FMLS */
12560     case 0x09: /* FMUL */
12561     case 0x19: /* FMULX */
12562         is_fp = 1;
12563         break;
12564     case 0x1d: /* SQRDMLAH */
12565     case 0x1f: /* SQRDMLSH */
12566         if (!dc_isar_feature(aa64_rdm, s)) {
12567             unallocated_encoding(s);
12568             return;
12569         }
12570         break;
12571     case 0x0e: /* SDOT */
12572     case 0x1e: /* UDOT */
12573         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
12574             unallocated_encoding(s);
12575             return;
12576         }
12577         break;
12578     case 0x0f:
12579         switch (size) {
12580         case 0: /* SUDOT */
12581         case 2: /* USDOT */
12582             if (is_scalar || !dc_isar_feature(aa64_i8mm, s)) {
12583                 unallocated_encoding(s);
12584                 return;
12585             }
12586             size = MO_32;
12587             break;
12588         case 1: /* BFDOT */
12589             if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12590                 unallocated_encoding(s);
12591                 return;
12592             }
12593             size = MO_32;
12594             break;
12595         case 3: /* BFMLAL{B,T} */
12596             if (is_scalar || !dc_isar_feature(aa64_bf16, s)) {
12597                 unallocated_encoding(s);
12598                 return;
12599             }
12600             /* can't set is_fp: the fp size checks below would be wrong here */
12601             size = MO_16;
12602             break;
12603         default:
12604             unallocated_encoding(s);
12605             return;
12606         }
12607         break;
12608     case 0x11: /* FCMLA #0 */
12609     case 0x13: /* FCMLA #90 */
12610     case 0x15: /* FCMLA #180 */
12611     case 0x17: /* FCMLA #270 */
12612         if (is_scalar || !dc_isar_feature(aa64_fcma, s)) {
12613             unallocated_encoding(s);
12614             return;
12615         }
12616         is_fp = 2;
12617         break;
12618     case 0x00: /* FMLAL */
12619     case 0x04: /* FMLSL */
12620     case 0x18: /* FMLAL2 */
12621     case 0x1c: /* FMLSL2 */
12622         if (is_scalar || size != MO_32 || !dc_isar_feature(aa64_fhm, s)) {
12623             unallocated_encoding(s);
12624             return;
12625         }
12626         size = MO_16;
12627         /* fp ops, but we pass cpu_env not fp_status, so is_fp stays 0. */
12628         break;
12629     default:
12630         unallocated_encoding(s);
12631         return;
12632     }
12633 
12634     switch (is_fp) {
12635     case 1: /* normal fp */
12636         /* convert insn encoded size to MemOp size */
12637         switch (size) {
12638         case 0: /* half-precision */
12639             size = MO_16;
12640             is_fp16 = true;
12641             break;
12642         case MO_32: /* single precision */
12643         case MO_64: /* double precision */
12644             break;
12645         default:
12646             unallocated_encoding(s);
12647             return;
12648         }
12649         break;
12650 
12651     case 2: /* complex fp */
12652         /* Each indexable element is a complex pair.  */
12653         size += 1;
12654         switch (size) {
12655         case MO_32:
12656             if (h && !is_q) {
12657                 unallocated_encoding(s);
12658                 return;
12659             }
12660             is_fp16 = true;
12661             break;
12662         case MO_64:
12663             break;
12664         default:
12665             unallocated_encoding(s);
12666             return;
12667         }
12668         break;
12669 
12670     default: /* integer */
12671         switch (size) {
12672         case MO_8:
12673         case MO_64:
12674             unallocated_encoding(s);
12675             return;
12676         }
12677         break;
12678     }
12679     if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
12680         unallocated_encoding(s);
12681         return;
12682     }
12683 
12684     /* Given MemOp size, adjust register and indexing.  */
12685     switch (size) {
12686     case MO_16:
12687         index = h << 2 | l << 1 | m;
12688         break;
12689     case MO_32:
12690         index = h << 1 | l;
12691         rm |= m << 4;
12692         break;
12693     case MO_64:
12694         if (l || !is_q) {
12695             unallocated_encoding(s);
12696             return;
12697         }
12698         index = h;
12699         rm |= m << 4;
12700         break;
12701     default:
12702         g_assert_not_reached();
12703     }
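    /*
     * At this point 'index' is the element number within Rm and, for 32-bit
     * and 64-bit elements, the M bit has been folded in as bit 4 of Rm.
     * For example a 16-bit element is selected by the 3-bit H:L:M index
     * (0..7), while a 32-bit element uses only H:L (0..3).
     */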
12704 
12705     if (!fp_access_check(s)) {
12706         return;
12707     }
12708 
12709     if (is_fp) {
12710         fpst = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
12711     } else {
12712         fpst = NULL;
12713     }
12714 
12715     switch (16 * u + opcode) {
12716     case 0x0e: /* SDOT */
12717     case 0x1e: /* UDOT */
12718         gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12719                          u ? gen_helper_gvec_udot_idx_b
12720                          : gen_helper_gvec_sdot_idx_b);
12721         return;
12722     case 0x0f:
12723         switch (extract32(insn, 22, 2)) {
12724         case 0: /* SUDOT */
12725             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12726                              gen_helper_gvec_sudot_idx_b);
12727             return;
12728         case 1: /* BFDOT */
12729             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12730                              gen_helper_gvec_bfdot_idx);
12731             return;
12732         case 2: /* USDOT */
12733             gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, index,
12734                              gen_helper_gvec_usdot_idx_b);
12735             return;
12736         case 3: /* BFMLAL{B,T} */
12737             gen_gvec_op4_fpst(s, 1, rd, rn, rm, rd, 0, (index << 1) | is_q,
12738                               gen_helper_gvec_bfmlal_idx);
12739             return;
12740         }
12741         g_assert_not_reached();
12742     case 0x11: /* FCMLA #0 */
12743     case 0x13: /* FCMLA #90 */
12744     case 0x15: /* FCMLA #180 */
12745     case 0x17: /* FCMLA #270 */
12746         {
12747             int rot = extract32(insn, 13, 2);
12748             int data = (index << 2) | rot;
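            /*
             * Pack the rotation (0, 90, 180 or 270 degrees, encoded as 0..3)
             * into data[1:0] and the element index into the bits above it,
             * for the out-of-line helper to unpack.
             */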
12749             tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
12750                                vec_full_reg_offset(s, rn),
12751                                vec_full_reg_offset(s, rm),
12752                                vec_full_reg_offset(s, rd), fpst,
12753                                is_q ? 16 : 8, vec_full_reg_size(s), data,
12754                                size == MO_64
12755                                ? gen_helper_gvec_fcmlas_idx
12756                                : gen_helper_gvec_fcmlah_idx);
12757         }
12758         return;
12759 
12760     case 0x00: /* FMLAL */
12761     case 0x04: /* FMLSL */
12762     case 0x18: /* FMLAL2 */
12763     case 0x1c: /* FMLSL2 */
12764         {
12765             int is_s = extract32(opcode, 2, 1);
12766             int is_2 = u;
12767             int data = (index << 2) | (is_2 << 1) | is_s;
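            /*
             * Pack the helper arguments: bit 0 selects subtract (FMLSL vs
             * FMLAL), bit 1 selects the '2' (upper half) form, and the bits
             * above hold the element index.
             */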
12768             tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
12769                                vec_full_reg_offset(s, rn),
12770                                vec_full_reg_offset(s, rm), cpu_env,
12771                                is_q ? 16 : 8, vec_full_reg_size(s),
12772                                data, gen_helper_gvec_fmlal_idx_a64);
12773         }
12774         return;
12775 
12776     case 0x08: /* MUL */
12777         if (!is_long && !is_scalar) {
12778             static gen_helper_gvec_3 * const fns[3] = {
12779                 gen_helper_gvec_mul_idx_h,
12780                 gen_helper_gvec_mul_idx_s,
12781                 gen_helper_gvec_mul_idx_d,
12782             };
12783             tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
12784                                vec_full_reg_offset(s, rn),
12785                                vec_full_reg_offset(s, rm),
12786                                is_q ? 16 : 8, vec_full_reg_size(s),
12787                                index, fns[size - 1]);
12788             return;
12789         }
12790         break;
12791 
12792     case 0x10: /* MLA */
12793         if (!is_long && !is_scalar) {
12794             static gen_helper_gvec_4 * const fns[3] = {
12795                 gen_helper_gvec_mla_idx_h,
12796                 gen_helper_gvec_mla_idx_s,
12797                 gen_helper_gvec_mla_idx_d,
12798             };
12799             tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
12800                                vec_full_reg_offset(s, rn),
12801                                vec_full_reg_offset(s, rm),
12802                                vec_full_reg_offset(s, rd),
12803                                is_q ? 16 : 8, vec_full_reg_size(s),
12804                                index, fns[size - 1]);
12805             return;
12806         }
12807         break;
12808 
12809     case 0x14: /* MLS */
12810         if (!is_long && !is_scalar) {
12811             static gen_helper_gvec_4 * const fns[3] = {
12812                 gen_helper_gvec_mls_idx_h,
12813                 gen_helper_gvec_mls_idx_s,
12814                 gen_helper_gvec_mls_idx_d,
12815             };
12816             tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
12817                                vec_full_reg_offset(s, rn),
12818                                vec_full_reg_offset(s, rm),
12819                                vec_full_reg_offset(s, rd),
12820                                is_q ? 16 : 8, vec_full_reg_size(s),
12821                                index, fns[size - 1]);
12822             return;
12823         }
12824         break;
12825     }
12826 
12827     if (size == 3) {
12828         TCGv_i64 tcg_idx = tcg_temp_new_i64();
12829         int pass;
12830 
12831         assert(is_fp && is_q && !is_long);
12832 
12833         read_vec_element(s, tcg_idx, rm, index, MO_64);
12834 
12835         for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
12836             TCGv_i64 tcg_op = tcg_temp_new_i64();
12837             TCGv_i64 tcg_res = tcg_temp_new_i64();
12838 
12839             read_vec_element(s, tcg_op, rn, pass, MO_64);
12840 
12841             switch (16 * u + opcode) {
12842             case 0x05: /* FMLS */
12843                 /* As usual for ARM, separate negation for fused multiply-add */
12844                 gen_helper_vfp_negd(tcg_op, tcg_op);
12845                 /* fall through */
12846             case 0x01: /* FMLA */
12847                 read_vec_element(s, tcg_res, rd, pass, MO_64);
12848                 gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
12849                 break;
12850             case 0x09: /* FMUL */
12851                 gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
12852                 break;
12853             case 0x19: /* FMULX */
12854                 gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
12855                 break;
12856             default:
12857                 g_assert_not_reached();
12858             }
12859 
12860             write_vec_element(s, tcg_res, rd, pass, MO_64);
12861         }
12862 
12863         clear_vec_high(s, !is_scalar, rd);
12864     } else if (!is_long) {
12865         /* 32 bit floating point, or 16 or 32 bit integer.
12866          * For the 16 bit scalar case we use the usual Neon helpers and
12867          * rely on the fact that 0 op 0 == 0 with no side effects.
12868          */
12869         TCGv_i32 tcg_idx = tcg_temp_new_i32();
12870         int pass, maxpasses;
12871 
12872         if (is_scalar) {
12873             maxpasses = 1;
12874         } else {
12875             maxpasses = is_q ? 4 : 2;
12876         }
12877 
12878         read_vec_element_i32(s, tcg_idx, rm, index, size);
12879 
12880         if (size == 1 && !is_scalar) {
12881             /* The simplest way to handle the 16x16 indexed ops is to duplicate
12882              * the index into both halves of the 32 bit tcg_idx and then use
12883              * the usual Neon helpers.
12884              */
12885             tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
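            /*
             * e.g. an index value of 0x0000abcd becomes 0xabcdabcd, so both
             * 16-bit lanes of the Neon helpers see the same multiplier.
             */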
12886         }
12887 
12888         for (pass = 0; pass < maxpasses; pass++) {
12889             TCGv_i32 tcg_op = tcg_temp_new_i32();
12890             TCGv_i32 tcg_res = tcg_temp_new_i32();
12891 
12892             read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
12893 
12894             switch (16 * u + opcode) {
12895             case 0x08: /* MUL */
12896             case 0x10: /* MLA */
12897             case 0x14: /* MLS */
12898             {
12899                 static NeonGenTwoOpFn * const fns[2][2] = {
12900                     { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
12901                     { tcg_gen_add_i32, tcg_gen_sub_i32 },
12902                 };
12903                 NeonGenTwoOpFn *genfn;
12904                 bool is_sub = opcode == 0x4;
12905 
12906                 if (size == 1) {
12907                     gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
12908                 } else {
12909                     tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
12910                 }
12911                 if (opcode == 0x8) {
12912                     break;
12913                 }
12914                 read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
12915                 genfn = fns[size - 1][is_sub];
12916                 genfn(tcg_res, tcg_op, tcg_res);
12917                 break;
12918             }
12919             case 0x05: /* FMLS */
12920             case 0x01: /* FMLA */
12921                 read_vec_element_i32(s, tcg_res, rd, pass,
12922                                      is_scalar ? size : MO_32);
12923                 switch (size) {
12924                 case 1:
12925                     if (opcode == 0x5) {
12926                         /* As usual for ARM, separate negation for fused
12927                          * multiply-add */
12928                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
12929                     }
12930                     if (is_scalar) {
12931                         gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
12932                                                    tcg_res, fpst);
12933                     } else {
12934                         gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
12935                                                     tcg_res, fpst);
12936                     }
12937                     break;
12938                 case 2:
12939                     if (opcode == 0x5) {
12940                         /* As usual for ARM, separate negation for
12941                          * fused multiply-add */
12942                         tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
12943                     }
12944                     gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
12945                                            tcg_res, fpst);
12946                     break;
12947                 default:
12948                     g_assert_not_reached();
12949                 }
12950                 break;
12951             case 0x09: /* FMUL */
12952                 switch (size) {
12953                 case 1:
12954                     if (is_scalar) {
12955                         gen_helper_advsimd_mulh(tcg_res, tcg_op,
12956                                                 tcg_idx, fpst);
12957                     } else {
12958                         gen_helper_advsimd_mul2h(tcg_res, tcg_op,
12959                                                  tcg_idx, fpst);
12960                     }
12961                     break;
12962                 case 2:
12963                     gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
12964                     break;
12965                 default:
12966                     g_assert_not_reached();
12967                 }
12968                 break;
12969             case 0x19: /* FMULX */
12970                 switch (size) {
12971                 case 1:
12972                     if (is_scalar) {
12973                         gen_helper_advsimd_mulxh(tcg_res, tcg_op,
12974                                                  tcg_idx, fpst);
12975                     } else {
12976                         gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
12977                                                   tcg_idx, fpst);
12978                     }
12979                     break;
12980                 case 2:
12981                     gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
12982                     break;
12983                 default:
12984                     g_assert_not_reached();
12985                 }
12986                 break;
12987             case 0x0c: /* SQDMULH */
12988                 if (size == 1) {
12989                     gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
12990                                                tcg_op, tcg_idx);
12991                 } else {
12992                     gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
12993                                                tcg_op, tcg_idx);
12994                 }
12995                 break;
12996             case 0x0d: /* SQRDMULH */
12997                 if (size == 1) {
12998                     gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
12999                                                 tcg_op, tcg_idx);
13000                 } else {
13001                     gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
13002                                                 tcg_op, tcg_idx);
13003                 }
13004                 break;
13005             case 0x1d: /* SQRDMLAH */
13006                 read_vec_element_i32(s, tcg_res, rd, pass,
13007                                      is_scalar ? size : MO_32);
13008                 if (size == 1) {
13009                     gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
13010                                                 tcg_op, tcg_idx, tcg_res);
13011                 } else {
13012                     gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
13013                                                 tcg_op, tcg_idx, tcg_res);
13014                 }
13015                 break;
13016             case 0x1f: /* SQRDMLSH */
13017                 read_vec_element_i32(s, tcg_res, rd, pass,
13018                                      is_scalar ? size : MO_32);
13019                 if (size == 1) {
13020                     gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
13021                                                 tcg_op, tcg_idx, tcg_res);
13022                 } else {
13023                     gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
13024                                                 tcg_op, tcg_idx, tcg_res);
13025                 }
13026                 break;
13027             default:
13028                 g_assert_not_reached();
13029             }
13030 
13031             if (is_scalar) {
13032                 write_fp_sreg(s, rd, tcg_res);
13033             } else {
13034                 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
13035             }
13036         }
13037 
13038         clear_vec_high(s, is_q, rd);
13039     } else {
13040         /* long ops: 16x16->32 or 32x32->64 */
13041         TCGv_i64 tcg_res[2];
13042         int pass;
13043         bool satop = extract32(opcode, 0, 1);
13044         MemOp memop = MO_32;
13045 
13046         if (satop || !u) {
13047             memop |= MO_SIGN;
13048         }
13049 
13050         if (size == 2) {
13051             TCGv_i64 tcg_idx = tcg_temp_new_i64();
13052 
13053             read_vec_element(s, tcg_idx, rm, index, memop);
13054 
13055             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13056                 TCGv_i64 tcg_op = tcg_temp_new_i64();
13057                 TCGv_i64 tcg_passres;
13058                 int passelt;
13059 
13060                 if (is_scalar) {
13061                     passelt = 0;
13062                 } else {
13063                     passelt = pass + (is_q * 2);
13064                 }
13065 
13066                 read_vec_element(s, tcg_op, rn, passelt, memop);
13067 
13068                 tcg_res[pass] = tcg_temp_new_i64();
13069 
13070                 if (opcode == 0xa || opcode == 0xb) {
13071                     /* Non-accumulating ops */
13072                     tcg_passres = tcg_res[pass];
13073                 } else {
13074                     tcg_passres = tcg_temp_new_i64();
13075                 }
13076 
13077                 tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
13078 
13079                 if (satop) {
13080                     /* saturating doubling: x + x with saturation == 2 * x */
13081                     gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
13082                                                       tcg_passres, tcg_passres);
13083                 }
13084 
13085                 if (opcode == 0xa || opcode == 0xb) {
13086                     continue;
13087                 }
13088 
13089                 /* Accumulating op: handle accumulate step */
13090                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13091 
13092                 switch (opcode) {
13093                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13094                     tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13095                     break;
13096                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13097                     tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
13098                     break;
13099                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13100                     tcg_gen_neg_i64(tcg_passres, tcg_passres);
13101                     /* fall through */
13102                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13103                     gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
13104                                                       tcg_res[pass],
13105                                                       tcg_passres);
13106                     break;
13107                 default:
13108                     g_assert_not_reached();
13109                 }
13110             }
13111 
13112             clear_vec_high(s, !is_scalar, rd);
13113         } else {
13114             TCGv_i32 tcg_idx = tcg_temp_new_i32();
13115 
13116             assert(size == 1);
13117             read_vec_element_i32(s, tcg_idx, rm, index, size);
13118 
13119             if (!is_scalar) {
13120                 /* The simplest way to handle the 16x16 indexed ops is to
13121                  * duplicate the index into both halves of the 32 bit tcg_idx
13122                  * and then use the usual Neon helpers.
13123                  */
13124                 tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
13125             }
13126 
13127             for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
13128                 TCGv_i32 tcg_op = tcg_temp_new_i32();
13129                 TCGv_i64 tcg_passres;
13130 
13131                 if (is_scalar) {
13132                     read_vec_element_i32(s, tcg_op, rn, pass, size);
13133                 } else {
13134                     read_vec_element_i32(s, tcg_op, rn,
13135                                          pass + (is_q * 2), MO_32);
13136                 }
13137 
13138                 tcg_res[pass] = tcg_temp_new_i64();
13139 
13140                 if (opcode == 0xa || opcode == 0xb) {
13141                     /* Non-accumulating ops */
13142                     tcg_passres = tcg_res[pass];
13143                 } else {
13144                     tcg_passres = tcg_temp_new_i64();
13145                 }
13146 
13147                 if (memop & MO_SIGN) {
13148                     gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
13149                 } else {
13150                     gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
13151                 }
13152                 if (satop) {
13153                     gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
13154                                                       tcg_passres, tcg_passres);
13155                 }
13156 
13157                 if (opcode == 0xa || opcode == 0xb) {
13158                     continue;
13159                 }
13160 
13161                 /* Accumulating op: handle accumulate step */
13162                 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13163 
13164                 switch (opcode) {
13165                 case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
13166                     gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
13167                                              tcg_passres);
13168                     break;
13169                 case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
13170                     gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
13171                                              tcg_passres);
13172                     break;
13173                 case 0x7: /* SQDMLSL, SQDMLSL2 */
13174                     gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
13175                     /* fall through */
13176                 case 0x3: /* SQDMLAL, SQDMLAL2 */
13177                     gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
13178                                                       tcg_res[pass],
13179                                                       tcg_passres);
13180                     break;
13181                 default:
13182                     g_assert_not_reached();
13183                 }
13184             }
13185 
13186             if (is_scalar) {
13187                 tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
13188             }
13189         }
13190 
13191         if (is_scalar) {
13192             tcg_res[1] = tcg_constant_i64(0);
13193         }
13194 
13195         for (pass = 0; pass < 2; pass++) {
13196             write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
13197         }
13198     }
13199 }
13200 
13201 /* Crypto AES
13202  *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13203  * +-----------------+------+-----------+--------+-----+------+------+
13204  * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13205  * +-----------------+------+-----------+--------+-----+------+------+
13206  */
13207 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
13208 {
13209     int size = extract32(insn, 22, 2);
13210     int opcode = extract32(insn, 12, 5);
13211     int rn = extract32(insn, 5, 5);
13212     int rd = extract32(insn, 0, 5);
13213     int decrypt;
13214     gen_helper_gvec_2 *genfn2 = NULL;
13215     gen_helper_gvec_3 *genfn3 = NULL;
13216 
13217     if (!dc_isar_feature(aa64_aes, s) || size != 0) {
13218         unallocated_encoding(s);
13219         return;
13220     }
13221 
13222     switch (opcode) {
13223     case 0x4: /* AESE */
13224         decrypt = 0;
13225         genfn3 = gen_helper_crypto_aese;
13226         break;
13227     case 0x6: /* AESMC */
13228         decrypt = 0;
13229         genfn2 = gen_helper_crypto_aesmc;
13230         break;
13231     case 0x5: /* AESD */
13232         decrypt = 1;
13233         genfn3 = gen_helper_crypto_aese;
13234         break;
13235     case 0x7: /* AESIMC */
13236         decrypt = 1;
13237         genfn2 = gen_helper_crypto_aesmc;
13238         break;
13239     default:
13240         unallocated_encoding(s);
13241         return;
13242     }
13243 
13244     if (!fp_access_check(s)) {
13245         return;
13246     }
13247     if (genfn2) {
13248         gen_gvec_op2_ool(s, true, rd, rn, decrypt, genfn2);
13249     } else {
13250         gen_gvec_op3_ool(s, true, rd, rd, rn, decrypt, genfn3);
13251     }
13252 }
13253 
13254 /* Crypto three-reg SHA
13255  *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
13256  * +-----------------+------+---+------+---+--------+-----+------+------+
13257  * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
13258  * +-----------------+------+---+------+---+--------+-----+------+------+
13259  */
13260 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
13261 {
13262     int size = extract32(insn, 22, 2);
13263     int opcode = extract32(insn, 12, 3);
13264     int rm = extract32(insn, 16, 5);
13265     int rn = extract32(insn, 5, 5);
13266     int rd = extract32(insn, 0, 5);
13267     gen_helper_gvec_3 *genfn;
13268     bool feature;
13269 
13270     if (size != 0) {
13271         unallocated_encoding(s);
13272         return;
13273     }
13274 
13275     switch (opcode) {
13276     case 0: /* SHA1C */
13277         genfn = gen_helper_crypto_sha1c;
13278         feature = dc_isar_feature(aa64_sha1, s);
13279         break;
13280     case 1: /* SHA1P */
13281         genfn = gen_helper_crypto_sha1p;
13282         feature = dc_isar_feature(aa64_sha1, s);
13283         break;
13284     case 2: /* SHA1M */
13285         genfn = gen_helper_crypto_sha1m;
13286         feature = dc_isar_feature(aa64_sha1, s);
13287         break;
13288     case 3: /* SHA1SU0 */
13289         genfn = gen_helper_crypto_sha1su0;
13290         feature = dc_isar_feature(aa64_sha1, s);
13291         break;
13292     case 4: /* SHA256H */
13293         genfn = gen_helper_crypto_sha256h;
13294         feature = dc_isar_feature(aa64_sha256, s);
13295         break;
13296     case 5: /* SHA256H2 */
13297         genfn = gen_helper_crypto_sha256h2;
13298         feature = dc_isar_feature(aa64_sha256, s);
13299         break;
13300     case 6: /* SHA256SU1 */
13301         genfn = gen_helper_crypto_sha256su1;
13302         feature = dc_isar_feature(aa64_sha256, s);
13303         break;
13304     default:
13305         unallocated_encoding(s);
13306         return;
13307     }
13308 
13309     if (!feature) {
13310         unallocated_encoding(s);
13311         return;
13312     }
13313 
13314     if (!fp_access_check(s)) {
13315         return;
13316     }
13317     gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
13318 }
13319 
13320 /* Crypto two-reg SHA
13321  *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
13322  * +-----------------+------+-----------+--------+-----+------+------+
13323  * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
13324  * +-----------------+------+-----------+--------+-----+------+------+
13325  */
13326 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
13327 {
13328     int size = extract32(insn, 22, 2);
13329     int opcode = extract32(insn, 12, 5);
13330     int rn = extract32(insn, 5, 5);
13331     int rd = extract32(insn, 0, 5);
13332     gen_helper_gvec_2 *genfn;
13333     bool feature;
13334 
13335     if (size != 0) {
13336         unallocated_encoding(s);
13337         return;
13338     }
13339 
13340     switch (opcode) {
13341     case 0: /* SHA1H */
13342         feature = dc_isar_feature(aa64_sha1, s);
13343         genfn = gen_helper_crypto_sha1h;
13344         break;
13345     case 1: /* SHA1SU1 */
13346         feature = dc_isar_feature(aa64_sha1, s);
13347         genfn = gen_helper_crypto_sha1su1;
13348         break;
13349     case 2: /* SHA256SU0 */
13350         feature = dc_isar_feature(aa64_sha256, s);
13351         genfn = gen_helper_crypto_sha256su0;
13352         break;
13353     default:
13354         unallocated_encoding(s);
13355         return;
13356     }
13357 
13358     if (!feature) {
13359         unallocated_encoding(s);
13360         return;
13361     }
13362 
13363     if (!fp_access_check(s)) {
13364         return;
13365     }
13366     gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
13367 }
13368 
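/*
 * RAX1 (SHA3 extension): each 64-bit lane computes n ^ rol64(m, 1).
 * gen_rax1_i64 is the scalar expansion, gen_rax1_vec the vector one, and
 * gen_helper_crypto_rax1 the out-of-line fallback wired up in gen_gvec_rax1.
 */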
13369 static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
13370 {
13371     tcg_gen_rotli_i64(d, m, 1);
13372     tcg_gen_xor_i64(d, d, n);
13373 }
13374 
13375 static void gen_rax1_vec(unsigned vece, TCGv_vec d, TCGv_vec n, TCGv_vec m)
13376 {
13377     tcg_gen_rotli_vec(vece, d, m, 1);
13378     tcg_gen_xor_vec(vece, d, d, n);
13379 }
13380 
13381 void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
13382                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
13383 {
13384     static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
13385     static const GVecGen3 op = {
13386         .fni8 = gen_rax1_i64,
13387         .fniv = gen_rax1_vec,
13388         .opt_opc = vecop_list,
13389         .fno = gen_helper_crypto_rax1,
13390         .vece = MO_64,
13391     };
13392     tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &op);
13393 }
13394 
13395 /* Crypto three-reg SHA512
13396  *  31                   21 20  16 15  14  13 12  11  10  9    5 4    0
13397  * +-----------------------+------+---+---+-----+--------+------+------+
13398  * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
13399  * +-----------------------+------+---+---+-----+--------+------+------+
13400  */
13401 static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
13402 {
13403     int opcode = extract32(insn, 10, 2);
13404     int o = extract32(insn, 14, 1);
13405     int rm = extract32(insn, 16, 5);
13406     int rn = extract32(insn, 5, 5);
13407     int rd = extract32(insn, 0, 5);
13408     bool feature;
13409     gen_helper_gvec_3 *oolfn = NULL;
13410     GVecGen3Fn *gvecfn = NULL;
13411 
13412     if (o == 0) {
13413         switch (opcode) {
13414         case 0: /* SHA512H */
13415             feature = dc_isar_feature(aa64_sha512, s);
13416             oolfn = gen_helper_crypto_sha512h;
13417             break;
13418         case 1: /* SHA512H2 */
13419             feature = dc_isar_feature(aa64_sha512, s);
13420             oolfn = gen_helper_crypto_sha512h2;
13421             break;
13422         case 2: /* SHA512SU1 */
13423             feature = dc_isar_feature(aa64_sha512, s);
13424             oolfn = gen_helper_crypto_sha512su1;
13425             break;
13426         case 3: /* RAX1 */
13427             feature = dc_isar_feature(aa64_sha3, s);
13428             gvecfn = gen_gvec_rax1;
13429             break;
13430         default:
13431             g_assert_not_reached();
13432         }
13433     } else {
13434         switch (opcode) {
13435         case 0: /* SM3PARTW1 */
13436             feature = dc_isar_feature(aa64_sm3, s);
13437             oolfn = gen_helper_crypto_sm3partw1;
13438             break;
13439         case 1: /* SM3PARTW2 */
13440             feature = dc_isar_feature(aa64_sm3, s);
13441             oolfn = gen_helper_crypto_sm3partw2;
13442             break;
13443         case 2: /* SM4EKEY */
13444             feature = dc_isar_feature(aa64_sm4, s);
13445             oolfn = gen_helper_crypto_sm4ekey;
13446             break;
13447         default:
13448             unallocated_encoding(s);
13449             return;
13450         }
13451     }
13452 
13453     if (!feature) {
13454         unallocated_encoding(s);
13455         return;
13456     }
13457 
13458     if (!fp_access_check(s)) {
13459         return;
13460     }
13461 
13462     if (oolfn) {
13463         gen_gvec_op3_ool(s, true, rd, rn, rm, 0, oolfn);
13464     } else {
13465         gen_gvec_fn3(s, true, rd, rn, rm, gvecfn, MO_64);
13466     }
13467 }
13468 
13469 /* Crypto two-reg SHA512
13470  *  31                                     12  11  10  9    5 4    0
13471  * +-----------------------------------------+--------+------+------+
13472  * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
13473  * +-----------------------------------------+--------+------+------+
13474  */
13475 static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
13476 {
13477     int opcode = extract32(insn, 10, 2);
13478     int rn = extract32(insn, 5, 5);
13479     int rd = extract32(insn, 0, 5);
13480     bool feature;
13481 
13482     switch (opcode) {
13483     case 0: /* SHA512SU0 */
13484         feature = dc_isar_feature(aa64_sha512, s);
13485         break;
13486     case 1: /* SM4E */
13487         feature = dc_isar_feature(aa64_sm4, s);
13488         break;
13489     default:
13490         unallocated_encoding(s);
13491         return;
13492     }
13493 
13494     if (!feature) {
13495         unallocated_encoding(s);
13496         return;
13497     }
13498 
13499     if (!fp_access_check(s)) {
13500         return;
13501     }
13502 
13503     switch (opcode) {
13504     case 0: /* SHA512SU0 */
13505         gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
13506         break;
13507     case 1: /* SM4E */
13508         gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
13509         break;
13510     default:
13511         g_assert_not_reached();
13512     }
13513 }
13514 
13515 /* Crypto four-register
13516  *  31               23 22 21 20  16 15  14  10 9    5 4    0
13517  * +-------------------+-----+------+---+------+------+------+
13518  * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
13519  * +-------------------+-----+------+---+------+------+------+
13520  */
13521 static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
13522 {
13523     int op0 = extract32(insn, 21, 2);
13524     int rm = extract32(insn, 16, 5);
13525     int ra = extract32(insn, 10, 5);
13526     int rn = extract32(insn, 5, 5);
13527     int rd = extract32(insn, 0, 5);
13528     bool feature;
13529 
13530     switch (op0) {
13531     case 0: /* EOR3 */
13532     case 1: /* BCAX */
13533         feature = dc_isar_feature(aa64_sha3, s);
13534         break;
13535     case 2: /* SM3SS1 */
13536         feature = dc_isar_feature(aa64_sm3, s);
13537         break;
13538     default:
13539         unallocated_encoding(s);
13540         return;
13541     }
13542 
13543     if (!feature) {
13544         unallocated_encoding(s);
13545         return;
13546     }
13547 
13548     if (!fp_access_check(s)) {
13549         return;
13550     }
13551 
13552     if (op0 < 2) {
13553         TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
13554         int pass;
13555 
13556         tcg_op1 = tcg_temp_new_i64();
13557         tcg_op2 = tcg_temp_new_i64();
13558         tcg_op3 = tcg_temp_new_i64();
13559         tcg_res[0] = tcg_temp_new_i64();
13560         tcg_res[1] = tcg_temp_new_i64();
13561 
13562         for (pass = 0; pass < 2; pass++) {
13563             read_vec_element(s, tcg_op1, rn, pass, MO_64);
13564             read_vec_element(s, tcg_op2, rm, pass, MO_64);
13565             read_vec_element(s, tcg_op3, ra, pass, MO_64);
13566 
13567             if (op0 == 0) {
13568                 /* EOR3 */
13569                 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
13570             } else {
13571                 /* BCAX */
13572                 tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
13573             }
13574             tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
13575         }
13576         write_vec_element(s, tcg_res[0], rd, 0, MO_64);
13577         write_vec_element(s, tcg_res[1], rd, 1, MO_64);
13578     } else {
13579         TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
13580 
13581         tcg_op1 = tcg_temp_new_i32();
13582         tcg_op2 = tcg_temp_new_i32();
13583         tcg_op3 = tcg_temp_new_i32();
13584         tcg_res = tcg_temp_new_i32();
13585         tcg_zero = tcg_constant_i32(0);
13586 
13587         read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
13588         read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
13589         read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
13590 
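        /*
         * SM3SS1 operates on the top 32-bit element of each register:
         * result = rol32(rol32(Vn.S[3], 12) + Vm.S[3] + Va.S[3], 7), written
         * to Vd.S[3] with the lower elements cleared.  The rotate-rights by
         * 20 and 25 below are the equivalent left rotations by 12 and 7.
         */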
13591         tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
13592         tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
13593         tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
13594         tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
13595 
13596         write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
13597         write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
13598         write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
13599         write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
13600     }
13601 }
13602 
13603 /* Crypto XAR
13604  *  31                   21 20  16 15    10 9    5 4    0
13605  * +-----------------------+------+--------+------+------+
13606  * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
13607  * +-----------------------+------+--------+------+------+
13608  */
13609 static void disas_crypto_xar(DisasContext *s, uint32_t insn)
13610 {
13611     int rm = extract32(insn, 16, 5);
13612     int imm6 = extract32(insn, 10, 6);
13613     int rn = extract32(insn, 5, 5);
13614     int rd = extract32(insn, 0, 5);
13615 
13616     if (!dc_isar_feature(aa64_sha3, s)) {
13617         unallocated_encoding(s);
13618         return;
13619     }
13620 
13621     if (!fp_access_check(s)) {
13622         return;
13623     }
13624 
13625     gen_gvec_xar(MO_64, vec_full_reg_offset(s, rd),
13626                  vec_full_reg_offset(s, rn),
13627                  vec_full_reg_offset(s, rm), imm6, 16,
13628                  vec_full_reg_size(s));
13629 }
13630 
13631 /* Crypto three-reg imm2
13632  *  31                   21 20  16 15  14 13 12  11  10  9    5 4    0
13633  * +-----------------------+------+-----+------+--------+------+------+
13634  * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
13635  * +-----------------------+------+-----+------+--------+------+------+
13636  */
13637 static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
13638 {
13639     static gen_helper_gvec_3 * const fns[4] = {
13640         gen_helper_crypto_sm3tt1a, gen_helper_crypto_sm3tt1b,
13641         gen_helper_crypto_sm3tt2a, gen_helper_crypto_sm3tt2b,
13642     };
13643     int opcode = extract32(insn, 10, 2);
13644     int imm2 = extract32(insn, 12, 2);
13645     int rm = extract32(insn, 16, 5);
13646     int rn = extract32(insn, 5, 5);
13647     int rd = extract32(insn, 0, 5);
13648 
13649     if (!dc_isar_feature(aa64_sm3, s)) {
13650         unallocated_encoding(s);
13651         return;
13652     }
13653 
13654     if (!fp_access_check(s)) {
13655         return;
13656     }
13657 
13658     gen_gvec_op3_ool(s, true, rd, rn, rm, imm2, fns[opcode]);
13659 }
13660 
13661 /* C3.6 Data processing - SIMD, inc Crypto
13662  *
13663  * As the decode gets a little complex, we use a table-based approach
13664  * for this part of the decode.
13665  */
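/*
 * An instruction matches a table entry when (insn & mask) == pattern.
 * lookup_disas_fn() tries the entries in order and returns the disas_fn of
 * the first match, or NULL if it reaches the all-zero terminator, so more
 * specific patterns must come before less specific ones (see the
 * simd_mod_imm / simd_shift_imm note below).
 */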
13666 static const AArch64DecodeTable data_proc_simd[] = {
13667     /* pattern  ,  mask     ,  fn                        */
13668     { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
13669     { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
13670     { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
13671     { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
13672     { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
13673     { 0x0e000400, 0x9fe08400, disas_simd_copy },
13674     { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
13675     /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
13676     { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
13677     { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
13678     { 0x0e000000, 0xbf208c00, disas_simd_tb },
13679     { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
13680     { 0x2e000000, 0xbf208400, disas_simd_ext },
13681     { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
13682     { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
13683     { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
13684     { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
13685     { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
13686     { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
13687     { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
13688     { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
13689     { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
13690     { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
13691     { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
13692     { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
13693     { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
13694     { 0xce000000, 0xff808000, disas_crypto_four_reg },
13695     { 0xce800000, 0xffe00000, disas_crypto_xar },
13696     { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
13697     { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
13698     { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
13699     { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
13700     { 0x00000000, 0x00000000, NULL }
13701 };
13702 
13703 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
13704 {
13705     /* Note that this is called with all non-FP cases from
13706      * table C3-6 so it must UNDEF for entries not specifically
13707      * allocated to instructions in that table.
13708      */
13709     AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
13710     if (fn) {
13711         fn(s, insn);
13712     } else {
13713         unallocated_encoding(s);
13714     }
13715 }
13716 
13717 /* C3.6 Data processing - SIMD and floating point */
13718 static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
13719 {
13720     if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
13721         disas_data_proc_fp(s, insn);
13722     } else {
13723         /* SIMD, including crypto */
13724         disas_data_proc_simd(s, insn);
13725     }
13726 }
13727 
13728 static bool trans_OK(DisasContext *s, arg_OK *a)
13729 {
13730     return true;
13731 }
13732 
13733 static bool trans_FAIL(DisasContext *s, arg_OK *a)
13734 {
13735     s->is_nonstreaming = true;
13736     return true;
13737 }
13738 
13739 /**
13740  * is_guarded_page:
13741  * @env: The cpu environment
13742  * @s: The DisasContext
13743  *
13744  * Return true if the page is guarded.
13745  */
13746 static bool is_guarded_page(CPUARMState *env, DisasContext *s)
13747 {
13748     uint64_t addr = s->base.pc_first;
13749 #ifdef CONFIG_USER_ONLY
13750     return page_get_flags(addr) & PAGE_BTI;
13751 #else
13752     CPUTLBEntryFull *full;
13753     void *host;
13754     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
13755     int flags;
13756 
13757     /*
13758      * We test this immediately after reading an insn, which means
13759      * that the TLB entry must be present and valid, and thus this
13760      * access will never raise an exception.
13761      */
13762     flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
13763                               false, &host, &full, 0);
13764     assert(!(flags & TLB_INVALID_MASK));
13765 
13766     return full->guarded;
13767 #endif
13768 }
13769 
13770 /**
13771  * btype_destination_ok:
13772  * @insn: The instruction at the branch destination
13773  * @bt: SCTLR_ELx.BT
13774  * @btype: PSTATE.BTYPE, and is non-zero
13775  *
13776  * On a guarded page, there are a limited number of insns
13777  * that may be present at the branch target:
13778  *   - branch target identifiers,
13779  *   - paciasp, pacibsp,
13780  *   - BRK insn
13781  *   - HLT insn
13782  * Anything else causes a Branch Target Exception.
13783  *
13784  * Return true if the branch is compatible, false to raise BTITRAP.
13785  */
13786 static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
13787 {
13788     if ((insn & 0xfffff01fu) == 0xd503201fu) {
13789         /* HINT space */
13790         switch (extract32(insn, 5, 7)) {
13791         case 0b011001: /* PACIASP */
13792         case 0b011011: /* PACIBSP */
13793             /*
13794              * If SCTLR_ELx.BT, then PACI*SP are not compatible
13795              * with btype == 3.  Otherwise all btype are ok.
13796              */
13797             return !bt || btype != 3;
13798         case 0b100000: /* BTI */
13799             /* Not compatible with any btype.  */
13800             return false;
13801         case 0b100010: /* BTI c */
13802             /* Not compatible with btype == 3 */
13803             return btype != 3;
13804         case 0b100100: /* BTI j */
13805             /* Not compatible with btype == 2 */
13806             return btype != 2;
13807         case 0b100110: /* BTI jc */
13808             /* Compatible with any btype.  */
13809             return true;
13810         }
13811     } else {
13812         switch (insn & 0xffe0001fu) {
13813         case 0xd4200000u: /* BRK */
13814         case 0xd4400000u: /* HLT */
13815             /* Give priority to the breakpoint exception.  */
13816             return true;
13817         }
13818     }
13819     return false;
13820 }
13821 
13822 /* C3.1 A64 instruction index by encoding */
13823 static void disas_a64_legacy(DisasContext *s, uint32_t insn)
13824 {
13825     switch (extract32(insn, 25, 4)) {
13826     case 0x5:
13827     case 0xd:      /* Data processing - register */
13828         disas_data_proc_reg(s, insn);
13829         break;
13830     case 0x7:
13831     case 0xf:      /* Data processing - SIMD and floating point */
13832         disas_data_proc_simd_fp(s, insn);
13833         break;
13834     default:
13835         unallocated_encoding(s);
13836         break;
13837     }
13838 }
13839 
13840 static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
13841                                           CPUState *cpu)
13842 {
13843     DisasContext *dc = container_of(dcbase, DisasContext, base);
13844     CPUARMState *env = cpu->env_ptr;
13845     ARMCPU *arm_cpu = env_archcpu(env);
13846     CPUARMTBFlags tb_flags = arm_tbflags_from_tb(dc->base.tb);
13847     int bound, core_mmu_idx;
13848 
13849     dc->isar = &arm_cpu->isar;
13850     dc->condjmp = 0;
13851     dc->pc_save = dc->base.pc_first;
13852     dc->aarch64 = true;
13853     dc->thumb = false;
13854     dc->sctlr_b = 0;
13855     dc->be_data = EX_TBFLAG_ANY(tb_flags, BE_DATA) ? MO_BE : MO_LE;
13856     dc->condexec_mask = 0;
13857     dc->condexec_cond = 0;
13858     core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
13859     dc->mmu_idx = core_to_aa64_mmu_idx(core_mmu_idx);
13860     dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
13861     dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
13862     dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
13863     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
13864 #if !defined(CONFIG_USER_ONLY)
13865     dc->user = (dc->current_el == 0);
13866 #endif
13867     dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
13868     dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
13869     dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
13870     dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE);
13871     dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC);
13872     dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET);
13873     dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
13874     dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
13875     dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
13876     dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
13877     dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
13878     dc->bt = EX_TBFLAG_A64(tb_flags, BT);
13879     dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
13880     dc->unpriv = EX_TBFLAG_A64(tb_flags, UNPRIV);
13881     dc->ata = EX_TBFLAG_A64(tb_flags, ATA);
13882     dc->mte_active[0] = EX_TBFLAG_A64(tb_flags, MTE_ACTIVE);
13883     dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE);
13884     dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM);
13885     dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA);
13886     dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING);
13887     dc->naa = EX_TBFLAG_A64(tb_flags, NAA);
13888     dc->vec_len = 0;
13889     dc->vec_stride = 0;
13890     dc->cp_regs = arm_cpu->cp_regs;
13891     dc->features = env->features;
13892     dc->dcz_blocksize = arm_cpu->dcz_blocksize;
13893 
13894 #ifdef CONFIG_USER_ONLY
13895     /* In sve_probe_page, we assume TBI is enabled. */
13896     tcg_debug_assert(dc->tbid & 1);
13897 #endif
13898 
13899     dc->lse2 = dc_isar_feature(aa64_lse2, dc);
13900 
13901     /* Single step state. The code-generation logic here is:
13902      *  SS_ACTIVE == 0:
13903      *   generate code with no special handling for single-stepping (except
13904      *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
13905      *   this happens anyway because those changes are all system register or
13906      *   PSTATE writes).
13907      *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
13908      *   emit code for one insn
13909      *   emit code to clear PSTATE.SS
13910      *   emit code to generate software step exception for completed step
13911      *   end TB (as usual for having generated an exception)
13912      *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
13913      *   emit code to generate a software step exception
13914      *   end the TB
13915      */
13916     dc->ss_active = EX_TBFLAG_ANY(tb_flags, SS_ACTIVE);
13917     dc->pstate_ss = EX_TBFLAG_ANY(tb_flags, PSTATE__SS);
13918     dc->is_ldex = false;
13919 
13920     /* Bound the number of insns to execute to those left on the page.  */
13921     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
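    /*
     * -(pc_first | TARGET_PAGE_MASK) is the number of bytes from pc_first
     * to the end of its page, and each A64 insn is 4 bytes.  For example,
     * assuming 4 KiB pages, a TB starting 8 bytes before a page boundary is
     * bounded to 2 insns.
     */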
13922 
13923     /* If architectural single step active, limit to 1.  */
13924     if (dc->ss_active) {
13925         bound = 1;
13926     }
13927     dc->base.max_insns = MIN(dc->base.max_insns, bound);
13928 }
13929 
13930 static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
13931 {
13932 }
13933 
13934 static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13935 {
13936     DisasContext *dc = container_of(dcbase, DisasContext, base);
13937     target_ulong pc_arg = dc->base.pc_next;
13938 
13939     if (tb_cflags(dcbase->tb) & CF_PCREL) {
13940         pc_arg &= ~TARGET_PAGE_MASK;
13941     }
13942     tcg_gen_insn_start(pc_arg, 0, 0);
13943     dc->insn_start = tcg_last_op();
13944 }
13945 
13946 static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13947 {
13948     DisasContext *s = container_of(dcbase, DisasContext, base);
13949     CPUARMState *env = cpu->env_ptr;
13950     uint64_t pc = s->base.pc_next;
13951     uint32_t insn;
13952 
13953     /* Singlestep exceptions have the highest priority. */
13954     if (s->ss_active && !s->pstate_ss) {
13955         /* Singlestep state is Active-pending.
13956          * If we're in this state at the start of a TB then either
13957          *  a) we just took an exception to an EL which is being debugged
13958          *     and this is the first insn in the exception handler
13959          *  b) debug exceptions were masked and we just unmasked them
13960          *     without changing EL (eg by clearing PSTATE.D)
13961          * In either case we're going to take a swstep exception in the
13962          * "did not step an insn" case, and so the syndrome ISV and EX
13963          * bits should be zero.
13964          */
13965         assert(s->base.num_insns == 1);
13966         gen_swstep_exception(s, 0, 0);
13967         s->base.is_jmp = DISAS_NORETURN;
13968         s->base.pc_next = pc + 4;
13969         return;
13970     }
13971 
13972     if (pc & 3) {
13973         /*
13974          * PC alignment fault.  This has priority over the instruction abort
13975          * that we would receive from a translation fault via arm_ldl_code.
13976          * This should only be possible after an indirect branch, at the
13977          * start of the TB.
13978          */
13979         assert(s->base.num_insns == 1);
13980         gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
13981         s->base.is_jmp = DISAS_NORETURN;
13982         s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
13983         return;
13984     }
13985 
13986     s->pc_curr = pc;
13987     insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
13988     s->insn = insn;
13989     s->base.pc_next = pc + 4;
13990 
13991     s->fp_access_checked = false;
13992     s->sve_access_checked = false;
13993 
13994     if (s->pstate_il) {
13995         /*
13996          * Illegal execution state. This has priority over BTI
13997          * exceptions, but comes after instruction abort exceptions.
13998          */
13999         gen_exception_insn(s, 0, EXCP_UDEF, syn_illegalstate());
14000         return;
14001     }
14002 
14003     if (dc_isar_feature(aa64_bti, s)) {
14004         if (s->base.num_insns == 1) {
14005             /*
14006              * At the first insn of the TB, compute s->guarded_page.
14007              * We delayed computing this until successfully reading
14008              * the first insn of the TB, above.  This (mostly) ensures
14009              * that the softmmu tlb entry has been populated, and the
14010              * page table GP bit is available.
14011              *
14012              * Note that we need to compute this even if btype == 0,
14013              * because this value is used for BR instructions later
14014              * where ENV is not available.
14015              */
14016             s->guarded_page = is_guarded_page(env, s);
14017 
14018             /* First insn can have btype set to non-zero.  */
14019             tcg_debug_assert(s->btype >= 0);
14020 
14021             /*
14022              * Note that the Branch Target Exception has fairly high
14023              * priority -- below debugging exceptions but above almost
14024              * everything else.  This allows us to handle it now instead
14025              * of waiting until the insn is otherwise decoded.
14026              */
14027             if (s->btype != 0
14028                 && s->guarded_page
14029                 && !btype_destination_ok(insn, s->bt, s->btype)) {
14030                 gen_exception_insn(s, 0, EXCP_UDEF, syn_btitrap(s->btype));
14031                 return;
14032             }
14033         } else {
14034             /* Not the first insn: btype must be 0.  */
14035             tcg_debug_assert(s->btype == 0);
14036         }
14037     }
14038 
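          /*
           * If streaming SVE mode traps non-streaming insns, run the
           * generated FA64 decoder first: it only flags the insn as
           * non-streaming; the SME trap itself is raised later by the
           * access check routines.
           */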
14039     s->is_nonstreaming = false;
14040     if (s->sme_trap_nonstreaming) {
14041         disas_sme_fa64(s, insn);
14042     }
14043 
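          /*
           * Try the generated decodetree decoders first; anything they do
           * not match falls back to the legacy hand-written decoder.
           */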
14044     if (!disas_a64(s, insn) &&
14045         !disas_sme(s, insn) &&
14046         !disas_sve(s, insn)) {
14047         disas_a64_legacy(s, insn);
14048     }
14049 
14050     /*
14051      * After execution of most insns, btype is reset to 0.
14052      * Note that we set btype == -1 when the insn sets btype.
14053      */
14054     if (s->btype > 0 && s->base.is_jmp != DISAS_NORETURN) {
14055         reset_btype(s);
14056     }
14057 }
14058 
14059 static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
14060 {
14061     DisasContext *dc = container_of(dcbase, DisasContext, base);
14062 
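          /*
           * In the calls below, 4 is the byte length of the insn that ended
           * the TB; gen_a64_update_pc() advances the PC past it.
           */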
14063     if (unlikely(dc->ss_active)) {
14064         /* Note that this means single-stepping a WFI doesn't halt the CPU.
14065          * For conditional branch insns this is harmless unreachable code, as
14066          * gen_goto_tb() has already handled emitting the debug exception
14067          * (and thus a tb-jump is not possible when single-stepping).
14068          */
14069         switch (dc->base.is_jmp) {
14070         default:
14071             gen_a64_update_pc(dc, 4);
14072             /* fall through */
14073         case DISAS_EXIT:
14074         case DISAS_JUMP:
14075             gen_step_complete_exception(dc);
14076             break;
14077         case DISAS_NORETURN:
14078             break;
14079         }
14080     } else {
14081         switch (dc->base.is_jmp) {
14082         case DISAS_NEXT:
14083         case DISAS_TOO_MANY:
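                  /* Chain to the TB for the sequentially next insn (pc_curr + 4). */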
14084             gen_goto_tb(dc, 1, 4);
14085             break;
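              /* Anything not handled explicitly: write back the PC and exit. */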
14086         default:
14087         case DISAS_UPDATE_EXIT:
14088             gen_a64_update_pc(dc, 4);
14089             /* fall through */
14090         case DISAS_EXIT:
14091             tcg_gen_exit_tb(NULL, 0);
14092             break;
14093         case DISAS_UPDATE_NOCHAIN:
14094             gen_a64_update_pc(dc, 4);
14095             /* fall through */
14096         case DISAS_JUMP:
14097             tcg_gen_lookup_and_goto_ptr();
14098             break;
14099         case DISAS_NORETURN:
14100         case DISAS_SWI:
14101             break;
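              /*
               * WFE and YIELD go back to the main loop via their helpers so
               * another vCPU can be scheduled; the PC must first be advanced
               * past the hint insn.
               */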
14102         case DISAS_WFE:
14103             gen_a64_update_pc(dc, 4);
14104             gen_helper_wfe(cpu_env);
14105             break;
14106         case DISAS_YIELD:
14107             gen_a64_update_pc(dc, 4);
14108             gen_helper_yield(cpu_env);
14109             break;
14110         case DISAS_WFI:
14111             /*
14112              * This is a special case because we don't want to just halt
14113              * the CPU if trying to debug across a WFI.
14114              */
14115             gen_a64_update_pc(dc, 4);
14116             gen_helper_wfi(cpu_env, tcg_constant_i32(4));
14117             /*
14118              * The helper doesn't necessarily throw an exception, but we
14119              * must go back to the main loop to check for interrupts anyway.
14120              */
14121             tcg_gen_exit_tb(NULL, 0);
14122             break;
14123         }
14124     }
14125 }
14126 
14127 static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
14128                                  CPUState *cpu, FILE *logfile)
14129 {
14130     DisasContext *dc = container_of(dcbase, DisasContext, base);
14131 
14132     fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
14133     target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
14134 }
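      /* Hooks invoked by the generic translator loop for AArch64 TBs. */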
14135 
14136 const TranslatorOps aarch64_translator_ops = {
14137     .init_disas_context = aarch64_tr_init_disas_context,
14138     .tb_start           = aarch64_tr_tb_start,
14139     .insn_start         = aarch64_tr_insn_start,
14140     .translate_insn     = aarch64_tr_translate_insn,
14141     .tb_stop            = aarch64_tr_tb_stop,
14142     .disas_log          = aarch64_tr_disas_log,
14143 };
14144